/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */


#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <linux/kthread.h>
#include <libcfs/libcfs.h>

/* XXX move things up to the top, comment */
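/* For each trace-context type, a pointer to an NR_CPUS-sized array of
 * per-CPU trace buffers (iterated with cfs_tcd_for_each()). */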
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}

static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
        struct page            *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);
        return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}

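/* Preallocate trace pages onto 'stock' with the given gfp mask until the
 * buffer holds TCD_STOCK_PAGES of them; returns the number actually
 * allocated.  The stock lets cfs_trace_get_tage_try() usually avoid a
 * GFP_ATOMIC allocation in contexts where that is likely to fail. */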
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
                           struct list_head *stock)
{
        int i;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
                struct cfs_trace_page *tage;

                tage = cfs_tage_alloc(gfp);
                if (tage == NULL)
                        break;
                list_add_tail(&tage->linkage, stock);
        }
        return i;
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
                                if ((!memory_pressure_get() ||
                                     in_interrupt()) && printk_ratelimit())
                                        printk(KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}

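/* Drop roughly 10% of the oldest pages from a trace buffer the debug
 * daemon has fallen behind on, moving them onto the per-CPU daemon list
 * so they can still be dumped on an LBUG. */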
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(KERN_WARNING "debug daemon buffer overflowed; "
                        "discarding 10%% of pages (%d of %ld)\n",
                        pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_SIZE) {
                printk(KERN_ERR
                       "cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}

int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        va_list args;
        int     rc;

        va_start(args, format);
        rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
        va_end(args);

        return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);

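/* Format a debug message into the current CPU's trace buffer and,
 * depending on 'mask' and the rate limiter in msgdata->msg_cdls, echo it
 * to the console.  Each record is laid out in the page as
 * [ptldebug_header][file\0][function\0][formatted text], with the binary
 * header only written when libcfs_debug_binary is set.  Returns 0 if the
 * message reached the console and 1 if console output was skipped. */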
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                       const char *format1, va_list args,
                       const char *format2, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header     header = {0};
        struct cfs_trace_page     *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char                      *string_buf = NULL;
        char                      *debug_buf;
        int                        known_size;
        int                        needed = 85; /* average message length */
        int                        max_nob;
        va_list                    ap;
        int                        i;
        int                        remain;
        int                        mask = msgdata->msg_mask;
        char                      *file = (char *)msgdata->msg_file;
        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU.  This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled. */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (tcd == NULL)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        known_size = strlen(file) + 1;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * The loop runs twice because vsnprintf() returns the real size
         * required for the output _without_ the terminating NUL, so a
         * second pass can retry with enough room if 'needed' was too
         * small for this format.
         */
        for (i = 0; i < 2; i++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                                        tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %d\n",
                               max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf + needed, remain,
                                            format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* everything fit; printing succeeded */
                        break;
        }

        if (*(string_buf + needed - 1) != '\n')
                printk(KERN_INFO "format at %s:%d:%s doesn't end in "
                       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strcpy(debug_buf, msgdata->msg_fn);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
                                                       libcfs_console_max_delay
                                                       + cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf,
                                           CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                           format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf + needed, remain,
                                                    format2, ap);
                                va_end(ap);
                        }
                }
                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                put_cpu();
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                put_cpu();
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);

void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        smp_mb();

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");

        /* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic.  If this isn't true for some
         * arch, this will have to be implemented separately in each arch.  */
        int                        i;
        int                        j;
        struct cfs_trace_cpu_data *tcd;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages,
                                                &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}

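/* Pull the trace pages (and, if requested, the daemon pages) off every
 * CPU's buffers onto pc->pc_pages, taking each buffer's lock via
 * cfs_tcd_for_each_type_lock(). */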
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                        &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}

static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}

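/* Dump every collected trace record straight to the console at D_EMERG,
 * walking the [header][file\0][fn\0][text] layout written by
 * libcfs_debug_vmsg2(), then free the pages. */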
void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;
                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

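/* Write every trace page (daemon ring included) to 'filename', which
 * must not already exist (O_CREAT|O_EXCL), then fsync it.  Driven by
 * user requests via cfs_trace_dump_debug_buffer_usrstr(). */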
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection  pc;
        struct file             *filp;
        struct cfs_trace_page   *tage;
        struct cfs_trace_page   *tmp;
        mm_segment_t            __oldfs;
        char                    *buf;
        int rc;

        cfs_tracefile_write_lock();

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
                       filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }
        __oldfs = get_fs();
        set_fs(get_ds());

        /* ok, for now, just write the pages.  in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                buf = kmap(tage->page);
                rc = vfs_write(filp, (__force const char __user *)buf,
                               tage->used, &filp->f_pos);
                kunmap(tage->page);
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "wanted to write %u but wrote "
                               "%d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
        set_fs(__oldfs);
        rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
        if (rc)
                printk(KERN_ERR "sync returns %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        cfs_tracefile_write_unlock();
        return rc;
}

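/* Discard all trace pages, daemon ring included, freeing the memory
 * without writing anything out. */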
void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

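/* Copy a string from userspace into 'knl_buffer', strip any trailing
 * whitespace and NUL-terminate it.  Returns 0 on success, -EOVERFLOW if
 * the string cannot fit (terminator included), -EINVAL if the result
 * would be empty, and -EFAULT on a bad user pointer. */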
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char __user *usr_buffer, int usr_buffer_nob)
{
        int    nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        /* strip trailing whitespace; '> 0' (not '>= 0') so an
         * all-whitespace buffer can't be read at index -1 */
        while (nob-- > 0)
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n", for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int   nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_SIZE)        /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
        char         *str;
        int           rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
        rc = cfs_tracefile_dump_all_pages(str);
out:
        kfree(str);
        return rc;
}

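/* Drive the debug daemon from a command string:
 *   "stop"   - stop the daemon thread and forget the output file;
 *   "size=N" - set the output file size limit to N MB (values outside
 *              10..20480 fall back to CFS_TRACEFILE_SIZE);
 *   "/path"  - set the output file and start the daemon thread.
 * Anything else returns -EINVAL (-ENAMETOOLONG if the path is too
 * long for cfs_tracefile). */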
int cfs_trace_daemon_command(char *str)
{
        int       rc = 0;

        cfs_tracefile_write_lock();

        if (strcmp(str, "stop") == 0) {
                cfs_tracefile_write_unlock();
                cfs_trace_stop_thread();
                cfs_tracefile_write_lock();
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
                if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
                        cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                else
                        cfs_tracefile_size <<= 20;

        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                printk(KERN_INFO
                       "Lustre: debug daemon will attempt to start writing "
                       "to %s (%lukB max)\n", cfs_tracefile,
                       (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        cfs_tracefile_write_unlock();
        return rc;
}

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        kfree(str);
        return rc;
}

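/* Resize the trace buffers to 'mb' megabytes in total, clamped to
 * [num_possible_cpus(), cfs_trace_max_debug_mb()].  The total is split
 * evenly across CPUs, then among context types according to each
 * buffer's tcd_pages_factor percentage. */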
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        int pages;
        int limit = cfs_trace_max_debug_mb();
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too small for debug buffer size, "
                       "setting it to %d MB.\n", mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too large for debug buffer size, "
                       "setting it to %d MB.\n", mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        cfs_tracefile_write_lock();

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        cfs_tracefile_write_unlock();

        return 0;
}

int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        cfs_tracefile_read_lock();

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        cfs_tracefile_read_unlock();

        return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}

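/* Body of the "ktracefiled" debug daemon: once a second, or when woken
 * by cfs_trace_get_tage_try(), collect the trace pages from all CPUs,
 * append them to cfs_tracefile (wrapping at cfs_tracefile_size) and park
 * what was written on the per-CPU daemon ring for later LBUG dumps; on a
 * short write the pages go back where they came from. */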
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        mm_segment_t __oldfs;
        struct file *filp;
        char *buf;
        int last_loop = 0;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml?  what on earth is going on? */

        complete(&tctl->tctl_start);

        while (1) {
                wait_queue_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                cfs_tracefile_read_lock();
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                printk(KERN_WARNING "couldn't open %s: "
                                       "%d\n", cfs_tracefile, rc);
                        }
                }
                cfs_tracefile_read_unlock();
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }
                __oldfs = get_fs();
                set_fs(get_ds());

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        struct dentry *de = file_dentry(filp);
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > i_size_read(de->d_inode))
                                f_pos = i_size_read(de->d_inode);

                        buf = kmap(tage->page);
                        rc = vfs_write(filp, (__force const char __user *)buf,
                                       tage->used, &f_pos);
                        kunmap(tage->page);
                        if (rc != (int)tage->used) {
                                printk(KERN_WARNING "wanted to write %u "
                                       "but wrote %d\n", tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }
                set_fs(__oldfs);

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        printk(KERN_ALERT "Lustre: trace pages aren't "
                               "empty\n");
                        printk(KERN_ERR "total cpus(%d): ",
                               num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        printk(KERN_ERR "%d(on) ", i);
                                else
                                        printk(KERN_ERR "%d(off) ", i);
                        printk(KERN_ERR "\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                printk(KERN_ERR "page %d belongs to cpu "
                                       "%d\n", ++i, tage->cpu);
                        printk(KERN_ERR "There are %d pages unwritten\n",
                               i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}

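/* Start the debug daemon thread if it isn't already running; returns
 * -ECHILD if kthread_run() fails.  Serialized against stop by
 * cfs_trace_thread_mutex. */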
int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}

void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                printk(KERN_INFO
                       "Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}

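/* Allocate the per-architecture trace state and initialize every per-CPU
 * buffer, giving each its share of 'max_pages' according to its
 * tcd_pages_factor percentage. */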
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int     i;
        int     j;
        int     rc;
        int     factor;

        rc = cfs_tracefile_init_arch();
        if (rc != 0)
                return rc;

        cfs_tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
                factor = tcd->tcd_pages_factor;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }
        return 0;
}

static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}

static void cfs_trace_cleanup(void)
{
        struct page_collection pc;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}