+ struct task_struct *dumper;
+
+ ENTRY;
+
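+ /* Only one dump at a time: if the mutex is already held, another
+ * thread is dumping and we simply return.
+ */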
+ if (mutex_trylock(&libcfs_debug_dumplog_lock) == 0)
+ return;
+
+ /* If a previous call was interrupted, debug_complete->done
+ * might be elevated, and so we won't actually wait here.
+ * So we reinit the completion to ensure we wait for
+ * one thread to complete, though it might not be the one
+ * we start if there are overlapping threads.
+ */
+ reinit_completion(&debug_complete);
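+ /* The caller's pid rides along as the thread argument so the dumper
+ * can tag the dump (e.g. in the log file name).
+ */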
+ dumper = kthread_run(libcfs_debug_dumplog_thread,
+ (void *)(long)current->pid,
+ "libcfs_debug_dumper");
+ if (IS_ERR(dumper))
+ pr_err("LustreError: cannot start log dump thread: rc = %ld\n",
+ PTR_ERR(dumper));
+ else
+ wait_for_completion_interruptible(&debug_complete);
+
+ mutex_unlock(&libcfs_debug_dumplog_lock);
+}
+EXPORT_SYMBOL(libcfs_debug_dumplog);
+
+/* coverity[+kill] */
+void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
+{
+ libcfs_catastrophe = 1;
+ libcfs_debug_msg(msgdata, "LBUG\n");
+
+ if (in_interrupt()) {
+ panic("LBUG in interrupt.\n");
+ /* not reached */
+ }
+
+ libcfs_debug_dumpstack(NULL);
+ if (libcfs_panic_on_lbug)
+ panic("LBUG");
+ else
+ libcfs_debug_dumplog();
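+ /* Park this task forever so the crashed context remains available
+ * for inspection.
+ */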
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (1)
+ schedule();
+}
+EXPORT_SYMBOL(lbug_with_loc);
+
+#ifdef CONFIG_STACKTRACE
+
+#ifndef HAVE_SAVE_STACK_TRACE_TSK
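+/* Fallback for kernels that do not export save_stack_trace_tsk(): we
+ * can still walk the current task's stack with save_stack_trace().
+ */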
+#define save_stack_trace_tsk(tsk, trace) \
+do { \
+ if ((tsk) == current) \
+ save_stack_trace(trace); \
+ else \
+ pr_info("No stack, save_stack_trace_tsk() not exported\n"); \
+} while (0)
+#endif
+
+static void cfs_print_stack_trace(unsigned long *entries, unsigned int nr)
+{
+ unsigned int i;
+
+ /* Prefer %pB for backtraced symbolic names since it was added in:
+ * Linux v2.6.38-6557-g0f77a8d37825
+ * vsprintf: Introduce %pB format specifier
+ */
+ for (i = 0; i < nr; i++)
+ pr_info("[<0>] %pB\n", (void *)entries[i]);
+}
+
+#define MAX_ST_ENTRIES 100
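+/* st_lock serializes dumps so traces from concurrent tasks do not
+ * interleave, and protects the static entries[] buffer in
+ * libcfs_call_trace().
+ */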
+static DEFINE_SPINLOCK(st_lock);
+
+/* Linux v5.1-rc5 214d8ca6ee ("stacktrace: Provide common infrastructure")
+ * CONFIG_ARCH_STACKWALK selects the newer stack_trace_save_tsk() API,
+ * which is not exported. Look it up with cfs_kallsyms_lookup_name() to
+ * find out whether it is available.
+ */
+#ifdef CONFIG_ARCH_STACKWALK
+typedef unsigned int (stack_trace_save_tsk_t)(struct task_struct *task,
+ unsigned long *store,
+ unsigned int size,
+ unsigned int skipnr);
+static stack_trace_save_tsk_t *task_dump_stack;
+#endif
+
+void __init cfs_debug_init(void)
+{
+#ifdef CONFIG_ARCH_STACKWALK
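+ /* stack_trace_save_tsk() is not exported; resolve it at runtime. If
+ * the lookup fails, task_dump_stack stays NULL and libcfs_call_trace()
+ * prints only the header.
+ */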
+ task_dump_stack = (void *)
+ cfs_kallsyms_lookup_name("stack_trace_save_tsk");
+#endif
+}
+
+static void libcfs_call_trace(struct task_struct *tsk)
+{
+ static unsigned long entries[MAX_ST_ENTRIES];
+#ifdef CONFIG_ARCH_STACKWALK
+ unsigned int nr_entries;
+
+ spin_lock(&st_lock);
+ pr_info("Pid: %d, comm: %.20s %s %s\n", tsk->pid, tsk->comm,
+ init_utsname()->release, init_utsname()->version);
+ pr_info("Call Trace TBD:\n");
+ if (task_dump_stack) {
+ nr_entries = task_dump_stack(tsk, entries, MAX_ST_ENTRIES, 0);
+ cfs_print_stack_trace(entries, nr_entries);
+ }
+ spin_unlock(&st_lock);
+#else
+ struct stack_trace trace;
+
+ trace.nr_entries = 0;
+ trace.max_entries = MAX_ST_ENTRIES;
+ trace.entries = entries;
+ trace.skip = 0;
+
+ spin_lock(&st_lock);
+ pr_info("Pid: %d, comm: %.20s %s %s\n", tsk->pid, tsk->comm,
+ init_utsname()->release, init_utsname()->version);
+ pr_info("Call Trace:\n");
+ save_stack_trace_tsk(tsk, &trace);
+ cfs_print_stack_trace(trace.entries, trace.nr_entries);
+ spin_unlock(&st_lock);
+#endif
+}
+
+#else /* !CONFIG_STACKTRACE */
+
+#ifdef CONFIG_X86
+#include <linux/nmi.h>
+#include <asm/stacktrace.h>
+
+#ifdef HAVE_STACKTRACE_OPS
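+/* Callbacks for dump_trace(): .stack labels stack transitions (e.g.
+ * <IRQ>) and .address prints each return address that is walked.
+ */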
+static int print_trace_stack(void *data, char *name)
+{
+ printk(" <%s> ", name);
+ return 0;
+}
+
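+/* The stacktrace_ops .address callback returned int in some kernel
+ * versions and void in others; STACKTRACE_OPS_ADDRESS_RETURN_INT is
+ * presumably set by the configure checks to pick the right prototype.
+ */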
+#ifdef STACKTRACE_OPS_ADDRESS_RETURN_INT
+static int
+#else
+static void
+#endif
+print_trace_address(void *data, unsigned long addr, int reliable)
+{
+ char fmt[32];
+
+ touch_nmi_watchdog();
+ sprintf(fmt, " [<%016lx>] %s%%s\n", addr, reliable ? "" : "? ");
+ __print_symbol(fmt, addr);
+#ifdef STACKTRACE_OPS_ADDRESS_RETURN_INT
+ return 0;
+#endif
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+ .stack = print_trace_stack,
+ .address = print_trace_address,
+ .walk_stack = print_context_stack,
+};
+#endif /* HAVE_STACKTRACE_OPS */
+
+static void libcfs_call_trace(struct task_struct *tsk)
+{
+#ifdef HAVE_STACKTRACE_OPS
+ printk("Pid: %d, comm: %.20s\n", tsk->pid, tsk->comm);
+ printk("\nCall Trace:\n");
+ dump_trace(tsk, NULL, NULL, 0, &print_trace_ops, NULL);
+ printk("\n");
+#else /* !HAVE_STACKTRACE_OPS */
+ if (tsk == current)
+ dump_stack();
+ else
+ CWARN("can't show stack: kernel doesn't export show_task\n");
+#endif /* HAVE_STACKTRACE_OPS */
+}
+
+#else /* !CONFIG_X86 */
+
+static void libcfs_call_trace(struct task_struct *tsk)
+{
+ if (tsk == current)
+ dump_stack();
+ else
+ CWARN("can't show stack: kernel doesn't export show_task\n");
+}
+
+#endif /* CONFIG_X86 */
+
+#endif /* CONFIG_STACKTRACE */
+
+void libcfs_debug_dumpstack(struct task_struct *tsk)
+{
+ libcfs_call_trace(tsk ?: current);
+}
+EXPORT_SYMBOL(libcfs_debug_dumpstack);
+
+static int panic_notifier(struct notifier_block *self, unsigned long unused1,
+ void *unused2)
+{
+ if (libcfs_panic_in_progress)
+ return 0;
+
+ libcfs_panic_in_progress = 1;
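+ /* Ensure the flag is visible to other CPUs before we start dumping. */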
+ mb();
+
+#ifdef LNET_DUMP_ON_PANIC
+ /* This is currently disabled because it spews far too much to the
+ * console in the rare cases it is ever triggered.
+ */
+
+ if (in_interrupt())
+ cfs_trace_debug_print();
+ else
+ libcfs_debug_dumplog_internal((void *)(long)current->pid);
+#endif
+ return 0;
+}
+
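+/* High priority so the log dump runs early in the panic notifier chain. */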
+static struct notifier_block libcfs_panic_notifier = {
+ .notifier_call = panic_notifier,
+ .next = NULL,
+ .priority = 10000,
+};
+
+static void libcfs_register_panic_notifier(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &libcfs_panic_notifier);
+}
+
+static void libcfs_unregister_panic_notifier(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &libcfs_panic_notifier);