1 Index: linux-2.6.10/arch/i386/Kconfig.debug
2 ===================================================================
3 --- linux-2.6.10.orig/arch/i386/Kconfig.debug 2005-04-05 16:29:30.191000944 +0800
4 +++ linux-2.6.10/arch/i386/Kconfig.debug 2005-04-05 16:47:53.904211032 +0800
7 source "lib/Kconfig.debug"
10 + tristate "Crash dump support (EXPERIMENTAL)"
11 + depends on EXPERIMENTAL
14 + Say Y here to enable saving an image of system memory when a panic
15 + or other error occurs. Dumps can also be forced with the SysRq+d
16 + key if MAGIC_SYSRQ is enabled.
20 + depends on CRASH_DUMP
23 +config CRASH_DUMP_BLOCKDEV
24 + tristate "Crash dump block device driver"
25 + depends on CRASH_DUMP
27 + Say Y to allow saving crash dumps directly to a disk device.
29 +config CRASH_DUMP_NETDEV
30 + tristate "Crash dump network device driver"
31 + depends on CRASH_DUMP
33 + Say Y to allow saving crash dumps over a network device.
35 +config CRASH_DUMP_MEMDEV
36 + bool "Crash dump staged memory driver"
37 + depends on CRASH_DUMP
39 + Say Y to allow intermediate saving crash dumps in spare
40 + memory pages which would then be written out to disk
43 +config CRASH_DUMP_SOFTBOOT
44 + bool "Save crash dump across a soft reboot"
45 + depends on CRASH_DUMP_MEMDEV
47 + Say Y to allow a crash dump to be preserved in memory
48 + pages across a soft reboot and written out to disk
49 + thereafter. For this to work, CRASH_DUMP must be
50 + configured as part of the kernel (not as a module).
52 +config CRASH_DUMP_COMPRESS_RLE
53 + tristate "Crash dump RLE compression"
54 + depends on CRASH_DUMP
56 + Say Y to allow saving dumps with Run Length Encoding compression.
58 +config CRASH_DUMP_COMPRESS_GZIP
59 + tristate "Crash dump GZIP compression"
62 + depends on CRASH_DUMP
64 + Say Y to allow saving dumps with Gnu Zip compression.
67 bool "Early printk" if EMBEDDED
70 with klogd/syslogd or the X server. You should normally N here,
71 unless you want to debug such a crash.
73 -config DEBUG_STACKOVERFLOW
74 - bool "Check for stack overflows"
75 +config DEBUG_STACKOVERFLOW
76 + bool "Check for stack overflows"
77 depends on DEBUG_KERNEL
80 Index: linux-2.6.10/arch/i386/mm/init.c
81 ===================================================================
82 --- linux-2.6.10.orig/arch/i386/mm/init.c 2005-04-05 16:47:05.157621640 +0800
83 +++ linux-2.6.10/arch/i386/mm/init.c 2005-04-05 16:47:53.909210272 +0800
88 +/* To enable modules to check if a page is in RAM */
89 +int pfn_is_ram(unsigned long pfn)
91 + return (page_is_ram(pfn));
98 Index: linux-2.6.10/arch/i386/kernel/traps.c
99 ===================================================================
100 --- linux-2.6.10.orig/arch/i386/kernel/traps.c 2005-04-05 16:47:05.156621792 +0800
101 +++ linux-2.6.10/arch/i386/kernel/traps.c 2005-04-05 16:47:53.906210728 +0800
103 #include <linux/ptrace.h>
104 #include <linux/utsname.h>
105 #include <linux/kprobes.h>
106 +#include <linux/dump.h>
109 #include <linux/ioport.h>
113 spin_unlock_irq(&die.lock);
114 + dump((char *)str, regs);
116 panic("Fatal exception in interrupt");
119 printk(" on CPU%d, eip %08lx, registers:\n",
120 smp_processor_id(), regs->eip);
121 show_registers(regs);
122 + dump((char *)msg, regs);
123 printk("console shuts up ...\n");
125 spin_unlock(&nmi_print_lock);
126 Index: linux-2.6.10/arch/i386/kernel/setup.c
127 ===================================================================
128 --- linux-2.6.10.orig/arch/i386/kernel/setup.c 2004-12-25 05:34:45.000000000 +0800
129 +++ linux-2.6.10/arch/i386/kernel/setup.c 2005-04-05 16:47:53.905210880 +0800
132 #define LOWMEMSIZE() (0x9f000)
134 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
135 +unsigned long crashdump_addr = 0xdeadbeef;
138 static void __init parse_cmdline_early (char ** cmdline_p)
140 char c = ' ', *to = command_line, *from = saved_command_line;
142 if (c == ' ' && !memcmp(from, "vmalloc=", 8))
143 __VMALLOC_RESERVE = memparse(from+8, &from);
145 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
146 + if (c == ' ' && !memcmp(from, "crashdump=", 10))
147 + crashdump_addr = memparse(from+10, &from);
153 @@ -1288,6 +1297,10 @@
155 static char * __init machine_specific_memory_setup(void);
157 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
158 +extern void crashdump_reserve(void);
162 * Determine if we were loaded by an EFI loader. If so, then we have also been
163 * passed the efi memmap, systab, etc., so we should use these data structures
164 @@ -1393,6 +1406,10 @@
168 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
169 + crashdump_reserve(); /* Preserve crash dump state from prev boot */
174 #ifdef CONFIG_X86_GENERICARCH
175 Index: linux-2.6.10/arch/i386/kernel/smp.c
176 ===================================================================
177 --- linux-2.6.10.orig/arch/i386/kernel/smp.c 2005-04-05 16:47:05.154622096 +0800
178 +++ linux-2.6.10/arch/i386/kernel/smp.c 2005-04-05 16:47:53.908210424 +0800
180 #include <linux/mc146818rtc.h>
181 #include <linux/cache.h>
182 #include <linux/interrupt.h>
183 +#include <linux/dump.h>
185 #include <asm/mtrr.h>
186 #include <asm/tlbflush.h>
189 cfg = __prepare_ICR(shortcut, vector);
191 + if (vector == DUMP_VECTOR) {
193 + * Setup DUMP IPI to be delivered as an NMI
195 + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
199 * Send the IPI. The write to APIC_ICR fires this off.
204 cfg = __prepare_ICR(0, vector);
206 + if (vector == DUMP_VECTOR) {
208 + * Setup DUMP IPI to be delivered as an NMI
210 + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
214 * Send the IPI. The write to APIC_ICR fires this off.
217 static struct call_data_struct * call_data;
219 +void dump_send_ipi(void)
221 + send_IPI_allbutself(DUMP_VECTOR);
225 * this function sends a 'generic call function' IPI to all other CPUs
231 -static void stop_this_cpu (void * dummy)
232 +void stop_this_cpu (void * dummy)
237 atomic_inc(&call_data->finished);
241 Index: linux-2.6.10/arch/i386/kernel/i386_ksyms.c
242 ===================================================================
243 --- linux-2.6.10.orig/arch/i386/kernel/i386_ksyms.c 2004-12-25 05:35:40.000000000 +0800
244 +++ linux-2.6.10/arch/i386/kernel/i386_ksyms.c 2005-04-05 16:47:53.907210576 +0800
246 #include <linux/tty.h>
247 #include <linux/highmem.h>
248 #include <linux/time.h>
249 +#include <linux/nmi.h>
251 #include <asm/semaphore.h>
252 #include <asm/processor.h>
254 #include <asm/tlbflush.h>
257 +#include <asm/e820.h>
258 #include <asm/kdebug.h>
260 extern void dump_thread(struct pt_regs *, struct user *);
264 EXPORT_SYMBOL(csum_partial);
266 +#ifdef CONFIG_CRASH_DUMP_MODULE
268 +extern irq_desc_t irq_desc[NR_IRQS];
269 +extern cpumask_t irq_affinity[NR_IRQS];
270 +extern void stop_this_cpu(void *);
271 +EXPORT_SYMBOL(irq_desc);
272 +EXPORT_SYMBOL(irq_affinity);
273 +EXPORT_SYMBOL(stop_this_cpu);
274 +EXPORT_SYMBOL(dump_send_ipi);
276 +extern int pfn_is_ram(unsigned long);
277 +EXPORT_SYMBOL(pfn_is_ram);
278 +#ifdef ARCH_HAS_NMI_WATCHDOG
279 +EXPORT_SYMBOL(touch_nmi_watchdog);
282 Index: linux-2.6.10/arch/s390/Kconfig.debug
283 ===================================================================
284 --- linux-2.6.10.orig/arch/s390/Kconfig.debug 2004-12-25 05:34:31.000000000 +0800
285 +++ linux-2.6.10/arch/s390/Kconfig.debug 2005-04-05 16:47:53.921208448 +0800
288 source "lib/Kconfig.debug"
291 + bool "Kerntypes debugging information"
294 + Say Y here to save additional kernel debugging information in the
295 + file init/kerntypes.o. This information is used by crash analysis
296 + tools such as lcrash to assign structures to kernel addresses.
300 Index: linux-2.6.10/arch/s390/boot/Makefile
301 ===================================================================
302 --- linux-2.6.10.orig/arch/s390/boot/Makefile 2004-12-25 05:35:49.000000000 +0800
303 +++ linux-2.6.10/arch/s390/boot/Makefile 2005-04-05 16:47:53.922208296 +0800
306 install: $(CONFIGURE) $(obj)/image
307 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
308 - System.map Kerntypes "$(INSTALL_PATH)"
309 + System.map init/Kerntypes "$(INSTALL_PATH)"
310 Index: linux-2.6.10/arch/s390/boot/install.sh
311 ===================================================================
312 --- linux-2.6.10.orig/arch/s390/boot/install.sh 2004-12-25 05:35:01.000000000 +0800
313 +++ linux-2.6.10/arch/s390/boot/install.sh 2005-04-05 16:47:53.921208448 +0800
315 # $1 - kernel version
316 # $2 - kernel image file
317 # $3 - kernel map file
318 -# $4 - default install path (blank if root directory)
319 +# $4 - kernel type file
320 +# $5 - default install path (blank if root directory)
323 # User may have a custom install script
326 # Default install - same as make zlilo
328 -if [ -f $4/vmlinuz ]; then
329 - mv $4/vmlinuz $4/vmlinuz.old
330 +if [ -f $5/vmlinuz ]; then
331 + mv $5/vmlinuz $5/vmlinuz.old
334 -if [ -f $4/System.map ]; then
335 - mv $4/System.map $4/System.old
336 +if [ -f $5/System.map ]; then
337 + mv $5/System.map $5/System.old
344 Index: linux-2.6.10/arch/ia64/Kconfig.debug
345 ===================================================================
346 --- linux-2.6.10.orig/arch/ia64/Kconfig.debug 2004-12-25 05:34:32.000000000 +0800
347 +++ linux-2.6.10/arch/ia64/Kconfig.debug 2005-04-05 16:47:53.917209056 +0800
350 source "lib/Kconfig.debug"
353 + tristate "Crash dump support (EXPERIMENTAL)"
354 + depends on EXPERIMENTAL
357 + Say Y here to enable saving an image of system memory when a panic
358 + or other error occurs. Dumps can also be forced with the SysRq+d
359 + key if MAGIC_SYSRQ is enabled.
363 + depends on CRASH_DUMP
366 +config CRASH_DUMP_BLOCKDEV
367 + tristate "Crash dump block device driver"
368 + depends on CRASH_DUMP
370 + Say Y to allow saving crash dumps directly to a disk device.
372 +config CRASH_DUMP_NETDEV
373 + tristate "Crash dump network device driver"
374 + depends on CRASH_DUMP
376 + Say Y to allow saving crash dumps over a network device.
378 +config CRASH_DUMP_MEMDEV
379 + bool "Crash dump staged memory driver"
380 + depends on CRASH_DUMP
382 + Say Y to allow intermediate saving crash dumps in spare
383 + memory pages which would then be written out to disk
386 +config CRASH_DUMP_SOFTBOOT
387 + bool "Save crash dump across a soft reboot"
388 + depends on CRASH_DUMP_MEMDEV
390 + Say Y to allow a crash dump to be preserved in memory
391 + pages across a soft reboot and written out to disk
392 + thereafter. For this to work, CRASH_DUMP must be
393 + configured as part of the kernel (not as a module).
395 +config CRASH_DUMP_COMPRESS_RLE
396 + tristate "Crash dump RLE compression"
397 + depends on CRASH_DUMP
399 + Say Y to allow saving dumps with Run Length Encoding compression.
401 +config CRASH_DUMP_COMPRESS_GZIP
402 + tristate "Crash dump GZIP compression"
403 + select ZLIB_INFLATE
404 + select ZLIB_DEFLATE
405 + depends on CRASH_DUMP
407 + Say Y to allow saving dumps with Gnu Zip compression.
412 prompt "Physical memory granularity"
413 default IA64_GRANULE_64MB
414 Index: linux-2.6.10/arch/ia64/kernel/traps.c
415 ===================================================================
416 --- linux-2.6.10.orig/arch/ia64/kernel/traps.c 2004-12-25 05:35:39.000000000 +0800
417 +++ linux-2.6.10/arch/ia64/kernel/traps.c 2005-04-05 16:47:53.918208904 +0800
419 #include <asm/intrinsics.h>
420 #include <asm/processor.h>
421 #include <asm/uaccess.h>
422 +#include <asm/nmi.h>
423 +#include <linux/dump.h>
425 extern spinlock_t timerlist_lock;
428 printk("%s[%d]: %s %ld [%d]\n",
429 current->comm, current->pid, str, err, ++die_counter);
431 + dump((char *)str, regs);
433 printk(KERN_ERR "Recursive die() failure, output suppressed\n");
435 Index: linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c
436 ===================================================================
437 --- linux-2.6.10.orig/arch/ia64/kernel/ia64_ksyms.c 2005-04-05 16:29:27.954340968 +0800
438 +++ linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c 2005-04-05 16:47:53.917209056 +0800
441 #include <linux/config.h>
442 #include <linux/module.h>
444 #include <linux/string.h>
445 EXPORT_SYMBOL(memset);
446 EXPORT_SYMBOL(memchr);
448 EXPORT_SYMBOL(strstr);
449 EXPORT_SYMBOL(strpbrk);
451 +#include <linux/syscalls.h>
452 +EXPORT_SYMBOL(sys_ioctl);
454 #include <asm/checksum.h>
455 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
462 +#include <asm/hw_irq.h>
464 +#ifdef CONFIG_CRASH_DUMP_MODULE
466 +extern irq_desc_t _irq_desc[NR_IRQS];
467 +extern cpumask_t irq_affinity[NR_IRQS];
468 +extern void stop_this_cpu(void *);
469 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
470 +extern void dump_send_ipi(void);
471 +EXPORT_SYMBOL(_irq_desc);
472 +EXPORT_SYMBOL(irq_affinity);
473 +EXPORT_SYMBOL(stop_this_cpu);
474 +EXPORT_SYMBOL(dump_send_ipi);
475 +EXPORT_SYMBOL(dump_ipi_function_ptr);
479 Index: linux-2.6.10/arch/ia64/kernel/irq.c
480 ===================================================================
481 --- linux-2.6.10.orig/arch/ia64/kernel/irq.c 2004-12-25 05:35:27.000000000 +0800
482 +++ linux-2.6.10/arch/ia64/kernel/irq.c 2005-04-05 16:47:53.919208752 +0800
485 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
487 +#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
488 +cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
490 static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
493 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
495 Index: linux-2.6.10/arch/ia64/kernel/smp.c
496 ===================================================================
497 --- linux-2.6.10.orig/arch/ia64/kernel/smp.c 2004-12-25 05:35:40.000000000 +0800
498 +++ linux-2.6.10/arch/ia64/kernel/smp.c 2005-04-05 16:47:53.920208600 +0800
500 #include <linux/efi.h>
501 #include <linux/bitops.h>
503 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
504 +#include <linux/dump.h>
507 #include <asm/atomic.h>
508 #include <asm/current.h>
509 #include <asm/delay.h>
511 #define IPI_CALL_FUNC 0
512 #define IPI_CPU_STOP 1
514 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
515 +#define IPI_DUMP_INTERRUPT 4
516 + int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
519 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
520 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
523 spin_unlock_irq(&call_lock);
528 +/* changed static void stop_this_cpu -> void stop_this_cpu */
537 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
538 + case IPI_DUMP_INTERRUPT:
539 + if( dump_ipi_function_ptr != NULL ) {
540 + if (!dump_ipi_function_ptr(regs)) {
541 + printk(KERN_ERR "(*dump_ipi_function_ptr)(): rejected IPI_DUMP_INTERRUPT\n");
548 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
551 send_IPI_allbutself(IPI_CPU_STOP);
553 +EXPORT_SYMBOL(smp_send_stop);
556 setup_profiling_timer (unsigned int multiplier)
561 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
562 +void dump_send_ipi(void)
564 + send_IPI_allbutself(IPI_DUMP_INTERRUPT);
567 Index: linux-2.6.10/arch/ppc64/Kconfig.debug
568 ===================================================================
569 --- linux-2.6.10.orig/arch/ppc64/Kconfig.debug 2004-12-25 05:35:27.000000000 +0800
570 +++ linux-2.6.10/arch/ppc64/Kconfig.debug 2005-04-05 16:47:53.922208296 +0800
573 source "lib/Kconfig.debug"
577 + depends on CRASH_DUMP
581 + tristate "Crash dump support"
584 + Say Y here to enable saving an image of system memory when a panic
585 + or other error occurs. Dumps can also be forced with the SysRq+d
586 + key if MAGIC_SYSRQ is enabled.
588 +config CRASH_DUMP_BLOCKDEV
589 + tristate "Crash dump block device driver"
590 + depends on CRASH_DUMP
592 + Say Y to allow saving crash dumps directly to a disk device.
594 +config CRASH_DUMP_NETDEV
595 + tristate "Crash dump network device driver"
596 + depends on CRASH_DUMP
598 + Say Y to allow saving crash dumps over a network device.
600 +config CRASH_DUMP_MEMDEV
601 + bool "Crash dump staged memory driver"
602 + depends on CRASH_DUMP
604 + Say Y to allow intermediate saving crash dumps in spare
605 + memory pages which would then be written out to disk
606 + later. Need 'kexec' support for this to work.
607 + **** Not supported at present ****
609 +config CRASH_DUMP_SOFTBOOT
610 + bool "Save crash dump across a soft reboot"
612 + Say Y to allow a crash dump to be preserved in memory
613 + pages across a soft reboot and written out to disk
614 + thereafter. For this to work, CRASH_DUMP must be
615 + configured as part of the kernel (not as a module).
616 + Need 'kexec' support to use this option.
617 + **** Not supported at present ****
619 +config CRASH_DUMP_COMPRESS_RLE
620 + tristate "Crash dump RLE compression"
621 + depends on CRASH_DUMP
623 + Say Y to allow saving dumps with Run Length Encoding compression.
625 +config CRASH_DUMP_COMPRESS_GZIP
626 + tristate "Crash dump GZIP compression"
627 + select ZLIB_INFLATE
628 + select ZLIB_DEFLATE
629 + depends on CRASH_DUMP
631 + Say Y to allow saving dumps with Gnu Zip compression.
633 config DEBUG_STACKOVERFLOW
634 bool "Check for stack overflows"
635 depends on DEBUG_KERNEL
636 Index: linux-2.6.10/arch/ppc64/kernel/traps.c
637 ===================================================================
638 --- linux-2.6.10.orig/arch/ppc64/kernel/traps.c 2004-12-25 05:34:47.000000000 +0800
639 +++ linux-2.6.10/arch/ppc64/kernel/traps.c 2005-04-05 16:47:53.923208144 +0800
641 #include <linux/interrupt.h>
642 #include <linux/init.h>
643 #include <linux/module.h>
644 +#include <linux/dump.h>
646 #include <asm/pgtable.h>
647 #include <asm/uaccess.h>
652 + dump((char *)str, regs);
654 spin_unlock_irq(&die_lock);
656 Index: linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c
657 ===================================================================
658 --- linux-2.6.10.orig/arch/ppc64/kernel/ppc_ksyms.c 2004-12-25 05:34:26.000000000 +0800
659 +++ linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c 2005-04-05 16:47:53.925207840 +0800
661 EXPORT_SYMBOL(get_wchan);
662 EXPORT_SYMBOL(console_drivers);
664 +#ifdef CONFIG_CRASH_DUMP_MODULE
665 +extern int dump_page_is_ram(unsigned long);
666 +EXPORT_SYMBOL(dump_page_is_ram);
668 +EXPORT_SYMBOL(irq_affinity);
669 +extern void stop_this_cpu(void *);
670 +EXPORT_SYMBOL(stop_this_cpu);
671 +EXPORT_SYMBOL(dump_send_ipi);
675 EXPORT_SYMBOL(tb_ticks_per_usec);
677 EXPORT_SYMBOL(cur_cpu_spec);
678 Index: linux-2.6.10/arch/ppc64/kernel/lmb.c
679 ===================================================================
680 --- linux-2.6.10.orig/arch/ppc64/kernel/lmb.c 2004-12-25 05:34:58.000000000 +0800
681 +++ linux-2.6.10/arch/ppc64/kernel/lmb.c 2005-04-05 16:47:53.924207992 +0800
689 + * This is the copy of page_is_ram (mm/init.c). The difference is
690 + * it identifies all memory holes.
692 +int dump_page_is_ram(unsigned long pfn)
695 + unsigned long paddr = (pfn << PAGE_SHIFT);
697 + for (i=0; i < lmb.memory.cnt ;i++) {
698 + unsigned long base;
700 +#ifdef CONFIG_MSCHUNKS
701 + base = lmb.memory.region[i].physbase;
703 + base = lmb.memory.region[i].base;
705 + if ((paddr >= base) &&
706 + (paddr < (base + lmb.memory.region[i].size))) {
714 Index: linux-2.6.10/arch/ppc64/kernel/xics.c
715 ===================================================================
716 --- linux-2.6.10.orig/arch/ppc64/kernel/xics.c 2004-12-25 05:34:58.000000000 +0800
717 +++ linux-2.6.10/arch/ppc64/kernel/xics.c 2005-04-05 16:47:53.925207840 +0800
719 smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
722 -#ifdef CONFIG_DEBUGGER
723 +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
724 + || defined(CONFIG_CRASH_DUMP_MODULE)
725 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
726 &xics_ipi_message[cpu].value)) {
728 Index: linux-2.6.10/arch/ppc64/kernel/smp.c
729 ===================================================================
730 --- linux-2.6.10.orig/arch/ppc64/kernel/smp.c 2004-12-25 05:35:23.000000000 +0800
731 +++ linux-2.6.10/arch/ppc64/kernel/smp.c 2005-04-05 16:47:53.926207688 +0800
733 #include <linux/spinlock.h>
734 #include <linux/cache.h>
735 #include <linux/err.h>
736 +#include <linux/dump.h>
737 #include <linux/sysdev.h>
738 #include <linux/cpu.h>
741 struct smp_ops_t *smp_ops;
743 static volatile unsigned int cpu_callin_map[NR_CPUS];
744 +static int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
746 extern unsigned char stab_array[];
752 -#ifdef CONFIG_DEBUGGER
753 +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
754 + || defined(CONFIG_CRASH_DUMP_MODULE)
755 case PPC_MSG_DEBUGGER_BREAK:
756 - debugger_ipi(regs);
757 + if (dump_ipi_function_ptr) {
758 + dump_ipi_function_ptr(regs);
760 +#ifdef CONFIG_DEBUGGER
762 + debugger_ipi(regs);
771 -static void stop_this_cpu(void *dummy)
772 +void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *))
774 + dump_ipi_function_ptr = dump_ipi_callback;
775 + if (dump_ipi_callback) {
777 + smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
781 +void stop_this_cpu(void *dummy)
785 Index: linux-2.6.10/arch/x86_64/Kconfig.debug
786 ===================================================================
787 --- linux-2.6.10.orig/arch/x86_64/Kconfig.debug 2004-12-25 05:34:01.000000000 +0800
788 +++ linux-2.6.10/arch/x86_64/Kconfig.debug 2005-04-05 16:47:53.909210272 +0800
791 source "lib/Kconfig.debug"
794 + tristate "Crash dump support (EXPERIMENTAL)"
795 + depends on EXPERIMENTAL
798 + Say Y here to enable saving an image of system memory when a panic
799 + or other error occurs. Dumps can also be forced with the SysRq+d
800 + key if MAGIC_SYSRQ is enabled.
804 + depends on CRASH_DUMP
807 +config CRASH_DUMP_BLOCKDEV
808 + tristate "Crash dump block device driver"
809 + depends on CRASH_DUMP
811 + Say Y to allow saving crash dumps directly to a disk device.
813 +config CRASH_DUMP_NETDEV
814 + tristate "Crash dump network device driver"
815 + depends on CRASH_DUMP
817 + Say Y to allow saving crash dumps over a network device.
819 +config CRASH_DUMP_MEMDEV
820 + bool "Crash dump staged memory driver"
821 + depends on CRASH_DUMP
823 + Say Y to allow intermediate saving crash dumps in spare
824 + memory pages which would then be written out to disk
827 +config CRASH_DUMP_SOFTBOOT
828 + bool "Save crash dump across a soft reboot"
829 + depends on CRASH_DUMP_MEMDEV
831 + Say Y to allow a crash dump to be preserved in memory
832 + pages across a soft reboot and written out to disk
833 + thereafter. For this to work, CRASH_DUMP must be
834 + configured as part of the kernel (not as a module).
836 +config CRASH_DUMP_COMPRESS_RLE
837 + tristate "Crash dump RLE compression"
838 + depends on CRASH_DUMP
840 + Say Y to allow saving dumps with Run Length Encoding compression.
843 +config CRASH_DUMP_COMPRESS_GZIP
844 + tristate "Crash dump GZIP compression"
845 + select ZLIB_INFLATE
846 + select ZLIB_DEFLATE
847 + depends on CRASH_DUMP
849 + Say Y to allow saving dumps with Gnu Zip compression.
853 # !SMP for now because the context switch early causes GPF in segment reloading
854 # and the GS base checking does the wrong thing then, causing a hang.
856 Index: linux-2.6.10/arch/x86_64/mm/init.c
857 ===================================================================
858 --- linux-2.6.10.orig/arch/x86_64/mm/init.c 2005-04-05 16:29:30.040023896 +0800
859 +++ linux-2.6.10/arch/x86_64/mm/init.c 2005-04-05 16:47:53.916209208 +0800
864 -static inline int page_is_ram (unsigned long pagenr)
865 +inline int page_is_ram (unsigned long pagenr)
869 Index: linux-2.6.10/arch/x86_64/kernel/traps.c
870 ===================================================================
871 --- linux-2.6.10.orig/arch/x86_64/kernel/traps.c 2004-12-25 05:33:49.000000000 +0800
872 +++ linux-2.6.10/arch/x86_64/kernel/traps.c 2005-04-05 16:47:53.915209360 +0800
874 #include <linux/spinlock.h>
875 #include <linux/interrupt.h>
876 #include <linux/module.h>
877 +#include <linux/dump.h>
878 #include <linux/moduleparam.h>
880 #include <asm/system.h>
883 notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
884 show_registers(regs);
885 + dump((char *)str, regs);
886 /* Executive summary in case the oops scrolled away */
887 printk(KERN_ALERT "RIP ");
888 printk_address(regs->rip);
889 Index: linux-2.6.10/arch/x86_64/kernel/setup.c
890 ===================================================================
891 --- linux-2.6.10.orig/arch/x86_64/kernel/setup.c 2004-12-25 05:33:50.000000000 +0800
892 +++ linux-2.6.10/arch/x86_64/kernel/setup.c 2005-04-05 16:47:53.911209968 +0800
897 +unsigned long crashdump_addr = 0xdeadbeef;
899 static __init void parse_cmdline_early (char ** cmdline_p)
901 char c = ' ', *to = command_line, *from = COMMAND_LINE;
904 if (!memcmp(from,"oops=panic", 10))
907 + if (c == ' ' && !memcmp(from, "crashdump=", 10))
908 + crashdump_addr = memparse(from+10, &from);
913 reserve_bootmem_generic(addr, PAGE_SIZE);
916 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
917 +extern void crashdump_reserve(void);
920 void __init setup_arch(char **cmdline_p)
922 unsigned long low_mem_size;
927 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
928 + crashdump_reserve(); /* Preserve crash dump state from prev boot */
932 #ifdef CONFIG_ACPI_BOOT
933 Index: linux-2.6.10/arch/x86_64/kernel/smp.c
934 ===================================================================
935 --- linux-2.6.10.orig/arch/x86_64/kernel/smp.c 2004-12-25 05:35:50.000000000 +0800
936 +++ linux-2.6.10/arch/x86_64/kernel/smp.c 2005-04-05 16:47:53.915209360 +0800
938 #include <linux/kernel_stat.h>
939 #include <linux/mc146818rtc.h>
940 #include <linux/interrupt.h>
941 +#include <linux/dump.h>
943 #include <asm/mtrr.h>
944 #include <asm/pgalloc.h>
949 + if (vector == DUMP_VECTOR) {
951 + * Setup DUMP IPI to be delivered as an NMI
953 + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
957 * I'm not happy about this global shared spinlock in the
958 * MM hot path, but we'll see how contended it is.
960 send_IPI_allbutself(KDB_VECTOR);
964 +/* void dump_send_ipi(int (*dump_ipi_handler)(struct pt_regs *)); */
965 +void dump_send_ipi(void)
967 + send_IPI_allbutself(DUMP_VECTOR);
971 * this function sends a 'reschedule' IPI to another CPU.
972 * it goes straight through and wastes no time serializing
977 +void stop_this_cpu(void* dummy)
982 + cpu_clear(smp_processor_id(), cpu_online_map);
983 + local_irq_disable();
984 + disable_local_APIC();
989 void smp_stop_cpu(void)
992 Index: linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c
993 ===================================================================
994 --- linux-2.6.10.orig/arch/x86_64/kernel/x8664_ksyms.c 2004-12-25 05:34:01.000000000 +0800
995 +++ linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c 2005-04-05 16:47:53.914209512 +0800
997 #include <asm/unistd.h>
998 #include <asm/delay.h>
999 #include <asm/tlbflush.h>
1000 +#include <asm/e820.h>
1001 #include <asm/kdebug.h>
1003 extern spinlock_t rtc_lock;
1004 @@ -216,6 +217,20 @@
1005 extern unsigned long __supported_pte_mask;
1006 EXPORT_SYMBOL(__supported_pte_mask);
1008 +#ifdef CONFIG_CRASH_DUMP_MODULE
1010 +extern irq_desc_t irq_desc[NR_IRQS];
1011 +extern cpumask_t irq_affinity[NR_IRQS];
1012 +extern void stop_this_cpu(void *);
1013 +EXPORT_SYMBOL(irq_desc);
1014 +EXPORT_SYMBOL(irq_affinity);
1015 +EXPORT_SYMBOL(dump_send_ipi);
1016 +EXPORT_SYMBOL(stop_this_cpu);
1018 +extern int page_is_ram(unsigned long);
1019 +EXPORT_SYMBOL(page_is_ram);
1023 EXPORT_SYMBOL(flush_tlb_page);
1024 EXPORT_SYMBOL_GPL(flush_tlb_all);
1025 Index: linux-2.6.10/arch/x86_64/kernel/pci-gart.c
1026 ===================================================================
1027 --- linux-2.6.10.orig/arch/x86_64/kernel/pci-gart.c 2004-12-25 05:34:32.000000000 +0800
1028 +++ linux-2.6.10/arch/x86_64/kernel/pci-gart.c 2005-04-05 16:47:53.913209664 +0800
1030 dma_addr_t bad_dma_address;
1032 unsigned long iommu_bus_base; /* GART remapping area (physical) */
1033 -static unsigned long iommu_size; /* size of remapping area bytes */
1034 +unsigned long iommu_size; /* size of remapping area bytes */
1035 static unsigned long iommu_pages; /* .. and in pages */
1037 u32 *iommu_gatt_base; /* Remapping table */
1038 Index: linux-2.6.10/init/version.c
1039 ===================================================================
1040 --- linux-2.6.10.orig/init/version.c 2004-12-25 05:34:45.000000000 +0800
1041 +++ linux-2.6.10/init/version.c 2005-04-05 16:47:53.896212248 +0800
1043 #include <linux/uts.h>
1044 #include <linux/utsname.h>
1045 #include <linux/version.h>
1046 +#include <linux/stringify.h>
1048 #define version(a) Version_ ## a
1049 #define version_string(a) version(a)
1051 const char *linux_banner =
1052 "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
1053 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
1055 +const char *LINUX_COMPILE_VERSION_ID = __stringify(LINUX_COMPILE_VERSION_ID);
1056 +LINUX_COMPILE_VERSION_ID_TYPE;
1057 Index: linux-2.6.10/init/kerntypes.c
1058 ===================================================================
1059 --- linux-2.6.10.orig/init/kerntypes.c 2005-04-05 19:01:49.158500672 +0800
1060 +++ linux-2.6.10/init/kerntypes.c 2005-04-05 16:47:53.895212400 +0800
1065 + * Copyright (C) 2000 Tom Morano (tjm@sgi.com) and
1066 + * Matt D. Robinson (yakker@alacritech.com)
1068 + * Dummy module that includes headers for all kernel types of interest.
1069 + * The kernel type information is used by the lcrash utility when
1070 + * analyzing system crash dumps or the live system. Using the type
1071 + * information for the running system, rather than kernel header files,
1072 + * makes for a more flexible and robust analysis tool.
1074 + * This source code is released under version 2 of the GNU GPL.
1077 +#include <linux/compile.h>
1078 +#include <linux/module.h>
1079 +#include <linux/mm.h>
1080 +#include <linux/vmalloc.h>
1081 +#include <linux/config.h>
1082 +#include <linux/utsname.h>
1083 +#include <linux/kernel_stat.h>
1084 +#include <linux/dump.h>
1086 +#include <asm/kerntypes.h>
1088 +#ifdef LINUX_COMPILE_VERSION_ID_TYPE
1089 +/* Define version type for version validation of dump and kerntypes */
1090 +LINUX_COMPILE_VERSION_ID_TYPE;
1092 +#if defined(CONFIG_SMP) && defined(CONFIG_CRASH_DUMP)
1093 +extern struct runqueue runqueues;
1094 +struct runqueue rn;
1097 +struct new_utsname *p;
1099 +kerntypes_dummy(void)
1102 Index: linux-2.6.10/init/main.c
1103 ===================================================================
1104 --- linux-2.6.10.orig/init/main.c 2005-04-05 16:29:30.028025720 +0800
1105 +++ linux-2.6.10/init/main.c 2005-04-05 16:47:53.897212096 +0800
1106 @@ -109,6 +109,16 @@
1107 EXPORT_SYMBOL(system_state);
1110 + * The kernel_magic value represents the address of _end, which allows
1111 + * namelist tools to "match" each other respectively. That way a tool
1112 + * that looks at /dev/mem can verify that it is using the right System.map
1113 + * file -- if kernel_magic doesn't equal the namelist value of _end,
1114 + * something's wrong.
1116 +extern unsigned long _end;
1117 +unsigned long *kernel_magic = &_end;
1120 * Boot command-line arguments
1122 #define MAX_INIT_ARGS 32
1123 Index: linux-2.6.10/init/Makefile
1124 ===================================================================
1125 --- linux-2.6.10.orig/init/Makefile 2004-12-25 05:34:32.000000000 +0800
1126 +++ linux-2.6.10/init/Makefile 2005-04-05 16:47:53.897212096 +0800
1128 mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
1129 mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
1131 +extra-$(CONFIG_KERNTYPES) += kerntypes.o
1132 +#For IA64, compile kerntypes in dwarf-2 format.
1133 +ifeq ($(CONFIG_IA64),y)
1134 +CFLAGS_kerntypes.o := -gdwarf-2
1136 +CFLAGS_kerntypes.o := -gstabs
1139 # files to be removed upon make clean
1140 clean-files := ../include/linux/compile.h
1142 # dependencies on generated files need to be listed explicitly
1144 -$(obj)/version.o: include/linux/compile.h
1145 +$(obj)/version.o $(obj)/kerntypes.o: include/linux/compile.h
1147 # compile.h changes depending on hostname, generation number, etc,
1148 # so we regenerate it always.
1150 include/linux/compile.h: FORCE
1152 @$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CC) $(CFLAGS)"
1154 Index: linux-2.6.10/include/asm-um/kerntypes.h
1155 ===================================================================
1156 --- linux-2.6.10.orig/include/asm-um/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
1157 +++ linux-2.6.10/include/asm-um/kerntypes.h 2005-04-05 16:47:53.864217112 +0800
1160 + * asm-um/kerntypes.h
1162 + * Arch-dependent header file that includes headers for all arch-specific
1163 + * types of interest.
1164 + * The kernel type information is used by the lcrash utility when
1165 + * analyzing system crash dumps or the live system. Using the type
1166 + * information for the running system, rather than kernel header files,
1167 + * makes for a more flexible and robust analysis tool.
1169 + * This source code is released under the GNU GPL.
1172 +/* Usermode-Linux-specific header files */
1173 +#ifndef _UM_KERNTYPES_H
1174 +#define _UM_KERNTYPES_H
1176 +/* Use the default */
1177 +#include <asm-generic/kerntypes.h>
1179 +#endif /* _UM_KERNTYPES_H */
1180 Index: linux-2.6.10/include/linux/sysctl.h
1181 ===================================================================
1182 --- linux-2.6.10.orig/include/linux/sysctl.h 2005-04-05 16:29:27.969338688 +0800
1183 +++ linux-2.6.10/include/linux/sysctl.h 2005-04-05 16:47:53.894212552 +0800
1185 KERN_HZ_TIMER=65, /* int: hz timer on or off */
1186 KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */
1187 KERN_SETUID_DUMPABLE=67, /* int: behaviour of dumps for setuid core */
1188 + KERN_DUMP=68, /* directory: dump parameters */
1192 Index: linux-2.6.10/include/linux/sched.h
1193 ===================================================================
1194 --- linux-2.6.10.orig/include/linux/sched.h 2005-04-05 16:47:05.178618448 +0800
1195 +++ linux-2.6.10/include/linux/sched.h 2005-04-05 16:47:53.891213008 +0800
1197 extern int nr_threads;
1198 extern int last_pid;
1199 DECLARE_PER_CPU(unsigned long, process_counts);
1200 +DECLARE_PER_CPU(struct runqueue, runqueues);
1201 extern int nr_processes(void);
1202 extern unsigned long nr_running(void);
1203 extern unsigned long nr_uninterruptible(void);
1204 @@ -760,6 +761,110 @@
1208 + * These are the runqueue data structures:
1211 +#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
1213 +typedef struct runqueue runqueue_t;
1215 +struct prio_array {
1216 + unsigned int nr_active;
1217 + unsigned long bitmap[BITMAP_SIZE];
1218 + struct list_head queue[MAX_PRIO];
1222 + * This is the main, per-CPU runqueue data structure.
1224 + * Locking rule: those places that want to lock multiple runqueues
1225 + * (such as the load balancing or the thread migration code), lock
1226 + * acquire operations must be ordered by ascending &runqueue.
1232 + * nr_running and cpu_load should be in the same cacheline because
1233 + * remote CPUs use both these fields when doing load calculation.
1235 + unsigned long nr_running;
1237 + unsigned long cpu_load;
1239 + unsigned long long nr_switches;
1242 + * This is part of a global counter where only the total sum
1243 + * over all CPUs matters. A task can increase this counter on
1244 + * one CPU and if it got migrated afterwards it may decrease
1245 + * it on another CPU. Always updated under the runqueue lock:
1247 + unsigned long nr_uninterruptible;
1249 + unsigned long expired_timestamp;
1250 + unsigned long long timestamp_last_tick;
1251 + task_t *curr, *idle;
1252 + struct mm_struct *prev_mm;
1253 + prio_array_t *active, *expired, arrays[2];
1254 + int best_expired_prio;
1255 + atomic_t nr_iowait;
1258 + struct sched_domain *sd;
1260 + /* For active balancing */
1261 + int active_balance;
1264 + task_t *migration_thread;
1265 + struct list_head migration_queue;
1268 +#ifdef CONFIG_SCHEDSTATS
1269 + /* latency stats */
1270 + struct sched_info rq_sched_info;
1272 + /* sys_sched_yield() stats */
1273 + unsigned long yld_exp_empty;
1274 + unsigned long yld_act_empty;
1275 + unsigned long yld_both_empty;
1276 + unsigned long yld_cnt;
1278 + /* schedule() stats */
1279 + unsigned long sched_noswitch;
1280 + unsigned long sched_switch;
1281 + unsigned long sched_cnt;
1282 + unsigned long sched_goidle;
1284 + /* pull_task() stats */
1285 + unsigned long pt_gained[MAX_IDLE_TYPES];
1286 + unsigned long pt_lost[MAX_IDLE_TYPES];
1288 + /* active_load_balance() stats */
1289 + unsigned long alb_cnt;
1290 + unsigned long alb_lost;
1291 + unsigned long alb_gained;
1292 + unsigned long alb_failed;
1294 + /* try_to_wake_up() stats */
1295 + unsigned long ttwu_cnt;
1296 + unsigned long ttwu_attempts;
1297 + unsigned long ttwu_moved;
1299 + /* wake_up_new_task() stats */
1300 + unsigned long wunt_cnt;
1301 + unsigned long wunt_moved;
1303 + /* sched_migrate_task() stats */
1304 + unsigned long smt_cnt;
1306 + /* sched_balance_exec() stats */
1307 + unsigned long sbe_cnt;
1312 * The default (Linux) execution domain.
1314 extern struct exec_domain default_exec_domain;
1315 Index: linux-2.6.10/include/linux/miscdevice.h
1316 ===================================================================
1317 --- linux-2.6.10.orig/include/linux/miscdevice.h 2004-12-25 05:34:58.000000000 +0800
1318 +++ linux-2.6.10/include/linux/miscdevice.h 2005-04-05 16:47:53.893212704 +0800
1320 #define MICROCODE_MINOR 184
1321 #define MWAVE_MINOR 219 /* ACP/Mwave Modem */
1322 #define MPT_MINOR 220
1323 +#define CRASH_DUMP_MINOR 230 /* LKCD */
1324 #define MISC_DYNAMIC_MINOR 255
1326 #define TUN_MINOR 200
1327 Index: linux-2.6.10/include/linux/dump.h
1328 ===================================================================
1329 --- linux-2.6.10.orig/include/linux/dump.h 2005-04-05 19:01:49.158500672 +0800
1330 +++ linux-2.6.10/include/linux/dump.h 2005-04-05 16:47:53.893212704 +0800
1333 + * Kernel header file for Linux crash dumps.
1335 + * Created by: Matt Robinson (yakker@sgi.com)
1336 + * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
1338 + * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
1339 + * Copyright 2001 - 2002 Matt D. Robinson. All rights reserved.
1340 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
1342 + * Most of this is the same old stuff from vmdump.h, except now we're
1343 + * actually a stand-alone driver plugged into the block layer interface,
1344 + * with the exception that we now allow for compression modes externally
1345 + * loaded (e.g., someone can come up with their own).
1347 + * This code is released under version 2 of the GNU GPL.
1350 +/* This header file includes all structure definitions for crash dumps. */
1354 +#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
1356 +#include <linux/list.h>
1357 +#include <linux/notifier.h>
1358 +#include <linux/dumpdev.h>
1359 +#include <asm/ioctl.h>
1362 + * Predefine default DUMP_PAGE constants, asm header may override.
1364 + * On ia64 discontinuous memory systems it's possible for the memory
1365 + * banks to stop at 2**12 page alignments, the smallest possible page
1366 + * size. But the system page size, PAGE_SIZE, is in fact larger.
1368 +#define DUMP_PAGE_SHIFT PAGE_SHIFT
1369 +#define DUMP_PAGE_MASK PAGE_MASK
1370 +#define DUMP_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
1373 + * Dump offset changed from 4Kb to 64Kb to support multiple PAGE_SIZE
1374 + * (kernel page size). Assumption goes that 64K is the highest page size
1378 +#define DUMP_HEADER_OFFSET (1ULL << 16)
1380 +#define OLDMINORBITS 8
1381 +#define OLDMINORMASK ((1U << OLDMINORBITS) -1)
1383 +/* Making DUMP_PAGE_SIZE = PAGE_SIZE, to support dumping on architectures
1384 + * which support page sizes (PAGE_SIZE) greater than 4KB.
1385 + * Will it affect ia64 discontinuous memory systems ????
1387 +#define DUMP_PAGE_SIZE PAGE_SIZE
1389 +/* thread_info lies at the bottom of stack, (Except IA64). */
1390 +#define STACK_START_POSITION(tsk) (tsk->thread_info)
1392 + * Predefined default memcpy() to use when copying memory to the dump buffer.
1394 + * On ia64 there is a heads up function that can be called to let the prom
1395 + * machine check monitor know that the current activity is risky and it should
1396 + * ignore the fault (nofault). In this case the ia64 header will redefine this
1397 + * macro to __dump_memcpy() and use its arch-specific version.
1399 +#define DUMP_memcpy memcpy
1400 +#define bzero(a,b) memset(a, 0, b)
1402 +/* necessary header files */
1403 +#include <asm/dump.h> /* for architecture-specific header */
1406 + * Size of the buffer that's used to hold:
1408 + * 1. the dump header (padded to fill the complete buffer)
1409 + * 2. the possibly compressed page headers and data
1411 + * = 256k for page size >= 64k
1412 + * = 64k for page size < 64k
1414 +#if (PAGE_SHIFT >= 16)
1415 +#define DUMP_BUFFER_SIZE (256 * 1024) /* size of dump buffer */
1417 +#define DUMP_BUFFER_SIZE (64 * 1024) /* size of dump buffer */
1420 +#define DUMP_HEADER_SIZE DUMP_BUFFER_SIZE
1422 +/* standard header definitions */
1423 +#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
1424 +#define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */
1425 +#define DUMP_VERSION_NUMBER 0x8 /* dump version number */
1426 +#define DUMP_PANIC_LEN 0x100 /* dump panic string length */
1428 +/* dump levels - type specific stuff added later -- add as necessary */
1429 +#define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */
1430 +#define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */
1431 +#define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */
1432 +#define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */
1433 +#define DUMP_LEVEL_ALL_RAM 0x8 /* dump header, all RAM pages */
1434 +#define DUMP_LEVEL_ALL 0x10 /* dump all memory RAM and firmware */
1437 +/* dump compression options -- add as necessary */
1438 +#define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */
1439 +#define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */
1440 +#define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */
1442 +/* dump flags - any dump-type specific flags -- add as necessary */
1443 +#define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */
1444 +#define DUMP_FLAGS_SOFTBOOT 0x2 /* 2 stage soft-boot based dump */
1445 +#define DUMP_FLAGS_NONDISRUPT 0X1 /* non-disruptive dumping */
1447 +#define DUMP_FLAGS_TARGETMASK 0xf0000000 /* handle special case targets */
1448 +#define DUMP_FLAGS_DISKDUMP 0x80000000 /* dump to local disk */
1449 +#define DUMP_FLAGS_NETDUMP 0x40000000 /* dump over the network */
1451 +/* dump header flags -- add as necessary */
1452 +#define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */
1453 +#define DUMP_DH_RAW 0x1 /* raw page (no compression) */
1454 +#define DUMP_DH_COMPRESSED 0x2 /* page is compressed */
1455 +#define DUMP_DH_END 0x4 /* end marker on a full dump */
1456 +#define DUMP_DH_TRUNCATED 0x8 /* dump is incomplete */
1457 +#define DUMP_DH_TEST_PATTERN 0x10 /* dump page is a test pattern */
1458 +#define DUMP_DH_NOT_USED 0x20 /* 1st bit not used in flags */
1460 +/* names for various dump parameters in /proc/kernel */
1461 +#define DUMP_ROOT_NAME "sys/dump"
1462 +#define DUMP_DEVICE_NAME "device"
1463 +#define DUMP_COMPRESS_NAME "compress"
1464 +#define DUMP_LEVEL_NAME "level"
1465 +#define DUMP_FLAGS_NAME "flags"
1466 +#define DUMP_ADDR_NAME "addr"
1468 +#define DUMP_SYSRQ_KEY 'd' /* key to use for MAGIC_SYSRQ key */
1470 +/* CTL_DUMP names: */
1473 + CTL_DUMP_DEVICE=1,
1474 + CTL_DUMP_COMPRESS=3,
1482 +/* page size for gzip compression -- buffered slightly beyond hardware PAGE_SIZE used by DUMP */
1483 +#define DUMP_DPC_PAGE_SIZE (DUMP_PAGE_SIZE + 512)
1485 +/* dump ioctl() control options */
1486 +#define DIOSDUMPDEV _IOW('p', 0xA0, unsigned int) /* set the dump device */
1487 +#define DIOGDUMPDEV _IOR('p', 0xA1, unsigned int) /* get the dump device */
1488 +#define DIOSDUMPLEVEL _IOW('p', 0xA2, unsigned int) /* set the dump level */
1489 +#define DIOGDUMPLEVEL _IOR('p', 0xA3, unsigned int) /* get the dump level */
1490 +#define DIOSDUMPFLAGS _IOW('p', 0xA4, unsigned int) /* set the dump flag parameters */
1491 +#define DIOGDUMPFLAGS _IOR('p', 0xA5, unsigned int) /* get the dump flag parameters */
1492 +#define DIOSDUMPCOMPRESS _IOW('p', 0xA6, unsigned int) /* set the dump compress level */
1493 +#define DIOGDUMPCOMPRESS _IOR('p', 0xA7, unsigned int) /* get the dump compress level */
1495 +/* these ioctls are used only by netdump module */
1496 +#define DIOSTARGETIP _IOW('p', 0xA8, unsigned int) /* set the target m/c's ip */
1497 +#define DIOGTARGETIP _IOR('p', 0xA9, unsigned int) /* get the target m/c's ip */
1498 +#define DIOSTARGETPORT _IOW('p', 0xAA, unsigned int) /* set the target m/c's port */
1499 +#define DIOGTARGETPORT _IOR('p', 0xAB, unsigned int) /* get the target m/c's port */
1500 +#define DIOSSOURCEPORT _IOW('p', 0xAC, unsigned int) /* set the source m/c's port */
1501 +#define DIOGSOURCEPORT _IOR('p', 0xAD, unsigned int) /* get the source m/c's port */
1502 +#define DIOSETHADDR _IOW('p', 0xAE, unsigned int) /* set ethernet address */
1503 +#define DIOGETHADDR _IOR('p', 0xAF, unsigned int) /* get ethernet address */
1504 +#define DIOGDUMPOKAY _IOR('p', 0xB0, unsigned int) /* check if dump is configured */
1505 +#define DIOSDUMPTAKE _IOW('p', 0xB1, unsigned int) /* Take a manual dump */
1508 + * Structure: __dump_header
1509 + * Function: This is the header dumped at the top of every valid crash
1512 +struct __dump_header {
1513 + /* the dump magic number -- unique to verify dump is valid */
1514 + u64 dh_magic_number;
1516 + /* the version number of this dump */
1519 + /* the size of this header (in case we can't read it) */
1520 + u32 dh_header_size;
1522 + /* the level of this dump (just a header?) */
1523 + u32 dh_dump_level;
1526 + * We assume dump_page_size to be 4K in every case.
1527 + * Store here the configurable system page size (4K, 8K, 16K, etc.)
1531 + /* the size of all physical memory */
1532 + u64 dh_memory_size;
1534 + /* the start of physical memory */
1535 + u64 dh_memory_start;
1537 + /* the end of physical memory */
1538 + u64 dh_memory_end;
1540 + /* the number of hardware/physical pages in this dump specifically */
1541 + u32 dh_num_dump_pages;
1543 + /* the panic string, if available */
1544 + char dh_panic_string[DUMP_PANIC_LEN];
1546 + /* timeval depends on architecture, two long values */
1550 + } dh_time; /* the time of the system crash */
1552 + /* the NEW utsname (uname) information -- in character form */
1553 + /* we do this so we don't have to include utsname.h */
1554 + /* plus it helps us be more architecture independent */
1555 + /* now maybe one day soon they'll make the [65] a #define! */
1556 + char dh_utsname_sysname[65];
1557 + char dh_utsname_nodename[65];
1558 + char dh_utsname_release[65];
1559 + char dh_utsname_version[65];
1560 + char dh_utsname_machine[65];
1561 + char dh_utsname_domainname[65];
1563 + /* the address of current task (OLD = void *, NEW = u64) */
1564 + u64 dh_current_task;
1566 + /* what type of compression we're using in this dump (if any) */
1567 + u32 dh_dump_compress;
1569 + /* any additional flags */
1570 + u32 dh_dump_flags;
1572 + /* the dump device identifier */
1573 + u32 dh_dump_device;
1574 +} __attribute__((packed));
1577 + * Structure: __dump_page
1578 + * Function: To act as the header associated to each physical page of
1579 + * memory saved in the system crash dump. This allows for
1580 + * easy reassembly of each crash dump page. The address bits
1581 + * are split to make things easier for 64-bit/32-bit system
1584 + * dp_byte_offset and dp_page_index are landmarks that are helpful when
1585 + * looking at a hex dump of /dev/vmdump.
1587 +struct __dump_page {
1588 + /* the address of this dump page */
1591 + /* the size of this dump page */
1594 + /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
1596 +} __attribute__((packed));
1599 + * Structure: __lkcdinfo
1600 + * Function: This structure contains information needed for the lkcdutils
1601 + * package (particularly lcrash) to determine what information is
1602 + * associated to this kernel, specifically.
1604 +struct __lkcdinfo {
1608 + int linux_release;
1619 + * Structure: __dump_compress
1620 + * Function: This is what an individual compression mechanism can use
1621 + * to plug in their own compression techniques. It's always
1622 + * best to build these as individual modules so that people
1623 + * can put in whatever they want.
1625 +struct __dump_compress {
1626 + /* the list_head structure for list storage */
1627 + struct list_head list;
1629 + /* the type of compression to use (DUMP_COMPRESS_XXX) */
1630 + int compress_type;
1631 + const char *compress_name;
1633 + /* the compression function to call */
1634 + u32 (*compress_func)(const u8 *, u32, u8 *, u32, unsigned long);
1637 +/* functions for dump compression registration */
1638 +extern void dump_register_compression(struct __dump_compress *);
1639 +extern void dump_unregister_compression(int);
1642 + * Structure dump_mbank[]:
1644 + * For CONFIG_DISCONTIGMEM systems this array specifies the
1645 + * memory banks/chunks that need to be dumped after a panic.
1647 + * For classic systems it specifies a single set of pages from
1650 +struct __dump_mbank {
1658 +#define DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY 1
1659 +#define DUMP_MBANK_TYPE_OTHER 2
1661 +#define MAXCHUNKS 256
1662 +extern int dump_mbanks;
1663 +extern struct __dump_mbank dump_mbank[MAXCHUNKS];
1665 +/* notification event codes */
1666 +#define DUMP_BEGIN 0x0001 /* dump beginning */
1667 +#define DUMP_END 0x0002 /* dump ending */
1669 +/* Scheduler soft spin control.
1671 + * 0 - no dump in progress
1672 + * 1 - cpu0 is dumping, ...
1674 +extern unsigned long dump_oncpu;
1675 +extern void dump_execute(const char *, const struct pt_regs *);
1678 + * Notifier list for kernel code which wants to be called
1681 +extern struct notifier_block *dump_notifier_list;
1682 +static inline int register_dump_notifier(struct notifier_block *nb)
1684 + return notifier_chain_register(&dump_notifier_list, nb);
1686 +static inline int unregister_dump_notifier(struct notifier_block * nb)
1688 + return notifier_chain_unregister(&dump_notifier_list, nb);
1691 +extern void (*dump_function_ptr)(const char *, const struct pt_regs *);
1692 +static inline void dump(char * str, struct pt_regs * regs)
1694 + if (dump_function_ptr)
1695 + dump_function_ptr(str, regs);
1699 + * Common Arch Specific Functions should be declared here.
1700 + * This allows the C compiler to detect discrepancies.
1702 +extern void __dump_open(void);
1703 +extern void __dump_cleanup(void);
1704 +extern void __dump_clean_irq_state(void);
1705 +extern void __dump_init(u64);
1706 +extern void __dump_save_regs(struct pt_regs *, const struct pt_regs *);
1707 +extern void __dump_save_context(int cpu, const struct pt_regs *, struct task_struct *tsk);
1708 +extern int __dump_configure_header(const struct pt_regs *);
1709 +extern int __dump_irq_enable(void);
1710 +extern void __dump_irq_restore(void);
1711 +extern int __dump_page_valid(unsigned long index);
1713 +extern void __dump_save_other_cpus(void);
1715 +#define __dump_save_other_cpus()
1718 +extern int manual_handle_crashdump(void);
1720 +/* to track all used (compound + zero order) pages */
1721 +#define PageInuse(p) (PageCompound(p) || page_count(p))
1723 +#endif /* __KERNEL__ */
1725 +#else /* !CONFIG_CRASH_DUMP */
1727 +/* If not configured then make code disappear! */
1728 +#define register_dump_watchdog(x) do { } while(0)
1729 +#define unregister_dump_watchdog(x) do { } while(0)
1730 +#define register_dump_notifier(x) do { } while(0)
1731 +#define unregister_dump_notifier(x) do { } while(0)
1732 +#define dump_in_progress() 0
1733 +#define dump(x, y) do { } while(0)
1735 +#endif /* !CONFIG_CRASH_DUMP */
1737 +#endif /* _DUMP_H */
1738 Index: linux-2.6.10/include/linux/dumpdev.h
1739 ===================================================================
1740 --- linux-2.6.10.orig/include/linux/dumpdev.h 2005-04-05 19:01:49.158500672 +0800
1741 +++ linux-2.6.10/include/linux/dumpdev.h 2005-04-05 16:47:53.890213160 +0800
1744 + * Generic dump device interfaces for flexible system dump
1745 + * (Enables variation of dump target types e.g disk, network, memory)
1747 + * These interfaces have evolved based on discussions on lkcd-devel.
1748 + * Eventually the intent is to support primary and secondary or
1749 + * alternate targets registered at the same time, with scope for
1750 + * situation based failover or multiple dump devices used for parallel
1753 + * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
1755 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
1756 + * Copyright (C) 2002 International Business Machines Corp.
1758 + * This code is released under version 2 of the GNU GPL.
1761 +#ifndef _LINUX_DUMPDEV_H
1762 +#define _LINUX_DUMPDEV_H
1764 +#include <linux/kernel.h>
1765 +#include <linux/wait.h>
1766 +#include <linux/netpoll.h>
1767 +#include <linux/bio.h>
1769 +/* Determined by the dump target (device) type */
1773 +struct dump_dev_ops {
1774 + int (*open)(struct dump_dev *, unsigned long); /* configure */
1775 + int (*release)(struct dump_dev *); /* unconfigure */
1776 + int (*silence)(struct dump_dev *); /* when dump starts */
1777 + int (*resume)(struct dump_dev *); /* when dump is over */
1778 + int (*seek)(struct dump_dev *, loff_t);
1779 + /* trigger a write (async in nature typically) */
1780 + int (*write)(struct dump_dev *, void *, unsigned long);
1781 + /* not usually used during dump, but option available */
1782 + int (*read)(struct dump_dev *, void *, unsigned long);
1783 + /* use to poll for completion */
1784 + int (*ready)(struct dump_dev *, void *);
1785 + int (*ioctl)(struct dump_dev *, unsigned int, unsigned long);
1789 + char type_name[32]; /* block, net-poll etc */
1790 + unsigned long device_id; /* interpreted differently for various types */
1791 + struct dump_dev_ops *ops;
1792 + struct list_head list;
1793 + loff_t curr_offset;
1794 + struct netpoll np;
1798 + * dump_dev type variations:
1802 +struct dump_blockdev {
1803 + struct dump_dev ddev;
1805 + struct block_device *bdev;
1807 + loff_t start_offset;
1812 +static inline struct dump_blockdev *DUMP_BDEV(struct dump_dev *dev)
1814 + return container_of(dev, struct dump_blockdev, ddev);
1818 +/* mem - for internal use by soft-boot based dumper */
1819 +struct dump_memdev {
1820 + struct dump_dev ddev;
1821 + unsigned long indirect_map_root;
1822 + unsigned long nr_free;
1823 + struct page *curr_page;
1824 + unsigned long *curr_map;
1825 + unsigned long curr_map_offset;
1826 + unsigned long last_offset;
1827 + unsigned long last_used_offset;
1828 + unsigned long last_bs_offset;
1831 +static inline struct dump_memdev *DUMP_MDEV(struct dump_dev *dev)
1833 + return container_of(dev, struct dump_memdev, ddev);
1836 +/* Todo/future - meant for raw dedicated interfaces e.g. mini-ide driver */
1838 + struct dump_dev ddev;
1840 + int (*reset)(struct dump_rdev *, unsigned int,
1842 + /* ... to do ... */
1845 +/* just to get the size right when saving config across a soft-reboot */
1846 +struct dump_anydev {
1848 + struct dump_blockdev bddev;
1849 + /* .. add other types here .. */
1855 +/* Dump device / target operation wrappers */
1856 +/* These assume that dump_dev is initialized to dump_config.dumper->dev */
1858 +extern struct dump_dev *dump_dev;
1860 +static inline int dump_dev_open(unsigned long arg)
1862 + return dump_dev->ops->open(dump_dev, arg);
1865 +static inline int dump_dev_release(void)
1867 + return dump_dev->ops->release(dump_dev);
1870 +static inline int dump_dev_silence(void)
1872 + return dump_dev->ops->silence(dump_dev);
1875 +static inline int dump_dev_resume(void)
1877 + return dump_dev->ops->resume(dump_dev);
1880 +static inline int dump_dev_seek(loff_t offset)
1882 + return dump_dev->ops->seek(dump_dev, offset);
1885 +static inline int dump_dev_write(void *buf, unsigned long len)
1887 + return dump_dev->ops->write(dump_dev, buf, len);
1890 +static inline int dump_dev_ready(void *buf)
1892 + return dump_dev->ops->ready(dump_dev, buf);
1895 +static inline int dump_dev_ioctl(unsigned int cmd, unsigned long arg)
1897 + if (!dump_dev || !dump_dev->ops->ioctl)
1899 + return dump_dev->ops->ioctl(dump_dev, cmd, arg);
1902 +extern int dump_register_device(struct dump_dev *);
1903 +extern void dump_unregister_device(struct dump_dev *);
1905 +#endif /* _LINUX_DUMPDEV_H */
1906 Index: linux-2.6.10/include/linux/dump_netdev.h
1907 ===================================================================
1908 --- linux-2.6.10.orig/include/linux/dump_netdev.h 2005-04-05 19:01:49.158500672 +0800
1909 +++ linux-2.6.10/include/linux/dump_netdev.h 2005-04-05 16:47:53.889213312 +0800
1912 + * linux/drivers/net/netconsole.h
1914 + * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
1916 + * This file contains the implementation of an IRQ-safe, crash-safe
1917 + * kernel console implementation that outputs kernel messages to the
1920 + * Modification history:
1922 + * 2001-09-17 started by Ingo Molnar.
1925 +/****************************************************************
1926 + * This program is free software; you can redistribute it and/or modify
1927 + * it under the terms of the GNU General Public License as published by
1928 + * the Free Software Foundation; either version 2, or (at your option)
1929 + * any later version.
1931 + * This program is distributed in the hope that it will be useful,
1932 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1933 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1934 + * GNU General Public License for more details.
1936 + * You should have received a copy of the GNU General Public License
1937 + * along with this program; if not, write to the Free Software
1938 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
1940 + ****************************************************************/
1942 +#define NETCONSOLE_VERSION 0x03
1944 +enum netdump_commands {
1946 + COMM_SEND_MEM = 1,
1950 + COMM_GET_NR_PAGES = 5,
1951 + COMM_GET_PAGE_SIZE = 6,
1952 + COMM_START_NETDUMP_ACK = 7,
1953 + COMM_GET_REGS = 8,
1954 + COMM_GET_MAGIC = 9,
1955 + COMM_START_WRITE_NETDUMP_ACK = 10,
1958 +typedef struct netdump_req_s {
1966 +enum netdump_replies {
1971 + REPLY_RESERVED = 4,
1973 + REPLY_NR_PAGES = 6,
1974 + REPLY_PAGE_SIZE = 7,
1975 + REPLY_START_NETDUMP = 8,
1976 + REPLY_END_NETDUMP = 9,
1979 + REPLY_START_WRITE_NETDUMP = 12,
1982 +typedef struct netdump_reply_s {
1988 +#define HEADER_LEN (1 + sizeof(reply_t))
1991 Index: linux-2.6.10/include/asm-parisc/kerntypes.h
1992 ===================================================================
1993 --- linux-2.6.10.orig/include/asm-parisc/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
1994 +++ linux-2.6.10/include/asm-parisc/kerntypes.h 2005-04-05 16:47:53.870216200 +0800
1997 + * asm-parisc/kerntypes.h
1999 + * Arch-dependent header file that includes headers for all arch-specific
2000 + * types of interest.
2001 + * The kernel type information is used by the lcrash utility when
2002 + * analyzing system crash dumps or the live system. Using the type
2003 + * information for the running system, rather than kernel header files,
2004 + * makes for a more flexible and robust analysis tool.
2006 + * This source code is released under the GNU GPL.
2009 +/* PA-RISC-specific header files */
2010 +#ifndef _PARISC_KERNTYPES_H
2011 +#define _PARISC_KERNTYPES_H
2013 +/* Use the default */
2014 +#include <asm-generic/kerntypes.h>
2016 +#endif /* _PARISC_KERNTYPES_H */
2017 Index: linux-2.6.10/include/asm-h8300/kerntypes.h
2018 ===================================================================
2019 --- linux-2.6.10.orig/include/asm-h8300/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2020 +++ linux-2.6.10/include/asm-h8300/kerntypes.h 2005-04-05 16:47:53.880214680 +0800
2023 + * asm-h8300/kerntypes.h
2025 + * Arch-dependent header file that includes headers for all arch-specific
2026 + * types of interest.
2027 + * The kernel type information is used by the lcrash utility when
2028 + * analyzing system crash dumps or the live system. Using the type
2029 + * information for the running system, rather than kernel header files,
2030 + * makes for a more flexible and robust analysis tool.
2032 + * This source code is released under the GNU GPL.
2035 +/* H8300-specific header files */
2036 +#ifndef _H8300_KERNTYPES_H
2037 +#define _H8300_KERNTYPES_H
2039 +/* Use the default */
2040 +#include <asm-generic/kerntypes.h>
2042 +#endif /* _H8300_KERNTYPES_H */
2043 Index: linux-2.6.10/include/asm-ppc/kerntypes.h
2044 ===================================================================
2045 --- linux-2.6.10.orig/include/asm-ppc/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2046 +++ linux-2.6.10/include/asm-ppc/kerntypes.h 2005-04-05 16:47:53.882214376 +0800
2049 + * asm-ppc/kerntypes.h
2051 + * Arch-dependent header file that includes headers for all arch-specific
2052 + * types of interest.
2053 + * The kernel type information is used by the lcrash utility when
2054 + * analyzing system crash dumps or the live system. Using the type
2055 + * information for the running system, rather than kernel header files,
2056 + * makes for a more flexible and robust analysis tool.
2058 + * This source code is released under the GNU GPL.
2061 +/* PowerPC-specific header files */
2062 +#ifndef _PPC_KERNTYPES_H
2063 +#define _PPC_KERNTYPES_H
2065 +/* Use the default */
2066 +#include <asm-generic/kerntypes.h>
2068 +#endif /* _PPC_KERNTYPES_H */
2069 Index: linux-2.6.10/include/asm-alpha/kerntypes.h
2070 ===================================================================
2071 --- linux-2.6.10.orig/include/asm-alpha/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2072 +++ linux-2.6.10/include/asm-alpha/kerntypes.h 2005-04-05 16:47:53.876215288 +0800
2075 + * asm-alpha/kerntypes.h
2077 + * Arch-dependent header file that includes headers for all arch-specific
2078 + * types of interest.
2079 + * The kernel type information is used by the lcrash utility when
2080 + * analyzing system crash dumps or the live system. Using the type
2081 + * information for the running system, rather than kernel header files,
2082 + * makes for a more flexible and robust analysis tool.
2084 + * This source code is released under the GNU GPL.
2087 +/* Alpha-specific header files */
2088 +#ifndef _ALPHA_KERNTYPES_H
2089 +#define _ALPHA_KERNTYPES_H
2091 +/* Use the default */
2092 +#include <asm-generic/kerntypes.h>
2094 +#endif /* _ALPHA_KERNTYPES_H */
2095 Index: linux-2.6.10/include/asm-arm26/kerntypes.h
2096 ===================================================================
2097 --- linux-2.6.10.orig/include/asm-arm26/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2098 +++ linux-2.6.10/include/asm-arm26/kerntypes.h 2005-04-05 16:47:53.865216960 +0800
2101 + * asm-arm26/kerntypes.h
2103 + * Arch-dependent header file that includes headers for all arch-specific
2104 + * types of interest.
2105 + * The kernel type information is used by the lcrash utility when
2106 + * analyzing system crash dumps or the live system. Using the type
2107 + * information for the running system, rather than kernel header files,
2108 + * makes for a more flexible and robust analysis tool.
2110 + * This source code is released under the GNU GPL.
2113 +/* ARM26-specific header files */
2114 +#ifndef _ARM26_KERNTYPES_H
2115 +#define _ARM26_KERNTYPES_H
2117 +/* Use the default */
2118 +#include <asm-generic/kerntypes.h>
2120 +#endif /* _ARM26_KERNTYPES_H */
2121 Index: linux-2.6.10/include/asm-sh/kerntypes.h
2122 ===================================================================
2123 --- linux-2.6.10.orig/include/asm-sh/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2124 +++ linux-2.6.10/include/asm-sh/kerntypes.h 2005-04-05 16:47:53.877215136 +0800
2127 + * asm-sh/kerntypes.h
2129 + * Arch-dependent header file that includes headers for all arch-specific
2130 + * types of interest.
2131 + * The kernel type information is used by the lcrash utility when
2132 + * analyzing system crash dumps or the live system. Using the type
2133 + * information for the running system, rather than kernel header files,
2134 + * makes for a more flexible and robust analysis tool.
2136 + * This source code is released under the GNU GPL.
2139 +/* Super-H-specific header files */
2140 +#ifndef _SH_KERNTYPES_H
2141 +#define _SH_KERNTYPES_H
2143 +/* Use the default */
2144 +#include <asm-generic/kerntypes.h>
2146 +#endif /* _SH_KERNTYPES_H */
2147 Index: linux-2.6.10/include/asm-ia64/nmi.h
2148 ===================================================================
2149 --- linux-2.6.10.orig/include/asm-ia64/nmi.h 2005-04-05 19:01:49.158500672 +0800
2150 +++ linux-2.6.10/include/asm-ia64/nmi.h 2005-04-05 16:47:53.883214224 +0800
2153 + * linux/include/asm-ia64/nmi.h
2158 +#include <linux/pm.h>
2162 +typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
2165 + * set_nmi_callback
2167 + * Set a handler for an NMI. Only one handler may be
2168 + * set. Return 1 if the NMI was handled.
2170 +void set_nmi_callback(nmi_callback_t callback);
2173 + * unset_nmi_callback
2175 + * Remove the handler previously set.
2177 +void unset_nmi_callback(void);
2179 +#endif /* ASM_NMI_H */
2180 Index: linux-2.6.10/include/asm-ia64/dump.h
2181 ===================================================================
2182 --- linux-2.6.10.orig/include/asm-ia64/dump.h 2005-04-05 19:01:49.158500672 +0800
2183 +++ linux-2.6.10/include/asm-ia64/dump.h 2005-04-05 16:47:53.884214072 +0800
2186 + * Kernel header file for Linux crash dumps.
2188 + * Created by: Matt Robinson (yakker@sgi.com)
2190 + * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
2192 + * This code is released under version 2 of the GNU GPL.
2195 +/* This header file holds the architecture specific crash dump header */
2196 +#ifndef _ASM_DUMP_H
2197 +#define _ASM_DUMP_H
2200 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
2201 +#define DUMP_ASM_VERSION_NUMBER 0x4 /* version number */
2204 +#include <linux/efi.h>
2205 +#include <asm/pal.h>
2206 +#include <asm/ptrace.h>
2209 +extern cpumask_t irq_affinity[];
2210 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
2211 +extern void dump_send_ipi(void);
2212 +#else /* !CONFIG_SMP */
2213 +#define dump_send_ipi() do { } while(0)
2216 +#else /* !__KERNEL__ */
2217 +/* necessary header files */
2218 +#include <asm/ptrace.h> /* for pt_regs */
2219 +#include <linux/threads.h>
2220 +#endif /* __KERNEL__ */
2223 + * mkswap.c calls getpagesize() to get the system page size,
2224 + * which is not necessarily the same as the hardware page size.
2226 + * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB.
2228 + * The physical memory is laid out in the hardware/minimal pages.
2229 + * This is the size we need to use for dumping physical pages.
2231 + * Note the hardware/minimal page size being used in:
2232 + * arch/ia64/kernel/efi.c`efi_memmap_walk():
2233 + * curr.end = curr.start + (md->num_pages << 12);
2235 + * Since the system page size could change between the kernel we boot
2236 + * on and the kernel that caused the core dump, we may want to have something
2237 + * more constant like the maximum system page size (See include/asm-ia64/page.h).
2239 +/* IA64 manages the stack in a different manner as compared to other architectures.
2240 + * task_struct lies at the bottom of stack.
2242 +#undef STACK_START_POSITION
2243 +#define STACK_START_POSITION(tsk) (tsk)
2244 +#define DUMP_MIN_PAGE_SHIFT 12
2245 +#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT)
2246 +#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1))
2247 +#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK)
2249 +#define DUMP_MAX_PAGE_SHIFT 16
2250 +#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT)
2251 +#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1))
2252 +#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK)
2254 +#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT
2256 +extern int _end,_start;
2259 + * Structure: dump_header_asm_t
2260 + * Function: This is the header for architecture-specific stuff. It
2261 + * follows right after the dump header.
2263 +/*typedef struct _dump_header_asm {*/
2265 +typedef struct __dump_header_asm {
2267 + /* the dump magic number -- unique to verify dump is valid */
2268 + uint64_t dha_magic_number;
2270 + /* the version number of this dump */
2271 + uint32_t dha_version;
2273 + /* the size of this header (in case we can't read it) */
2274 + uint32_t dha_header_size;
2276 + /* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */
2277 + uint64_t dha_pt_regs;
2279 + /* the dump registers */
2280 + struct pt_regs dha_regs;
2282 + /* the rnat register saved after flushrs */
2283 + uint64_t dha_rnat;
2285 + /* the pfs register saved after flushrs */
2288 + /* the bspstore register saved after flushrs */
2289 + uint64_t dha_bspstore;
2291 + /* smp specific */
2292 + uint32_t dha_smp_num_cpus;
2293 + uint32_t dha_dumping_cpu;
2294 + struct pt_regs dha_smp_regs[NR_CPUS];
2295 + uint64_t dha_smp_current_task[NR_CPUS];
2296 + uint64_t dha_stack[NR_CPUS];
2297 + uint64_t dha_stack_ptr[NR_CPUS];
2299 +} __attribute__((packed)) dump_header_asm_t;
2302 +extern struct __dump_header_asm dump_header_asm;
2305 +static inline void get_current_regs(struct pt_regs *regs)
2308 + * REMIND: Looking at functions/Macros like:
2309 + * DO_SAVE_SWITCH_STACK
2310 + * ia64_switch_to()
2311 + * ia64_save_extra()
2313 + * to implement this new feature that Matt seems to have added
2314 + * to panic.c; seems all platforms are now expected to provide
2315 + * this function to dump the current registers into the pt_regs
2318 + volatile unsigned long rsc_value;/*for storing the rsc value*/
2319 + volatile unsigned long ic_value;
2321 + __asm__ __volatile__("mov %0=b6;;":"=r"(regs->b6));
2322 + __asm__ __volatile__("mov %0=b7;;":"=r"(regs->b7));
2324 + __asm__ __volatile__("mov %0=ar.csd;;":"=r"(regs->ar_csd));
2325 + __asm__ __volatile__("mov %0=ar.ssd;;":"=r"(regs->ar_ssd));
2326 + __asm__ __volatile__("mov %0=psr;;":"=r"(ic_value));
2327 + if(ic_value & 0x1000)/*Within an interrupt*/
2329 + __asm__ __volatile__("mov %0=cr.ipsr;;":"=r"(regs->cr_ipsr));
2330 + __asm__ __volatile__("mov %0=cr.iip;;":"=r"(regs->cr_iip));
2331 + __asm__ __volatile__("mov %0=cr.ifs;;":"=r"(regs->cr_ifs));
2335 + regs->cr_ipsr=regs->cr_iip=regs->cr_ifs=(unsigned long)-1;
2337 + __asm__ __volatile__("mov %0=ar.unat;;":"=r"(regs->ar_unat));
2338 + __asm__ __volatile__("mov %0=ar.pfs;;":"=r"(regs->ar_pfs));
2339 + __asm__ __volatile__("mov %0=ar.rsc;;":"=r"(rsc_value));
2340 + regs->ar_rsc = rsc_value;
2341 + /*loadrs is from 16th bit to 29th bit of rsc*/
2342 + regs->loadrs = rsc_value >> 16 & (unsigned long)0x3fff;
2343 + /*setting the rsc.mode value to 0 (rsc.mode is the last two bits of rsc)*/
2344 + __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value & (unsigned long)(~3)));
2345 + __asm__ __volatile__("mov %0=ar.rnat;;":"=r"(regs->ar_rnat));
2346 + __asm__ __volatile__("mov %0=ar.bspstore;;":"=r"(regs->ar_bspstore));
2347 + /*copying the original value back*/
2348 + __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value));
2349 + __asm__ __volatile__("mov %0=pr;;":"=r"(regs->pr));
2350 + __asm__ __volatile__("mov %0=ar.fpsr;;":"=r"(regs->ar_fpsr));
2351 + __asm__ __volatile__("mov %0=ar.ccv;;":"=r"(regs->ar_ccv));
2353 + __asm__ __volatile__("mov %0=r2;;":"=r"(regs->r2));
2354 + __asm__ __volatile__("mov %0=r3;;":"=r"(regs->r3));
2355 + __asm__ __volatile__("mov %0=r8;;":"=r"(regs->r8));
2356 + __asm__ __volatile__("mov %0=r9;;":"=r"(regs->r9));
2357 + __asm__ __volatile__("mov %0=r10;;":"=r"(regs->r10));
2358 + __asm__ __volatile__("mov %0=r11;;":"=r"(regs->r11));
2359 + __asm__ __volatile__("mov %0=r12;;":"=r"(regs->r12));
2360 + __asm__ __volatile__("mov %0=r13;;":"=r"(regs->r13));
2361 + __asm__ __volatile__("mov %0=r14;;":"=r"(regs->r14));
2362 + __asm__ __volatile__("mov %0=r15;;":"=r"(regs->r15));
2363 + __asm__ __volatile__("mov %0=r16;;":"=r"(regs->r16));
2364 + __asm__ __volatile__("mov %0=r17;;":"=r"(regs->r17));
2365 + __asm__ __volatile__("mov %0=r18;;":"=r"(regs->r18));
2366 + __asm__ __volatile__("mov %0=r19;;":"=r"(regs->r19));
2367 + __asm__ __volatile__("mov %0=r20;;":"=r"(regs->r20));
2368 + __asm__ __volatile__("mov %0=r21;;":"=r"(regs->r21));
2369 + __asm__ __volatile__("mov %0=r22;;":"=r"(regs->r22));
2370 + __asm__ __volatile__("mov %0=r23;;":"=r"(regs->r23));
2371 + __asm__ __volatile__("mov %0=r24;;":"=r"(regs->r24));
2372 + __asm__ __volatile__("mov %0=r25;;":"=r"(regs->r25));
2373 + __asm__ __volatile__("mov %0=r26;;":"=r"(regs->r26));
2374 + __asm__ __volatile__("mov %0=r27;;":"=r"(regs->r27));
2375 + __asm__ __volatile__("mov %0=r28;;":"=r"(regs->r28));
2376 + __asm__ __volatile__("mov %0=r29;;":"=r"(regs->r29));
2377 + __asm__ __volatile__("mov %0=r30;;":"=r"(regs->r30));
2378 + __asm__ __volatile__("mov %0=r31;;":"=r"(regs->r31));
2381 +/* Perhaps added to Common Arch Specific Functions and moved to dump.h some day */
2382 +extern void * __dump_memcpy(void *, const void *, size_t);
2383 +#endif /* __KERNEL__ */
2385 +#endif /* _ASM_DUMP_H */
2386 Index: linux-2.6.10/include/asm-ia64/kerntypes.h
2387 ===================================================================
2388 --- linux-2.6.10.orig/include/asm-ia64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2389 +++ linux-2.6.10/include/asm-ia64/kerntypes.h 2005-04-05 16:47:53.884214072 +0800
2392 + * asm-ia64/kerntypes.h
2394 + * Arch-dependent header file that includes headers for all arch-specific
2395 + * types of interest.
2396 + * The kernel type information is used by the lcrash utility when
2397 + * analyzing system crash dumps or the live system. Using the type
2398 + * information for the running system, rather than kernel header files,
2399 + * makes for a more flexible and robust analysis tool.
2401 + * This source code is released under the GNU GPL.
2404 +/* IA64-specific header files */
2405 +#ifndef _IA64_KERNTYPES_H
2406 +#define _IA64_KERNTYPES_H
2408 +/* Use the default */
2409 +#include <asm-generic/kerntypes.h>
2411 +#endif /* _IA64_KERNTYPES_H */
2412 Index: linux-2.6.10/include/asm-ppc64/dump.h
2413 ===================================================================
2414 --- linux-2.6.10.orig/include/asm-ppc64/dump.h 2005-04-05 19:01:49.158500672 +0800
2415 +++ linux-2.6.10/include/asm-ppc64/dump.h 2005-04-05 16:47:53.878214984 +0800
2418 + * Kernel header file for Linux crash dumps.
2420 + * Created by: Todd Inglett <tinglett@vnet.ibm.com>
2422 + * Copyright 2002 - 2004 International Business Machines
2424 + * This code is released under version 2 of the GNU GPL.
2427 +/* This header file holds the architecture specific crash dump header */
2428 +#ifndef _ASM_DUMP_H
2429 +#define _ASM_DUMP_H
2431 +/* necessary header files */
2432 +#include <asm/ptrace.h> /* for pt_regs */
2433 +#include <asm/kmap_types.h>
2434 +#include <linux/threads.h>
2437 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
2438 +#define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */
2441 + * Structure: __dump_header_asm
2442 + * Function: This is the header for architecture-specific stuff. It
2443 + * follows right after the dump header.
2445 +struct __dump_header_asm {
2447 + /* the dump magic number -- unique to verify dump is valid */
2448 + uint64_t dha_magic_number;
2450 + /* the version number of this dump */
2451 + uint32_t dha_version;
2453 + /* the size of this header (in case we can't read it) */
2454 + uint32_t dha_header_size;
2456 + /* the dump registers */
2457 + struct pt_regs dha_regs;
2459 + /* smp specific */
2460 + uint32_t dha_smp_num_cpus;
2461 + int dha_dumping_cpu;
2462 + struct pt_regs dha_smp_regs[NR_CPUS];
2463 + uint64_t dha_smp_current_task[NR_CPUS];
2464 + uint64_t dha_stack[NR_CPUS];
2465 + uint64_t dha_stack_ptr[NR_CPUS];
2466 +} __attribute__((packed));
2469 +static inline void get_current_regs(struct pt_regs *regs)
2471 + unsigned long tmp1, tmp2;
2473 + __asm__ __volatile__ (
2487 + "std 13,104(%2)\n"
2488 + "std 14,112(%2)\n"
2489 + "std 15,120(%2)\n"
2490 + "std 16,128(%2)\n"
2491 + "std 17,136(%2)\n"
2492 + "std 18,144(%2)\n"
2493 + "std 19,152(%2)\n"
2494 + "std 20,160(%2)\n"
2495 + "std 21,168(%2)\n"
2496 + "std 22,176(%2)\n"
2497 + "std 23,184(%2)\n"
2498 + "std 24,192(%2)\n"
2499 + "std 25,200(%2)\n"
2500 + "std 26,208(%2)\n"
2501 + "std 27,216(%2)\n"
2502 + "std 28,224(%2)\n"
2503 + "std 29,232(%2)\n"
2504 + "std 30,240(%2)\n"
2505 + "std 31,248(%2)\n"
2507 + "std %0, 264(%2)\n"
2509 + "std %0, 280(%2)\n"
2511 + "std %0, 288(%2)\n"
2514 + "std %1, 256(%2)\n"
2517 + "std %0, 296(%2)\n"
2518 + : "=&r" (tmp1), "=&r" (tmp2)
2522 +extern struct __dump_header_asm dump_header_asm;
2525 +extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
2527 +#define dump_send_ipi() do { } while(0)
2529 +#endif /* __KERNEL__ */
2531 +#endif /* _ASM_DUMP_H */
2532 Index: linux-2.6.10/include/asm-ppc64/kerntypes.h
2533 ===================================================================
2534 --- linux-2.6.10.orig/include/asm-ppc64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2535 +++ linux-2.6.10/include/asm-ppc64/kerntypes.h 2005-04-05 16:47:53.879214832 +0800
2538 + * asm-ppc64/kerntypes.h
2540 + * Arch-dependent header file that includes headers for all arch-specific
2541 + * types of interest.
2542 + * The kernel type information is used by the lcrash utility when
2543 + * analyzing system crash dumps or the live system. Using the type
2544 + * information for the running system, rather than kernel header files,
2545 + * makes for a more flexible and robust analysis tool.
2547 + * This source code is released under the GNU GPL.
2550 +/* PPC64-specific header files */
2551 +#ifndef _PPC64_KERNTYPES_H
2552 +#define _PPC64_KERNTYPES_H
2554 +/* Use the default */
2555 +#include <asm-generic/kerntypes.h>
2557 +#endif /* _PPC64_KERNTYPES_H */
2558 Index: linux-2.6.10/include/asm-ppc64/kmap_types.h
2559 ===================================================================
2560 --- linux-2.6.10.orig/include/asm-ppc64/kmap_types.h 2004-12-25 05:34:45.000000000 +0800
2561 +++ linux-2.6.10/include/asm-ppc64/kmap_types.h 2005-04-05 16:47:53.878214984 +0800
2572 Index: linux-2.6.10/include/asm-ppc64/smp.h
2573 ===================================================================
2574 --- linux-2.6.10.orig/include/asm-ppc64/smp.h 2004-12-25 05:33:47.000000000 +0800
2575 +++ linux-2.6.10/include/asm-ppc64/smp.h 2005-04-05 16:47:53.877215136 +0800
2577 extern void smp_send_debugger_break(int cpu);
2579 extern void smp_message_recv(int, struct pt_regs *);
2581 +extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
2583 #define smp_processor_id() (get_paca()->paca_index)
2584 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
2585 Index: linux-2.6.10/include/asm-cris/kerntypes.h
2586 ===================================================================
2587 --- linux-2.6.10.orig/include/asm-cris/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2588 +++ linux-2.6.10/include/asm-cris/kerntypes.h 2005-04-05 16:47:53.874215592 +0800
2591 + * asm-cris/kerntypes.h
2593 + * Arch-dependent header file that includes headers for all arch-specific
2594 + * types of interest.
2595 + * The kernel type information is used by the lcrash utility when
2596 + * analyzing system crash dumps or the live system. Using the type
2597 + * information for the running system, rather than kernel header files,
2598 + * makes for a more flexible and robust analysis tool.
2600 + * This source code is released under the GNU GPL.
2603 +/* CRIS-specific header files */
2604 +#ifndef _CRIS_KERNTYPES_H
2605 +#define _CRIS_KERNTYPES_H
2607 +/* Use the default */
2608 +#include <asm-generic/kerntypes.h>
2610 +#endif /* _CRIS_KERNTYPES_H */
2611 Index: linux-2.6.10/include/asm-m68knommu/kerntypes.h
2612 ===================================================================
2613 --- linux-2.6.10.orig/include/asm-m68knommu/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2614 +++ linux-2.6.10/include/asm-m68knommu/kerntypes.h 2005-04-05 16:47:53.870216200 +0800
2617 + * asm-m68knommu/kerntypes.h
2619 + * Arch-dependent header file that includes headers for all arch-specific
2620 + * types of interest.
2621 + * The kernel type information is used by the lcrash utility when
2622 + * analyzing system crash dumps or the live system. Using the type
2623 + * information for the running system, rather than kernel header files,
2624 + * makes for a more flexible and robust analysis tool.
2626 + * This source code is released under the GNU GPL.
2629 +/* m68k/no-MMU-specific header files */
2630 +#ifndef _M68KNOMMU_KERNTYPES_H
2631 +#define _M68KNOMMU_KERNTYPES_H
2633 +/* Use the default */
2634 +#include <asm-generic/kerntypes.h>
2636 +#endif /* _M68KNOMMU_KERNTYPES_H */
2637 Index: linux-2.6.10/include/asm-v850/kerntypes.h
2638 ===================================================================
2639 --- linux-2.6.10.orig/include/asm-v850/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2640 +++ linux-2.6.10/include/asm-v850/kerntypes.h 2005-04-05 16:47:53.888213464 +0800
2643 + * asm-v850/kerntypes.h
2645 + * Arch-dependent header file that includes headers for all arch-specific
2646 + * types of interest.
2647 + * The kernel type information is used by the lcrash utility when
2648 + * analyzing system crash dumps or the live system. Using the type
2649 + * information for the running system, rather than kernel header files,
2650 + * makes for a more flexible and robust analysis tool.
2652 + * This source code is released under the GNU GPL.
2655 +/* V850-specific header files */
2656 +#ifndef _V850_KERNTYPES_H
2657 +#define _V850_KERNTYPES_H
2659 +/* Use the default */
2660 +#include <asm-generic/kerntypes.h>
2662 +#endif /* _V850_KERNTYPES_H */
2663 Index: linux-2.6.10/include/asm-x86_64/dump.h
2664 ===================================================================
2665 --- linux-2.6.10.orig/include/asm-x86_64/dump.h 2005-04-05 19:01:49.158500672 +0800
2666 +++ linux-2.6.10/include/asm-x86_64/dump.h 2005-04-05 16:47:53.868216504 +0800
2669 + * Kernel header file for Linux crash dumps.
2671 + * Created by: Matt Robinson (yakker@sgi.com)
2673 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
2674 + * x86_64 lkcd port Sachin Sant ( sachinp@in.ibm.com)
2675 + * This code is released under version 2 of the GNU GPL.
2678 +/* This header file holds the architecture specific crash dump header */
2679 +#ifndef _ASM_DUMP_H
2680 +#define _ASM_DUMP_H
2682 +/* necessary header files */
2683 +#include <asm/ptrace.h> /* for pt_regs */
2684 +#include <linux/threads.h>
2687 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
2688 +#define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */
2692 + * Structure: dump_header_asm_t
2693 + * Function: This is the header for architecture-specific stuff. It
2694 + * follows right after the dump header.
2696 +struct __dump_header_asm {
2698 + /* the dump magic number -- unique to verify dump is valid */
2699 + uint64_t dha_magic_number;
2701 + /* the version number of this dump */
2702 + uint32_t dha_version;
2704 + /* the size of this header (in case we can't read it) */
2705 + uint32_t dha_header_size;
2707 + /* the dump registers */
2708 + struct pt_regs dha_regs;
2710 + /* smp specific */
2711 + uint32_t dha_smp_num_cpus;
2712 + int dha_dumping_cpu;
2713 + struct pt_regs dha_smp_regs[NR_CPUS];
2714 + uint64_t dha_smp_current_task[NR_CPUS];
2715 + uint64_t dha_stack[NR_CPUS];
2716 + uint64_t dha_stack_ptr[NR_CPUS];
2717 +} __attribute__((packed));
2720 +static inline void get_current_regs(struct pt_regs *regs)
2723 + __asm__ __volatile__("movq %%r15,%0" : "=m"(regs->r15));
2724 + __asm__ __volatile__("movq %%r14,%0" : "=m"(regs->r14));
2725 + __asm__ __volatile__("movq %%r13,%0" : "=m"(regs->r13));
2726 + __asm__ __volatile__("movq %%r12,%0" : "=m"(regs->r12));
2727 + __asm__ __volatile__("movq %%r11,%0" : "=m"(regs->r11));
2728 + __asm__ __volatile__("movq %%r10,%0" : "=m"(regs->r10));
2729 + __asm__ __volatile__("movq %%r9,%0" : "=m"(regs->r9));
2730 + __asm__ __volatile__("movq %%r8,%0" : "=m"(regs->r8));
2731 + __asm__ __volatile__("movq %%rbx,%0" : "=m"(regs->rbx));
2732 + __asm__ __volatile__("movq %%rcx,%0" : "=m"(regs->rcx));
2733 + __asm__ __volatile__("movq %%rdx,%0" : "=m"(regs->rdx));
2734 + __asm__ __volatile__("movq %%rsi,%0" : "=m"(regs->rsi));
2735 + __asm__ __volatile__("movq %%rdi,%0" : "=m"(regs->rdi));
2736 + __asm__ __volatile__("movq %%rbp,%0" : "=m"(regs->rbp));
2737 + __asm__ __volatile__("movq %%rax,%0" : "=m"(regs->rax));
2738 + __asm__ __volatile__("movq %%rsp,%0" : "=m"(regs->rsp));
2739 + __asm__ __volatile__("movl %%ss, %0" :"=r"(seg));
2740 + regs->ss = (unsigned long)seg;
2741 + __asm__ __volatile__("movl %%cs, %0" :"=r"(seg));
2742 + regs->cs = (unsigned long)seg;
2743 + __asm__ __volatile__("pushfq; popq %0" :"=m"(regs->eflags));
2744 + regs->rip = (unsigned long)current_text_addr();
2748 +extern volatile int dump_in_progress;
2749 +extern struct __dump_header_asm dump_header_asm;
2754 +extern void dump_send_ipi(void);
2756 +#define dump_send_ipi() do { } while(0)
2758 +#endif /* __KERNEL__ */
2760 +#endif /* _ASM_DUMP_H */
2761 Index: linux-2.6.10/include/asm-x86_64/kerntypes.h
2762 ===================================================================
2763 --- linux-2.6.10.orig/include/asm-x86_64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2764 +++ linux-2.6.10/include/asm-x86_64/kerntypes.h 2005-04-05 16:47:53.869216352 +0800
2767 + * asm-x86_64/kerntypes.h
2769 + * Arch-dependent header file that includes headers for all arch-specific
2770 + * types of interest.
2771 + * The kernel type information is used by the lcrash utility when
2772 + * analyzing system crash dumps or the live system. Using the type
2773 + * information for the running system, rather than kernel header files,
2774 + * makes for a more flexible and robust analysis tool.
2776 + * This source code is released under the GNU GPL.
2779 +/* x86_64-specific header files */
2780 +#ifndef _X86_64_KERNTYPES_H
2781 +#define _X86_64_KERNTYPES_H
2783 +/* Use the default */
2784 +#include <asm-generic/kerntypes.h>
2786 +#endif /* _X86_64_KERNTYPES_H */
2787 Index: linux-2.6.10/include/asm-x86_64/hw_irq.h
2788 ===================================================================
2789 --- linux-2.6.10.orig/include/asm-x86_64/hw_irq.h 2004-12-25 05:35:39.000000000 +0800
2790 +++ linux-2.6.10/include/asm-x86_64/hw_irq.h 2005-04-05 16:47:53.869216352 +0800
2793 #define IA32_SYSCALL_VECTOR 0x80
2797 * Vectors 0x20-0x2f are used for ISA interrupts.
2800 #define TASK_MIGRATION_VECTOR 0xfb
2801 #define CALL_FUNCTION_VECTOR 0xfa
2802 #define KDB_VECTOR 0xf9
2803 +#define DUMP_VECTOR 0xf8
2805 #define THERMAL_APIC_VECTOR 0xf0
2807 Index: linux-2.6.10/include/asm-x86_64/kmap_types.h
2808 ===================================================================
2809 --- linux-2.6.10.orig/include/asm-x86_64/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
2810 +++ linux-2.6.10/include/asm-x86_64/kmap_types.h 2005-04-05 16:47:53.868216504 +0800
2821 Index: linux-2.6.10/include/asm-x86_64/smp.h
2822 ===================================================================
2823 --- linux-2.6.10.orig/include/asm-x86_64/smp.h 2004-12-25 05:33:48.000000000 +0800
2824 +++ linux-2.6.10/include/asm-x86_64/smp.h 2005-04-05 16:47:53.867216656 +0800
2826 extern int pic_mode;
2827 extern int smp_num_siblings;
2828 extern void smp_flush_tlb(void);
2829 +extern void dump_send_ipi(void);
2830 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
2831 extern void smp_send_reschedule(int cpu);
2832 extern void smp_invalidate_rcv(void); /* Process an NMI */
2833 Index: linux-2.6.10/include/asm-s390/dump.h
2834 ===================================================================
2835 --- linux-2.6.10.orig/include/asm-s390/dump.h 2005-04-05 19:01:49.158500672 +0800
2836 +++ linux-2.6.10/include/asm-s390/dump.h 2005-04-05 16:47:53.865216960 +0800
2839 + * Kernel header file for Linux crash dumps.
2842 +/* Nothing to be done here, we have proper hardware support */
2843 +#ifndef _ASM_DUMP_H
2844 +#define _ASM_DUMP_H
2848 Index: linux-2.6.10/include/asm-s390/kerntypes.h
2849 ===================================================================
2850 --- linux-2.6.10.orig/include/asm-s390/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2851 +++ linux-2.6.10/include/asm-s390/kerntypes.h 2005-04-05 16:47:53.866216808 +0800
2854 + * asm-s390/kerntypes.h
2856 + * Arch-dependent header file that includes headers for all arch-specific
2857 + * types of interest.
2858 + * The kernel type information is used by the lcrash utility when
2859 + * analyzing system crash dumps or the live system. Using the type
2860 + * information for the running system, rather than kernel header files,
2861 + * makes for a more flexible and robust analysis tool.
2863 + * This source code is released under the GNU GPL.
2866 +/* S/390 specific header files */
2867 +#ifndef _S390_KERNTYPES_H
2868 +#define _S390_KERNTYPES_H
2870 +#include <asm/lowcore.h>
2871 +#include <asm/debug.h>
2872 +#include <asm/ccwdev.h>
2873 +#include <asm/ccwgroup.h>
2874 +#include <asm/qdio.h>
2876 +/* channel subsystem driver */
2877 +#include "../../drivers/s390/cio/cio.h"
2878 +#include "../../drivers/s390/cio/chsc.h"
2879 +#include "../../drivers/s390/cio/css.h"
2880 +#include "../../drivers/s390/cio/device.h"
2881 +#include "../../drivers/s390/cio/qdio.h"
2883 +/* dasd device driver */
2884 +#include "../../drivers/s390/block/dasd_int.h"
2885 +#include "../../drivers/s390/block/dasd_diag.h"
2886 +#include "../../drivers/s390/block/dasd_eckd.h"
2887 +#include "../../drivers/s390/block/dasd_fba.h"
2889 +/* networking drivers */
2890 +#include "../../drivers/s390/net/fsm.h"
2891 +#include "../../drivers/s390/net/iucv.h"
2892 +#include "../../drivers/s390/net/lcs.h"
2894 +/* zfcp device driver */
2895 +#include "../../drivers/s390/scsi/zfcp_def.h"
2896 +#include "../../drivers/s390/scsi/zfcp_fsf.h"
2898 +#endif /* _S390_KERNTYPES_H */
2899 Index: linux-2.6.10/include/asm-sparc64/kerntypes.h
2900 ===================================================================
2901 --- linux-2.6.10.orig/include/asm-sparc64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2902 +++ linux-2.6.10/include/asm-sparc64/kerntypes.h 2005-04-05 16:47:53.872215896 +0800
2905 + * asm-sparc64/kerntypes.h
2907 + * Arch-dependent header file that includes headers for all arch-specific
2908 + * types of interest.
2909 + * The kernel type information is used by the lcrash utility when
2910 + * analyzing system crash dumps or the live system. Using the type
2911 + * information for the running system, rather than kernel header files,
2912 + * makes for a more flexible and robust analysis tool.
2914 + * This source code is released under the GNU GPL.
2917 +/* SPARC64-specific header files */
2918 +#ifndef _SPARC64_KERNTYPES_H
2919 +#define _SPARC64_KERNTYPES_H
2921 +/* Use the default */
2922 +#include <asm-generic/kerntypes.h>
2924 +#endif /* _SPARC64_KERNTYPES_H */
2925 Index: linux-2.6.10/include/asm-mips/kerntypes.h
2926 ===================================================================
2927 --- linux-2.6.10.orig/include/asm-mips/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2928 +++ linux-2.6.10/include/asm-mips/kerntypes.h 2005-04-05 16:47:53.881214528 +0800
2931 + * asm-mips/kerntypes.h
2933 + * Arch-dependent header file that includes headers for all arch-specific
2934 + * types of interest.
2935 + * The kernel type information is used by the lcrash utility when
2936 + * analyzing system crash dumps or the live system. Using the type
2937 + * information for the running system, rather than kernel header files,
2938 + * makes for a more flexible and robust analysis tool.
2940 + * This source code is released under the GNU GPL.
2943 +/* MIPS-specific header files */
2944 +#ifndef _MIPS_KERNTYPES_H
2945 +#define _MIPS_KERNTYPES_H
2947 +/* Use the default */
2948 +#include <asm-generic/kerntypes.h>
2950 +#endif /* _MIPS_KERNTYPES_H */
2951 Index: linux-2.6.10/include/asm-m68k/kerntypes.h
2952 ===================================================================
2953 --- linux-2.6.10.orig/include/asm-m68k/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2954 +++ linux-2.6.10/include/asm-m68k/kerntypes.h 2005-04-05 16:47:53.875215440 +0800
2957 + * asm-m68k/kerntypes.h
2959 + * Arch-dependent header file that includes headers for all arch-specific
2960 + * types of interest.
2961 + * The kernel type information is used by the lcrash utility when
2962 + * analyzing system crash dumps or the live system. Using the type
2963 + * information for the running system, rather than kernel header files,
2964 + * makes for a more flexible and robust analysis tool.
2966 + * This source code is released under the GNU GPL.
2969 +/* m68k-specific header files */
2970 +#ifndef _M68K_KERNTYPES_H
2971 +#define _M68K_KERNTYPES_H
2973 +/* Use the default */
2974 +#include <asm-generic/kerntypes.h>
2976 +#endif /* _M68K_KERNTYPES_H */
2977 Index: linux-2.6.10/include/asm-generic/kerntypes.h
2978 ===================================================================
2979 --- linux-2.6.10.orig/include/asm-generic/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
2980 +++ linux-2.6.10/include/asm-generic/kerntypes.h 2005-04-05 16:47:53.871216048 +0800
2983 + * asm-generic/kerntypes.h
2985 + * Arch-dependent header file that includes headers for all arch-specific
2986 + * types of interest.
2987 + * The kernel type information is used by the lcrash utility when
2988 + * analyzing system crash dumps or the live system. Using the type
2989 + * information for the running system, rather than kernel header files,
2990 + * makes for a more flexible and robust analysis tool.
2992 + * This source code is released under the GNU GPL.
2995 +/* Arch-independent header files */
2996 +#ifndef _GENERIC_KERNTYPES_H
2997 +#define _GENERIC_KERNTYPES_H
2999 +#include <linux/pci.h>
3001 +#endif /* _GENERIC_KERNTYPES_H */
3002 Index: linux-2.6.10/include/asm-i386/dump.h
3003 ===================================================================
3004 --- linux-2.6.10.orig/include/asm-i386/dump.h 2005-04-05 19:01:49.158500672 +0800
3005 +++ linux-2.6.10/include/asm-i386/dump.h 2005-04-05 16:47:53.886213768 +0800
3008 + * Kernel header file for Linux crash dumps.
3010 + * Created by: Matt Robinson (yakker@sgi.com)
3012 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
3014 + * This code is released under version 2 of the GNU GPL.
3017 +/* This header file holds the architecture specific crash dump header */
3018 +#ifndef _ASM_DUMP_H
3019 +#define _ASM_DUMP_H
3021 +/* necessary header files */
3022 +#include <asm/ptrace.h>
3023 +#include <asm/page.h>
3024 +#include <linux/threads.h>
3025 +#include <linux/mm.h>
3028 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
3029 +#define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */
3032 + * Structure: __dump_header_asm
3033 + * Function: This is the header for architecture-specific stuff. It
3034 + * follows right after the dump header.
3036 +struct __dump_header_asm {
3037 + /* the dump magic number -- unique to verify dump is valid */
3038 + u64 dha_magic_number;
3040 + /* the version number of this dump */
3043 + /* the size of this header (in case we can't read it) */
3044 + u32 dha_header_size;
3046 + /* the esp for i386 systems */
3049 + /* the eip for i386 systems */
3052 + /* the dump registers */
3053 + struct pt_regs dha_regs;
3055 + /* smp specific */
3056 + u32 dha_smp_num_cpus;
3057 + u32 dha_dumping_cpu;
3058 + struct pt_regs dha_smp_regs[NR_CPUS];
3059 + u32 dha_smp_current_task[NR_CPUS];
3060 + u32 dha_stack[NR_CPUS];
3061 + u32 dha_stack_ptr[NR_CPUS];
3062 +} __attribute__((packed));
3066 +extern struct __dump_header_asm dump_header_asm;
3069 +extern cpumask_t irq_affinity[];
3070 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
3071 +extern void dump_send_ipi(void);
3073 +#define dump_send_ipi() do { } while(0)
3076 +static inline void get_current_regs(struct pt_regs *regs)
3078 + __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
3079 + __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
3080 + __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
3081 + __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
3082 + __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
3083 + __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
3084 + __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
3085 + __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
3086 + __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
3087 + __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
3088 + __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
3089 + __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
3090 + __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
3091 + regs->eip = (unsigned long)current_text_addr();
3094 +#endif /* __KERNEL__ */
3096 +#endif /* _ASM_DUMP_H */
3097 Index: linux-2.6.10/include/asm-i386/kerntypes.h
3098 ===================================================================
3099 --- linux-2.6.10.orig/include/asm-i386/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
3100 +++ linux-2.6.10/include/asm-i386/kerntypes.h 2005-04-05 16:47:53.887213616 +0800
3103 + * asm-i386/kerntypes.h
3105 + * Arch-dependent header file that includes headers for all arch-specific
3106 + * types of interest.
3107 + * The kernel type information is used by the lcrash utility when
3108 + * analyzing system crash dumps or the live system. Using the type
3109 + * information for the running system, rather than kernel header files,
3110 + * makes for a more flexible and robust analysis tool.
3112 + * This source code is released under the GNU GPL.
3115 +/* ix86-specific header files */
3116 +#ifndef _I386_KERNTYPES_H
3117 +#define _I386_KERNTYPES_H
3119 +/* Use the default */
3120 +#include <asm-generic/kerntypes.h>
3122 +#endif /* _I386_KERNTYPES_H */
3123 Index: linux-2.6.10/include/asm-i386/kmap_types.h
3124 ===================================================================
3125 --- linux-2.6.10.orig/include/asm-i386/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
3126 +++ linux-2.6.10/include/asm-i386/kmap_types.h 2005-04-05 16:47:53.886213768 +0800
3137 Index: linux-2.6.10/include/asm-i386/smp.h
3138 ===================================================================
3139 --- linux-2.6.10.orig/include/asm-i386/smp.h 2004-12-25 05:35:50.000000000 +0800
3140 +++ linux-2.6.10/include/asm-i386/smp.h 2005-04-05 16:47:53.885213920 +0800
3142 extern cpumask_t cpu_sibling_map[];
3144 extern void smp_flush_tlb(void);
3145 +extern void dump_send_ipi(void);
3146 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
3147 extern void smp_invalidate_rcv(void); /* Process an NMI */
3148 extern void (*mtrr_hook) (void);
3149 Index: linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h
3150 ===================================================================
3151 --- linux-2.6.10.orig/include/asm-i386/mach-default/irq_vectors.h 2004-12-25 05:34:26.000000000 +0800
3152 +++ linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h 2005-04-05 16:47:53.887213616 +0800
3154 #define INVALIDATE_TLB_VECTOR 0xfd
3155 #define RESCHEDULE_VECTOR 0xfc
3156 #define CALL_FUNCTION_VECTOR 0xfb
3157 +#define DUMP_VECTOR 0xfa
3159 #define THERMAL_APIC_VECTOR 0xf0
3161 Index: linux-2.6.10/include/asm-arm/kerntypes.h
3162 ===================================================================
3163 --- linux-2.6.10.orig/include/asm-arm/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
3164 +++ linux-2.6.10/include/asm-arm/kerntypes.h 2005-04-05 16:47:53.873215744 +0800
3167 + * asm-arm/kerntypes.h
3169 + * Arch-dependent header file that includes headers for all arch-specific
3170 + * types of interest.
3171 + * The kernel type information is used by the lcrash utility when
3172 + * analyzing system crash dumps or the live system. Using the type
3173 + * information for the running system, rather than kernel header files,
3174 + * makes for a more flexible and robust analysis tool.
3176 + * This source code is released under the GNU GPL.
3179 +/* ARM-specific header files */
3180 +#ifndef _ARM_KERNTYPES_H
3181 +#define _ARM_KERNTYPES_H
3183 +/* Use the default */
3184 +#include <asm-generic/kerntypes.h>
3186 +#endif /* _ARM_KERNTYPES_H */
3187 Index: linux-2.6.10/include/asm-sparc/kerntypes.h
3188 ===================================================================
3189 --- linux-2.6.10.orig/include/asm-sparc/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
3190 +++ linux-2.6.10/include/asm-sparc/kerntypes.h 2005-04-05 16:47:53.874215592 +0800
3193 + * asm-sparc/kerntypes.h
3195 + * Arch-dependent header file that includes headers for all arch-specific
3196 + * types of interest.
3197 + * The kernel type information is used by the lcrash utility when
3198 + * analyzing system crash dumps or the live system. Using the type
3199 + * information for the running system, rather than kernel header files,
3200 + * makes for a more flexible and robust analysis tool.
3202 + * This source code is released under the GNU GPL.
3205 +/* SPARC-specific header files */
3206 +#ifndef _SPARC_KERNTYPES_H
3207 +#define _SPARC_KERNTYPES_H
3209 +/* Use the default */
3210 +#include <asm-generic/kerntypes.h>
3212 +#endif /* _SPARC_KERNTYPES_H */
3213 Index: linux-2.6.10/include/asm-mips64/kerntypes.h
3214 ===================================================================
3215 --- linux-2.6.10.orig/include/asm-mips64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
3216 +++ linux-2.6.10/include/asm-mips64/kerntypes.h 2005-04-05 16:47:53.881214528 +0800
3219 + * asm-mips64/kerntypes.h
3221 + * Arch-dependent header file that includes headers for all arch-specific
3222 + * types of interest.
3223 + * The kernel type information is used by the lcrash utility when
3224 + * analyzing system crash dumps or the live system. Using the type
3225 + * information for the running system, rather than kernel header files,
3226 + * makes for a more flexible and robust analysis tool.
3228 + * This source code is released under the GNU GPL.
3231 +/* MIPS64-specific header files */
3232 +#ifndef _MIPS64_KERNTYPES_H
3233 +#define _MIPS64_KERNTYPES_H
3235 +/* Use the default */
3236 +#include <asm-generic/kerntypes.h>
3238 +#endif /* _MIPS64_KERNTYPES_H */
3239 Index: linux-2.6.10/net/Kconfig
3240 ===================================================================
3241 --- linux-2.6.10.orig/net/Kconfig 2005-04-05 16:29:27.896349784 +0800
3242 +++ linux-2.6.10/net/Kconfig 2005-04-05 16:47:53.895212400 +0800
3247 - def_bool NETCONSOLE
3248 + def_bool NETCONSOLE || CRASH_DUMP_NETDEV
3251 bool "Netpoll support for trapping incoming packets"
3252 Index: linux-2.6.10/scripts/mkcompile_h
3253 ===================================================================
3254 --- linux-2.6.10.orig/scripts/mkcompile_h 2004-12-25 05:35:50.000000000 +0800
3255 +++ linux-2.6.10/scripts/mkcompile_h 2005-04-05 16:47:53.950204040 +0800
3259 UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
3261 +LINUX_COMPILE_VERSION_ID="__linux_compile_version_id__`hostname | tr -c '[0-9A-Za-z\n]' '__'`_`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'`"
3262 # Generate a temporary compile.h
3264 ( echo /\* This file is auto generated, version $VERSION \*/
3268 echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
3269 + echo \#define LINUX_COMPILE_VERSION_ID $LINUX_COMPILE_VERSION_ID
3270 + echo \#define LINUX_COMPILE_VERSION_ID_TYPE typedef char* "$LINUX_COMPILE_VERSION_ID""_t"
3273 # Only replace the real compile.h if the new one is different,
3274 Index: linux-2.6.10/mm/bootmem.c
3275 ===================================================================
3276 --- linux-2.6.10.orig/mm/bootmem.c 2004-12-25 05:34:30.000000000 +0800
3277 +++ linux-2.6.10/mm/bootmem.c 2005-04-05 16:47:53.903211184 +0800
3280 unsigned long max_low_pfn;
3281 unsigned long min_low_pfn;
3282 +EXPORT_SYMBOL(min_low_pfn);
3283 unsigned long max_pfn;
3285 EXPORT_SYMBOL(max_pfn); /* This is exported so
3287 if (j + 16 < BITS_PER_LONG)
3288 prefetchw(page + j + 16);
3289 __ClearPageReserved(page + j);
3290 + set_page_count(page + j, 1);
3292 __free_pages(page, ffs(BITS_PER_LONG)-1);
3294 Index: linux-2.6.10/mm/page_alloc.c
3295 ===================================================================
3296 --- linux-2.6.10.orig/mm/page_alloc.c 2005-04-05 16:29:28.218300840 +0800
3297 +++ linux-2.6.10/mm/page_alloc.c 2005-04-05 16:47:53.902211336 +0800
3299 EXPORT_SYMBOL(totalram_pages);
3300 EXPORT_SYMBOL(nr_swap_pages);
3302 +#ifdef CONFIG_CRASH_DUMP_MODULE
3303 +/* This symbol has to be exported so that modules can use the 'for_each_pgdat' macro. */
3304 +EXPORT_SYMBOL(pgdat_list);
3308 * Used by page_zone() to look up the address of the struct zone whose
3309 * id is encoded in the upper bits of page->flags
3310 @@ -281,8 +286,11 @@
3311 arch_free_page(page, order);
3313 mod_page_state(pgfree, 1 << order);
3314 - for (i = 0 ; i < (1 << order) ; ++i)
3315 + for (i = 0 ; i < (1 << order) ; ++i){
3317 + __put_page(page + i);
3318 free_pages_check(__FUNCTION__, page + i);
3320 list_add(&page->lru, &list);
3321 kernel_map_pages(page, 1<<order, 0);
3322 free_pages_bulk(page_zone(page), 1, &list, order);
3323 @@ -322,44 +330,34 @@
3327 -static inline void set_page_refs(struct page *page, int order)
3330 - set_page_count(page, 1);
3335 - * We need to reference all the pages for this order, otherwise if
3336 - * anyone accesses one of the pages with (get/put) it will be freed.
3338 - for (i = 0; i < (1 << order); i++)
3339 - set_page_count(page+i, 1);
3340 -#endif /* CONFIG_MMU */
3344 * This page is about to be returned from the page allocator
3346 -static void prep_new_page(struct page *page, int order)
3347 +static void prep_new_page(struct page *_page, int order)
3349 - if (page->mapping || page_mapped(page) ||
3357 - 1 << PG_swapcache |
3358 - 1 << PG_writeback )))
3361 + for(i = 0; i < (1 << order); i++){
3362 + struct page *page = _page + i;
3364 + if (page->mapping || page_mapped(page) ||
3372 + 1 << PG_swapcache |
3373 + 1 << PG_writeback )))
3374 bad_page(__FUNCTION__, page);
3376 - page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
3377 - 1 << PG_referenced | 1 << PG_arch_1 |
3378 - 1 << PG_checked | 1 << PG_mappedtodisk);
3379 - page->private = 0;
3380 - set_page_refs(page, order);
3381 + page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
3382 + 1 << PG_referenced | 1 << PG_arch_1 |
3383 + 1 << PG_checked | 1 << PG_mappedtodisk);
3384 + page->private = 0;
3385 + set_page_count(page, 1);
3390 Index: linux-2.6.10/kernel/sched.c
3391 ===================================================================
3392 --- linux-2.6.10.orig/kernel/sched.c 2005-04-05 16:29:30.335978904 +0800
3393 +++ linux-2.6.10/kernel/sched.c 2005-04-05 16:47:53.901211488 +0800
3395 #define cpu_to_node_mask(cpu) (cpu_online_map)
3398 +/* used to soft spin in sched while dump is in progress */
3399 +unsigned long dump_oncpu;
3400 +EXPORT_SYMBOL(dump_oncpu);
3403 * Convert user-nice values [ -20 ... 0 ... 19 ]
3404 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
3405 @@ -184,109 +188,6 @@
3406 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
3407 < (long long) (sd)->cache_hot_time)
3410 - * These are the runqueue data structures:
3413 -#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
3415 -typedef struct runqueue runqueue_t;
3417 -struct prio_array {
3418 - unsigned int nr_active;
3419 - unsigned long bitmap[BITMAP_SIZE];
3420 - struct list_head queue[MAX_PRIO];
3424 - * This is the main, per-CPU runqueue data structure.
3426 - * Locking rule: those places that want to lock multiple runqueues
3427 - * (such as the load balancing or the thread migration code), lock
3428 - * acquire operations must be ordered by ascending &runqueue.
3434 - * nr_running and cpu_load should be in the same cacheline because
3435 - * remote CPUs use both these fields when doing load calculation.
3437 - unsigned long nr_running;
3439 - unsigned long cpu_load;
3441 - unsigned long long nr_switches;
3444 - * This is part of a global counter where only the total sum
3445 - * over all CPUs matters. A task can increase this counter on
3446 - * one CPU and if it got migrated afterwards it may decrease
3447 - * it on another CPU. Always updated under the runqueue lock:
3449 - unsigned long nr_uninterruptible;
3451 - unsigned long expired_timestamp;
3452 - unsigned long long timestamp_last_tick;
3453 - task_t *curr, *idle;
3454 - struct mm_struct *prev_mm;
3455 - prio_array_t *active, *expired, arrays[2];
3456 - int best_expired_prio;
3457 - atomic_t nr_iowait;
3460 - struct sched_domain *sd;
3462 - /* For active balancing */
3463 - int active_balance;
3466 - task_t *migration_thread;
3467 - struct list_head migration_queue;
3470 -#ifdef CONFIG_SCHEDSTATS
3471 - /* latency stats */
3472 - struct sched_info rq_sched_info;
3474 - /* sys_sched_yield() stats */
3475 - unsigned long yld_exp_empty;
3476 - unsigned long yld_act_empty;
3477 - unsigned long yld_both_empty;
3478 - unsigned long yld_cnt;
3480 - /* schedule() stats */
3481 - unsigned long sched_noswitch;
3482 - unsigned long sched_switch;
3483 - unsigned long sched_cnt;
3484 - unsigned long sched_goidle;
3486 - /* pull_task() stats */
3487 - unsigned long pt_gained[MAX_IDLE_TYPES];
3488 - unsigned long pt_lost[MAX_IDLE_TYPES];
3490 - /* active_load_balance() stats */
3491 - unsigned long alb_cnt;
3492 - unsigned long alb_lost;
3493 - unsigned long alb_gained;
3494 - unsigned long alb_failed;
3496 - /* try_to_wake_up() stats */
3497 - unsigned long ttwu_cnt;
3498 - unsigned long ttwu_attempts;
3499 - unsigned long ttwu_moved;
3501 - /* wake_up_new_task() stats */
3502 - unsigned long wunt_cnt;
3503 - unsigned long wunt_moved;
3505 - /* sched_migrate_task() stats */
3506 - unsigned long smt_cnt;
3508 - /* sched_balance_exec() stats */
3509 - unsigned long sbe_cnt;
3513 static DEFINE_PER_CPU(struct runqueue, runqueues);
3515 @@ -2535,6 +2436,15 @@
3516 unsigned long run_time;
3520 +	 * If a crash dump is in progress, the other CPUs
3521 +	 * need to wait until it completes.
3522 + * NB: this code is optimized away for kernels without
3523 + * dumping enabled.
3525 + if (unlikely(dump_oncpu))
3526 + goto dump_scheduling_disabled;
3529 * Test if we are atomic. Since do_exit() needs to call into
3530 * schedule() atomically, we ignore that path for now.
3531 @@ -2698,6 +2608,16 @@
3532 preempt_enable_no_resched();
3533 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3538 + dump_scheduling_disabled:
3539 + /* allow scheduling only if this is the dumping cpu */
3540 + if (dump_oncpu != smp_processor_id()+1) {
3541 + while (dump_oncpu)
3547 EXPORT_SYMBOL(schedule);
3548 Index: linux-2.6.10/kernel/panic.c
3549 ===================================================================
3550 --- linux-2.6.10.orig/kernel/panic.c 2004-12-25 05:35:29.000000000 +0800
3551 +++ linux-2.6.10/kernel/panic.c 2005-04-05 16:47:53.898211944 +0800
3553 #include <linux/sysrq.h>
3554 #include <linux/interrupt.h>
3555 #include <linux/nmi.h>
3556 +#ifdef CONFIG_KEXEC
3557 +#include <linux/kexec.h>
3563 +void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
3565 EXPORT_SYMBOL(panic_timeout);
3566 +EXPORT_SYMBOL(dump_function_ptr);
3568 struct notifier_block *panic_notifier_list;
3571 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
3574 + notifier_call_chain(&panic_notifier_list, 0, buf);
3580 - notifier_call_chain(&panic_notifier_list, 0, buf);
3583 panic_blink = no_blink;
3585 * We can't use the "normal" timers since we just panicked..
3587 printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
3588 +#ifdef CONFIG_KEXEC
3590 + struct kimage *image;
3591 + image = xchg(&kexec_image, 0);
3593 + printk(KERN_EMERG "by starting a new kernel ..\n");
3594 + mdelay(panic_timeout*1000);
3595 + machine_kexec(image);
3600 for (i = 0; i < panic_timeout*1000; ) {
3601 touch_nmi_watchdog();
3602 i += panic_blink(i);
3603 Index: linux-2.6.10/drivers/block/ll_rw_blk.c
3604 ===================================================================
3605 --- linux-2.6.10.orig/drivers/block/ll_rw_blk.c 2005-04-05 16:29:30.310982704 +0800
3606 +++ linux-2.6.10/drivers/block/ll_rw_blk.c 2005-04-05 16:47:53.949204192 +0800
3608 #include <linux/slab.h>
3609 #include <linux/swap.h>
3610 #include <linux/writeback.h>
3611 +#include <linux/dump.h>
3614 * for max sense size
3615 @@ -2628,7 +2629,8 @@
3617 int ret, nr_sectors = bio_sectors(bio);
3620 + if (likely(!dump_oncpu))
3622 /* Test device or partition size, when known. */
3623 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
3625 Index: linux-2.6.10/drivers/dump/dump_i386.c
3626 ===================================================================
3627 --- linux-2.6.10.orig/drivers/dump/dump_i386.c 2005-04-05 19:01:49.158500672 +0800
3628 +++ linux-2.6.10/drivers/dump/dump_i386.c 2005-04-05 16:47:53.940205560 +0800
3631 + * Architecture specific (i386) functions for Linux crash dumps.
3633 + * Created by: Matt Robinson (yakker@sgi.com)
3635 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
3637 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
3638 + * Copyright 2000 TurboLinux, Inc. All rights reserved.
3640 + * This code is released under version 2 of the GNU GPL.
3644 + * The hooks for dumping the kernel virtual memory to disk are in this
3645 + * file. Any time a modification is made to the virtual memory mechanism,
3646 + * these routines must be changed to use the new mechanisms.
3648 +#include <linux/init.h>
3649 +#include <linux/types.h>
3650 +#include <linux/kernel.h>
3651 +#include <linux/smp.h>
3652 +#include <linux/fs.h>
3653 +#include <linux/vmalloc.h>
3654 +#include <linux/mm.h>
3655 +#include <linux/dump.h>
3656 +#include "dump_methods.h"
3657 +#include <linux/irq.h>
3659 +#include <asm/processor.h>
3660 +#include <asm/e820.h>
3661 +#include <asm/hardirq.h>
3662 +#include <asm/nmi.h>
3664 +static __s32 saved_irq_count; /* saved preempt_count() flags */
3667 +alloc_dha_stack(void)
3672 + if (dump_header_asm.dha_stack[0])
3675 + ptr = vmalloc(THREAD_SIZE * num_online_cpus());
3677 + printk("vmalloc for dha_stacks failed\n");
3681 + for (i = 0; i < num_online_cpus(); i++) {
3682 + dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
3683 + (i * THREAD_SIZE));
3689 +free_dha_stack(void)
3691 + if (dump_header_asm.dha_stack[0]) {
3692 + vfree((void *)dump_header_asm.dha_stack[0]);
3693 + dump_header_asm.dha_stack[0] = 0;
3700 +__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
3702 + *dest_regs = *regs;
3704 +	/* In case of panic dumps, we collect regs on entry to panic.
3705 + * so, we shouldn't 'fix' ssesp here again. But it is hard to
3706 + * tell just looking at regs whether ssesp need fixing. We make
3707 + * this decision by looking at xss in regs. If we have better
3708 + * means to determine that ssesp are valid (by some flag which
3709 + * tells that we are here due to panic dump), then we can use
3710 + * that instead of this kludge.
3712 + if (!user_mode(regs)) {
3713 + if ((0xffff & regs->xss) == __KERNEL_DS)
3714 + /* already fixed up */
3716 + dest_regs->esp = (unsigned long)&(regs->esp);
3717 + __asm__ __volatile__ ("movw %%ss, %%ax;"
3718 + :"=a"(dest_regs->xss));
3723 +__dump_save_context(int cpu, const struct pt_regs *regs,
3724 + struct task_struct *tsk)
3726 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
3727 + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
3729 + /* take a snapshot of the stack */
3730 + /* doing this enables us to tolerate slight drifts on this cpu */
3732 + if (dump_header_asm.dha_stack[cpu]) {
3733 + memcpy((void *)dump_header_asm.dha_stack[cpu],
3734 + STACK_START_POSITION(tsk),
3737 + dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
3741 +extern cpumask_t irq_affinity[];
3742 +extern irq_desc_t irq_desc[];
3743 +extern void dump_send_ipi(void);
3745 +static int dump_expect_ipi[NR_CPUS];
3746 +static atomic_t waiting_for_dump_ipi;
3747 +static cpumask_t saved_affinity[NR_IRQS];
3749 +extern void stop_this_cpu(void *); /* exported by i386 kernel */
3752 +dump_nmi_callback(struct pt_regs *regs, int cpu)
3754 + if (!dump_expect_ipi[cpu])
3757 + dump_expect_ipi[cpu] = 0;
3759 + dump_save_this_cpu(regs);
3760 + atomic_dec(&waiting_for_dump_ipi);
3763 + switch (dump_silence_level) {
3764 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
3765 + while (dump_oncpu) {
3766 + barrier(); /* paranoia */
3767 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
3768 + goto level_changed;
3770 + cpu_relax(); /* kill time nicely */
3774 + case DUMP_HALT_CPUS: /* Execute halt */
3775 + stop_this_cpu(NULL);
3778 + case DUMP_SOFT_SPIN_CPUS:
3779 + /* Mark the task so it spins in schedule */
3780 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
3787 +/* save registers on other processors */
3789 +__dump_save_other_cpus(void)
3791 + int i, cpu = smp_processor_id();
3792 + int other_cpus = num_online_cpus()-1;
3794 + if (other_cpus > 0) {
3795 + atomic_set(&waiting_for_dump_ipi, other_cpus);
3797 + for (i = 0; i < NR_CPUS; i++) {
3798 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
3801 + /* short circuit normal NMI handling temporarily */
3802 + set_nmi_callback(dump_nmi_callback);
3806 +	/* Maybe we don't need to wait for the NMI to be processed:
3807 +	   just write out the header at the end of dumping; if
3808 +	   this IPI is not processed by then, there probably
3809 +	   is a problem and we just fail to capture the state of
3811 + while(atomic_read(&waiting_for_dump_ipi) > 0) {
3815 + unset_nmi_callback();
3820 + * Routine to save the old irq affinities and change affinities of all irqs to
3821 + * the dumping cpu.
3824 +set_irq_affinity(void)
3827 + cpumask_t cpu = CPU_MASK_NONE;
3829 + cpu_set(smp_processor_id(), cpu);
3830 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
3831 + for (i = 0; i < NR_IRQS; i++) {
3832 + if (irq_desc[i].handler == NULL)
3834 + irq_affinity[i] = cpu;
3835 + if (irq_desc[i].handler->set_affinity != NULL)
3836 + irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
3841 + * Restore old irq affinities.
3844 +reset_irq_affinity(void)
3848 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
3849 + for (i = 0; i < NR_IRQS; i++) {
3850 + if (irq_desc[i].handler == NULL)
3852 + if (irq_desc[i].handler->set_affinity != NULL)
3853 + irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
3857 +#else /* !CONFIG_SMP */
3858 +#define set_irq_affinity() do { } while (0)
3859 +#define reset_irq_affinity() do { } while (0)
3860 +#define save_other_cpu_states() do { } while (0)
3861 +#endif /* !CONFIG_SMP */
3864 + * Kludge - dump from interrupt context is unreliable (Fixme)
3866 + * We do this so that softirqs initiated for dump i/o
3867 + * get processed and we don't hang while waiting for i/o
3868 + * to complete or in any irq synchronization attempt.
3870 + * This is not quite legal of course, as it has the side
3871 + * effect of making all interrupts & softirqs triggered
3872 + * while dump is in progress complete before currently
3873 + * pending softirqs and the currently executing interrupt
3879 + saved_irq_count = irq_count();
3880 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
3884 +irq_bh_restore(void)
3886 + preempt_count() |= saved_irq_count;
3890 + * Name: __dump_irq_enable
3891 + * Func: Reset system so interrupts are enabled.
3892 + * This is used for dump methods that require interrupts
3893 + * Eventually, all methods will have interrupts disabled
3894 + * and this code can be removed.
3896 + * Change irq affinities
3897 + * Re-enable interrupts
3900 +__dump_irq_enable(void)
3902 + set_irq_affinity();
3904 + local_irq_enable();
3909 + * Name: __dump_irq_restore
3910 + * Func: Resume the system state in an architecture-specific way.
3914 +__dump_irq_restore(void)
3916 + local_irq_disable();
3917 + reset_irq_affinity();
3922 + * Name: __dump_configure_header()
3923 + * Func: Meant to fill in arch specific header fields except per-cpu state
3924 + * already captured via __dump_save_context for all CPUs.
3927 +__dump_configure_header(const struct pt_regs *regs)
3933 + * Name: __dump_init()
3934 + * Func: Initialize the dumping routine process.
3937 +__dump_init(uint64_t local_memory_start)
3943 + * Name: __dump_open()
3944 + * Func: Open the dump device (architecture specific).
3949 + alloc_dha_stack();
3953 + * Name: __dump_cleanup()
3954 + * Func: Free any architecture specific data structures. This is called
3955 + * when the dump module is being removed.
3958 +__dump_cleanup(void)
3963 +extern int pfn_is_ram(unsigned long);
3966 + * Name: __dump_page_valid()
3967 + * Func: Check if page is valid to dump.
3970 +__dump_page_valid(unsigned long index)
3972 + if (!pfn_valid(index))
3975 + return pfn_is_ram(index);
3979 + * Name: manual_handle_crashdump()
3980 + * Func: Interface for the lkcd dump command. Calls dump_execute()
3983 +manual_handle_crashdump(void) {
3985 + struct pt_regs regs;
3987 + get_current_regs(®s);
3988 + dump_execute("manual", ®s);
3993 + * Name: __dump_clean_irq_state()
3994 + * Func: Clean up from the previous IRQ handling state. Such as oops from
3995 + * interrupt handler or bottom half.
3998 +__dump_clean_irq_state(void)
4002 Index: linux-2.6.10/drivers/dump/dump_ia64.c
4003 ===================================================================
4004 --- linux-2.6.10.orig/drivers/dump/dump_ia64.c 2005-04-05 19:01:49.158500672 +0800
4005 +++ linux-2.6.10/drivers/dump/dump_ia64.c 2005-04-05 16:47:53.928207384 +0800
4008 + * Architecture specific (ia64) functions for Linux crash dumps.
4010 + * Created by: Matt Robinson (yakker@sgi.com)
4011 + * Contributions from SGI, IBM, and others.
4013 + * 2.4 kernel modifications by: Matt D. Robinson (yakker@alacritech.com)
4014 + * ia64 kernel modifications by: Piet Delaney (piet@www.piet.net)
4016 + * Copyright (C) 2001 - 2002 Matt D. Robinson (yakker@alacritech.com)
4017 + * Copyright (C) 2002 Silicon Graphics, Inc. All rights reserved.
4018 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
4020 + * This code is released under version 2 of the GNU GPL.
4024 + * The hooks for dumping the kernel virtual memory to disk are in this
4025 + * file. Any time a modification is made to the virtual memory mechanism,
4026 + * these routines must be changed to use the new mechanisms.
4028 +#include <linux/init.h>
4029 +#include <linux/types.h>
4030 +#include <linux/kernel.h>
4031 +#include <linux/smp.h>
4032 +#include <linux/fs.h>
4033 +#include <linux/vmalloc.h>
4034 +#include <linux/dump.h>
4035 +#include "dump_methods.h"
4036 +#include <linux/mm.h>
4037 +#include <asm/processor.h>
4038 +#include <asm-ia64/dump.h>
4039 +#include <asm/hardirq.h>
4040 +#include <linux/irq.h>
4041 +#include <linux/delay.h>
4043 +static __s32 saved_irq_count; /* saved preempt_count() flags */
4046 +static int alloc_dha_stack(void)
4051 + if (dump_header_asm.dha_stack[0])
4055 + ptr = vmalloc(THREAD_SIZE * num_online_cpus());
4057 + printk("vmalloc for dha_stacks failed\n");
4060 + bzero(ptr,THREAD_SIZE );
4062 + for (i = 0; i < num_online_cpus(); i++) {
4063 + dump_header_asm.dha_stack[i] = (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
4068 +static int free_dha_stack(void)
4070 + if (dump_header_asm.dha_stack[0])
4072 + vfree((void*)dump_header_asm.dha_stack[0]);
4073 + dump_header_asm.dha_stack[0] = 0;
4078 +/* a structure to get arguments into the following callback routine */
4081 + struct task_struct *tsk;
4085 +do_save_sw(struct unw_frame_info *info, void *arg)
4087 + struct unw_args *uwargs = (struct unw_args *)arg;
4088 + int cpu = uwargs->cpu;
4089 + struct task_struct *tsk = uwargs->tsk;
4091 + dump_header_asm.dha_stack_ptr[cpu] = (uint64_t)info->sw;
4093 + if (tsk && dump_header_asm.dha_stack[cpu]) {
4094 + memcpy((void *)dump_header_asm.dha_stack[cpu],
4095 + STACK_START_POSITION(tsk),
4101 +__dump_save_context(int cpu, const struct pt_regs *regs,
4102 + struct task_struct *tsk)
4104 + struct unw_args uwargs;
4106 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
4109 + dump_header_asm.dha_smp_regs[cpu] = *regs;
4112 + /* save a snapshot of the stack in a nice state for unwinding */
4116 + unw_init_running(do_save_sw, (void *)&uwargs);
4121 +extern cpumask_t irq_affinity[];
4122 +#define irq_desc _irq_desc
4123 +extern irq_desc_t irq_desc[];
4124 +extern void dump_send_ipi(void);
4125 +static cpumask_t saved_affinity[NR_IRQS];
4128 + * Routine to save the old irq affinities and change affinities of all irqs to
4129 + * the dumping cpu.
4132 +set_irq_affinity(void)
4135 + cpumask_t cpu = CPU_MASK_NONE;
4137 + cpu_set(smp_processor_id(), cpu);
4138 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
4139 + for (i = 0; i < NR_IRQS; i++) {
4140 + if (irq_desc[i].handler == NULL)
4142 + irq_affinity[i] = cpu;
4143 + if (irq_desc[i].handler->set_affinity != NULL)
4144 + irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
4149 + * Restore old irq affinities.
4152 +reset_irq_affinity(void)
4156 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
4157 + for (i = 0; i < NR_IRQS; i++) {
4158 + if (irq_desc[i].handler == NULL)
4160 + if (irq_desc[i].handler->set_affinity != NULL)
4161 + irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
4165 +#else /* !CONFIG_SMP */
4166 +#define set_irq_affinity() do { } while (0)
4167 +#define reset_irq_affinity() do { } while (0)
4168 +#define save_other_cpu_states() do { } while (0)
4169 +#endif /* !CONFIG_SMP */
4172 +static int dump_expect_ipi[NR_CPUS];
4173 +static atomic_t waiting_for_dump_ipi;
4174 +static int wait_for_dump_ipi = 2000; /* wait 2000 ms for ipi to be handled */
4175 +extern void (*dump_trace_ptr)(struct pt_regs *);
4178 +extern void stop_this_cpu(void);
4181 +dump_nmi_callback(struct pt_regs *regs, int cpu)
4183 + if (!dump_expect_ipi[cpu])
4186 + dump_expect_ipi[cpu] = 0;
4188 + dump_save_this_cpu(regs);
4189 + atomic_dec(&waiting_for_dump_ipi);
4192 + switch (dump_silence_level) {
4193 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
4194 + while (dump_oncpu) {
4195 + barrier(); /* paranoia */
4196 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
4197 + goto level_changed;
4199 + cpu_relax(); /* kill time nicely */
4203 + case DUMP_HALT_CPUS: /* Execute halt */
4207 + case DUMP_SOFT_SPIN_CPUS:
4208 + /* Mark the task so it spins in schedule */
4209 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
4216 +int IPI_handler(struct pt_regs *regs)
4219 + cpu = task_cpu(current);
4220 + return(dump_nmi_callback(regs, cpu));
4223 +/* save registers on other processors */
4225 +__dump_save_other_cpus(void)
4227 + int i, cpu = smp_processor_id();
4228 + int other_cpus = num_online_cpus()-1;
4229 + int wait_time = wait_for_dump_ipi;
4231 + if (other_cpus > 0) {
4232 + atomic_set(&waiting_for_dump_ipi, other_cpus);
4234 + for (i = 0; i < NR_CPUS; i++) {
4235 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
4238 + dump_ipi_function_ptr = IPI_handler;
4243 +	/* Maybe we don't need to wait for the IPI to be processed:
4244 +	 * just write out the header at the end of dumping; if
4245 +	 * this IPI is not processed by then, there probably
4246 +	 * is a problem and we just fail to capture the state of
4248 + while(wait_time-- && (atomic_read(&waiting_for_dump_ipi) > 0)) {
4252 + if (wait_time <= 0) {
4253 + printk("dump ipi timeout, proceeding...\n");
4259 + * Kludge - dump from interrupt context is unreliable (Fixme)
4261 + * We do this so that softirqs initiated for dump i/o
4262 + * get processed and we don't hang while waiting for i/o
4263 + * to complete or in any irq synchronization attempt.
4265 + * This is not quite legal of course, as it has the side
4266 + * effect of making all interrupts & softirqs triggered
4267 + * while dump is in progress complete before currently
4268 + * pending softirqs and the currently executing interrupt
4274 + saved_irq_count = irq_count();
4275 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
4279 +irq_bh_restore(void)
4281 + preempt_count() |= saved_irq_count;
4285 + * Name: __dump_configure_header()
4286 + * Func: Configure the dump header with all proper values.
4289 +__dump_configure_header(const struct pt_regs *regs)
4295 +#define dim(x) (sizeof(x)/sizeof(*(x)))
4298 + * Name: __dump_irq_enable
4299 + * Func: Reset system so interrupts are enabled.
4300 + * This is used for dump methods that require interrupts
4301 + * Eventually, all methods will have interrupts disabled
4302 + * and this code can be removed.
4304 + * Change irq affinities
4305 + * Re-enable interrupts
4308 +__dump_irq_enable(void)
4310 + set_irq_affinity();
4314 + * reduce the task priority level
4315 + * to get disk interrupts
4317 + ia64_setreg(_IA64_REG_CR_TPR, 0);
4319 + local_irq_enable();
4324 + * Name: __dump_irq_restore
4325 + * Func: Resume the system state in an architecture-specific way.
4329 +__dump_irq_restore(void)
4331 + local_irq_disable();
4332 + reset_irq_affinity();
4337 + * Name: __dump_page_valid()
4338 + * Func: Check if page is valid to dump.
4341 +__dump_page_valid(unsigned long index)
4343 + if (!pfn_valid(index))
4351 + * Name: __dump_init()
4352 + * Func: Initialize the dumping routine process. This is in case
4353 + * it's necessary in the future.
4356 +__dump_init(uint64_t local_memory_start)
4362 + * Name: __dump_open()
4363 + * Func: Open the dump device (architecture specific). This is in
4364 + * case it's necessary in the future.
4369 + alloc_dha_stack();
4375 + * Name: __dump_cleanup()
4376 + * Func: Free any architecture specific data structures. This is called
4377 + * when the dump module is being removed.
4380 +__dump_cleanup(void)
4389 +int __dump_memcpy_mc_expected = 0; /* Doesn't help yet */
4392 + * An ia64 version of memcpy() that tries to avoid machine checks.
4395 + * By itself __dump_memcpy_mc_expected() isn't providing any
4396 + * protection against Machine Checks. We are looking into the
4397 + * possibility of adding code to the arch/ia64/kernel/mca.c function
4398 + * ia64_mca_ucmc_handler() to restore state so that an IA64_MCA_CORRECTED
4399 + * can be returned to the firmware. Currently it always returns
4400 + * IA64_MCA_COLD_BOOT and reboots the machine.
4403 +void * __dump_memcpy(void * dest, const void *src, size_t count)
4407 + if (__dump_memcpy_mc_expected) {
4408 + ia64_pal_mc_expected((u64) 1, 0);
4411 + vp = memcpy(dest, src, count);
4413 + if (__dump_memcpy_mc_expected) {
4414 + ia64_pal_mc_expected((u64) 0, 0);
4420 + * Name: manual_handle_crashdump()
4421 + * Func: Interface for the lkcd dump command. Calls dump_execute()
4424 +manual_handle_crashdump(void) {
4426 + struct pt_regs regs;
4428 + get_current_regs(&regs);
4429 + dump_execute("manual", &regs);
4434 + * Name: __dump_clean_irq_state()
4435 + * Func: Clean up from the previous IRQ handling state. Such as oops from
4436 + * interrupt handler or bottom half.
4439 +__dump_clean_irq_state(void)
4441 + unsigned long saved_tpr;
4442 + unsigned long TPR_MASK = 0xFFFFFFFFFFFEFF0F;
4445 + /* Get the processors task priority register */
4446 + saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
4447 + /* clear the mmi and mic bit's of the TPR to unmask interrupts */
4448 + saved_tpr = saved_tpr & TPR_MASK;
4449 + ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
4452 + /* Tell the processor we're done with the interrupt
4453 + * that got us here.
4458 + /* local implementation of irq_exit(); */
4459 + preempt_count() -= IRQ_EXIT_OFFSET;
4460 + preempt_enable_no_resched();
4465 Index: linux-2.6.10/drivers/dump/dump_rle.c
4466 ===================================================================
4467 --- linux-2.6.10.orig/drivers/dump/dump_rle.c 2005-04-05 19:01:49.158500672 +0800
4468 +++ linux-2.6.10/drivers/dump/dump_rle.c 2005-04-05 16:47:53.935206320 +0800
4471 + * RLE Compression functions for kernel crash dumps.
4473 + * Created by: Matt Robinson (yakker@sourceforge.net)
4474 + * Copyright 2001 Matt D. Robinson. All rights reserved.
4476 + * This code is released under version 2 of the GNU GPL.
4480 +#include <linux/config.h>
4481 +#include <linux/module.h>
4482 +#include <linux/sched.h>
4483 +#include <linux/fs.h>
4484 +#include <linux/file.h>
4485 +#include <linux/init.h>
4486 +#include <linux/dump.h>
4489 + * Name: dump_compress_rle()
4490 + * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
4491 + * reasonable, if possible. This is the same routine we use in IRIX.
4494 +dump_compress_rle(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
4495 + unsigned long loc)
4497 + u16 ri, wi, count = 0;
4498 + u_char value = 0, cur_byte;
4501 + * If the block should happen to "compress" to larger than the
4502 + * buffer size, allocate a larger one and change cur_buf_size.
4507 + while (ri < oldsize) {
4509 + cur_byte = value = old[ri];
4512 + if (count == 255) {
4513 + if (wi + 3 > oldsize) {
4517 + new[wi++] = count;
4518 + new[wi++] = value;
4519 + value = cur_byte = old[ri];
4522 + if ((cur_byte = old[ri]) == value) {
4526 + if (wi + 3 > oldsize) {
4530 + new[wi++] = count;
4531 + new[wi++] = value;
4532 + } else if (count == 1) {
4534 + if (wi + 3 > oldsize) {
4541 + if (wi + 2 > oldsize) {
4544 + new[wi++] = value;
4545 + new[wi++] = value;
4547 + } else { /* count == 0 */
4549 + if (wi + 2 > oldsize) {
4552 + new[wi++] = value;
4553 + new[wi++] = value;
4555 + if (wi + 1 > oldsize) {
4558 + new[wi++] = value;
4560 + } /* if count > 1 */
4565 + } /* if byte == value */
4567 + } /* if count == 255 */
4569 + } /* if ri == 0 */
4574 + if (wi + 3 > oldsize) {
4578 + new[wi++] = count;
4579 + new[wi++] = value;
4580 + } else if (count == 1) {
4582 + if (wi + 3 > oldsize)
4588 + if (wi + 2 > oldsize)
4590 + new[wi++] = value;
4591 + new[wi++] = value;
4593 + } else { /* count == 0 */
4595 + if (wi + 2 > oldsize)
4597 + new[wi++] = value;
4598 + new[wi++] = value;
4600 + if (wi + 1 > oldsize)
4602 + new[wi++] = value;
4604 + } /* if count > 1 */
4611 +/* setup the rle compression functionality */
4612 +static struct __dump_compress dump_rle_compression = {
4613 + .compress_type = DUMP_COMPRESS_RLE,
4614 + .compress_func = dump_compress_rle,
4615 + .compress_name = "RLE",
4619 + * Name: dump_compress_rle_init()
4620 + * Func: Initialize rle compression for dumping.
4623 +dump_compress_rle_init(void)
4625 + dump_register_compression(&dump_rle_compression);
4630 + * Name: dump_compress_rle_cleanup()
4631 + * Func: Remove rle compression for dumping.
4634 +dump_compress_rle_cleanup(void)
4636 + dump_unregister_compression(DUMP_COMPRESS_RLE);
4639 +/* module initialization */
4640 +module_init(dump_compress_rle_init);
4641 +module_exit(dump_compress_rle_cleanup);
4643 +MODULE_LICENSE("GPL");
4644 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
4645 +MODULE_DESCRIPTION("RLE compression module for crash dump driver");
4646 Index: linux-2.6.10/drivers/dump/dump_execute.c
4647 ===================================================================
4648 --- linux-2.6.10.orig/drivers/dump/dump_execute.c 2005-04-05 19:01:49.158500672 +0800
4649 +++ linux-2.6.10/drivers/dump/dump_execute.c 2005-04-05 16:47:53.943205104 +0800
4652 + * The file has the common/generic dump execution code
4654 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
4655 + * Split and rewrote high level dump execute code to make use
4656 + * of dump method interfaces.
4658 + * Derived from original code in dump_base.c created by
4659 + * Matt Robinson <yakker@sourceforge.net>)
4661 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
4662 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
4663 + * Copyright (C) 2002 International Business Machines Corp.
4665 + * Assumes dumper and dump config settings are in place
4666 + * (invokes corresponding dumper specific routines as applicable)
4668 + * This code is released under version 2 of the GNU GPL.
4670 +#include <linux/kernel.h>
4671 +#include <linux/notifier.h>
4672 +#include <linux/dump.h>
4673 +#include <linux/delay.h>
4674 +#include <linux/reboot.h>
4675 +#include "dump_methods.h"
4677 +struct notifier_block *dump_notifier_list; /* dump started/ended callback */
4679 +extern int panic_timeout;
4681 +/* Dump progress indicator */
4685 + static const char twiddle[4] = { '|', '\\', '-', '/' };
4686 + printk("%c\b", twiddle[i&3]);
4689 +/* Make the device ready and write out the header */
4690 +int dump_begin(void)
4694 + /* dump_dev = dump_config.dumper->dev; */
4696 + if ((err = dump_dev_silence())) {
4697 + /* quiesce failed, can't risk continuing */
4698 + /* Todo/Future: switch to alternate dump scheme if possible */
4699 + printk("dump silence dev failed ! error %d\n", err);
4703 + pr_debug("Writing dump header\n");
4704 + if ((err = dump_update_header())) {
4705 + printk("dump update header failed ! error %d\n", err);
4706 + dump_dev_resume();
4710 + dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
4716 + * Write the dump terminator, a final header update and let go of
4717 + * exclusive use of the device for dump.
4719 +int dump_complete(void)
4723 + if (dump_config.level != DUMP_LEVEL_HEADER) {
4724 + if ((ret = dump_update_end_marker())) {
4725 + printk("dump update end marker error %d\n", ret);
4727 + if ((ret = dump_update_header())) {
4728 + printk("dump update header error %d\n", ret);
4731 + ret = dump_dev_resume();
4733 + if ((panic_timeout > 0) && (!(dump_config.flags & (DUMP_FLAGS_SOFTBOOT | DUMP_FLAGS_NONDISRUPT)))) {
4734 + mdelay(panic_timeout * 1000);
4735 + machine_restart(NULL);
4741 +/* Saves all dump data */
4742 +int dump_execute_savedump(void)
4744 + int ret = 0, err = 0;
4746 + if ((ret = dump_begin())) {
4750 + if (dump_config.level != DUMP_LEVEL_HEADER) {
4751 + ret = dump_sequencer();
4753 + if ((err = dump_complete())) {
4754 + printk("Dump complete failed. Error %d\n", err);
4760 +extern void dump_calc_bootmap_pages(void);
4762 +/* Does all the real work: Capture and save state */
4763 +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
4767 +#ifdef CONFIG_DISCONTIGMEM
4768 + printk(KERN_INFO "Reconfiguring memory bank information....\n");
4769 + printk(KERN_INFO "This may take a while....\n");
4770 + dump_reconfigure_mbanks();
4773 + if ((ret = dump_configure_header(panic_str, regs))) {
4774 + printk("dump config header failed ! error %d\n", ret);
4778 + dump_calc_bootmap_pages();
4779 + /* tell interested parties that a dump is about to start */
4780 + notifier_call_chain(&dump_notifier_list, DUMP_BEGIN,
4781 + &dump_config.dump_device);
4783 + if (dump_config.level != DUMP_LEVEL_NONE)
4784 + ret = dump_execute_savedump();
4786 + pr_debug("dumped %ld blocks of %d bytes each\n",
4787 + dump_config.dumper->count, DUMP_BUFFER_SIZE);
4789 + /* tell interested parties that a dump has completed */
4790 + notifier_call_chain(&dump_notifier_list, DUMP_END,
4791 + &dump_config.dump_device);
4795 Index: linux-2.6.10/drivers/dump/dump_netdev.c
4796 ===================================================================
4797 --- linux-2.6.10.orig/drivers/dump/dump_netdev.c 2005-04-05 19:01:49.158500672 +0800
4798 +++ linux-2.6.10/drivers/dump/dump_netdev.c 2005-04-05 16:47:53.936206168 +0800
4801 + * Implements the dump driver interface for saving a dump via network
4804 + * Some of this code has been taken/adapted from Ingo Molnar's netconsole
4805 + * code. LKCD team expresses its thanks to Ingo.
4807 + * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
4808 + * Adapted netconsole code to implement LKCD dump over the network.
4810 + * Nov 2002 - Bharata B. Rao <bharata@in.ibm.com>
4811 + * Innumerable code cleanups, simplification and some fixes.
4812 + * Netdump configuration done by ioctl instead of using module parameters.
4813 + * Oct 2003 - Prasanna S Panchamukhi <prasanna@in.ibm.com>
4814 + * Netdump code modified to use Netpoll API's.
4816 + * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
4817 + * Copyright (C) 2002 International Business Machines Corp.
4819 + * This code is released under version 2 of the GNU GPL.
4822 +#include <net/tcp.h>
4823 +#include <net/udp.h>
4824 +#include <linux/delay.h>
4825 +#include <linux/random.h>
4826 +#include <linux/reboot.h>
4827 +#include <linux/module.h>
4828 +#include <linux/dump.h>
4829 +#include <linux/dump_netdev.h>
4831 +#include <asm/unaligned.h>
4833 +static int startup_handshake;
4834 +static int page_counter;
4835 +static unsigned long flags_global;
4836 +static int netdump_in_progress;
4839 + * security depends on the trusted path between the netconsole
4840 + * server and netconsole client, since none of the packets are
4841 + * encrypted. The random magic number protects the protocol
4842 + * against spoofing.
4844 +static u64 dump_magic;
4847 + * We maintain a small pool of fully-sized skbs,
4848 + * to make sure the message gets out even in
4849 + * extreme OOM situations.
4852 +static void rx_hook(struct netpoll *np, int port, char *msg, int size);
4856 +static void rx_hook(struct netpoll *np, int port, char *msg, int size)
4858 + req_t * __req = (req_t *) msg;
4860 + * First check if we are dumping or doing startup handshake, if
4861 + * not quickly return.
4864 + if (!netdump_in_progress)
4867 + if ((ntohl(__req->command) != COMM_GET_MAGIC) &&
4868 + (ntohl(__req->command) != COMM_HELLO) &&
4869 + (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) &&
4870 + (ntohl(__req->command) != COMM_START_NETDUMP_ACK) &&
4871 + (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0))
4874 + req.magic = ntohl(__req->magic);
4875 + req.command = ntohl(__req->command);
4876 + req.from = ntohl(__req->from);
4877 + req.to = ntohl(__req->to);
4878 + req.nr = ntohl(__req->nr);
4883 +static char netdump_membuf[1024 + HEADER_LEN + 1];
4885 + * Fill the netdump_membuf with the header information from reply_t structure
4886 + * and send it down to netpoll_send_udp() routine.
4889 +netdump_send_packet(struct netpoll *np, reply_t *reply, size_t data_len) {
4892 + b = &netdump_membuf[1];
4893 + netdump_membuf[0] = NETCONSOLE_VERSION;
4894 + put_unaligned(htonl(reply->nr), (u32 *) b);
4895 + put_unaligned(htonl(reply->code), (u32 *) (b + sizeof(reply->code)));
4896 + put_unaligned(htonl(reply->info), (u32 *) (b + sizeof(reply->code) +
4897 + sizeof(reply->info)));
4898 + netpoll_send_udp(np, netdump_membuf, data_len + HEADER_LEN);
4902 +dump_send_mem(struct netpoll *np, req_t *req, const char* buff, size_t len)
4906 + int nr_chunks = len/1024;
4909 + reply.nr = req->nr;
4910 + reply.code = REPLY_MEM;
4911 + if ( nr_chunks <= 0)
4913 + for (i = 0; i < nr_chunks; i++) {
4914 + unsigned int offset = i*1024;
4915 + reply.info = offset;
4916 + memcpy((netdump_membuf + HEADER_LEN), (buff + offset), 1024);
4917 + netdump_send_packet(np, &reply, 1024);
4922 + * This function waits for the client to acknowledge the receipt
4923 + * of the netdump startup reply, with the possibility of packets
4924 + * getting lost. We resend the startup packet if no ACK is received,
4925 + * after a 1 second delay.
4927 + * (The client can test the success of the handshake via the HELLO
4928 + * command, and send ACKs until we enter netdump mode.)
4931 +dump_handshake(struct dump_dev *net_dev)
4937 + if (startup_handshake) {
4938 + sprintf((netdump_membuf + HEADER_LEN),
4939 + "NETDUMP start, waiting for start-ACK.\n");
4940 + reply.code = REPLY_START_NETDUMP;
4944 + sprintf((netdump_membuf + HEADER_LEN),
4945 + "NETDUMP start, waiting for start-ACK.\n");
4946 + reply.code = REPLY_START_WRITE_NETDUMP;
4947 + reply.nr = net_dev->curr_offset;
4948 + reply.info = net_dev->curr_offset;
4950 + str_len = strlen(netdump_membuf + HEADER_LEN);
4952 + /* send 300 handshake packets before declaring failure */
4953 + for (i = 0; i < 300; i++) {
4954 + netdump_send_packet(&net_dev->np, &reply, str_len);
4957 + for (j = 0; j < 10000; j++) {
4959 + netpoll_poll(&net_dev->np);
4965 + * if there is no new request, try sending the handshaking
4972 + * check if the new request is of the expected type,
4973 + * if so, return, else try sending the handshaking
4976 + if (startup_handshake) {
4977 + if (req.command == COMM_HELLO || req.command ==
4978 + COMM_START_NETDUMP_ACK) {
4985 + if (req.command == COMM_SEND_MEM) {
4997 +do_netdump(struct dump_dev *net_dev, const char* buff, size_t len)
5001 + int repeatCounter, counter, total_loop;
5004 + netdump_in_progress = 1;
5006 + if (dump_handshake(net_dev) < 0) {
5007 + printk("network dump failed due to handshake failure\n");
5012 + * Ideally startup handshake should be done during dump configuration,
5013 + * i.e., in dump_net_open(). This will be done when I figure out
5014 + * the dependency between startup handshake, subsequent write and
5015 + * various commands wrt net-server.
5017 + if (startup_handshake)
5018 + startup_handshake = 0;
5021 + repeatCounter = 0;
5025 + netpoll_poll(&net_dev->np);
5030 + if (repeatCounter > 5) {
5032 + if (counter > 10000) {
5033 + if (total_loop >= 100000) {
5034 + printk("Time OUT LEAVE NOW\n");
5038 + printk("Try number %d out of "
5039 + "10 before Time Out\n",
5044 + repeatCounter = 0;
5048 + repeatCounter = 0;
5052 + switch (req.command) {
5056 + case COMM_SEND_MEM:
5057 + dump_send_mem(&net_dev->np, &req, buff, len);
5061 + case COMM_START_WRITE_NETDUMP_ACK:
5066 + sprintf((netdump_membuf + HEADER_LEN),
5067 + "Hello, this is netdump version " "0.%02d\n",
5068 + NETCONSOLE_VERSION);
5069 + str_len = strlen(netdump_membuf + HEADER_LEN);
5070 + reply.code = REPLY_HELLO;
5071 + reply.nr = req.nr;
5072 + reply.info = net_dev->curr_offset;
5073 + netdump_send_packet(&net_dev->np, &reply, str_len);
5076 + case COMM_GET_PAGE_SIZE:
5077 + sprintf((netdump_membuf + HEADER_LEN),
5078 + "PAGE_SIZE: %ld\n", PAGE_SIZE);
5079 + str_len = strlen(netdump_membuf + HEADER_LEN);
5080 + reply.code = REPLY_PAGE_SIZE;
5081 + reply.nr = req.nr;
5082 + reply.info = PAGE_SIZE;
5083 + netdump_send_packet(&net_dev->np, &reply, str_len);
5086 + case COMM_GET_NR_PAGES:
5087 + reply.code = REPLY_NR_PAGES;
5088 + reply.nr = req.nr;
5089 + reply.info = num_physpages;
5090 + reply.info = page_counter;
5091 + sprintf((netdump_membuf + HEADER_LEN),
5092 + "Number of pages: %ld\n", num_physpages);
5093 + str_len = strlen(netdump_membuf + HEADER_LEN);
5094 + netdump_send_packet(&net_dev->np, &reply, str_len);
5097 + case COMM_GET_MAGIC:
5098 + reply.code = REPLY_MAGIC;
5099 + reply.nr = req.nr;
5100 + reply.info = NETCONSOLE_VERSION;
5101 + sprintf((netdump_membuf + HEADER_LEN),
5102 + (char *)&dump_magic, sizeof(dump_magic));
5103 + str_len = strlen(netdump_membuf + HEADER_LEN);
5104 + netdump_send_packet(&net_dev->np, &reply, str_len);
5108 + reply.code = REPLY_ERROR;
5109 + reply.nr = req.nr;
5110 + reply.info = req.command;
5111 + sprintf((netdump_membuf + HEADER_LEN),
5112 + "Got unknown command code %d!\n", req.command);
5113 + str_len = strlen(netdump_membuf + HEADER_LEN);
5114 + netdump_send_packet(&net_dev->np, &reply, str_len);
5119 + netdump_in_progress = 0;
5124 +dump_validate_config(struct netpoll *np)
5126 + if (!np->local_ip) {
5127 + printk("network device %s has no local address, "
5128 + "aborting.\n", np->name);
5132 +#define IP(x) ((unsigned char *)&np->local_ip)[x]
5133 + printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
5136 + if (!np->local_port) {
5137 + printk("source_port parameter not specified, aborting.\n");
5141 + if (!np->remote_ip) {
5142 + printk("target_ip parameter not specified, aborting.\n");
5146 + np->remote_ip = ntohl(np->remote_ip);
5147 +#define IP(x) ((unsigned char *)&np->remote_ip)[x]
5148 + printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
5151 + if (!np->remote_port) {
5152 + printk("target_port parameter not specified, aborting.\n");
5155 + printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x",
5156 + np->remote_mac[0], np->remote_mac[1], np->remote_mac[2],
5157 + np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]);
5159 + if ((np->remote_mac[0] & np->remote_mac[1] & np->remote_mac[2] &
5160 + np->remote_mac[3] & np->remote_mac[4] & np->remote_mac[5]) == 255)
5161 + printk("(Broadcast)");
5167 + * Prepares the dump device so we can take a dump later.
5168 + * Validates the netdump configuration parameters.
5170 + * TODO: Network connectivity check should be done here.
5173 +dump_net_open(struct dump_dev *net_dev, unsigned long arg)
5177 + /* get the interface name */
5178 + if (copy_from_user(net_dev->np.dev_name, (void *)arg, IFNAMSIZ))
5180 + net_dev->np.rx_hook = rx_hook;
5181 + retval = netpoll_setup(&net_dev->np);
5183 + dump_validate_config(&net_dev->np);
5184 + net_dev->curr_offset = 0;
5185 + printk("Network device %s successfully configured for dumping\n",
5186 + net_dev->np.dev_name);
5191 + * Close the dump device and release associated resources
5192 + * Invoked when unconfiguring the dump device.
5195 +dump_net_release(struct dump_dev *net_dev)
5197 + netpoll_cleanup(&net_dev->np);
5202 + * Prepare the dump device for use (silence any ongoing activity
5203 + * and quiesce state) when the system crashes.
5206 +dump_net_silence(struct dump_dev *net_dev)
5208 + netpoll_set_trap(1);
5209 + local_irq_save(flags_global);
5210 + startup_handshake = 1;
5211 + net_dev->curr_offset = 0;
5212 + printk("Dumping to network device %s on CPU %d ...\n", net_dev->np.name,
5213 + smp_processor_id());
5218 + * Invoked when dumping is done. This is the time to put things back
5219 + * (i.e. undo the effects of dump_block_silence) so the device is
5220 + * available for normal use.
5223 +dump_net_resume(struct dump_dev *net_dev)
5229 + sprintf((netdump_membuf + HEADER_LEN), "NETDUMP end.\n");
5230 + str_len = strlen(netdump_membuf + HEADER_LEN);
5231 + for( indx = 0; indx < 6; indx++) {
5232 + reply.code = REPLY_END_NETDUMP;
5235 + netdump_send_packet(&net_dev->np, &reply, str_len);
5237 + printk("NETDUMP END!\n");
5238 + local_irq_restore(flags_global);
5239 + netpoll_set_trap(0);
5240 + startup_handshake = 0;
5245 + * Seek to the specified offset in the dump device.
5246 + * Makes sure this is a valid offset, otherwise returns an error.
5249 +dump_net_seek(struct dump_dev *net_dev, loff_t off)
5251 + net_dev->curr_offset = off;
5259 +dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len)
5264 + cnt = len/ PAGE_SIZE;
5266 + for (i = 0; i < cnt; i++) {
5267 + off = i* PAGE_SIZE;
5268 + ret = do_netdump(net_dev, buf+off, PAGE_SIZE);
5271 + net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE;
5277 + * check if the last dump i/o is over and ready for next request
5280 +dump_net_ready(struct dump_dev *net_dev, void *buf)
5286 + * ioctl function used for configuring network dump
5289 +dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg)
5292 + case DIOSTARGETIP:
5293 + net_dev->np.remote_ip= arg;
5295 + case DIOSTARGETPORT:
5296 + net_dev->np.remote_port = (u16)arg;
5298 + case DIOSSOURCEPORT:
5299 + net_dev->np.local_port = (u16)arg;
5302 + return copy_from_user(net_dev->np.remote_mac, (void *)arg, 6);
5304 + case DIOGTARGETIP:
5305 + case DIOGTARGETPORT:
5306 + case DIOGSOURCEPORT:
5315 +struct dump_dev_ops dump_netdev_ops = {
5316 + .open = dump_net_open,
5317 + .release = dump_net_release,
5318 + .silence = dump_net_silence,
5319 + .resume = dump_net_resume,
5320 + .seek = dump_net_seek,
5321 + .write = dump_net_write,
5322 + /* .read not implemented */
5323 + .ready = dump_net_ready,
5324 + .ioctl = dump_net_ioctl
5327 +static struct dump_dev default_dump_netdev = {
5328 + .type_name = "networkdev",
5329 + .ops = &dump_netdev_ops,
5331 + .np.name = "netdump",
5332 + .np.dev_name = "eth0",
5333 + .np.rx_hook = rx_hook,
5334 + .np.local_port = 6688,
5335 + .np.remote_port = 6688,
5336 + .np.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
5340 +dump_netdev_init(void)
5342 + default_dump_netdev.curr_offset = 0;
5344 + if (dump_register_device(&default_dump_netdev) < 0) {
5345 + printk("network dump device driver registration failed\n");
5348 + printk("network device driver for LKCD registered\n");
5350 + get_random_bytes(&dump_magic, sizeof(dump_magic));
5355 +dump_netdev_cleanup(void)
5357 + dump_unregister_device(&default_dump_netdev);
5360 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
5361 +MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)");
5362 +MODULE_LICENSE("GPL");
5364 +module_init(dump_netdev_init);
5365 +module_exit(dump_netdev_cleanup);
5366 Index: linux-2.6.10/drivers/dump/dump_x8664.c
5367 ===================================================================
5368 --- linux-2.6.10.orig/drivers/dump/dump_x8664.c 2005-04-05 19:01:49.158500672 +0800
5369 +++ linux-2.6.10/drivers/dump/dump_x8664.c 2005-04-05 16:47:53.932206776 +0800
5372 + * Architecture specific (x86-64) functions for Linux crash dumps.
5374 + * Created by: Matt Robinson (yakker@sgi.com)
5376 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
5378 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
5379 + * Copyright 2000 TurboLinux, Inc. All rights reserved.
5381 + * x86-64 port Copyright 2002 Andi Kleen, SuSE Labs
5382 + * x86-64 port Sachin Sant ( sachinp@in.ibm.com )
5383 + * This code is released under version 2 of the GNU GPL.
5387 + * The hooks for dumping the kernel virtual memory to disk are in this
5388 + * file. Any time a modification is made to the virtual memory mechanism,
5389 + * these routines must be changed to use the new mechanisms.
5391 +#include <linux/init.h>
5392 +#include <linux/types.h>
5393 +#include <linux/kernel.h>
5394 +#include <linux/smp.h>
5395 +#include <linux/fs.h>
5396 +#include <linux/vmalloc.h>
5397 +#include <linux/dump.h>
5398 +#include "dump_methods.h"
5399 +#include <linux/mm.h>
5400 +#include <linux/rcupdate.h>
5401 +#include <asm/processor.h>
5402 +#include <asm/hardirq.h>
5403 +#include <asm/kdebug.h>
5404 +#include <asm/uaccess.h>
5405 +#include <asm/nmi.h>
5406 +#include <asm/kdebug.h>
5408 +static __s32 saved_irq_count; /* saved preempt_count() flag */
5410 +void (*dump_trace_ptr)(struct pt_regs *);
5412 +static int alloc_dha_stack(void)
5417 + if (dump_header_asm.dha_stack[0])
5420 + ptr = vmalloc(THREAD_SIZE * num_online_cpus());
5422 + printk("vmalloc for dha_stacks failed\n");
5426 + for (i = 0; i < num_online_cpus(); i++) {
5427 + dump_header_asm.dha_stack[i] =
5428 + (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
5433 +static int free_dha_stack(void)
5435 + if (dump_header_asm.dha_stack[0]) {
5436 + vfree((void *)dump_header_asm.dha_stack[0]);
5437 + dump_header_asm.dha_stack[0] = 0;
5443 +__dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs)
5446 + memcpy(dest_regs, regs, sizeof(struct pt_regs));
5450 +__dump_save_context(int cpu, const struct pt_regs *regs,
5451 + struct task_struct *tsk)
5453 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
5454 + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
5456 + /* take a snapshot of the stack */
5457 + /* doing this enables us to tolerate slight drifts on this cpu */
5459 + if (dump_header_asm.dha_stack[cpu]) {
5460 + memcpy((void *)dump_header_asm.dha_stack[cpu],
5461 + STACK_START_POSITION(tsk),
5464 + dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
5468 +extern cpumask_t irq_affinity[];
5469 +extern irq_desc_t irq_desc[];
5470 +extern void dump_send_ipi(void);
5471 +static int dump_expect_ipi[NR_CPUS];
5472 +static atomic_t waiting_for_dump_ipi;
5473 +static unsigned long saved_affinity[NR_IRQS];
5475 +extern void stop_this_cpu(void *);
5478 +dump_nmi_callback(struct pt_regs *regs, int cpu)
5480 + if (!dump_expect_ipi[cpu]) {
5484 + dump_expect_ipi[cpu] = 0;
5486 + dump_save_this_cpu(regs);
5487 + atomic_dec(&waiting_for_dump_ipi);
5491 + switch (dump_silence_level) {
5492 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
5493 + while (dump_oncpu) {
5494 + barrier(); /* paranoia */
5495 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
5496 + goto level_changed;
5498 + cpu_relax(); /* kill time nicely */
5502 + case DUMP_HALT_CPUS: /* Execute halt */
5503 + stop_this_cpu(NULL);
5506 + case DUMP_SOFT_SPIN_CPUS:
5507 + /* Mark the task so it spins in schedule */
5508 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
5515 +/* save registers on other processors */
5517 +__dump_save_other_cpus(void)
5519 + int i, cpu = smp_processor_id();
5520 + int other_cpus = num_online_cpus() - 1;
5522 + if (other_cpus > 0) {
5523 + atomic_set(&waiting_for_dump_ipi, other_cpus);
5525 + for (i = 0; i < NR_CPUS; i++)
5526 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
5528 + set_nmi_callback(dump_nmi_callback);
5533 + /* maybe we don't need to wait for NMI to be processed.
5534 + just write out the header at the end of dumping, if
5535 + this IPI is not processed until then, there probably
5536 + is a problem and we just fail to capture state of
5538 + while(atomic_read(&waiting_for_dump_ipi) > 0)
5541 + unset_nmi_callback();
5547 + * Routine to save the old irq affinities and change affinities of all irqs to
5548 + * the dumping cpu.
5551 +set_irq_affinity(void)
5554 + cpumask_t cpu = CPU_MASK_NONE;
5556 + cpu_set(smp_processor_id(), cpu);
5557 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
5558 + for (i = 0; i < NR_IRQS; i++) {
5559 + if (irq_desc[i].handler == NULL)
5561 + irq_affinity[i] = cpu;
5562 + if (irq_desc[i].handler->set_affinity != NULL)
5563 + irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
5568 + * Restore old irq affinities.
5571 +reset_irq_affinity(void)
5575 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
5576 + for (i = 0; i < NR_IRQS; i++) {
5577 + if (irq_desc[i].handler == NULL)
5579 + if (irq_desc[i].handler->set_affinity != NULL)
5580 + irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
5584 +#else /* !CONFIG_SMP */
5585 +#define set_irq_affinity() do { } while (0)
5586 +#define reset_irq_affinity() do { } while (0)
5587 +#define save_other_cpu_states() do { } while (0)
5588 +#endif /* !CONFIG_SMP */
5593 + saved_irq_count = irq_count();
5594 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
5598 +irq_bh_restore(void)
5600 + preempt_count() |= saved_irq_count;
5604 + * Name: __dump_irq_enable
5605 + * Func: Reset system so interrupts are enabled.
5606 + * This is used for dump methods that require interrupts
5607 + * Eventually, all methods will have interrupts disabled
5608 + * and this code can be removed.
5610 + * Change irq affinities
5611 + * Re-enable interrupts
5614 +__dump_irq_enable(void)
5616 + set_irq_affinity();
5618 + local_irq_enable();
5623 + * Name: __dump_irq_restore
5624 + * Func: Resume the system state in an architecture-specific way.
5628 +__dump_irq_restore(void)
5630 + local_irq_disable();
5631 + reset_irq_affinity();
5636 + * Name: __dump_configure_header()
5637 + * Func: Configure the dump header with all proper values.
5640 +__dump_configure_header(const struct pt_regs *regs)
5642 + /* Dummy function - return */
5646 +static int notify(struct notifier_block *nb, unsigned long code, void *data)
5648 + if (code == DIE_NMI_IPI && dump_oncpu)
5649 + return NOTIFY_BAD;
5650 + return NOTIFY_DONE;
5653 +static struct notifier_block dump_notifier = {
5654 + .notifier_call = notify,
5658 + * Name: __dump_init()
5659 + * Func: Initialize the dumping routine process.
5662 +__dump_init(uint64_t local_memory_start)
5664 + notifier_chain_register(&die_chain, &dump_notifier);
5668 + * Name: __dump_open()
5669 + * Func: Open the dump device (architecture specific). This is in
5670 + * case it's necessary in the future.
5675 + alloc_dha_stack();
5681 + * Name: __dump_cleanup()
5682 + * Func: Free any architecture specific data structures. This is called
5683 + * when the dump module is being removed.
5686 +__dump_cleanup(void)
5689 + notifier_chain_unregister(&die_chain, &dump_notifier);
5690 + synchronize_kernel();
5694 +extern int page_is_ram(unsigned long);
5697 + * Name: __dump_page_valid()
5698 + * Func: Check if page is valid to dump.
5701 +__dump_page_valid(unsigned long index)
5703 + if (!pfn_valid(index))
5706 + return page_is_ram(index);
5710 + * Name: manual_handle_crashdump()
5711 + * Func: Interface for the lkcd dump command. Calls dump_execute()
5714 +manual_handle_crashdump(void) {
5716 + struct pt_regs regs;
5718 +	get_current_regs(&regs);
5719 +	dump_execute("manual", &regs);
5724 + * Name: __dump_clean_irq_state()
5725 + * Func: Clean up from the previous IRQ handling state. Such as oops from
5726 + * interrupt handler or bottom half.
5729 +__dump_clean_irq_state(void)
5733 Index: linux-2.6.10/drivers/dump/dump_overlay.c
5734 ===================================================================
5735 --- linux-2.6.10.orig/drivers/dump/dump_overlay.c 2005-04-05 19:01:49.158500672 +0800
5736 +++ linux-2.6.10/drivers/dump/dump_overlay.c 2005-04-05 16:47:53.934206472 +0800
5739 + * Two-stage soft-boot based dump scheme methods (memory overlay
5740 + * with post soft-boot writeout)
5742 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
5744 + * This approach of saving the dump in memory and writing it
5745 + * out after a softboot without clearing memory is derived from the
5746 + * Mission Critical Linux dump implementation. Credits and a big
5747 + * thanks for letting the lkcd project make use of the excellent
5748 + * piece of work and also for helping with clarifications and
5749 + * tips along the way are due to:
5750 + * Dave Winchell <winchell@mclx.com> (primary author of mcore)
5752 + * Jeff Moyer <moyer@mclx.com>
5753 + * Josh Huber <huber@mclx.com>
5755 + * For those familiar with the mcore implementation, the key
5756 + * differences/extensions here are in allowing entire memory to be
5757 + * saved (in compressed form) through a careful ordering scheme
5758 + * on both the way down as well on the way up after boot, the latter
5759 + * for supporting the LKCD notion of passes in which most critical
5760 + * data is the first to be saved to the dump device. Also the post
5761 + * boot writeout happens from within the kernel rather than driven
5764 + * The sequence is orchestrated through the abstraction of "dumpers",
5765 + * one for the first stage which then sets up the dumper for the next
5766 + * stage, providing for a smooth and flexible reuse of the singlestage
5767 + * dump scheme methods and a handle to pass dump device configuration
5768 + * information across the soft boot.
5770 + * Copyright (C) 2002 International Business Machines Corp.
5772 + * This code is released under version 2 of the GNU GPL.
5776 + * Disruptive dumping using the second kernel soft-boot option
5777 + * for issuing dump i/o operates in 2 stages:
5779 + * (1) - Saves the (compressed & formatted) dump in memory using a
5780 + * carefully ordered overlay scheme designed to capture the
5781 + * entire physical memory or selective portions depending on
5782 + * dump config settings,
5783 + * - Registers the stage 2 dumper and
5784 + * - Issues a soft reboot w/o clearing memory.
5786 + * The overlay scheme starts with a small bootstrap free area
5787 + * and follows a reverse ordering of passes wherein it
5788 + * compresses and saves data starting with the least critical
5789 + * areas first, thus freeing up the corresponding pages to
5790 + * serve as destination for subsequent data to be saved, and
5791 + * so on. With a good compression ratio, this makes it feasible
5792 + * to capture an entire physical memory dump without significantly
5793 + * reducing memory available during regular operation.
5795 + * (2) Post soft-reboot, runs through the saved memory dump and
5796 + * writes it out to disk, this time around, taking care to
5797 + * save the more critical data first (i.e. pages which figure
5798 + * in early passes for a regular dump). Finally issues a
5801 + * Since the data was saved in memory after selection/filtering
5802 + * and formatted as per the chosen output dump format, at this
5803 + * stage the filter and format actions are just dummy (or
5804 + * passthrough) actions, except for influence on ordering of
5808 +#include <linux/types.h>
5809 +#include <linux/kernel.h>
5810 +#include <linux/highmem.h>
5811 +#include <linux/bootmem.h>
5812 +#include <linux/dump.h>
5813 +#ifdef CONFIG_KEXEC
5814 +#include <linux/delay.h>
5815 +#include <linux/reboot.h>
5816 +#include <linux/kexec.h>
5818 +#include "dump_methods.h"
5820 +extern struct list_head dumper_list_head;
5821 +extern struct dump_memdev *dump_memdev;
5822 +extern struct dumper dumper_stage2;
5823 +struct dump_config_block *dump_saved_config = NULL;
5824 +extern struct dump_blockdev *dump_blockdev;
5825 +static struct dump_memdev *saved_dump_memdev = NULL;
5826 +static struct dumper *saved_dumper = NULL;
5828 +#ifdef CONFIG_KEXEC
5829 +extern int panic_timeout;
5833 +extern void dump_display_map(struct dump_memdev *);
5836 +struct dumper *dumper_by_name(char *name)
5839 + struct dumper *dumper;
5840 + list_for_each_entry(dumper, &dumper_list_head, dumper_list)
5841 + if (!strncmp(dumper->name, name, 32))
5847 + /* Temporary proof of concept */
5848 + if (!strncmp(dumper_stage2.name, name, 32))
5849 + return &dumper_stage2;
5854 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
5855 +extern void dump_early_reserve_map(struct dump_memdev *);
5857 +void crashdump_reserve(void)
5859 + extern unsigned long crashdump_addr;
5861 + if (crashdump_addr == 0xdeadbeef)
5864 + /* reserve dump config and saved dump pages */
5865 + dump_saved_config = (struct dump_config_block *)crashdump_addr;
5866 + /* magic verification */
5867 + if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
5868 + printk("Invalid dump magic. Ignoring dump\n");
5869 + dump_saved_config = NULL;
5873 + printk("Dump may be available from previous boot\n");
5875 +#ifdef CONFIG_X86_64
5876 + reserve_bootmem_node(NODE_DATA(0),
5877 + virt_to_phys((void *)crashdump_addr),
5878 + PAGE_ALIGN(sizeof(struct dump_config_block)));
5880 + reserve_bootmem(virt_to_phys((void *)crashdump_addr),
5881 + PAGE_ALIGN(sizeof(struct dump_config_block)));
5883 + dump_early_reserve_map(&dump_saved_config->memdev);
5889 + * Loads the dump configuration from a memory block saved across soft-boot
5890 + * The ops vectors need fixing up as the corresp. routines may have
5891 + * relocated in the new soft-booted kernel.
5893 +int dump_load_config(struct dump_config_block *config)
5895 + struct dumper *dumper;
5896 + struct dump_data_filter *filter_table, *filter;
5897 + struct dump_dev *dev;
5900 + if (config->magic != DUMP_MAGIC_LIVE)
5901 + return -ENOENT; /* not a valid config */
5903 + /* initialize generic config data */
5904 + memcpy(&dump_config, &config->config, sizeof(dump_config));
5906 + /* initialize dumper state */
5907 + if (!(dumper = dumper_by_name(config->dumper.name))) {
5908 + printk("dumper name mismatch\n");
5909 + return -ENOENT; /* dumper mismatch */
5912 + /* verify and fixup schema */
5913 + if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
5914 + printk("dumper scheme mismatch\n");
5915 + return -ENOENT; /* mismatch */
5917 + config->scheme.ops = dumper->scheme->ops;
5918 + config->dumper.scheme = &config->scheme;
5920 + /* verify and fixup filter operations */
5921 + filter_table = dumper->filter;
5922 + for (i = 0, filter = config->filter_table;
5923 + ((i < MAX_PASSES) && filter_table[i].selector);
5925 + if (strncmp(filter_table[i].name, filter->name, 32)) {
5926 + printk("dump filter mismatch\n");
5927 + return -ENOENT; /* filter name mismatch */
5929 + filter->selector = filter_table[i].selector;
5931 + config->dumper.filter = config->filter_table;
5933 + /* fixup format */
5934 + if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
5935 + printk("dump format mismatch\n");
5936 + return -ENOENT; /* mismatch */
5938 + config->fmt.ops = dumper->fmt->ops;
5939 + config->dumper.fmt = &config->fmt;
5941 + /* fixup target device */
5942 + dev = (struct dump_dev *)(&config->dev[0]);
5943 + if (dumper->dev == NULL) {
5944 + pr_debug("Vanilla dumper - assume default\n");
5945 + if (dump_dev == NULL)
5947 + dumper->dev = dump_dev;
5950 + if (strncmp(dumper->dev->type_name, dev->type_name, 32)) {
5951 + printk("dump dev type mismatch %s instead of %s\n",
5952 + dev->type_name, dumper->dev->type_name);
5953 + return -ENOENT; /* mismatch */
5955 + dev->ops = dumper->dev->ops;
5956 + config->dumper.dev = dev;
5958 + /* fixup memory device containing saved dump pages */
5959 + /* assume statically init'ed dump_memdev */
5960 + config->memdev.ddev.ops = dump_memdev->ddev.ops;
5961 + /* switch to memdev from prev boot */
5962 + saved_dump_memdev = dump_memdev; /* remember current */
5963 + dump_memdev = &config->memdev;
5965 + /* Make this the current primary dumper */
5966 + dump_config.dumper = &config->dumper;
5971 +/* Saves the dump configuration in a memory block for use across a soft-boot */
5972 +int dump_save_config(struct dump_config_block *config)
5974 + printk("saving dump config settings\n");
5976 + /* dump config settings */
5977 + memcpy(&config->config, &dump_config, sizeof(dump_config));
5979 + /* dumper state */
5980 + memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
5981 + memcpy(&config->scheme, dump_config.dumper->scheme,
5982 + sizeof(struct dump_scheme));
5983 + memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
5984 + memcpy(&config->dev[0], dump_config.dumper->dev,
5985 + sizeof(struct dump_anydev));
5986 + memcpy(&config->filter_table, dump_config.dumper->filter,
5987 + sizeof(struct dump_data_filter)*MAX_PASSES);
5989 + /* handle to saved mem pages */
5990 + memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
5992 + config->magic = DUMP_MAGIC_LIVE;
5997 +int dump_init_stage2(struct dump_config_block *saved_config)
6001 + pr_debug("dump_init_stage2\n");
6002 + /* Check if dump from previous boot exists */
6003 + if (saved_config) {
6004 + printk("loading dumper from previous boot \n");
6005 + /* load and configure dumper from previous boot */
6006 + if ((err = dump_load_config(saved_config)))
6009 + if (!dump_oncpu) {
6010 + if ((err = dump_configure(dump_config.dump_device))) {
6011 + printk("Stage 2 dump configure failed\n");
6017 + dump_dev = dump_config.dumper->dev;
6018 + /* write out the dump */
6019 + err = dump_generic_execute(NULL, NULL);
6021 + dump_saved_config = NULL;
6023 + if (!dump_oncpu) {
6024 + dump_unconfigure();
6030 + /* no dump to write out */
6031 + printk("no dumper from previous boot \n");
6036 +extern void dump_mem_markpages(struct dump_memdev *);
6038 +int dump_switchover_stage(void)
6042 +	/* trigger stage 2 right away - in real life would be after soft-boot */
6043 + /* dump_saved_config would be a boot param */
6044 + saved_dump_memdev = dump_memdev;
6045 + saved_dumper = dump_config.dumper;
6046 + ret = dump_init_stage2(dump_saved_config);
6047 + dump_memdev = saved_dump_memdev;
6048 + dump_config.dumper = saved_dumper;
6052 +int dump_activate_softboot(void)
6055 +#ifdef CONFIG_KEXEC
6056 + int num_cpus_online = 0;
6057 + struct kimage *image;
6060 + /* temporary - switchover to writeout previously saved dump */
6061 +#ifndef CONFIG_KEXEC
6062 + err = dump_switchover_stage(); /* non-disruptive case */
6064 + dump_config.dumper = &dumper_stage1; /* set things back */
6069 + dump_silence_level = DUMP_HALT_CPUS;
6070 + /* wait till we become the only cpu */
6071 + /* maybe by checking for online cpus ? */
6073 + while((num_cpus_online = num_online_cpus()) > 1);
6075 + /* now call into kexec */
6077 + image = xchg(&kexec_image, 0);
6079 + mdelay(panic_timeout*1000);
6080 + machine_kexec(image);
6085 + * * should we call reboot notifiers ? inappropriate for panic ?
6086 + * * what about device_shutdown() ?
6087 + * * is explicit bus master disabling needed or can we do that
6088 + * * through driverfs ?
6094 +/* --- DUMP SCHEME ROUTINES --- */
6096 +static inline int dump_buf_pending(struct dumper *dumper)
6098 + return (dumper->curr_buf - dumper->dump_buf);
6101 +/* Invoked during stage 1 of soft-reboot based dumping */
6102 +int dump_overlay_sequencer(void)
6104 + struct dump_data_filter *filter = dump_config.dumper->filter;
6105 + struct dump_data_filter *filter2 = dumper_stage2.filter;
6106 + int pass = 0, err = 0, save = 0;
6107 + int (*action)(unsigned long, unsigned long);
6109 + /* Make sure gzip compression is being used */
6110 + if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
6111 + printk(" Please set GZIP compression \n");
6115 + /* start filling in dump data right after the header */
6116 + dump_config.dumper->curr_offset =
6117 + PAGE_ALIGN(dump_config.dumper->header_len);
6119 + /* Locate the last pass */
6120 + for (;filter->selector; filter++, pass++);
6123 + * Start from the end backwards: overlay involves a reverse
6124 + * ordering of passes, since less critical pages are more
6125 + * likely to be reusable as scratch space once we are through
6128 + for (--pass, --filter; pass >= 0; pass--, filter--)
6130 + /* Assumes passes are exclusive (even across dumpers) */
6131 + /* Requires care when coding the selection functions */
6132 + if ((save = filter->level_mask & dump_config.level))
6133 + action = dump_save_data;
6135 + action = dump_skip_data;
6137 + /* Remember the offset where this pass started */
6138 + /* The second stage dumper would use this */
6139 + if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
6140 + pr_debug("Starting pass %d with pending data\n", pass);
6141 + pr_debug("filling dummy data to page-align it\n");
6142 + dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
6143 + (unsigned long)dump_config.dumper->curr_buf);
6146 + filter2[pass].start[0] = dump_config.dumper->curr_offset
6147 + + dump_buf_pending(dump_config.dumper);
6149 + err = dump_iterator(pass, action, filter);
6151 + filter2[pass].end[0] = dump_config.dumper->curr_offset
6152 + + dump_buf_pending(dump_config.dumper);
6153 + filter2[pass].num_mbanks = 1;
6156 + printk("dump_overlay_seq: failure %d in pass %d\n",
6160 + printk("\n %d overlay pages %s of %d each in pass %d\n",
6161 + err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
6167 +/* from dump_memdev.c */
6168 +extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
6169 +extern struct page *dump_mem_next_page(struct dump_memdev *dev);
6171 +static inline struct page *dump_get_saved_page(loff_t loc)
6173 + return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
6176 +static inline struct page *dump_next_saved_page(void)
6178 + return (dump_mem_next_page(dump_memdev));
6182 + * Iterates over list of saved dump pages. Invoked during second stage of
6183 + * soft boot dumping
6185 + * Observation: If additional selection is desired at this stage then
6186 + * a different iterator could be written which would advance
6187 + * to the next page header every time instead of blindly picking up
6188 + * the data. In such a case loc would be interpreted differently.
6189 + * At this moment however a blind pass seems sufficient, cleaner and
6192 +int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
6193 + unsigned long), struct dump_data_filter *filter)
6196 + struct page *page;
6197 + unsigned long count = 0;
6201 + for (i = 0; i < filter->num_mbanks; i++) {
6202 + loc = filter->start[i];
6203 + end = filter->end[i];
6204 + printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
6207 + /* loc will get treated as logical offset into stage 1 */
6208 + page = dump_get_saved_page(loc);
6210 + for (; loc < end; loc += PAGE_SIZE) {
6211 + dump_config.dumper->curr_loc = loc;
6213 + printk("no more saved data for pass %d\n",
6217 + sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
6219 + if (page && filter->selector(pass, (unsigned long)page,
6221 + pr_debug("mem offset 0x%llx\n", loc);
6222 + if ((err = action((unsigned long)page, sz)))
6226 + /* clear the contents of page */
6227 + /* fixme: consider using KM_DUMP instead */
6228 + clear_highpage(page);
6231 + page = dump_next_saved_page();
6235 + return err ? err : count;
6238 +static inline int dump_overlay_pages_done(struct page *page, int nr)
6242 + for (; nr ; page++, nr--) {
6243 + if (dump_check_and_free_page(dump_memdev, page))
6249 +int dump_overlay_save_data(unsigned long loc, unsigned long len)
6252 + struct page *page = (struct page *)loc;
6253 + static unsigned long cnt = 0;
6255 + if ((err = dump_generic_save_data(loc, len)))
6258 + if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
6260 + if (!(cnt & 0x7f))
6261 + pr_debug("released page 0x%lx\n", page_to_pfn(page));
6268 +int dump_overlay_skip_data(unsigned long loc, unsigned long len)
6270 + struct page *page = (struct page *)loc;
6272 + dump_overlay_pages_done(page, len >> PAGE_SHIFT);
6276 +int dump_overlay_resume(void)
6281 + * switch to stage 2 dumper, save dump_config_block
6282 + * and then trigger a soft-boot
6284 + dumper_stage2.header_len = dump_config.dumper->header_len;
6285 + dump_config.dumper = &dumper_stage2;
6286 + if ((err = dump_save_config(dump_saved_config)))
6289 + dump_dev = dump_config.dumper->dev;
6291 +#ifdef CONFIG_KEXEC
6292 + /* If we are doing a disruptive dump, activate softboot now */
6293 + if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
6294 + err = dump_activate_softboot();
6298 + err = dump_switchover_stage(); /* plugs into soft boot mechanism */
6299 + dump_config.dumper = &dumper_stage1; /* set things back */
6303 +int dump_overlay_configure(unsigned long devid)
6305 + struct dump_dev *dev;
6306 + struct dump_config_block *saved_config = dump_saved_config;
6309 + /* If there is a previously saved dump, write it out first */
6310 + if (saved_config) {
6311 + printk("Processing old dump pending writeout\n");
6312 + err = dump_switchover_stage();
6314 + printk("failed to writeout saved dump\n");
6317 + dump_free_mem(saved_config); /* testing only: not after boot */
6320 + dev = dumper_stage2.dev = dump_config.dumper->dev;
6321 + /* From here on the intermediate dump target is memory-only */
6322 + dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
6323 + if ((err = dump_generic_configure(0))) {
6324 + printk("dump generic configure failed: err %d\n", err);
6328 + dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
6330 + /* Sanity check on the actual target dump device */
6331 + if (!dev || (err = dev->ops->open(dev, devid))) {
6334 + /* TBD: should we release the target if this is soft-boot only ? */
6336 + /* alloc a dump config block area to save across reboot */
6337 + if (!(dump_saved_config = dump_alloc_mem(sizeof(struct
6338 + dump_config_block)))) {
6339 + printk("dump config block alloc failed\n");
6340 + /* undo configure */
6341 + dump_generic_unconfigure();
6344 + dump_config.dump_addr = (unsigned long)dump_saved_config;
6345 + printk("Dump config block of size %d set up at 0x%lx\n",
6346 + sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
6350 +int dump_overlay_unconfigure(void)
6352 + struct dump_dev *dev = dumper_stage2.dev;
6355 + pr_debug("dump_overlay_unconfigure\n");
6356 + /* Close the secondary device */
6357 + dev->ops->release(dev);
6358 + pr_debug("released secondary device\n");
6360 + err = dump_generic_unconfigure();
6361 + pr_debug("Unconfigured generic portions\n");
6362 + dump_free_mem(dump_saved_config);
6363 + dump_saved_config = NULL;
6364 + pr_debug("Freed saved config block\n");
6365 + dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
6367 + printk("Unconfigured overlay dumper\n");
6371 +int dump_staged_unconfigure(void)
6374 + struct dump_config_block *saved_config = dump_saved_config;
6375 + struct dump_dev *dev;
6377 + pr_debug("dump_staged_unconfigure\n");
6378 + err = dump_generic_unconfigure();
6380 + /* now check if there is a saved dump waiting to be written out */
6381 + if (saved_config) {
6382 + printk("Processing saved dump pending writeout\n");
6383 + if ((err = dump_switchover_stage())) {
6384 + printk("Error in commiting saved dump at 0x%lx\n",
6385 + (unsigned long)saved_config);
6386 + printk("Old dump may hog memory\n");
6388 + dump_free_mem(saved_config);
6389 + pr_debug("Freed saved config block\n");
6391 + dump_saved_config = NULL;
6393 + dev = &dump_memdev->ddev;
6394 + dev->ops->release(dev);
6396 + printk("Unconfigured second stage dumper\n");
6401 +/* ----- PASSTHRU FILTER ROUTINE --------- */
6403 +/* transparent - passes everything through */
6404 +int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
6409 +/* ----- PASSTHRU FORMAT ROUTINES ---- */
6412 +int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
6414 + dump_config.dumper->header_dirty++;
6418 +/* Copies bytes of data from page(s) to the specified buffer */
6419 +int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
6421 + unsigned long len = 0, bytes;
6424 + while (len < sz) {
6425 + addr = kmap_atomic(page, KM_DUMP);
6426 + bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;
6427 + memcpy(buf, addr, bytes);
6428 + kunmap_atomic(addr, KM_DUMP);
6433 + /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
6438 +int dump_passthru_update_header(void)
6440 + long len = dump_config.dumper->header_len;
6441 + struct page *page;
6442 + void *buf = dump_config.dumper->dump_buf;
6445 + if (!dump_config.dumper->header_dirty)
6448 + pr_debug("Copying header of size %ld bytes from memory\n", len);
6449 + if (len > DUMP_BUFFER_SIZE)
6452 + page = dump_mem_lookup(dump_memdev, 0);
6453 + for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
6454 + if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
6456 + page = dump_mem_next_page(dump_memdev);
6459 + printk("Incomplete header saved in mem\n");
6463 + if ((err = dump_dev_seek(0))) {
6464 + printk("Unable to seek to dump header offset\n");
6467 + err = dump_ll_write(dump_config.dumper->dump_buf,
6468 + buf - dump_config.dumper->dump_buf);
6469 + if (err < dump_config.dumper->header_len)
6470 + return (err < 0) ? err : -ENOSPC;
6472 + dump_config.dumper->header_dirty = 0;
6476 +static loff_t next_dph_offset = 0;
6478 +static int dph_valid(struct __dump_page *dph)
6480 + if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
6481 + > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
6482 + (dph->dp_size > PAGE_SIZE)) {
6483 + printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
6484 + dph->dp_address, dph->dp_size, dph->dp_flags);
6490 +int dump_verify_lcrash_data(void *buf, unsigned long sz)
6492 + struct __dump_page *dph;
6494 + /* sanity check for page headers */
6495 + while (next_dph_offset + sizeof(*dph) < sz) {
6496 + dph = (struct __dump_page *)(buf + next_dph_offset);
6497 + if (!dph_valid(dph)) {
6498 + printk("Invalid page hdr at offset 0x%llx\n",
6502 + next_dph_offset += dph->dp_size + sizeof(*dph);
6505 + next_dph_offset -= sz;
6510 + * TBD/Later: Consider avoiding the copy by using a scatter/gather
6511 + * vector representation for the dump buffer
6513 +int dump_passthru_add_data(unsigned long loc, unsigned long sz)
6515 + struct page *page = (struct page *)loc;
6516 + void *buf = dump_config.dumper->curr_buf;
6519 + if ((err = dump_copy_pages(buf, page, sz))) {
6520 + printk("dump_copy_pages failed");
6524 + if ((err = dump_verify_lcrash_data(buf, sz))) {
6525 + printk("dump_verify_lcrash_data failed\n");
6526 + printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
6527 + printk("Page flags 0x%lx\n", page->flags);
6528 + printk("Page count 0x%x\n", page_count(page));
6532 + dump_config.dumper->curr_buf = buf + sz;
6538 +/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
6540 +/* Scheme to overlay saved data in memory for writeout after a soft-boot */
6541 +struct dump_scheme_ops dump_scheme_overlay_ops = {
6542 + .configure = dump_overlay_configure,
6543 + .unconfigure = dump_overlay_unconfigure,
6544 + .sequencer = dump_overlay_sequencer,
6545 + .iterator = dump_page_iterator,
6546 + .save_data = dump_overlay_save_data,
6547 + .skip_data = dump_overlay_skip_data,
6548 + .write_buffer = dump_generic_write_buffer
6551 +struct dump_scheme dump_scheme_overlay = {
6552 + .name = "overlay",
6553 + .ops = &dump_scheme_overlay_ops
6557 +/* Stage 1 must use a good compression scheme - default to gzip */
6558 +extern struct __dump_compress dump_gzip_compression;
6560 +struct dumper dumper_stage1 = {
6562 + .scheme = &dump_scheme_overlay,
6563 + .fmt = &dump_fmt_lcrash,
6564 + .compress = &dump_none_compression, /* needs to be gzip */
6565 + .filter = dump_filter_table,
6569 +/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
6571 +/* Formatter that transfers data as is (transparent) w/o further conversion */
6572 +struct dump_fmt_ops dump_fmt_passthru_ops = {
6573 + .configure_header = dump_passthru_configure_header,
6574 + .update_header = dump_passthru_update_header,
6575 + .save_context = NULL, /* unused */
6576 + .add_data = dump_passthru_add_data,
6577 + .update_end_marker = dump_lcrash_update_end_marker
6580 +struct dump_fmt dump_fmt_passthru = {
6581 + .name = "passthru",
6582 + .ops = &dump_fmt_passthru_ops
6585 +/* Filter that simply passes along any data within the range (transparent)*/
6586 +/* Note: The start and end ranges in the table are filled in at run-time */
6588 +extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
6590 +struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
6591 +{.name = "passkern", .selector = dump_passthru_filter,
6592 + .level_mask = DUMP_MASK_KERN },
6593 +{.name = "passuser", .selector = dump_passthru_filter,
6594 + .level_mask = DUMP_MASK_USED },
6595 +{.name = "passunused", .selector = dump_passthru_filter,
6596 + .level_mask = DUMP_MASK_UNUSED },
6597 +{.name = "none", .selector = dump_filter_none,
6598 + .level_mask = DUMP_MASK_REST }
6602 +/* Scheme to handle data staged / preserved across a soft-boot */
6603 +struct dump_scheme_ops dump_scheme_staged_ops = {
6604 + .configure = dump_generic_configure,
6605 + .unconfigure = dump_staged_unconfigure,
6606 + .sequencer = dump_generic_sequencer,
6607 + .iterator = dump_saved_data_iterator,
6608 + .save_data = dump_generic_save_data,
6609 + .skip_data = dump_generic_skip_data,
6610 + .write_buffer = dump_generic_write_buffer
6613 +struct dump_scheme dump_scheme_staged = {
6615 + .ops = &dump_scheme_staged_ops
6618 +/* The stage 2 dumper comprising all these */
6619 +struct dumper dumper_stage2 = {
6621 + .scheme = &dump_scheme_staged,
6622 + .fmt = &dump_fmt_passthru,
6623 + .compress = &dump_none_compression,
6624 + .filter = dump_passthru_filtertable,
6628 Index: linux-2.6.10/drivers/dump/dump_memdev.c
6629 ===================================================================
6630 --- linux-2.6.10.orig/drivers/dump/dump_memdev.c 2005-04-05 19:01:49.158500672 +0800
6631 +++ linux-2.6.10/drivers/dump/dump_memdev.c 2005-04-05 16:47:53.947204496 +0800
6634 + * Implements the dump driver interface for saving a dump in available
6635 + * memory areas. The saved pages may be written out to persistent storage
6636 + * after a soft reboot.
6638 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
6640 + * Copyright (C) 2002 International Business Machines Corp.
6642 + * This code is released under version 2 of the GNU GPL.
6644 + * The approach of tracking pages containing saved dump using map pages
6645 + * allocated as needed has been derived from the Mission Critical Linux
6646 + * mcore dump implementation.
6648 + * Credits and a big thanks for letting the lkcd project make use of
6649 + * the excellent piece of work and also helping with clarifications
6650 + * and tips along the way are due to:
6651 + * Dave Winchell <winchell@mclx.com> (primary author of mcore)
6652 + * Jeff Moyer <moyer@mclx.com>
6653 + * Josh Huber <huber@mclx.com>
6655 + * For those familiar with the mcore code, the main differences worth
6656 + * noting here (besides the dump device abstraction) result from enabling
6657 + * "high" memory pages (pages not permanently mapped in the kernel
6658 + * address space) to be used for saving dump data (because of which a
6659 + * simple virtual address based linked list cannot be used anymore for
6660 + * managing free pages), an added level of indirection for faster
6661 + * lookups during the post-boot stage, and the idea of pages being
6662 + * made available as they get freed up while dump to memory progresses
6663 + * rather than one time before starting the dump. The last point enables
6664 + * a full memory snapshot to be saved starting with an initial set of
6665 + * bootstrap pages given a good compression ratio. (See dump_overlay.c)
6670 + * -----------------MEMORY LAYOUT ------------------
6671 + * The memory space consists of a set of discontiguous pages, and
6672 + * discontiguous map pages as well, rooted in a chain of indirect
6673 + * map pages (also discontiguous). Except for the indirect maps
6674 + * (which must be preallocated in advance), the rest of the pages
6675 + * could be in high memory.
6678 + * | --------- -------- --------
6679 + * --> | . . +|--->| . +|------->| . . | indirect
6680 + * --|--|--- ---|---- --|-|--- maps
6682 + * ------ ------ ------- ------ -------
6683 + * | . | | . | | . . | | . | | . . | maps
6684 + * --|--- --|--- --|--|-- --|--- ---|-|--
6685 + * page page page page page page page data
6688 + * Writes to the dump device happen sequentially in append mode.
6689 + * The main reason for the existence of the indirect map is
6690 + * to enable a quick way to lookup a specific logical offset in
6691 + * the saved data post-soft-boot, e.g. to writeout pages
6692 + * with more critical data first, even though such pages
6693 + * would have been compressed and copied last, being the lowest
6694 + * ranked candidates for reuse due to their criticality.
6695 + * (See dump_overlay.c)
6697 +#include <linux/mm.h>
6698 +#include <linux/highmem.h>
6699 +#include <linux/bootmem.h>
6700 +#include <linux/dump.h>
6701 +#include "dump_methods.h"
6703 +#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */
6704 +#define DUMP_IND_MAP_SZ DUMP_MAP_SZ - 1 /* indirect map size */
6705 +#define DUMP_NR_BOOTSTRAP 64 /* no of bootstrap pages */
6707 +extern int dump_low_page(struct page *);
6709 +/* check if the next entry crosses a page boundary */
6710 +static inline int is_last_map_entry(unsigned long *map)
6712 + unsigned long addr = (unsigned long)(map + 1);
6714 + return (!(addr & (PAGE_SIZE - 1)));
6717 +/* Todo: should have some validation checks */
6718 +/* The last entry in the indirect map points to the next indirect map */
6719 +/* Indirect maps are referred to directly by virtual address */
6720 +static inline unsigned long *next_indirect_map(unsigned long *map)
6722 + return (unsigned long *)map[DUMP_IND_MAP_SZ];
6725 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
6726 +/* Called during early bootup - fixme: make this __init */
6727 +void dump_early_reserve_map(struct dump_memdev *dev)
6729 + unsigned long *map1, *map2;
6730 + loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
6733 + printk("Reserve bootmap space holding previous dump of %lld pages\n",
6735 + map1= (unsigned long *)dev->indirect_map_root;
6737 + while (map1 && (off < last)) {
6738 +#ifdef CONFIG_X86_64
6739 + reserve_bootmem_node(NODE_DATA(0), virt_to_phys((void *)map1),
6742 + reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE);
6744 + for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
6745 + i++, off += DUMP_MAP_SZ) {
6746 + pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
6747 + if (map1[i] >= max_low_pfn)
6749 +#ifdef CONFIG_X86_64
6750 + reserve_bootmem_node(NODE_DATA(0),
6751 + map1[i] << PAGE_SHIFT, PAGE_SIZE);
6753 + reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE);
6755 + map2 = pfn_to_kaddr(map1[i]);
6756 + for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
6757 + (off + j < last); j++) {
6758 + pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
6760 + if (map2[j] < max_low_pfn) {
6761 +#ifdef CONFIG_X86_64
6762 + reserve_bootmem_node(NODE_DATA(0),
6763 + map2[j] << PAGE_SHIFT,
6766 + reserve_bootmem(map2[j] << PAGE_SHIFT,
6772 + map1 = next_indirect_map(map1);
6774 + dev->nr_free = 0; /* these pages don't belong to this boot */
6778 +/* mark dump pages so that they aren't used by this kernel */
6779 +void dump_mark_map(struct dump_memdev *dev)
6781 + unsigned long *map1, *map2;
6782 + loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
6783 + struct page *page;
6786 + printk("Dump: marking pages in use by previous dump\n");
6787 + map1= (unsigned long *)dev->indirect_map_root;
6789 + while (map1 && (off < last)) {
6790 + page = virt_to_page(map1);
6791 + set_page_count(page, 1);
6792 + for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
6793 + i++, off += DUMP_MAP_SZ) {
6794 + pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
6795 + page = pfn_to_page(map1[i]);
6796 + set_page_count(page, 1);
6797 + map2 = kmap_atomic(page, KM_DUMP);
6798 + for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
6799 + (off + j < last); j++) {
6800 + pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
6802 + page = pfn_to_page(map2[j]);
6803 + set_page_count(page, 1);
6806 + map1 = next_indirect_map(map1);
6812 + * Given a logical offset into the mem device lookup the
6813 + * corresponding page
6814 + * loc is specified in units of pages
6815 + * Note: affects curr_map (even in the case where lookup fails)
6817 +struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc)
6819 + unsigned long *map;
6820 + unsigned long i, index = loc / DUMP_MAP_SZ;
6821 + struct page *page = NULL;
6822 + unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
6824 + map = (unsigned long *)dump_mdev->indirect_map_root;
6827 + if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
6831 + * first locate the right indirect map
6832 + * in the chain of indirect maps
6834 + for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) {
6835 + if (!(map = next_indirect_map(map)))
6838 + /* then the right direct map */
6839 + /* map entries are referred to by page index */
6840 + if ((curr_map = map[index - i])) {
6841 + page = pfn_to_page(curr_map);
6842 + /* update the current traversal index */
6843 + /* dump_mdev->curr_map = &map[index - i];*/
6844 + curr_map_ptr = &map[index - i];
6848 + map = kmap_atomic(page, KM_DUMP);
6852 + /* and finally the right entry therein */
6853 + /* data pages are referred to by page index */
6854 + i = index * DUMP_MAP_SZ;
6855 + if ((curr_pfn = map[loc - i])) {
6856 + page = pfn_to_page(curr_pfn);
6857 + dump_mdev->curr_map = curr_map_ptr;
6858 + dump_mdev->curr_map_offset = loc - i;
6859 + dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT;
6863 + kunmap_atomic(map, KM_DUMP);
6869 + * Retrieves a pointer to the next page in the dump device
6870 + * Used during the lookup pass post-soft-reboot
6872 +struct page *dump_mem_next_page(struct dump_memdev *dev)
6875 + unsigned long *map;
6876 + struct page *page = NULL;
6878 + if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) {
6882 + if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) {
6883 + /* move to next map */
6884 + if (is_last_map_entry(++dev->curr_map)) {
6885 + /* move to the next indirect map page */
6886 + printk("dump_mem_next_page: go to next indirect map\n");
6887 + dev->curr_map = (unsigned long *)*dev->curr_map;
6888 + if (!dev->curr_map)
6891 + i = dev->curr_map_offset = 0;
6892 + pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n",
6893 + dev->curr_map, *dev->curr_map);
6897 + if (*dev->curr_map) {
6898 + map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP);
6900 + page = pfn_to_page(map[i]);
6901 + kunmap_atomic(map, KM_DUMP);
6902 + dev->ddev.curr_offset += PAGE_SIZE;
6908 +/* Copied from dump_filters.c */
6909 +static inline int kernel_page(struct page *p)
6911 + /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
6912 + return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
6915 +static inline int user_page(struct page *p)
6917 + return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
6920 +int dump_reused_by_boot(struct page *page)
6925 + * if < __end + bootmem_bootmap_pages for this boot + allowance
6926 + * if overwritten by initrd (how to check ?)
6927 + * Also, add more checks in early boot code
6928 + * e.g. bootmem bootmap alloc verify not overwriting dump, and if
6929 + * so then realloc or move the dump pages out accordingly.
6932 + /* Temporary proof of concept hack, avoid overwriting kern pages */
6934 + return (kernel_page(page) || dump_low_page(page) || user_page(page));
6938 +/* Uses the free page passed in to expand available space */
6939 +int dump_mem_add_space(struct dump_memdev *dev, struct page *page)
6941 + struct page *map_page;
6942 + unsigned long *map;
6945 + if (!dev->curr_map)
6946 + return -ENOMEM; /* must've exhausted indirect map */
6948 + if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) {
6949 + /* add map space */
6950 + *dev->curr_map = page_to_pfn(page);
6951 + dev->curr_map_offset = 0;
6955 + /* add data space */
6956 + i = dev->curr_map_offset;
6957 + map_page = pfn_to_page(*dev->curr_map);
6958 + map = (unsigned long *)kmap_atomic(map_page, KM_DUMP);
6959 + map[i] = page_to_pfn(page);
6960 + kunmap_atomic(map, KM_DUMP);
6961 + dev->curr_map_offset = ++i;
6962 + dev->last_offset += PAGE_SIZE;
6963 + if (i >= DUMP_MAP_SZ) {
6964 + /* move to next map */
6965 + if (is_last_map_entry(++dev->curr_map)) {
6966 + /* move to the next indirect map page */
6967 + pr_debug("dump_mem_add_space: using next"
6968 + "indirect map\n");
6969 + dev->curr_map = (unsigned long *)*dev->curr_map;
6976 +/* Caution: making a dest page invalidates existing contents of the page */
6977 +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page)
6982 + * the page can be used as a destination only if we are sure
6983 + * it won't get overwritten by the soft-boot, and is not
6984 + * critical for us right now.
6986 + if (dump_reused_by_boot(page))
6989 + if ((err = dump_mem_add_space(dev, page))) {
6990 + printk("Warning: Unable to extend memdev space. Err %d\n",
7000 +/* Set up the initial maps and bootstrap space */
7001 +/* Must be called only after any previous dump is written out */
7002 +int dump_mem_open(struct dump_dev *dev, unsigned long devid)
7004 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
7005 + unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root;
7007 + struct page *page;
7008 + unsigned long i = 0;
7011 + /* Todo: sanity check for unwritten previous dump */
7013 + /* allocate pages for indirect map (non highmem area) */
7014 + nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */
7015 + for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) {
7016 + if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) {
7017 + printk("Unable to alloc indirect map %ld\n",
7018 + i / DUMP_IND_MAP_SZ);
7022 + *prev_map = (unsigned long)map;
7023 + prev_map = &map[DUMP_IND_MAP_SZ];
7026 + dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root;
7027 + dump_mdev->curr_map_offset = 0;
7030 + * allocate a few bootstrap pages: at least 1 map and 1 data page
7031 + * plus enough to save the dump header
7035 + if (!(addr = dump_alloc_mem(PAGE_SIZE))) {
7036 + printk("Unable to alloc bootstrap page %ld\n", i);
7040 + page = virt_to_page(addr);
7041 + if (dump_low_page(page)) {
7042 + dump_free_mem(addr);
7046 + if (dump_mem_add_space(dump_mdev, page)) {
7047 + printk("Warning: Unable to extend memdev "
7048 + "space. Err %d\n", err);
7049 + dump_free_mem(addr);
7053 + } while (i < DUMP_NR_BOOTSTRAP);
7055 + printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n",
7056 + nr_maps, i, dump_mdev->last_offset >> PAGE_SHIFT);
7058 + dump_mdev->last_bs_offset = dump_mdev->last_offset;
7063 +/* Releases all pre-alloc'd pages */
7064 +int dump_mem_release(struct dump_dev *dev)
7066 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
7067 + struct page *page, *map_page;
7068 + unsigned long *map, *prev_map;
7072 + if (!dump_mdev->nr_free)
7075 + pr_debug("dump_mem_release\n");
7076 + page = dump_mem_lookup(dump_mdev, 0);
7077 + for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) {
7078 + if (PageHighMem(page))
7080 + addr = page_address(page);
7082 + printk("page_address(%p) = NULL\n", page);
7085 + pr_debug("Freeing page at 0x%lx\n", addr);
7086 + dump_free_mem(addr);
7087 + if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) {
7088 + map_page = pfn_to_page(*dump_mdev->curr_map);
7089 + if (PageHighMem(map_page))
7091 + page = dump_mem_next_page(dump_mdev);
7092 + addr = page_address(map_page);
7094 + printk("page_address(%p) = NULL\n",
7098 + pr_debug("Freeing map page at 0x%lx\n", addr);
7099 + dump_free_mem(addr);
7102 + page = dump_mem_next_page(dump_mdev);
7106 + /* now for the last used bootstrap page used as a map page */
7107 + if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) {
7108 + map_page = pfn_to_page(*dump_mdev->curr_map);
7109 + if ((map_page) && !PageHighMem(map_page)) {
7110 + addr = page_address(map_page);
7112 + printk("page_address(%p) = NULL\n", map_page);
7114 + pr_debug("Freeing map page at 0x%lx\n", addr);
7115 + dump_free_mem(addr);
7121 + printk("Freed %d bootstrap pages\n", i);
7123 + /* free the indirect maps */
7124 + map = (unsigned long *)dump_mdev->indirect_map_root;
7129 + map = next_indirect_map(map);
7130 + dump_free_mem(prev_map);
7134 + printk("Freed %d indirect map(s)\n", i);
7136 + /* Reset the indirect map */
7137 + dump_mdev->indirect_map_root = 0;
7138 + dump_mdev->curr_map = 0;
7140 + /* Reset the free list */
7141 + dump_mdev->nr_free = 0;
7143 + dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0;
7144 + dump_mdev->last_used_offset = 0;
7145 + dump_mdev->curr_map = NULL;
7146 + dump_mdev->curr_map_offset = 0;
7152 + * It is critical for this to be very strict. Cannot afford
7153 + * to have anything running and accessing memory while we overwrite
7154 + * memory (potential risk of data corruption).
7155 + * If in doubt (e.g if a cpu is hung and not responding) just give
7156 + * up and refuse to proceed with this scheme.
7158 + * Note: I/O will only happen after soft-boot/switchover, so we can
7159 + * safely disable interrupts and force stop other CPUs if this is
7160 + * going to be a disruptive dump, no matter what they
7161 + * are in the middle of.
7164 + * ATM Most of this is already taken care of in the nmi handler
7165 + * We may halt the cpus rightaway if we know this is going to be disruptive
7166 + * For now, since we've limited ourselves to overwriting free pages we
7167 + * aren't doing much here. Eventually, we'd have to wait to make sure other
7168 + * cpus aren't using memory we could be overwriting
7170 +int dump_mem_silence(struct dump_dev *dev)
7172 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
7174 + if (dump_mdev->last_offset > dump_mdev->last_bs_offset) {
7175 + /* prefer to run lkcd config & start with a clean slate */
7181 +extern int dump_overlay_resume(void);
7183 +/* Trigger the next stage of dumping */
7184 +int dump_mem_resume(struct dump_dev *dev)
7186 + dump_overlay_resume();
7191 + * Allocate mem dev pages as required and copy buffer contents into it.
7192 + * Fails if no free pages are available
7193 + * Keeping it simple and limited for starters (can modify this over time)
7194 + * Does not handle holes or a sparse layout
7195 + * Data must be in multiples of PAGE_SIZE
7197 +int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len)
7199 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
7200 + struct page *page;
7201 + unsigned long n = 0;
7203 + unsigned long *saved_curr_map, saved_map_offset;
7206 + pr_debug("dump_mem_write: offset 0x%llx, size %ld\n",
7207 + dev->curr_offset, len);
7209 + if (dev->curr_offset + len > dump_mdev->last_offset) {
7210 + printk("Out of space to write\n");
7214 + if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1)))
7215 + return -EINVAL; /* not aligned in units of page size */
7217 + saved_curr_map = dump_mdev->curr_map;
7218 + saved_map_offset = dump_mdev->curr_map_offset;
7219 + page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT);
7221 + for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) {
7222 + addr = kmap_atomic(page, KM_DUMP);
7223 + /* memset(addr, 'x', PAGE_SIZE); */
7224 + memcpy(addr, buf, PAGE_SIZE);
7225 + kunmap_atomic(addr, KM_DUMP);
7226 + /* dev->curr_offset += PAGE_SIZE; */
7227 + page = dump_mem_next_page(dump_mdev);
7230 + dump_mdev->curr_map = saved_curr_map;
7231 + dump_mdev->curr_map_offset = saved_map_offset;
7233 + if (dump_mdev->last_used_offset < dev->curr_offset)
7234 + dump_mdev->last_used_offset = dev->curr_offset;
7236 + return (len - n) ? (len - n) : ret ;
7239 +/* dummy - always ready */
7240 +int dump_mem_ready(struct dump_dev *dev, void *buf)
7246 + * Should check for availability of space to write upto the offset
7247 + * affects only the curr_offset; last_offset untouched
7248 + * Keep it simple: Only allow multiples of PAGE_SIZE for now
7250 +int dump_mem_seek(struct dump_dev *dev, loff_t offset)
7252 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
7254 + if (offset & (PAGE_SIZE - 1))
7255 + return -EINVAL; /* allow page size units only for now */
7257 + /* Are we exceeding available space ? */
7258 + if (offset > dump_mdev->last_offset) {
7259 + printk("dump_mem_seek failed for offset 0x%llx\n",
7264 + dump_mdev->ddev.curr_offset = offset;
7268 +struct dump_dev_ops dump_memdev_ops = {
7269 + .open = dump_mem_open,
7270 + .release = dump_mem_release,
7271 + .silence = dump_mem_silence,
7272 + .resume = dump_mem_resume,
7273 + .seek = dump_mem_seek,
7274 + .write = dump_mem_write,
7275 + .read = NULL, /* not implemented at the moment */
7276 + .ready = dump_mem_ready
7279 +static struct dump_memdev default_dump_memdev = {
7280 + .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops,
7281 + .device_id = 0x14}
7282 + /* assume the rest of the fields are zeroed by default */
7285 +/* may be overwritten if a previous dump exists */
7286 +struct dump_memdev *dump_memdev = &default_dump_memdev;
7288 Index: linux-2.6.10/drivers/dump/dump_blockdev.c
7289 ===================================================================
7290 --- linux-2.6.10.orig/drivers/dump/dump_blockdev.c 2005-04-05 19:01:49.158500672 +0800
7291 +++ linux-2.6.10/drivers/dump/dump_blockdev.c 2005-04-05 16:47:53.945204800 +0800
7294 + * Implements the dump driver interface for saving a dump to
7295 + * a block device through the kernel's generic low level block i/o
7298 + * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
7299 + * Moved original lkcd kiobuf dump i/o code from dump_base.c
7300 + * to use generic dump device interfaces
7302 + * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
7303 + * Convert dump i/o to directly use bio instead of kiobuf for 2.5
7305 + * Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
7306 + * Rework to new dumpdev.h structures, implement open/close/
7307 + * silence, misc fixes (blocknr removal, bio_add_page usage)
7309 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
7310 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
7311 + * Copyright (C) 2002 International Business Machines Corp.
7313 + * This code is released under version 2 of the GNU GPL.
7316 +#include <linux/types.h>
7317 +#include <linux/proc_fs.h>
7318 +#include <linux/module.h>
7319 +#include <linux/init.h>
7320 +#include <linux/blkdev.h>
7321 +#include <linux/bio.h>
7322 +#include <asm/hardirq.h>
7323 +#include <linux/dump.h>
7324 +#include "dump_methods.h"
7326 +extern void *dump_page_buf;
7328 +/* The end_io callback for dump i/o completion */
7330 +dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
7332 + struct dump_blockdev *dump_bdev;
7334 + if (bio->bi_size) {
7335 + /* some bytes still left to transfer */
7336 + return 1; /* not complete */
7339 + dump_bdev = (struct dump_blockdev *)bio->bi_private;
7341 + printk("IO error while writing the dump, aborting\n");
7344 + dump_bdev->err = error;
7346 + /* no wakeup needed, since caller polls for completion */
7350 +/* Check if the dump bio is already mapped to the specified buffer */
7352 +dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
7355 + struct bio *bio = dev->bio;
7356 + unsigned long bsize = 0;
7358 + if (!bio->bi_vcnt)
7359 + return 0; /* first time, not mapped */
7362 + if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
7363 + return 0; /* buffer not mapped */
7365 + bsize = bdev_hardsect_size(bio->bi_bdev);
7366 + if ((len & (PAGE_SIZE - 1)) || (len & bsize))
7367 + return 0; /* alignment checks needed */
7369 + /* quick check to decide if we need to redo bio_add_page */
7370 + if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
7371 + return 0; /* device may have other restrictions */
7373 + return 1; /* already mapped */
7377 + * Set up the dump bio for i/o from the specified buffer
7378 + * Return value indicates whether the full buffer could be mapped or not
7381 +dump_block_map(struct dump_blockdev *dev, void *buf, int len)
7383 + struct page *page = virt_to_page(buf);
7384 + struct bio *bio = dev->bio;
7385 + unsigned long bsize = 0;
7387 + bio->bi_bdev = dev->bdev;
7388 + bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
7389 + bio->bi_idx = 0; /* reset index to the beginning */
7391 + if (dump_block_map_valid(dev, page, len)) {
7392 + /* already mapped and usable rightaway */
7393 + bio->bi_size = len; /* reset size to the whole bio */
7394 + bio->bi_vcnt = (len + PAGE_SIZE - 1) / PAGE_SIZE; /* Set the proper vector cnt */
7396 + /* need to map the bio */
7399 + bsize = bdev_hardsect_size(bio->bi_bdev);
7401 + /* first a few sanity checks */
7402 + if (len < bsize) {
7403 + printk("map: len less than hardsect size \n");
7407 + if ((unsigned long)buf & bsize) {
7408 + printk("map: not aligned \n");
7412 + /* assume contig. page aligned low mem buffer( no vmalloc) */
7413 + if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
7414 + printk("map: invalid buffer alignment!\n");
7417 + /* finally we can go ahead and map it */
7418 + while (bio->bi_size < len)
7419 + if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
7423 + bio->bi_end_io = dump_bio_end_io;
7424 + bio->bi_private = dev;
7427 + if (bio->bi_size != len) {
7428 + printk("map: bio size = %d not enough for len = %d!\n",
7429 + bio->bi_size, len);
7436 +dump_free_bio(struct bio *bio)
7439 + kfree(bio->bi_io_vec);
7444 + * Prepares the dump device so we can take a dump later.
7445 + * The caller is expected to have filled up the dev_id field in the
7446 + * block dump dev structure.
7448 + * At dump time when dump_block_write() is invoked it will be too
7449 + * late to recover, so as far as possible make sure obvious errors
7450 + * get caught right here and reported back to the caller.
7453 +dump_block_open(struct dump_dev *dev, unsigned long arg)
7455 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
7456 + struct block_device *bdev;
7458 + struct bio_vec *bvec;
7460 + /* make sure this is a valid block device */
7466 + /* Convert it to the new dev_t format */
7467 + arg = MKDEV((arg >> OLDMINORBITS), (arg & OLDMINORMASK));
7469 + /* get a corresponding block_dev struct for this */
7470 + bdev = bdget((dev_t)arg);
7476 + /* get the block device opened */
7477 + if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0))) {
7481 + if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
7483 + printk("Cannot allocate bio\n");
7488 + bio_init(dump_bdev->bio);
7490 + if ((bvec = kmalloc(sizeof(struct bio_vec) *
7491 + (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
7496 + /* assign the new dump dev structure */
7497 + dump_bdev->dev_id = (dev_t)arg;
7498 + dump_bdev->bdev = bdev;
7500 + /* make a note of the limit */
7501 + dump_bdev->limit = bdev->bd_inode->i_size;
7503 + /* now make sure we can map the dump buffer */
7504 + dump_bdev->bio->bi_io_vec = bvec;
7505 + dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
7507 + retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
7508 + DUMP_BUFFER_SIZE);
7511 + printk("open: dump_block_map failed, ret %d\n", retval);
7515 + printk("Block device (%d,%d) successfully configured for dumping\n",
7516 + MAJOR(dump_bdev->dev_id),
7517 + MINOR(dump_bdev->dev_id));
7520 + /* after opening the block device, return */
7523 +err3: dump_free_bio(dump_bdev->bio);
7524 + dump_bdev->bio = NULL;
7525 +err2: if (bdev) blkdev_put(bdev);
7527 +err1: if (bdev) bdput(bdev);
7528 + dump_bdev->bdev = NULL;
7529 +err: return retval;
7533 + * Close the dump device and release associated resources
7534 + * Invoked when unconfiguring the dump device.
7537 +dump_block_release(struct dump_dev *dev)
7539 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
7541 + /* release earlier bdev if present */
7542 + if (dump_bdev->bdev) {
7543 + blkdev_put(dump_bdev->bdev);
7544 + dump_bdev->bdev = NULL;
7547 + dump_free_bio(dump_bdev->bio);
7548 + dump_bdev->bio = NULL;
7555 + * Prepare the dump device for use (silence any ongoing activity
7556 + * and quiesce state) when the system crashes.
7559 +dump_block_silence(struct dump_dev *dev)
7561 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
7562 + struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
7565 + /* If we can't get request queue lock, refuse to take the dump */
7566 + if (!spin_trylock(q->queue_lock))
7569 + ret = elv_queue_empty(q);
7570 + spin_unlock(q->queue_lock);
7572 + /* For now we assume we have the device to ourselves */
7573 + /* Just a quick sanity check */
7575 + /* Warn the user and move on */
7576 + printk(KERN_ALERT "Warning: Non-empty request queue\n");
7577 + printk(KERN_ALERT "I/O requests in flight at dump time\n");
7581 + * Move to a softer level of silencing where no spin_lock_irqs
7582 + * are held on other cpus
7584 + dump_silence_level = DUMP_SOFT_SPIN_CPUS;
7586 + ret = __dump_irq_enable();
7591 + printk("Dumping to block device (%d,%d) on CPU %d ...\n",
7592 + MAJOR(dump_bdev->dev_id), MINOR(dump_bdev->dev_id),
7593 + smp_processor_id());
7599 + * Invoked when dumping is done. This is the time to put things back
7600 + * (i.e. undo the effects of dump_block_silence) so the device is
7601 + * available for normal use.
7604 +dump_block_resume(struct dump_dev *dev)
7606 + __dump_irq_restore();
7612 + * Seek to the specified offset in the dump device.
7613 + * Makes sure this is a valid offset, otherwise returns an error.
7616 +dump_block_seek(struct dump_dev *dev, loff_t off)
7618 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
7619 + loff_t offset = off + dump_bdev->start_offset;
7621 + if (offset & ( PAGE_SIZE - 1)) {
7622 + printk("seek: non-page aligned\n");
7626 + if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
7627 + printk("seek: not sector aligned \n");
7631 + if (offset > dump_bdev->limit) {
7632 + printk("seek: not enough space left on device!\n");
7635 + dev->curr_offset = off;
7640 + * Write out a buffer after checking the device limitations,
7641 + * sector sizes, etc. Assumes the buffer is in directly mapped
7642 + * kernel address space (not vmalloc'ed).
7644 + * Returns: number of bytes written or -ERRNO.
7647 +dump_block_write(struct dump_dev *dev, void *buf,
7648 + unsigned long len)
7650 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
7651 + loff_t offset = dev->curr_offset + dump_bdev->start_offset;
7652 + int retval = -ENOSPC;
7654 + if (offset >= dump_bdev->limit) {
7655 + printk("write: not enough space left on device!\n");
7659 + /* don't write more blocks than our max limit */
7660 + if (offset + len > dump_bdev->limit)
7661 + len = dump_bdev->limit - offset;
7664 + retval = dump_block_map(dump_bdev, buf, len);
7666 + printk("write: dump_block_map failed! err %d\n", retval);
7671 + * Write out the data to disk.
7672 + * Assumes the entire buffer mapped to a single bio, which we can
7673 + * submit and wait for io completion. In the future, may consider
7674 + * increasing the dump buffer size and submitting multiple bios
7675 + * for better throughput.
7677 + dump_bdev->err = -EAGAIN;
7678 + submit_bio(WRITE, dump_bdev->bio);
7680 + dump_bdev->ddev.curr_offset += len;
7687 + * Name: dump_block_ready()
7688 + * Func: check if the last dump i/o is over and ready for next request
7691 +dump_block_ready(struct dump_dev *dev, void *buf)
7693 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
7694 + request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
7696 + /* check for io completion */
7697 + if (dump_bdev->err == -EAGAIN) {
7702 + if (dump_bdev->err) {
7703 + printk("dump i/o err\n");
7704 + return dump_bdev->err;
7711 +struct dump_dev_ops dump_blockdev_ops = {
7712 + .open = dump_block_open,
7713 + .release = dump_block_release,
7714 + .silence = dump_block_silence,
7715 + .resume = dump_block_resume,
7716 + .seek = dump_block_seek,
7717 + .write = dump_block_write,
7718 + /* .read not implemented */
7719 + .ready = dump_block_ready
7722 +static struct dump_blockdev default_dump_blockdev = {
7723 + .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
7724 + .curr_offset = 0},
7726 + * leave enough room for the longest swap header possibly
7727 + * written by mkswap (likely the largest page size supported by
7730 + .start_offset = DUMP_HEADER_OFFSET,
7732 + /* assume the rest of the fields are zeroed by default */
7735 +struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
7738 +dump_blockdev_init(void)
7740 + if (dump_register_device(&dump_blockdev->ddev) < 0) {
7741 + printk("block device driver registration failed\n");
7745 + printk("block device driver for LKCD registered\n");
7750 +dump_blockdev_cleanup(void)
7752 + dump_unregister_device(&dump_blockdev->ddev);
7753 + printk("block device driver for LKCD unregistered\n");
7756 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
7757 +MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
7758 +MODULE_LICENSE("GPL");
7760 +module_init(dump_blockdev_init);
7761 +module_exit(dump_blockdev_cleanup);
7762 Index: linux-2.6.10/drivers/dump/dump_fmt.c
7763 ===================================================================
7764 --- linux-2.6.10.orig/drivers/dump/dump_fmt.c 2005-04-05 19:01:49.158500672 +0800
7765 +++ linux-2.6.10/drivers/dump/dump_fmt.c 2005-04-05 16:47:53.941205408 +0800
7768 + * Implements the routines which handle the format specific
7769 + * aspects of dump for the default dump format.
7771 + * Used in single stage dumping and stage 1 of soft-boot based dumping
7772 + * Saves data in LKCD (lcrash) format
7774 + * Previously a part of dump_base.c
7776 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
7777 + * Split off and reshuffled LKCD dump format code around generic
7778 + * dump method interfaces.
7780 + * Derived from original code created by
7781 + * Matt Robinson <yakker@sourceforge.net>)
7783 + * Contributions from SGI, IBM, HP, MCL, and others.
7785 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
7786 + * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
7787 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
7788 + * Copyright (C) 2002 International Business Machines Corp.
7790 + * This code is released under version 2 of the GNU GPL.
7793 +#include <linux/types.h>
7794 +#include <linux/kernel.h>
7795 +#include <linux/time.h>
7796 +#include <linux/sched.h>
7797 +#include <linux/ptrace.h>
7798 +#include <linux/utsname.h>
7799 +#include <linux/dump.h>
7800 +#include <asm/dump.h>
7801 +#include "dump_methods.h"
7804 + * SYSTEM DUMP LAYOUT
7806 + * System dumps are currently the combination of a dump header and a set
7807 + * of data pages which contain the system memory. The layout of the dump
7808 + * (for full dumps) is as follows:
7810 + * +-----------------------------+
7811 + * | generic dump header |
7812 + * +-----------------------------+
7813 + * | architecture dump header |
7814 + * +-----------------------------+
7816 + * +-----------------------------+
7818 + * +-----------------------------+
7820 + * +-----------------------------+
7822 + * +-----------------------------+
7828 + * +-----------------------------+
7829 + * | PAGE_END header |
7830 + * +-----------------------------+
7832 + * There are two dump headers, the first which is architecture
7833 + * independent, and the other which is architecture dependent. This
7834 + * allows different architectures to dump different data structures
7835 + * which are specific to their chipset, CPU, etc.
7837 + * After the dump headers come a succession of dump page headers along
7838 + * with dump pages. The page header contains information about the page
7839 + * size, any flags associated with the page (whether it's compressed or
7840 + * not), and the address of the page. After the page header is the page
7841 + * data, which is either compressed (or not). Each page of data is
7842 + * dumped in succession, until the final dump header (PAGE_END) is
7843 + * placed at the end of the dump, assuming the dump device isn't out
7846 + * This mechanism allows for multiple compression types, different
7847 + * types of data structures, different page ordering, etc., etc., etc.
7848 + * It's a very straightforward mechanism for dumping system memory.
7851 +struct __dump_header dump_header; /* the primary dump header */
7852 +struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
7854 +/* Replace a runtime sanity check on the DUMP_BUFFER_SIZE with a
7855 + * compile-time check. The compile_time_assertions routine will not
7856 + * compile if the assertion is false.
7858 + * If you fail this assert you are most likely on a large machine and
7859 + * should use a special 6.0.0 version of LKCD or a version > 7.0.0. See
7860 + * the LKCD website for more information.
7863 +#define COMPILE_TIME_ASSERT(const_expr) \
7864 + switch(0){case 0: case (const_expr):;}
7866 +static inline void compile_time_assertions(void)
7868 + COMPILE_TIME_ASSERT((sizeof(struct __dump_header) +
7869 + sizeof(struct __dump_header_asm)) <= DUMP_BUFFER_SIZE);
7873 + * Set up common header fields (mainly the arch indep section)
7874 + * Per-cpu state is handled by lcrash_save_context
7875 + * Returns the size of the header in bytes.
7877 +static int lcrash_init_dump_header(const char *panic_str)
7879 + struct timeval dh_time;
7880 + u64 temp_memsz = dump_header.dh_memory_size;
7882 + /* initialize the dump headers to zero */
7883 + /* save dha_stack pointer because it may contains pointer for stack! */
7884 + memset(&dump_header, 0, sizeof(dump_header));
7885 + memset(&dump_header_asm, 0,
7886 + offsetof(struct __dump_header_asm, dha_stack));
7887 + memset(&dump_header_asm.dha_stack+1, 0,
7888 + sizeof(dump_header_asm) -
7889 + offsetof(struct __dump_header_asm, dha_stack) -
7890 + sizeof(dump_header_asm.dha_stack));
7891 + dump_header.dh_memory_size = temp_memsz;
7893 + /* configure dump header values */
7894 + dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
7895 + dump_header.dh_version = DUMP_VERSION_NUMBER;
7896 + dump_header.dh_memory_start = PAGE_OFFSET;
7897 + dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
7898 + dump_header.dh_header_size = sizeof(struct __dump_header);
7899 + dump_header.dh_page_size = PAGE_SIZE;
7900 + dump_header.dh_dump_level = dump_config.level;
7901 + dump_header.dh_current_task = (unsigned long) current;
7902 + dump_header.dh_dump_compress = dump_config.dumper->compress->
7904 + dump_header.dh_dump_flags = dump_config.flags;
7905 + dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
7907 +#if DUMP_DEBUG >= 6
7908 + dump_header.dh_num_bytes = 0;
7910 + dump_header.dh_num_dump_pages = 0;
7911 + do_gettimeofday(&dh_time);
7912 + dump_header.dh_time.tv_sec = dh_time.tv_sec;
7913 + dump_header.dh_time.tv_usec = dh_time.tv_usec;
7915 + memcpy((void *)&(dump_header.dh_utsname_sysname),
7916 + (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
7917 + memcpy((void *)&(dump_header.dh_utsname_nodename),
7918 + (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
7919 + memcpy((void *)&(dump_header.dh_utsname_release),
7920 + (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
7921 + memcpy((void *)&(dump_header.dh_utsname_version),
7922 + (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
7923 + memcpy((void *)&(dump_header.dh_utsname_machine),
7924 + (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
7925 + memcpy((void *)&(dump_header.dh_utsname_domainname),
7926 + (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);
7929 + memcpy((void *)&(dump_header.dh_panic_string),
7930 + (const void *)panic_str, DUMP_PANIC_LEN);
7933 + dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
7934 + dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
7935 + dump_header_asm.dha_header_size = sizeof(dump_header_asm);
7937 + dump_header_asm.dha_physaddr_start = PHYS_OFFSET;
7940 + dump_header_asm.dha_smp_num_cpus = num_online_cpus();
7941 + pr_debug("smp_num_cpus in header %d\n",
7942 + dump_header_asm.dha_smp_num_cpus);
7944 + dump_header_asm.dha_dumping_cpu = smp_processor_id();
7946 + return sizeof(dump_header) + sizeof(dump_header_asm);
7950 +int dump_lcrash_configure_header(const char *panic_str,
7951 + const struct pt_regs *regs)
7955 + dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);
7957 + /* capture register states for all processors */
7958 + dump_save_this_cpu(regs);
7959 + __dump_save_other_cpus(); /* side effect:silence cpus */
7961 + /* configure architecture-specific dump header values */
7962 + if ((retval = __dump_configure_header(regs)))
7965 + dump_config.dumper->header_dirty++;
7968 +/* save register and task context */
7969 +void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
7970 + struct task_struct *tsk)
7972 + /* This level of abstraction might be redundantly redundant */
7973 + __dump_save_context(cpu, regs, tsk);
7976 +/* write out the header */
7977 +int dump_write_header(void)
7979 + int retval = 0, size;
7980 + void *buf = dump_config.dumper->dump_buf;
7982 + /* accounts for DUMP_HEADER_OFFSET if applicable */
7983 + if ((retval = dump_dev_seek(0))) {
7984 + printk("Unable to seek to dump header offset: %d\n",
7989 + memcpy(buf, (void *)&dump_header, sizeof(dump_header));
7990 + size = sizeof(dump_header);
7991 + memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
7992 + size += sizeof(dump_header_asm);
7993 + size = PAGE_ALIGN(size);
7994 + retval = dump_ll_write(buf , size);
7996 + if (retval < size)
7997 + return (retval >= 0) ? ENOSPC : retval;
8001 +int dump_generic_update_header(void)
8005 + if (dump_config.dumper->header_dirty) {
8006 + if ((err = dump_write_header())) {
8007 + printk("dump write header failed !err %d\n", err);
8009 + dump_config.dumper->header_dirty = 0;
8016 +static inline int is_curr_stack_page(struct page *page, unsigned long size)
8018 + unsigned long thread_addr = (unsigned long)current_thread_info();
8019 + unsigned long addr = (unsigned long)page_address(page);
8021 + return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
8022 + && (addr + size > thread_addr);
8025 +static inline int is_dump_page(struct page *page, unsigned long size)
8027 + unsigned long addr = (unsigned long)page_address(page);
8028 + unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
8030 + return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
8031 + && (addr + size > dump_buf);
8034 +int dump_allow_compress(struct page *page, unsigned long size)
8037 + * Don't compress the page if any part of it overlaps
8038 + * with the current stack or dump buffer (since the contents
8039 + * in these could be changing while compression is going on)
8041 + return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
8044 +void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
8047 + memset(dp, sizeof(struct __dump_page), 0);
8051 + dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
8054 + dp->dp_page_index = dump_header.dh_num_dump_pages;
8055 + dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
8056 + + DUMP_HEADER_OFFSET; /* ?? */
8057 +#endif /* DUMP_DEBUG */
8060 +int dump_lcrash_add_data(unsigned long loc, unsigned long len)
8062 + struct page *page = (struct page *)loc;
8063 + void *addr, *buf = dump_config.dumper->curr_buf;
8064 + struct __dump_page *dp = (struct __dump_page *)buf;
8067 + if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
8070 + lcrash_init_pageheader(dp, page, len);
8071 + buf += sizeof(struct __dump_page);
8074 + addr = kmap_atomic(page, KM_DUMP);
8075 + size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
8076 + /* check for compression */
8077 + if (dump_allow_compress(page, bytes)) {
8078 + size = dump_compress_data((char *)addr, bytes,
8079 + (char *)buf, loc);
8081 + /* set the compressed flag if the page did compress */
8082 + if (size && (size < bytes)) {
8083 + dp->dp_flags |= DUMP_DH_COMPRESSED;
8085 + /* compression failed -- default to raw mode */
8086 + dp->dp_flags |= DUMP_DH_RAW;
8087 + memcpy(buf, addr, bytes);
8090 + /* memset(buf, 'A', size); temporary: testing only !! */
8091 + kunmap_atomic(addr, KM_DUMP);
8092 + dp->dp_size += size;
8098 + /* now update the header */
8100 + dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
8102 + dump_header.dh_num_dump_pages++;
8103 + dump_config.dumper->header_dirty++;
8105 + dump_config.dumper->curr_buf = buf;
8110 +int dump_lcrash_update_end_marker(void)
8112 + struct __dump_page *dp =
8113 + (struct __dump_page *)dump_config.dumper->curr_buf;
8114 + unsigned long left;
8117 + lcrash_init_pageheader(dp, NULL, 0);
8118 + dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
8120 + /* now update the header */
8122 + dump_header.dh_num_bytes += sizeof(*dp);
8124 + dump_config.dumper->curr_buf += sizeof(*dp);
8125 + left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;
8130 + if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
8131 + printk("Seek failed at offset 0x%llx\n",
8132 + dump_config.dumper->curr_offset);
8136 + if (DUMP_BUFFER_SIZE > left)
8137 + memset(dump_config.dumper->curr_buf, 'm',
8138 + DUMP_BUFFER_SIZE - left);
8140 + if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
8141 + DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
8142 + return (ret < 0) ? ret : -ENOSPC;
8145 + dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;
8147 + if (left > DUMP_BUFFER_SIZE) {
8148 + left -= DUMP_BUFFER_SIZE;
8149 + memcpy(dump_config.dumper->dump_buf,
8150 + dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
8151 + dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
8160 +/* Default Formatter (lcrash) */
8161 +struct dump_fmt_ops dump_fmt_lcrash_ops = {
8162 + .configure_header = dump_lcrash_configure_header,
8163 + .update_header = dump_generic_update_header,
8164 + .save_context = dump_lcrash_save_context,
8165 + .add_data = dump_lcrash_add_data,
8166 + .update_end_marker = dump_lcrash_update_end_marker
8169 +struct dump_fmt dump_fmt_lcrash = {
8171 + .ops = &dump_fmt_lcrash_ops
8174 Index: linux-2.6.10/drivers/dump/dump_setup.c
8175 ===================================================================
8176 --- linux-2.6.10.orig/drivers/dump/dump_setup.c 2005-04-05 19:01:49.158500672 +0800
8177 +++ linux-2.6.10/drivers/dump/dump_setup.c 2005-04-05 16:47:53.939205712 +0800
8180 + * Standard kernel function entry points for Linux crash dumps.
8182 + * Created by: Matt Robinson (yakker@sourceforge.net)
8183 + * Contributions from SGI, IBM, HP, MCL, and others.
8185 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
8186 + * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
8187 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
8188 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
8190 + * This code is released under version 2 of the GNU GPL.
8194 + * -----------------------------------------------------------------------
8198 + * This dump code goes back to SGI's first attempts at dumping system
8199 + * memory on SGI systems running IRIX. A few developers at SGI needed
8200 + * a way to take this system dump and analyze it, and created 'icrash',
8201 + * or IRIX Crash. The mechanism (the dumps and 'icrash') were used
8202 + * by support people to generate crash reports when a system failure
8203 + * occurred. This was vital for large system configurations that
8204 + * couldn't apply patch after patch after fix just to hope that the
8205 + * problems would go away. So the system memory, along with the crash
8206 + * dump analyzer, allowed support people to quickly figure out what the
8207 + * problem was on the system with the crash dump.
8209 + * In comes Linux. SGI started moving towards the open source community,
8210 + * and upon doing so, SGI wanted to take its support utilities into Linux
8211 + * with the hopes that they would end up in the kernel and user space to
8212 + * be used by SGI's customers buying SGI Linux systems. One of the first
8213 + * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
8214 + * Dumps. LKCD consists of a patch to the kernel to enable system
8215 + * dumping, along with 'lcrash', or Linux Crash, to analyze the system
8216 + * memory dump. A few additional system scripts and kernel modifications
8217 + * are also included to make the dump mechanism and dump data easier to
8218 + * process and use.
8220 + * As soon as LKCD was released into the open source community, a number
8221 + * of larger companies started to take advantage of it. Today, there are
8222 + * many community members that contribute to LKCD, and it continues to
8223 + * flourish and grow as an open source project.
8227 + * DUMP TUNABLES (read/write with ioctl, readonly with /proc)
8229 + * This is the list of system tunables (via /proc) that are available
8230 + * for Linux systems. All the read, write, etc., functions are listed
8231 + * here. Currently, there are a few different tunables for dumps:
8233 + * dump_device (used to be dumpdev):
8234 + * The device for dumping the memory pages out to. This
8235 + * may be set to the primary swap partition for disruptive dumps,
8236 + * and must be an unused partition for non-disruptive dumps.
8237 + * Todo: In the case of network dumps, this may be interpreted
8238 + * as the IP address of the netdump server to connect to.
8240 + * dump_compress (used to be dump_compress_pages):
8241 + * This is the flag which indicates which compression mechanism
8242 + * to use. This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
8243 + * This is the current set of values:
8245 + * 0: DUMP_COMPRESS_NONE -- Don't compress any pages.
8246 + * 1: DUMP_COMPRESS_RLE -- This uses RLE compression.
8247 + * 2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
8250 + * The amount of effort the dump module should make to save
8251 + * information for post crash analysis. This value is now
8252 + * a BITMASK value, not an index:
8254 + * 0: Do nothing, no dumping. (DUMP_LEVEL_NONE)
8256 + * 1: Print out the dump information to the dump header, and
8257 + * write it out to the dump_device. (DUMP_LEVEL_HEADER)
8259 + * 2: Write out the dump header and all kernel memory pages.
8260 + * (DUMP_LEVEL_KERN)
8262 + * 4: Write out the dump header and all kernel and user
8263 + * memory pages. (DUMP_LEVEL_USED)
8265 + * 8: Write out the dump header and all conventional/cached
8266 + * memory (RAM) pages in the system (kernel, user, free).
8267 + * (DUMP_LEVEL_ALL_RAM)
8269 + * 16: Write out everything, including non-conventional memory
8270 + * like firmware, proms, I/O registers, uncached memory.
8271 + * (DUMP_LEVEL_ALL)
8273 + * The dump_level will default to 1.
8276 + * These are the flags to use when talking about dumps. There
8277 + * are lots of possibilities. This is a BITMASK value, not an index.
8279 + * -----------------------------------------------------------------------
8282 +#include <linux/kernel.h>
8283 +#include <linux/delay.h>
8284 +#include <linux/reboot.h>
8285 +#include <linux/fs.h>
8286 +#include <linux/dump.h>
8287 +#include <linux/ioctl32.h>
8288 +#include <linux/syscalls.h>
8289 +#include "dump_methods.h"
8290 +#include <linux/proc_fs.h>
8291 +#include <linux/module.h>
8292 +#include <linux/utsname.h>
8293 +#include <linux/highmem.h>
8294 +#include <linux/miscdevice.h>
8295 +#include <linux/sysrq.h>
8296 +#include <linux/sysctl.h>
8297 +#include <linux/nmi.h>
8298 +#include <linux/init.h>
8299 +#include <asm/hardirq.h>
8300 +#include <asm/uaccess.h>
8304 + * -----------------------------------------------------------------------
8305 + * V A R I A B L E S
8306 + * -----------------------------------------------------------------------
8309 +/* Dump tunables */
8310 +struct dump_config dump_config = {
8318 +static _dump_regs_t all_regs;
8321 +/* Global variables used in dump.h */
8322 +/* degree of system freeze when dumping */
8323 +enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;
8325 +/* Other global fields */
8326 +extern struct __dump_header dump_header;
8327 +struct dump_dev *dump_dev = NULL; /* Active dump device */
8328 +static int dump_compress = 0;
8330 +static u32 dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
8331 + unsigned long loc);
8332 +struct __dump_compress dump_none_compression = {
8333 + .compress_type = DUMP_COMPRESS_NONE,
8334 + .compress_func = dump_compress_none,
8335 + .compress_name = "none",
8338 +/* our device operations and functions */
8339 +static int dump_ioctl(struct inode *i, struct file *f,
8340 + unsigned int cmd, unsigned long arg);
8342 +#ifdef CONFIG_COMPAT
8343 +static int dw_long(unsigned int, unsigned int, unsigned long, struct file*);
8346 +static struct file_operations dump_fops = {
8347 + .owner = THIS_MODULE,
8348 + .ioctl = dump_ioctl,
8351 +static struct miscdevice dump_miscdev = {
8352 + .minor = CRASH_DUMP_MINOR,
8354 + .fops = &dump_fops,
8356 +MODULE_ALIAS_MISCDEV(CRASH_DUMP_MINOR);
8358 +/* static variables */
8359 +static int dump_okay = 0; /* can we dump out to disk? */
8360 +static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
8362 +/* used for dump compressors */
8363 +static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
8365 +/* list of registered dump targets */
8366 +static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
8368 +/* lkcd info structure -- this is used by lcrash for basic system data */
8369 +struct __lkcdinfo lkcdinfo = {
8370 + .ptrsz = (sizeof(void *) * 8),
8371 +#if defined(__LITTLE_ENDIAN)
8372 + .byte_order = __LITTLE_ENDIAN,
8374 + .byte_order = __BIG_ENDIAN,
8376 + .page_shift = PAGE_SHIFT,
8377 + .page_size = PAGE_SIZE,
8378 + .page_mask = PAGE_MASK,
8379 + .page_offset = PAGE_OFFSET,
8383 + * -----------------------------------------------------------------------
8384 + * / P R O C T U N A B L E F U N C T I O N S
8385 + * -----------------------------------------------------------------------
8388 +static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
8389 + void __user *buffer, size_t *lenp, loff_t *ppos);
8391 +static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
8392 + void __user *buffer, size_t *lenp, loff_t *ppos);
8394 + * sysctl-tuning infrastructure.
8396 +static ctl_table dump_table[] = {
8397 + { .ctl_name = CTL_DUMP_LEVEL,
8398 + .procname = DUMP_LEVEL_NAME,
8399 + .data = &dump_config.level,
8400 + .maxlen = sizeof(int),
8402 + .proc_handler = proc_doulonghex, },
8404 + { .ctl_name = CTL_DUMP_FLAGS,
8405 + .procname = DUMP_FLAGS_NAME,
8406 + .data = &dump_config.flags,
8407 + .maxlen = sizeof(int),
8409 + .proc_handler = proc_doulonghex, },
8411 + { .ctl_name = CTL_DUMP_COMPRESS,
8412 + .procname = DUMP_COMPRESS_NAME,
8413 + .data = &dump_compress, /* FIXME */
8414 + .maxlen = sizeof(int),
8416 + .proc_handler = proc_dointvec, },
8418 + { .ctl_name = CTL_DUMP_DEVICE,
8419 + .procname = DUMP_DEVICE_NAME,
8421 + .data = &dump_config.dump_device, /* FIXME */
8422 + .maxlen = sizeof(int),
8423 + .proc_handler = proc_dump_device },
8425 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
8426 + { .ctl_name = CTL_DUMP_ADDR,
8427 + .procname = DUMP_ADDR_NAME,
8429 + .data = &dump_config.dump_addr,
8430 + .maxlen = sizeof(unsigned long),
8431 + .proc_handler = proc_doulonghex },
8437 +static ctl_table dump_root[] = {
8438 + { .ctl_name = KERN_DUMP,
8439 + .procname = "dump",
8441 + .child = dump_table },
8445 +static ctl_table kernel_root[] = {
8446 + { .ctl_name = CTL_KERN,
8447 + .procname = "kernel",
8449 + .child = dump_root, },
8453 +static struct ctl_table_header *sysctl_header;
8456 + * -----------------------------------------------------------------------
8457 + * C O M P R E S S I O N F U N C T I O N S
8458 + * -----------------------------------------------------------------------
8462 + * Name: dump_compress_none()
8463 + * Func: Don't do any compression, period.
8466 +dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
8467 + unsigned long loc)
8469 + /* just return the old size */
8475 + * Name: dump_execute()
8476 + * Func: Execute the dumping process. This makes sure all the appropriate
8477 + * fields are updated correctly, and calls dump_execute_memdump(),
8478 + * which does the real work.
8481 +dump_execute(const char *panic_str, const struct pt_regs *regs)
8484 + unsigned long flags;
8486 + /* make sure we can dump */
8488 + pr_info("LKCD not yet configured, can't take dump now\n");
8492 + /* Exclude multiple dumps at the same time,
8493 + * and disable interrupts, some drivers may re-enable
8494 + * interrupts within silence()
8496 + * Try and acquire spin lock. If successful, leave preempt
8497 + * and interrupts disabled. See spin_lock_irqsave in spinlock.h
8499 + local_irq_save(flags);
8500 + if (!spin_trylock(&dump_lock)) {
8501 + local_irq_restore(flags);
8502 + pr_info("LKCD dump already in progress\n");
8506 + /* What state are interrupts really in? */
8507 + if (in_interrupt()){
8509 + printk(KERN_ALERT "Dumping from interrupt handler!\n");
8511 + printk(KERN_ALERT "Dumping from bottom half!\n");
8513 + __dump_clean_irq_state();
8517 + /* Bring system into the strictest level of quiescing for min drift
8518 + * dump drivers can soften this as required in dev->ops->silence()
8520 + dump_oncpu = smp_processor_id() + 1;
8521 + dump_silence_level = DUMP_HARD_SPIN_CPUS;
8523 + state = dump_generic_execute(panic_str, regs);
8526 + spin_unlock_irqrestore(&dump_lock, flags);
8529 + printk("Dump Incomplete or failed!\n");
8531 + printk("Dump Complete; %d dump pages saved.\n",
8532 + dump_header.dh_num_dump_pages);
8537 + * Name: dump_register_compression()
8538 + * Func: Register a dump compression mechanism.
8541 +dump_register_compression(struct __dump_compress *item)
8544 + list_add(&(item->list), &dump_compress_list);
8548 + * Name: dump_unregister_compression()
8549 + * Func: Remove a dump compression mechanism, and re-assign the dump
8550 + * compression pointer if necessary.
8553 +dump_unregister_compression(int compression_type)
8555 + struct list_head *tmp;
8556 + struct __dump_compress *dc;
8558 + /* let's make sure our list is valid */
8559 + if (compression_type != DUMP_COMPRESS_NONE) {
8560 + list_for_each(tmp, &dump_compress_list) {
8561 + dc = list_entry(tmp, struct __dump_compress, list);
8562 + if (dc->compress_type == compression_type) {
8563 + list_del(&(dc->list));
8571 + * Name: dump_compress_init()
8572 + * Func: Initialize (or re-initialize) compression scheme.
8575 +dump_compress_init(int compression_type)
8577 + struct list_head *tmp;
8578 + struct __dump_compress *dc;
8580 + /* try to remove the compression item */
8581 + list_for_each(tmp, &dump_compress_list) {
8582 + dc = list_entry(tmp, struct __dump_compress, list);
8583 + if (dc->compress_type == compression_type) {
8584 + dump_config.dumper->compress = dc;
8585 + dump_compress = compression_type;
8586 + pr_debug("Dump Compress %s\n", dc->compress_name);
8592 + * nothing on the list -- return ENODATA to indicate an error
8595 + * EAGAIN: reports "Resource temporarily unavailable" which
8596 + * isn't very enlightening.
8598 + printk("compression_type:%d not found\n", compression_type);
8604 +dumper_setup(unsigned long flags, unsigned long devid)
8608 + /* unconfigure old dumper if it exists */
8610 + if (dump_config.dumper) {
8611 + pr_debug("Unconfiguring current dumper\n");
8612 + dump_unconfigure();
8614 + /* set up new dumper */
8615 + if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
8616 + printk("Configuring softboot based dump \n");
8617 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
8618 + dump_config.dumper = &dumper_stage1;
8620 + printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
8624 + dump_config.dumper = &dumper_singlestage;
8626 + dump_config.dumper->dev = dump_dev;
8628 + ret = dump_configure(devid);
8631 + pr_debug("%s dumper set up for dev 0x%lx\n",
8632 + dump_config.dumper->name, devid);
8633 + dump_config.dump_device = devid;
8635 + printk("%s dumper set up failed for dev 0x%lx\n",
8636 + dump_config.dumper->name, devid);
8637 + dump_config.dumper = NULL;
8643 +dump_target_init(int target)
8646 + struct list_head *tmp;
8647 + struct dump_dev *dev;
8650 + case DUMP_FLAGS_DISKDUMP:
8651 + strcpy(type, "blockdev"); break;
8652 + case DUMP_FLAGS_NETDUMP:
8653 + strcpy(type, "networkdev"); break;
8659 + * This is a bit stupid, generating strings from a flag
8660 + * and doing strcmp. This is done because 'struct dump_dev'
8661 + * has string 'type_name' and not integer 'type'.
8663 + list_for_each(tmp, &dump_target_list) {
8664 + dev = list_entry(tmp, struct dump_dev, list);
8665 + if (strcmp(type, dev->type_name) == 0) {
8674 + * Name: dump_ioctl()
8675 + * Func: Allow all dump tunables through a standard ioctl() mechanism.
8676 + * This is far better than before, where we'd go through /proc,
8677 + * because now this will work for multiple OS and architectures.
8680 +dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
8682 + /* check capabilities */
8683 + if (!capable(CAP_SYS_ADMIN))
8686 + if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
8687 + /* dump device must be configured first */
8691 + * This is the main mechanism for controlling get/set data
8692 + * for various dump device parameters. The real trick here
8693 + * is setting the dump device (DIOSDUMPDEV). That's what
8694 + * triggers everything else.
8697 + case DIOSDUMPDEV: /* set dump_device */
8698 + pr_debug("Configuring dump device\n");
8699 + if (!(f->f_flags & O_RDWR))
8703 + return dumper_setup(dump_config.flags, arg);
8706 + case DIOGDUMPDEV: /* get dump_device */
8707 + return put_user((long)dump_config.dump_device, (long *)arg);
8709 + case DIOSDUMPLEVEL: /* set dump_level */
8710 + if (!(f->f_flags & O_RDWR))
8713 + /* make sure we have a positive value */
8717 + /* Fixme: clean this up */
8718 + dump_config.level = 0;
8719 + switch ((int)arg) {
8720 + case DUMP_LEVEL_ALL:
8721 + case DUMP_LEVEL_ALL_RAM:
8722 + dump_config.level |= DUMP_MASK_UNUSED;
8723 + case DUMP_LEVEL_USED:
8724 + dump_config.level |= DUMP_MASK_USED;
8725 + case DUMP_LEVEL_KERN:
8726 + dump_config.level |= DUMP_MASK_KERN;
8727 + case DUMP_LEVEL_HEADER:
8728 + dump_config.level |= DUMP_MASK_HEADER;
8729 + case DUMP_LEVEL_NONE:
8734 + pr_debug("Dump Level 0x%lx\n", dump_config.level);
8737 + case DIOGDUMPLEVEL: /* get dump_level */
8738 + /* fixme: handle conversion */
8739 + return put_user((long)dump_config.level, (long *)arg);
8742 + case DIOSDUMPFLAGS: /* set dump_flags */
8744 + if (!(f->f_flags & O_RDWR))
8747 + /* make sure we have a positive value */
8751 + if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
8752 + return -EINVAL; /* return proper error */
8754 + dump_config.flags = arg;
8756 + pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
8759 + case DIOGDUMPFLAGS: /* get dump_flags */
8760 + return put_user((long)dump_config.flags, (long *)arg);
8762 + case DIOSDUMPCOMPRESS: /* set the dump_compress status */
8763 + if (!(f->f_flags & O_RDWR))
8766 + return dump_compress_init((int)arg);
8768 + case DIOGDUMPCOMPRESS: /* get the dump_compress status */
8769 + return put_user((long)(dump_config.dumper ?
8770 + dump_config.dumper->compress->compress_type : 0),
8772 + case DIOGDUMPOKAY: /* check if dump is configured */
8773 + return put_user((long)dump_okay, (long *)arg);
8775 + case DIOSDUMPTAKE: /* Trigger a manual dump */
8776 + /* Do not proceed if lkcd not yet configured */
8778 + printk("LKCD not yet configured. Cannot take manual dump\n");
8782 + /* Take the dump */
8783 + return manual_handle_crashdump();
8787 + * these are network dump specific ioctls, let the
8788 + * module handle them.
8790 + return dump_dev_ioctl(cmd, arg);
8796 + * Handle special cases for dump_device
8797 + * changing the dump device requires opening the device
8800 +proc_dump_device(ctl_table *ctl, int write, struct file *f,
8801 + void __user *buffer, size_t *lenp, loff_t *ppos)
8803 + int *valp = ctl->data;
8807 + /* same permission checks as ioctl */
8808 + if (capable(CAP_SYS_ADMIN)) {
8809 + ret = proc_doulonghex(ctl, write, f, buffer, lenp, ppos);
8810 + if (ret == 0 && write && *valp != oval) {
8811 + /* need to restore old value to close properly */
8812 + dump_config.dump_device = (dev_t) oval;
8814 + ret = dumper_setup(dump_config.flags, (dev_t) *valp);
8821 +/* All for the want of a proc_do_xxx routine which prints values in hex */
8822 +/* Write is not implemented correctly, so mode is set to 0444 above. */
8824 +proc_doulonghex(ctl_table *ctl, int write, struct file *f,
8825 + void __user *buffer, size_t *lenp, loff_t *ppos)
8827 +#define TMPBUFLEN 21
8830 + char buf[TMPBUFLEN];
8832 + if (!ctl->data || !ctl->maxlen || !*lenp || (*ppos && !write)) {
8837 + i = (unsigned long *) ctl->data;
8840 + sprintf(buf, "0x%lx\n", (*i));
8841 + len = strlen(buf);
8844 + if(copy_to_user(buffer, buf, len))
8854 + * -----------------------------------------------------------------------
8855 + * I N I T F U N C T I O N S
8856 + * -----------------------------------------------------------------------
8859 +#ifdef CONFIG_COMPAT
8860 +static int dw_long(unsigned int fd, unsigned int cmd, unsigned long arg,
8863 + mm_segment_t old_fs = get_fs();
8865 + unsigned long val;
8867 + set_fs (KERNEL_DS);
8868 + err = sys_ioctl(fd, cmd, (u64)&val);
8870 + if (!err && put_user((unsigned int) val, (u32 *)arg))
8877 + * These register and unregister routines are exported for modules
8878 + * to register their dump drivers (like block, net etc)
8881 +dump_register_device(struct dump_dev *ddev)
8883 + struct list_head *tmp;
8884 + struct dump_dev *dev;
8886 + list_for_each(tmp, &dump_target_list) {
8887 + dev = list_entry(tmp, struct dump_dev, list);
8888 + if (strcmp(ddev->type_name, dev->type_name) == 0) {
8889 + printk("Target type %s already registered\n",
8891 + return -1; /* return proper error */
8894 + list_add(&(ddev->list), &dump_target_list);
8900 +dump_unregister_device(struct dump_dev *ddev)
8902 + list_del(&(ddev->list));
8903 + if (ddev != dump_dev)
8908 + if (dump_config.dumper)
8909 + dump_unconfigure();
8911 + dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
8914 + dump_config.dumper = NULL;
8917 +static int panic_event(struct notifier_block *this, unsigned long event,
8921 + get_current_general_regs(&all_regs);
8922 + get_current_cp14_regs(&all_regs);
8923 + get_current_cp15_regs(&all_regs);
8924 + dump_execute((const char *)ptr, &all_regs);
8926 + struct pt_regs regs;
8928 + get_current_regs(®s);
8929 + dump_execute((const char *)ptr, ®s);
8934 +extern struct notifier_block *panic_notifier_list;
8935 +static int panic_event(struct notifier_block *, unsigned long, void *);
8936 +static struct notifier_block panic_block = {
8937 + .notifier_call = panic_event,
8940 +#ifdef CONFIG_MAGIC_SYSRQ
8941 +/* Sysrq handler */
8942 +static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
8943 + struct tty_struct *tty) {
8945 + struct pt_regs regs;
8946 + get_current_regs(®s);
8947 + dump_execute("sysrq", ®s);
8950 + dump_execute("sysrq", pt_regs);
8954 +static struct sysrq_key_op sysrq_crashdump_op = {
8955 + .handler = sysrq_handle_crashdump,
8956 + .help_msg = "Dump",
8957 + .action_msg = "Starting crash dump",
8962 +dump_sysrq_register(void)
8964 +#ifdef CONFIG_MAGIC_SYSRQ
8965 + register_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
8970 +dump_sysrq_unregister(void)
8972 +#ifdef CONFIG_MAGIC_SYSRQ
8973 + unregister_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
8978 + * Name: dump_init()
8979 + * Func: Initialize the dump process. This will set up any architecture
8980 + * dependent code. The big key is we need the memory offsets before
8981 + * the page table is initialized, because the base memory offset
8982 + * is changed after paging_init() is called.
8987 + struct sysinfo info;
8990 + /* try to create our dump device */
8991 + err = misc_register(&dump_miscdev);
8993 + printk("cannot register dump character device!\n");
8997 + __dump_init((u64)PAGE_OFFSET);
8999 +#ifdef CONFIG_COMPAT
9000 + err = register_ioctl32_conversion(DIOSDUMPDEV, NULL);
9001 + err |= register_ioctl32_conversion(DIOGDUMPDEV, NULL);
9002 + err |= register_ioctl32_conversion(DIOSDUMPLEVEL, NULL);
9003 + err |= register_ioctl32_conversion(DIOGDUMPLEVEL, dw_long);
9004 + err |= register_ioctl32_conversion(DIOSDUMPFLAGS, NULL);
9005 + err |= register_ioctl32_conversion(DIOGDUMPFLAGS, dw_long);
9006 + err |= register_ioctl32_conversion(DIOSDUMPCOMPRESS, NULL);
9007 + err |= register_ioctl32_conversion(DIOGDUMPCOMPRESS, dw_long);
9008 + err |= register_ioctl32_conversion(DIOSTARGETIP, NULL);
9009 + err |= register_ioctl32_conversion(DIOGTARGETIP, NULL);
9010 + err |= register_ioctl32_conversion(DIOSTARGETPORT, NULL);
9011 + err |= register_ioctl32_conversion(DIOGTARGETPORT, NULL);
9012 + err |= register_ioctl32_conversion(DIOSSOURCEPORT, NULL);
9013 + err |= register_ioctl32_conversion(DIOGSOURCEPORT, NULL);
9014 + err |= register_ioctl32_conversion(DIOSETHADDR, NULL);
9015 + err |= register_ioctl32_conversion(DIOGETHADDR, NULL);
9016 + err |= register_ioctl32_conversion(DIOGDUMPOKAY, dw_long);
9017 + err |= register_ioctl32_conversion(DIOSDUMPTAKE, NULL);
9019 + printk(KERN_ERR "LKCD: registering ioctl32 translations failed\
9023 + /* set the dump_compression_list structure up */
9024 + dump_register_compression(&dump_none_compression);
9026 + /* grab the total memory size now (not if/when we crash) */
9027 + si_meminfo(&info);
9029 + /* set the memory size */
9030 + dump_header.dh_memory_size = (u64)info.totalram;
9032 + sysctl_header = register_sysctl_table(kernel_root, 0);
9033 + dump_sysrq_register();
9035 + notifier_chain_register(&panic_notifier_list, &panic_block);
9036 + dump_function_ptr = dump_execute;
9038 + pr_info("Crash dump driver initialized.\n");
9048 + if (dump_config.dumper)
9049 + dump_unconfigure();
9051 + /* arch-specific cleanup routine */
9054 +#ifdef CONFIG_COMPAT
9055 + err = unregister_ioctl32_conversion(DIOSDUMPDEV);
9056 + err |= unregister_ioctl32_conversion(DIOGDUMPDEV);
9057 + err |= unregister_ioctl32_conversion(DIOSDUMPLEVEL);
9058 + err |= unregister_ioctl32_conversion(DIOGDUMPLEVEL);
9059 + err |= unregister_ioctl32_conversion(DIOSDUMPFLAGS);
9060 + err |= unregister_ioctl32_conversion(DIOGDUMPFLAGS);
9061 + err |= unregister_ioctl32_conversion(DIOSDUMPCOMPRESS);
9062 + err |= unregister_ioctl32_conversion(DIOGDUMPCOMPRESS);
9063 + err |= unregister_ioctl32_conversion(DIOSTARGETIP);
9064 + err |= unregister_ioctl32_conversion(DIOGTARGETIP);
9065 + err |= unregister_ioctl32_conversion(DIOSTARGETPORT);
9066 + err |= unregister_ioctl32_conversion(DIOGTARGETPORT);
9067 + err |= unregister_ioctl32_conversion(DIOSSOURCEPORT);
9068 + err |= unregister_ioctl32_conversion(DIOGSOURCEPORT);
9069 + err |= unregister_ioctl32_conversion(DIOSETHADDR);
9070 + err |= unregister_ioctl32_conversion(DIOGETHADDR);
9071 + err |= unregister_ioctl32_conversion(DIOGDUMPOKAY);
9072 + err |= unregister_ioctl32_conversion(DIOSDUMPTAKE);
9074 + printk(KERN_ERR "LKCD: Unregistering ioctl32 translations failed\n");
9078 + /* ignore errors while unregistering -- since can't do anything */
9079 + unregister_sysctl_table(sysctl_header);
9080 + misc_deregister(&dump_miscdev);
9081 + dump_sysrq_unregister();
9082 + notifier_chain_unregister(&panic_notifier_list, &panic_block);
9083 + dump_function_ptr = NULL;
9086 +EXPORT_SYMBOL(dump_register_compression);
9087 +EXPORT_SYMBOL(dump_unregister_compression);
9088 +EXPORT_SYMBOL(dump_register_device);
9089 +EXPORT_SYMBOL(dump_unregister_device);
9090 +EXPORT_SYMBOL(dump_config);
9091 +EXPORT_SYMBOL(dump_silence_level);
9093 +EXPORT_SYMBOL(__dump_irq_enable);
9094 +EXPORT_SYMBOL(__dump_irq_restore);
9096 +MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
9097 +MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
9098 +MODULE_LICENSE("GPL");
9100 +module_init(dump_init);
9101 +module_exit(dump_cleanup);
9102 Index: linux-2.6.10/drivers/dump/dump_scheme.c
9103 ===================================================================
9104 --- linux-2.6.10.orig/drivers/dump/dump_scheme.c 2005-04-05 19:01:49.158500672 +0800
9105 +++ linux-2.6.10/drivers/dump/dump_scheme.c 2005-04-05 16:47:53.944204952 +0800
9108 + * Default single stage dump scheme methods
9110 + * Previously a part of dump_base.c
9112 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
9113 + * Split and rewrote LKCD dump scheme to generic dump method
9115 + * Derived from original code created by
9116 + * Matt Robinson <yakker@sourceforge.net>)
9118 + * Contributions from SGI, IBM, HP, MCL, and others.
9120 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
9121 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
9122 + * Copyright (C) 2002 International Business Machines Corp.
9124 + * This code is released under version 2 of the GNU GPL.
9128 + * Implements the default dump scheme, i.e. single-stage gathering and
9129 + * saving of dump data directly to the target device, which operates in
9130 + * a push mode, where the dumping system decides what data it saves
9131 + * taking into account pre-specified dump config options.
9133 + * Aside: The 2-stage dump scheme, where there is a soft-reset between
9134 + * the gathering and saving phases, also reuses some of these
9135 + * default routines (see dump_overlay.c)
9137 +#include <linux/types.h>
9138 +#include <linux/kernel.h>
9139 +#include <linux/mm.h>
9140 +#include <linux/slab.h>
9141 +#include <linux/delay.h>
9142 +#include <linux/reboot.h>
9143 +#include <linux/nmi.h>
9144 +#include <linux/dump.h>
9145 +#include "dump_methods.h"
9147 +extern int panic_timeout; /* time before reboot */
9149 +extern void dump_speedo(int);
9151 +/* Default sequencer used during single stage dumping */
9152 +/* Also invoked during stage 2 of soft-boot based dumping */
9153 +int dump_generic_sequencer(void)
9155 + struct dump_data_filter *filter = dump_config.dumper->filter;
9156 + int pass = 0, err = 0, save = 0;
9157 + int (*action)(unsigned long, unsigned long);
9160 + * We want to save the more critical data areas first in
9161 + * case we run out of space, encounter i/o failures, or get
9162 + * interrupted otherwise and have to give up midway
9163 + * So, run through the passes in increasing order
9165 + for (;filter->selector; filter++, pass++)
9167 + /* Assumes passes are exclusive (even across dumpers) */
9168 + /* Requires care when coding the selection functions */
9169 + if ((save = filter->level_mask & dump_config.level))
9170 + action = dump_save_data;
9172 + action = dump_skip_data;
9174 + if ((err = dump_iterator(pass, action, filter)) < 0)
9177 + printk("\n %d dump pages %s of %d each in pass %d\n",
9178 + err, save ? "saved" : "skipped", (int)DUMP_PAGE_SIZE, pass);
9182 + return (err < 0) ? err : 0;
9185 +static inline struct page *dump_get_page(loff_t loc)
9188 + unsigned long page_index = loc >> PAGE_SHIFT;
9190 + /* todo: complete this to account for ia64/discontig mem */
9191 + /* todo: and to check for validity, ram page, no i/o mem etc */
9192 + /* need to use pfn/physaddr equiv of kern_addr_valid */
9195 + * On ARM/XScale system, the physical address starts from
9196 + * PHYS_OFFSET, and it maybe the situation that PHYS_OFFSET != 0.
9197 + * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the
9198 + * page index starts from PHYS_PFN_OFFSET. When configuring
9199 + * filter, filter->start is assigned to 0 in dump_generic_configure.
9200 + * Here we want to adjust it by adding PHYS_PFN_OFFSET to it!
9203 + page_index += PHYS_PFN_OFFSET;
9205 + if (__dump_page_valid(page_index))
9206 + return pfn_to_page(page_index);
9212 +/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
9213 +/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
9214 +int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
9215 + struct dump_data_filter *filter)
9217 + /* Todo : fix unit, type */
9218 + loff_t loc, start, end;
9219 + int i, count = 0, err = 0;
9220 + struct page *page;
9222 + /* Todo: Add membanks code */
9223 + /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
9225 + for (i = 0; i < filter->num_mbanks; i++) {
9226 + start = filter->start[i];
9227 + end = filter->end[i];
9228 + for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
9229 + dump_config.dumper->curr_loc = loc;
9230 + page = dump_get_page(loc);
9231 + if (page && filter->selector(pass,
9232 + (unsigned long) page, DUMP_PAGE_SIZE)) {
9233 + if ((err = action((unsigned long)page,
9234 + DUMP_PAGE_SIZE))) {
9235 + printk("dump_page_iterator: err %d for "
9236 + "loc 0x%llx, in pass %d\n",
9238 + return err ? err : count;
9245 + return err ? err : count;
9249 + * Base function that saves the selected block of data in the dump
9250 + * Action taken when iterator decides that data needs to be saved
9252 +int dump_generic_save_data(unsigned long loc, unsigned long sz)
9255 + void *dump_buf = dump_config.dumper->dump_buf;
9256 + int left, bytes, ret;
9258 + if ((ret = dump_add_data(loc, sz))) {
9261 + buf = dump_config.dumper->curr_buf;
9263 + /* If we've filled up the buffer write it out */
9264 + if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
9265 + bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
9266 + if (bytes < DUMP_BUFFER_SIZE) {
9267 + printk("dump_write_buffer failed %d\n", bytes);
9268 + return bytes ? -ENOSPC : bytes;
9273 + /* -- A few chores to do from time to time -- */
9274 + dump_config.dumper->count++;
9276 + if (!(dump_config.dumper->count & 0x3f)) {
9278 +	/* Update the header every once in a while */
9278 + memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
9279 + if ((ret = dump_update_header()) < 0) {
9280 + /* issue warning */
9285 + touch_nmi_watchdog();
9286 + } else if (!(dump_config.dumper->count & 0x7)) {
9287 + /* Show progress so the user knows we aren't hung */
9288 + dump_speedo(dump_config.dumper->count >> 3);
9290 + /* Todo: Touch/Refresh watchdog */
9292 + /* --- Done with periodic chores -- */
9295 + * extra bit of copying to simplify verification
9296 + * in the second kernel boot based scheme
9298 + memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
9299 + DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
9301 + /* now adjust the leftover bits back to the top of the page */
9302 + /* this case would not arise during stage 2 (passthru) */
9303 + memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
9305 + memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
9307 + buf -= DUMP_BUFFER_SIZE;
9308 + dump_config.dumper->curr_buf = buf;
9314 +int dump_generic_skip_data(unsigned long loc, unsigned long sz)
9316 + /* dummy by default */
9321 + * Common low level routine to write a buffer to current dump device
9322 + * Expects checks for space etc to have been taken care of by the caller
9323 + * Operates serially at the moment for simplicity.
9324 + * TBD/Todo: Consider batching for improved throughput
9326 +int dump_ll_write(void *buf, unsigned long len)
9328 + long transferred = 0, last_transfer = 0;
9331 + /* make sure device is ready */
9332 + while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
9334 + printk("dump_dev_ready failed !err %d\n", ret);
9339 + if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
9340 + ret = last_transfer;
9341 + printk("dump_dev_write failed !err %d\n",
9345 + /* wait till complete */
9346 + while ((ret = dump_dev_ready(buf)) == -EAGAIN)
9350 + printk("i/o failed !err %d\n", ret);
9354 + len -= last_transfer;
9355 + buf += last_transfer;
9356 + transferred += last_transfer;
9358 + return (ret < 0) ? ret : transferred;
9361 +/* default writeout routine for single dump device */
9362 +/* writes out the dump data ensuring enough space is left for the end marker */
9363 +int dump_generic_write_buffer(void *buf, unsigned long len)
9368 + /* check for space */
9369 + if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
9370 + 2*DUMP_BUFFER_SIZE)) < 0) {
9371 + printk("dump_write_buffer: insuff space after offset 0x%llx\n",
9372 + dump_config.dumper->curr_offset);
9375 + /* alignment check would happen as a side effect of this */
9376 + if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
9379 + written = dump_ll_write(buf, len);
9383 + if (written < len)
9384 + written = written ? -ENOSPC : written;
9386 + dump_config.dumper->curr_offset += len;
9391 +int dump_generic_configure(unsigned long devid)
9393 + struct dump_dev *dev = dump_config.dumper->dev;
9394 + struct dump_data_filter *filter;
9398 + /* Allocate the dump buffer and initialize dumper state */
9399 + /* Assume that we get aligned addresses */
9400 + if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
9403 + if ((unsigned long)buf & (PAGE_SIZE - 1)) {
9404 + /* sanity check for page aligned address */
9405 + dump_free_mem(buf);
9406 + return -ENOMEM; /* fixme: better error code */
9409 + /* Initialize the rest of the fields */
9410 + dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
9413 + /* Open the dump device */
9417 + if ((ret = dev->ops->open(dev, devid))) {
9421 + /* Initialise the memory ranges in the dump filter */
9422 + for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
9423 + if (!filter->start[0] && !filter->end[0]) {
9426 + for_each_pgdat(pgdat) {
9427 + filter->start[i] =
9428 + (loff_t)pgdat->node_start_pfn << PAGE_SHIFT;
9430 + (loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT;
9433 + filter->num_mbanks = i;
9440 +int dump_generic_unconfigure(void)
9442 + struct dump_dev *dev = dump_config.dumper->dev;
9443 + void *buf = dump_config.dumper->dump_buf;
9446 + pr_debug("Generic unconfigure\n");
9447 + /* Close the dump device */
9448 + if (dev && (ret = dev->ops->release(dev)))
9451 + printk("Closed dump device\n");
9454 + dump_free_mem((buf - DUMP_PAGE_SIZE));
9456 + dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
9457 + pr_debug("Released dump buffer\n");
9462 +#ifdef CONFIG_DISCONTIGMEM
9464 +void dump_reconfigure_mbanks(void)
9467 + loff_t start, end, loc, loc_end;
9469 + struct dump_data_filter *filter = dump_config.dumper->filter;
9471 + for_each_pgdat(pgdat) {
9473 + start = (loff_t)(pgdat->node_start_pfn << PAGE_SHIFT);
9474 + end = ((loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT);
9475 + for(loc = start; loc < end; loc += (DUMP_PAGE_SIZE)) {
9477 + if(!(__dump_page_valid(loc >> PAGE_SHIFT)))
9480 + /* We found a valid page. This is the start */
9481 + filter->start[i] = loc;
9483 + /* Now loop here till you find the end */
9484 + for(loc_end = loc; loc_end < end; loc_end += (DUMP_PAGE_SIZE)) {
9486 + if(__dump_page_valid(loc_end >> PAGE_SHIFT)) {
9487 + /* This page could very well be the last page */
9488 + filter->end[i] = loc_end;
9497 + filter->num_mbanks = i;
9499 + /* Propagate memory bank information to other filters */
9500 + for (filter = dump_config.dumper->filter, filter++ ;filter->selector; filter++) {
9501 + for(i = 0; i < dump_config.dumper->filter->num_mbanks; i++) {
9502 + filter->start[i] = dump_config.dumper->filter->start[i];
9503 + filter->end[i] = dump_config.dumper->filter->end[i];
9504 + filter->num_mbanks = dump_config.dumper->filter->num_mbanks;
9510 +/* Set up the default dump scheme */
9512 +struct dump_scheme_ops dump_scheme_singlestage_ops = {
9513 + .configure = dump_generic_configure,
9514 + .unconfigure = dump_generic_unconfigure,
9515 + .sequencer = dump_generic_sequencer,
9516 + .iterator = dump_page_iterator,
9517 + .save_data = dump_generic_save_data,
9518 + .skip_data = dump_generic_skip_data,
9519 + .write_buffer = dump_generic_write_buffer,
9522 +struct dump_scheme dump_scheme_singlestage = {
9523 + .name = "single-stage",
9524 + .ops = &dump_scheme_singlestage_ops
9527 +/* The single stage dumper comprising all these */
9528 +struct dumper dumper_singlestage = {
9529 + .name = "single-stage",
9530 + .scheme = &dump_scheme_singlestage,
9531 + .fmt = &dump_fmt_lcrash,
9532 + .compress = &dump_none_compression,
9533 + .filter = dump_filter_table,
9537 Index: linux-2.6.10/drivers/dump/dump_gzip.c
9538 ===================================================================
9539 --- linux-2.6.10.orig/drivers/dump/dump_gzip.c 2005-04-05 19:01:49.158500672 +0800
9540 +++ linux-2.6.10/drivers/dump/dump_gzip.c 2005-04-05 16:47:53.937206016 +0800
9543 + * GZIP Compression functions for kernel crash dumps.
9545 + * Created by: Matt Robinson (yakker@sourceforge.net)
9546 + * Copyright 2001 Matt D. Robinson. All rights reserved.
9548 + * This code is released under version 2 of the GNU GPL.
9552 +#include <linux/config.h>
9553 +#include <linux/module.h>
9554 +#include <linux/sched.h>
9555 +#include <linux/fs.h>
9556 +#include <linux/file.h>
9557 +#include <linux/init.h>
9558 +#include <linux/slab.h>
9559 +#include <linux/dump.h>
9560 +#include <linux/zlib.h>
9561 +#include <linux/vmalloc.h>
9563 +static void *deflate_workspace;
9564 +static unsigned long workspace_paddr[2];
9566 +static u8 *safety_buffer;
9569 + * Name: dump_compress_gzip()
9570 + * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the.
9571 + * deflate functions similar to what's used in PPP).
9574 +dump_compress_gzip(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
9575 + unsigned long loc)
9577 + /* error code and dump stream */
9579 + z_stream dump_stream;
9580 + struct page *pg = (struct page *)loc;
9581 + unsigned long paddr = page_to_pfn(pg) << PAGE_SHIFT;
9582 + static int warning = 0;
9584 + dump_stream.workspace = deflate_workspace;
9585 + if ((paddr == workspace_paddr[0]) || (paddr == workspace_paddr[1])) {
9587 + * This page belongs to deflate_workspace used as temporary
9588 + * buffer for compression. Hence, dump them without compression.
9592 + if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) {
9593 + /* fall back to RLE compression */
9594 + printk("dump_compress_gzip(): zlib_deflateInit() "
9595 + "failed (%d)!\n", err);
9599 + /* copy the old page to the safety buffer */
9600 + if (oldsize <= DUMP_PAGE_SIZE) {
9601 + memcpy(safety_buffer, old, oldsize);
9602 + dump_stream.next_in = (u8 *) safety_buffer;
9605 + printk("dump_compress_gzip oversize input: %d\n",
9609 + dump_stream.next_in = (u8 *) old;
9612 + /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */
9613 + dump_stream.avail_in = oldsize;
9615 + /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */
9616 + dump_stream.next_out = new;
9617 + dump_stream.avail_out = newsize;
9619 + /* deflate the page -- check for error */
9620 + err = zlib_deflate(&dump_stream, Z_FINISH);
9621 + if (err != Z_STREAM_END) {
9622 + /* zero is return code here */
9623 + (void)zlib_deflateEnd(&dump_stream);
9624 + printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n",
9629 + /* let's end the deflated compression stream */
9630 + if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) {
9631 + printk("dump_compress_gzip(): zlib_deflateEnd() "
9632 + "failed (%d)!\n", err);
9635 + /* return the compressed byte total (if it's smaller) */
9636 + if (dump_stream.total_out >= oldsize) {
9639 + return dump_stream.total_out;
9642 +/* setup the gzip compression functionality */
9643 +static struct __dump_compress dump_gzip_compression = {
9644 + .compress_type = DUMP_COMPRESS_GZIP,
9645 + .compress_func = dump_compress_gzip,
9646 + .compress_name = "GZIP",
9650 + * Name: dump_compress_gzip_init()
9651 + * Func: Initialize gzip as a compression mechanism.
9654 +dump_compress_gzip_init(void)
9658 + deflate_workspace = vmalloc(zlib_deflate_workspacesize());
9659 + if (!deflate_workspace) {
9660 + printk("dump_compress_gzip_init(): Failed to "
9661 + "alloc %d bytes for deflate workspace\n",
9662 + zlib_deflate_workspacesize());
9666 + * Need to find pages (workspace) that are used for compression.
9667 + * Even though zlib_deflate_workspacesize() is 64 pages (approximately)
9668 + * depends on the arch, we used only 2 pages. Hence, get the physical
9669 + * addresses for these 2 pages and used them to not to compress those
9672 + pg = vmalloc_to_page(deflate_workspace);
9673 + workspace_paddr[0] = page_to_pfn(pg) << PAGE_SHIFT;
9674 + pg = vmalloc_to_page(deflate_workspace + DUMP_PAGE_SIZE);
9675 + workspace_paddr[1] = page_to_pfn(pg) << PAGE_SHIFT;
9677 + /* Eliminate the possibility of real data getting a compression
9681 + if (!(safety_buffer = (void *)__get_free_pages(GFP_KERNEL,
9682 + get_order(DUMP_PAGE_SIZE))))
9685 + printk("dump gzip safety buffer: %p, %d\n", safety_buffer,
9686 + (int)DUMP_PAGE_SIZE);
9688 + dump_register_compression(&dump_gzip_compression);
9693 + * Name: dump_compress_gzip_cleanup()
9694 + * Func: Remove gzip as a compression mechanism.
9697 +dump_compress_gzip_cleanup(void)
9699 + vfree(deflate_workspace);
9700 + if (safety_buffer) {
9701 + free_pages((unsigned long)safety_buffer,
9702 + get_order(DUMP_PAGE_SIZE));
9703 + safety_buffer = NULL;
9706 + dump_unregister_compression(DUMP_COMPRESS_GZIP);
9709 +/* module initialization */
9710 +module_init(dump_compress_gzip_init);
9711 +module_exit(dump_compress_gzip_cleanup);
9713 +MODULE_LICENSE("GPL");
9714 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
9715 +MODULE_DESCRIPTION("Gzip compression module for crash dump driver");
9716 Index: linux-2.6.10/drivers/dump/dump_filters.c
9717 ===================================================================
9718 --- linux-2.6.10.orig/drivers/dump/dump_filters.c 2005-04-05 19:01:49.158500672 +0800
9719 +++ linux-2.6.10/drivers/dump/dump_filters.c 2005-04-05 16:47:53.942205256 +0800
9722 + * Default filters to select data to dump for various passes.
9724 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
9725 + * Split and rewrote default dump selection logic to generic dump
9726 + * method interfaces
9727 + * Derived from a portion of dump_base.c created by
9728 + * Matt Robinson <yakker@sourceforge.net>)
9730 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
9731 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
9732 + * Copyright (C) 2002 International Business Machines Corp.
9734 + * Used during single-stage dumping and during stage 1 of the 2-stage scheme
9735 + * (Stage 2 of the 2-stage scheme uses the fully transparent filters
9736 + * i.e. passthru filters in dump_overlay.c)
9738 + * Future: Custom selective dump may involve a different set of filters.
9740 + * This code is released under version 2 of the GNU GPL.
9743 +#include <linux/kernel.h>
9744 +#include <linux/bootmem.h>
9745 +#include <linux/mm.h>
9746 +#include <linux/slab.h>
9747 +#include <linux/dump.h>
9748 +#include "dump_methods.h"
9750 +#define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */
9751 +static unsigned long bootmap_pages;
9753 +/* Copied from mm/bootmem.c - FIXME */
9754 +/* return the number of _pages_ that will be allocated for the boot bitmap */
9755 +void dump_calc_bootmap_pages (void)
9757 + unsigned long mapsize;
9758 + unsigned long pages = num_physpages;
9760 + mapsize = (pages+7)/8;
9761 + mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
9762 + mapsize >>= PAGE_SHIFT;
9763 + bootmap_pages = mapsize + DUMP_PFN_SAFETY_MARGIN + 1;
9768 +extern unsigned long min_low_pfn;
9771 +int dump_low_page(struct page *p)
9773 + return ((page_to_pfn(p) >= min_low_pfn) &&
9774 + (page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
9777 +static inline int kernel_page(struct page *p)
9779 + /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
9780 + return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
9783 +static inline int user_page(struct page *p)
9785 + return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
9788 +static inline int unreferenced_page(struct page *p)
9790 + return !PageInuse(p) && !PageReserved(p);
9794 +/* loc marks the beginning of a range of pages */
9795 +int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
9797 + struct page *page = (struct page *)loc;
9798 + /* if any of the pages is a kernel page, select this set */
9800 + if (dump_low_page(page) || kernel_page(page))
9809 +/* loc marks the beginning of a range of pages */
9810 +int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
9812 + struct page *page = (struct page *)loc;
9814 + /* select if the set has any user page, and no kernel pages */
9816 + if (user_page(page) && !dump_low_page(page)) {
9818 + } else if (kernel_page(page) || dump_low_page(page)) {
9829 +/* loc marks the beginning of a range of pages */
9830 +int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
9832 + struct page *page = (struct page *)loc;
9834 + /* select if the set does not have any used pages */
9836 + if (!unreferenced_page(page) || dump_low_page(page)) {
9845 +/* dummy: last (non-existent) pass */
9846 +int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
9851 +/* TBD: resolve level bitmask ? */
9852 +struct dump_data_filter dump_filter_table[] = {
9853 + { .name = "kern", .selector = dump_filter_kernpages,
9854 + .level_mask = DUMP_MASK_KERN},
9855 + { .name = "user", .selector = dump_filter_userpages,
9856 + .level_mask = DUMP_MASK_USED},
9857 + { .name = "unused", .selector = dump_filter_unusedpages,
9858 + .level_mask = DUMP_MASK_UNUSED},
9859 + { .name = "none", .selector = dump_filter_none,
9860 + .level_mask = DUMP_MASK_REST},
9861 + { .name = "", .selector = NULL, .level_mask = 0}
9864 Index: linux-2.6.10/drivers/dump/dump_ppc64.c
9865 ===================================================================
9866 --- linux-2.6.10.orig/drivers/dump/dump_ppc64.c 2005-04-05 19:01:49.158500672 +0800
9867 +++ linux-2.6.10/drivers/dump/dump_ppc64.c 2005-04-05 16:47:53.931206928 +0800
9870 + * Architecture specific (ppc64) functions for Linux crash dumps.
9872 + * Created by: Matt Robinson (yakker@sgi.com)
9874 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
9876 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
9877 + * Copyright 2000 TurboLinux, Inc. All rights reserved.
9878 + * Copyright 2003, 2004 IBM Corporation
9880 + * This code is released under version 2 of the GNU GPL.
9884 + * The hooks for dumping the kernel virtual memory to disk are in this
9885 + * file. Any time a modification is made to the virtual memory mechanism,
9886 + * these routines must be changed to use the new mechanisms.
9888 +#include <linux/types.h>
9889 +#include <linux/fs.h>
9890 +#include <linux/dump.h>
9891 +#include <linux/mm.h>
9892 +#include <linux/vmalloc.h>
9893 +#include <linux/delay.h>
9894 +#include <linux/syscalls.h>
9895 +#include <asm/hardirq.h>
9896 +#include "dump_methods.h"
9897 +#include <linux/irq.h>
9898 +#include <asm/machdep.h>
9899 +#include <asm/uaccess.h>
9900 +#include <asm/irq.h>
9901 +#include <asm/page.h>
9902 +#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
9903 +#include <linux/kdb.h>
9906 +extern cpumask_t irq_affinity[];
9908 +static cpumask_t saved_affinity[NR_IRQS];
9910 +static __s32 saved_irq_count; /* saved preempt_count() flags */
9912 +static int alloc_dha_stack(void)
9917 + if (dump_header_asm.dha_stack[0])
9920 + ptr = (void *)vmalloc(THREAD_SIZE * num_possible_cpus());
9925 + for (i = 0; i < num_possible_cpus(); i++) {
9926 + dump_header_asm.dha_stack[i] =
9927 + (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
9932 +static int free_dha_stack(void)
9934 + if (dump_header_asm.dha_stack[0]) {
9935 + vfree((void*)dump_header_asm.dha_stack[0]);
9936 + dump_header_asm.dha_stack[0] = 0;
9941 +static int dump_expect_ipi[NR_CPUS];
9942 +static atomic_t waiting_for_dump_ipi;
9944 +extern void stop_this_cpu(void *);
9946 +dump_ipi_handler(struct pt_regs *regs)
9948 + int cpu = smp_processor_id();
9950 + if (!dump_expect_ipi[cpu])
9952 + dump_save_this_cpu(regs);
9953 + atomic_dec(&waiting_for_dump_ipi);
9956 + switch (dump_silence_level) {
9957 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
9958 + while (dump_oncpu) {
9959 + barrier(); /* paranoia */
9960 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
9961 + goto level_changed;
9962 + cpu_relax(); /* kill time nicely */
9966 + case DUMP_HALT_CPUS: /* Execute halt */
9967 + stop_this_cpu(NULL);
9970 + case DUMP_SOFT_SPIN_CPUS:
9971 + /* Mark the task so it spins in schedule */
9972 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
9979 +/* save registers on other processors
9980 + * If the other cpus don't respond we simply do not get their states.
9983 +__dump_save_other_cpus(void)
9985 + int i, cpu = smp_processor_id();
9986 + int other_cpus = num_online_cpus()-1;
9988 + if (other_cpus > 0) {
9989 + atomic_set(&waiting_for_dump_ipi, other_cpus);
9990 + for (i = 0; i < NR_CPUS; i++)
9991 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
9993 + printk(KERN_ALERT "sending IPI to other cpus...\n");
9994 + dump_send_ipi(dump_ipi_handler);
9996 + * may be we dont need to wait for IPI to be processed.
9997 + * just write out the header at the end of dumping, if
9998 + * this IPI is not processed until then, there probably
9999 + * is a problem and we just fail to capture state of
10001 + * However, we will wait 10 secs for other CPUs to respond.
10002 + * If not, proceed the dump process even though we failed
10003 + * to capture other CPU states.
10005 + i = 10000; /* wait max of 10 seconds */
10006 + while ((atomic_read(&waiting_for_dump_ipi) > 0) && (--i > 0)) {
10010 + printk(KERN_ALERT "done waiting: %d cpus not responding\n",
10011 + atomic_read(&waiting_for_dump_ipi));
10012 + dump_send_ipi(NULL); /* clear handler */
10017 + * Restore old irq affinities.
10020 +__dump_reset_irq_affinity(void)
10023 + irq_desc_t *irq_d;
10025 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
10027 + for_each_irq(i) {
10028 + irq_d = get_irq_desc(i);
10029 + if (irq_d->handler == NULL) {
10032 + if (irq_d->handler->set_affinity != NULL) {
10033 + irq_d->handler->set_affinity(i, saved_affinity[i]);
10039 + * Routine to save the old irq affinities and change affinities of all irqs to
10040 + * the dumping cpu.
10042 + * NB: Need to be expanded to multiple nodes.
10045 +__dump_set_irq_affinity(void)
10048 + cpumask_t cpu = CPU_MASK_NONE;
10049 + irq_desc_t *irq_d;
10051 + cpu_set(smp_processor_id(), cpu);
10053 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
10055 + for_each_irq(i) {
10056 + irq_d = get_irq_desc(i);
10057 + if (irq_d->handler == NULL) {
10060 + irq_affinity[i] = cpu;
10061 + if (irq_d->handler->set_affinity != NULL) {
10062 + irq_d->handler->set_affinity(i, irq_affinity[i]);
10066 +#else /* !CONFIG_SMP */
10067 +#define __dump_save_other_cpus() do { } while (0)
10068 +#define __dump_set_irq_affinity() do { } while (0)
10069 +#define __dump_reset_irq_affinity() do { } while (0)
10070 +#endif /* !CONFIG_SMP */
10073 +__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
10076 + memcpy(dest_regs, regs, sizeof(struct pt_regs));
10081 +__dump_save_context(int cpu, const struct pt_regs *regs,
10082 + struct task_struct *tsk)
10084 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
10085 + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
10087 + /* take a snapshot of the stack */
10088 + /* doing this enables us to tolerate slight drifts on this cpu */
10090 + if (dump_header_asm.dha_stack[cpu]) {
10091 + memcpy((void *)dump_header_asm.dha_stack[cpu],
10092 + STACK_START_POSITION(tsk),
10095 + dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
10099 + * Name: __dump_configure_header()
10100 + * Func: Configure the dump header with all proper values.
10103 +__dump_configure_header(const struct pt_regs *regs)
10108 +#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
10110 +kdb_sysdump(int argc, const char **argv, const char **envp, struct pt_regs *regs)
10112 + kdb_printf("Dumping to disk...\n");
10113 + dump("dump from kdb", regs);
10114 + kdb_printf("Dump Complete\n");
10120 + * Name: __dump_init()
10121 + * Func: Initialize the dumping routine process. This is in case
10122 + * it's necessary in the future.
10125 +__dump_init(uint64_t local_memory_start)
10127 +#if defined(FIXME) && defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
10128 + /* This won't currently work because interrupts are off in kdb
10129 + * and the dump process doesn't understand how to recover.
10131 + /* ToDo: add a command to query/set dump configuration */
10132 + kdb_register_repeat("sysdump", kdb_sysdump, "", "use lkcd to dump the system to disk (if configured)", 0, KDB_REPEAT_NONE);
10140 + * Name: __dump_open()
10141 + * Func: Open the dump device (architecture specific). This is in
10142 + * case it's necessary in the future.
10147 + alloc_dha_stack();
10152 + * Name: __dump_cleanup()
10153 + * Func: Free any architecture specific data structures. This is called
10154 + * when the dump module is being removed.
10157 +__dump_cleanup(void)
10159 + free_dha_stack();
10163 + * Kludge - dump from interrupt context is unreliable (Fixme)
10165 + * We do this so that softirqs initiated for dump i/o
10166 + * get processed and we don't hang while waiting for i/o
10167 + * to complete or in any irq synchronization attempt.
10169 + * This is not quite legal of course, as it has the side
10170 + * effect of making all interrupts & softirqs triggered
10171 + * while dump is in progress complete before currently
10172 + * pending softirqs and the currently executing interrupt
10175 +static inline void
10178 + saved_irq_count = irq_count();
10179 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
10182 +static inline void
10183 +irq_bh_restore(void)
10185 + preempt_count() |= saved_irq_count;
10189 + * Name: __dump_irq_enable
10190 + * Func: Reset system so interrupts are enabled.
10191 + * This is used for dump methods that require interrupts
10192 + * Eventually, all methods will have interrupts disabled
10193 + * and this code can be removed.
10195 + * Change irq affinities
10196 + * Re-enable interrupts
10199 +__dump_irq_enable(void)
10201 + __dump_set_irq_affinity();
10203 + local_irq_enable();
10208 + * Name: __dump_irq_restore
10209 + * Func: Resume the system state in an architecture-specific way.
10212 +__dump_irq_restore(void)
10214 + local_irq_disable();
10215 + __dump_reset_irq_affinity();
10216 + irq_bh_restore();
10220 +/* Cheap progress hack. It estimates pages to write and
10221 + * assumes all pages will go -- so it may get way off.
10222 + * As the progress is not displayed for other architectures, not used at this
10226 +__dump_progress_add_page(void)
10228 + unsigned long total_pages = nr_free_pages() + nr_inactive_pages + nr_active_pages;
10229 + unsigned int percent = (dump_header.dh_num_dump_pages * 100) / total_pages;
10232 + if (percent > last_percent && percent <= 100) {
10233 + sprintf(buf, "Dump %3d%% ", percent);
10234 + ppc64_dump_msg(0x2, buf);
10235 + last_percent = percent;
10241 +extern int dump_page_is_ram(unsigned long);
10243 + * Name: __dump_page_valid()
10244 + * Func: Check if page is valid to dump.
10247 +__dump_page_valid(unsigned long index)
10249 + if (!pfn_valid(index))
10252 + return dump_page_is_ram(index);
10256 + * Name: manual_handle_crashdump()
10257 + * Func: Interface for the lkcd dump command. Calls dump_execute()
10260 +manual_handle_crashdump(void)
10262 + struct pt_regs regs;
10264 + get_current_regs(&regs);
10265 + dump_execute("manual", &regs);
10270 + * Name: __dump_clean_irq_state()
10271 + * Func: Clean up from the previous IRQ handling state. Such as oops from
10272 + * interrupt handler or bottom half.
10275 +__dump_clean_irq_state(void)
10279 Index: linux-2.6.10/drivers/dump/dump_methods.h
10280 ===================================================================
10281 --- linux-2.6.10.orig/drivers/dump/dump_methods.h 2005-04-05 19:01:49.158500672 +0800
10282 +++ linux-2.6.10/drivers/dump/dump_methods.h 2005-04-05 16:47:53.930207080 +0800
10285 + * Generic interfaces for flexible system dump
10287 + * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
10289 + * Copyright (C) 2002 International Business Machines Corp.
10291 + * This code is released under version 2 of the GNU GPL.
10294 +#ifndef _LINUX_DUMP_METHODS_H
10295 +#define _LINUX_DUMP_METHODS_H
10298 + * Inspired by Matt Robinson's suggestion of introducing dump
10299 + * methods as a way to enable different crash dump facilities to
10300 + * coexist where each employs its own scheme or dumping policy.
10302 + * The code here creates a framework for flexible dump by defining
10303 + * a set of methods and providing associated helpers that differentiate
10304 + * between the underlying mechanism (how to dump), overall scheme
10305 + * (sequencing of stages and data dumped and associated quiescing),
10306 + * output format (what the dump output looks like), target type
10307 + * (where to save the dump; see dumpdev.h), and selection policy
10308 + * (state/data to dump).
10310 + * These sets of interfaces can be mixed and matched to build a
10311 + * dumper suitable for a given situation, allowing for
10312 + * flexibility as well as an appropriate degree of code reuse.
10313 + * For example all features and options of lkcd (including
10314 + * granular selective dumping in the near future) should be
10315 + * available even when say, the 2 stage soft-boot based mechanism
10316 + * is used for taking disruptive dumps.
10318 + * Todo: Additionally modules or drivers may supply their own
10319 + * custom dumpers which extend dump with module specific
10320 + * information or hardware state, and can even tweak the
10321 + * mechanism when it comes to saving state relevant to
10325 +#include <linux/sched.h>
10326 +#include <linux/slab.h>
10327 +#include <linux/highmem.h>
10328 +#include <linux/dumpdev.h>
10329 +#include <asm/page.h> /* get_order */
10331 +#define MAX_PASSES 6
10332 +#define MAX_DEVS 4
10335 +/* To customise selection of pages to be dumped in a given pass/group */
10336 +struct dump_data_filter{
10338 + int (*selector)(int, unsigned long, unsigned long);
10339 + ulong level_mask; /* dump level(s) for which this filter applies */
10340 + loff_t start[MAX_NUMNODES], end[MAX_NUMNODES]; /* location range applicable */
10341 + ulong num_mbanks; /* Number of memory banks. Greater than one for discontig memory (NUMA) */
10346 + * Determined by the kind of dump mechanism and appropriate
10349 +struct dump_scheme_ops {
10350 + /* sets aside memory, inits data structures etc */
10351 + int (*configure)(unsigned long devid);
10352 + /* releases resources */
10353 + int (*unconfigure)(void);
10355 + /* ordering of passes, invoking iterator */
10356 + int (*sequencer)(void);
10357 + /* iterates over system data, selects and acts on data to dump */
10358 + int (*iterator)(int, int (*)(unsigned long, unsigned long),
10359 + struct dump_data_filter *);
10360 + /* action when data is selected for dump */
10361 + int (*save_data)(unsigned long, unsigned long);
10362 + /* action when data is to be excluded from dump */
10363 + int (*skip_data)(unsigned long, unsigned long);
10364 + /* policies for space, multiple dump devices etc */
10365 + int (*write_buffer)(void *, unsigned long);
10368 +struct dump_scheme {
10369 + /* the name serves as an anchor to locate the scheme after reboot */
10371 + struct dump_scheme_ops *ops;
10372 + struct list_head list;
10375 +/* Quiescing/Silence levels (controls IPI callback behaviour) */
10376 +extern enum dump_silence_levels {
10377 + DUMP_SOFT_SPIN_CPUS = 1,
10378 + DUMP_HARD_SPIN_CPUS = 2,
10379 + DUMP_HALT_CPUS = 3,
10380 +} dump_silence_level;
10382 +/* determined by the dump (file) format */
10383 +struct dump_fmt_ops {
10384 + /* build header */
10385 + int (*configure_header)(const char *, const struct pt_regs *);
10386 + int (*update_header)(void); /* update header and write it out */
10387 + /* save curr context */
10388 + void (*save_context)(int, const struct pt_regs *,
10389 + struct task_struct *);
10390 + /* typically called by the save_data action */
10391 + /* add formatted data to the dump buffer */
10392 + int (*add_data)(unsigned long, unsigned long);
10393 + int (*update_end_marker)(void);
10397 + unsigned long magic;
10398 + char name[32]; /* lcrash, crash, elf-core etc */
10399 + struct dump_fmt_ops *ops;
10400 + struct list_head list;
10404 + * Modules will be able to add their own data capture schemes by
10405 + * registering their own dumpers. Typically they would use the
10406 + * primary dumper as a template and tune it with their routines.
10410 +/* The combined dumper profile (mechanism, scheme, dev, fmt) */
10412 + char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */
10413 + struct dump_scheme *scheme;
10414 + struct dump_fmt *fmt;
10415 + struct __dump_compress *compress;
10416 + struct dump_data_filter *filter;
10417 + struct dump_dev *dev;
10418 + /* state valid only for active dumper(s) - per instance */
10419 + /* run time state/context */
10421 + unsigned long count;
10422 + loff_t curr_offset; /* current logical offset into dump device */
10423 + loff_t curr_loc; /* current memory location */
10424 + void *curr_buf; /* current position in the dump buffer */
10425 + void *dump_buf; /* starting addr of dump buffer */
10426 + int header_dirty; /* whether the header needs to be written out */
10428 + struct list_head dumper_list; /* links to other dumpers */
10431 +/* Starting point to get to the current configured state */
10432 +struct dump_config {
10435 + struct dumper *dumper;
10436 + unsigned long dump_device;
10437 + unsigned long dump_addr; /* relevant only for in-memory dumps */
10438 + struct list_head dump_dev_list;
10441 +extern struct dump_config dump_config;
10443 +/* Used to save the dump config across a reboot for 2-stage dumps:
10445 + * Note: The scheme, format, compression and device type should be
10446 + * registered at bootup, for this config to be sharable across soft-boot.
10447 + * The function addresses could have changed and become invalid, and
10448 + * need to be set up again.
10450 +struct dump_config_block {
10451 + u64 magic; /* for a quick sanity check after reboot */
10452 + struct dump_memdev memdev; /* handle to dump stored in memory */
10453 + struct dump_config config;
10454 + struct dumper dumper;
10455 + struct dump_scheme scheme;
10456 + struct dump_fmt fmt;
10457 + struct __dump_compress compress;
10458 + struct dump_data_filter filter_table[MAX_PASSES];
10459 + struct dump_anydev dev[MAX_DEVS]; /* target dump device */
10463 +/* Wrappers that invoke the methods for the current (active) dumper */
10465 +/* Scheme operations */
10467 +static inline int dump_sequencer(void)
10469 + return dump_config.dumper->scheme->ops->sequencer();
10472 +static inline int dump_iterator(int pass, int (*action)(unsigned long,
10473 + unsigned long), struct dump_data_filter *filter)
10475 + return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
10478 +#define dump_save_data dump_config.dumper->scheme->ops->save_data
10479 +#define dump_skip_data dump_config.dumper->scheme->ops->skip_data
10481 +static inline int dump_write_buffer(void *buf, unsigned long len)
10483 + return dump_config.dumper->scheme->ops->write_buffer(buf, len);
10486 +static inline int dump_configure(unsigned long devid)
10488 + return dump_config.dumper->scheme->ops->configure(devid);
10491 +static inline int dump_unconfigure(void)
10493 + return dump_config.dumper->scheme->ops->unconfigure();
10496 +/* Format operations */
10498 +static inline int dump_configure_header(const char *panic_str,
10499 + const struct pt_regs *regs)
10501 + return dump_config.dumper->fmt->ops->configure_header(panic_str, regs);
10504 +static inline void dump_save_context(int cpu, const struct pt_regs *regs,
10505 + struct task_struct *tsk)
10507 + dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk);
10510 +static inline int dump_save_this_cpu(const struct pt_regs *regs)
10512 + int cpu = smp_processor_id();
10514 + dump_save_context(cpu, regs, current);
10518 +static inline int dump_update_header(void)
10520 + return dump_config.dumper->fmt->ops->update_header();
10523 +static inline int dump_update_end_marker(void)
10525 + return dump_config.dumper->fmt->ops->update_end_marker();
10528 +static inline int dump_add_data(unsigned long loc, unsigned long sz)
10530 + return dump_config.dumper->fmt->ops->add_data(loc, sz);
10533 +/* Compression operation */
10534 +static inline int dump_compress_data(char *src, int slen, char *dst,
10535 + unsigned long loc)
10537 + return dump_config.dumper->compress->compress_func(src, slen,
10538 + dst, DUMP_DPC_PAGE_SIZE, loc);
10542 +/* Prototypes of some default implementations of dump methods */
10544 +extern struct __dump_compress dump_none_compression;
10546 +/* Default scheme methods (dump_scheme.c) */
10548 +extern int dump_generic_sequencer(void);
10549 +extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned
10550 + long), struct dump_data_filter *filter);
10551 +extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
10552 +extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
10553 +extern int dump_generic_write_buffer(void *buf, unsigned long len);
10554 +extern int dump_generic_configure(unsigned long);
10555 +extern int dump_generic_unconfigure(void);
10556 +#ifdef CONFIG_DISCONTIGMEM
10557 +extern void dump_reconfigure_mbanks(void);
10560 +/* Default scheme template */
10561 +extern struct dump_scheme dump_scheme_singlestage;
10563 +/* Default dump format methods */
10565 +extern int dump_lcrash_configure_header(const char *panic_str,
10566 + const struct pt_regs *regs);
10567 +extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
10568 + struct task_struct *tsk);
10569 +extern int dump_generic_update_header(void);
10570 +extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
10571 +extern int dump_lcrash_update_end_marker(void);
10573 +/* Default format (lcrash) template */
10574 +extern struct dump_fmt dump_fmt_lcrash;
10576 +/* Default dump selection filter table */
10579 + * Entries listed in order of importance and correspond to passes
10580 + * The last entry (with a level_mask of zero) typically reflects data that
10581 + * won't be dumped -- this may for example be used to identify data
10582 + * that will certainly be skipped, so the corresponding memory areas can be
10583 + * utilized as scratch space.
10585 +extern struct dump_data_filter dump_filter_table[];
10587 +/* Some pre-defined dumpers */
10588 +extern struct dumper dumper_singlestage;
10589 +extern struct dumper dumper_stage1;
10590 +extern struct dumper dumper_stage2;
10592 +/* These are temporary */
10593 +#define DUMP_MASK_HEADER DUMP_LEVEL_HEADER
10594 +#define DUMP_MASK_KERN DUMP_LEVEL_KERN
10595 +#define DUMP_MASK_USED DUMP_LEVEL_USED
10596 +#define DUMP_MASK_UNUSED DUMP_LEVEL_ALL_RAM
10597 +#define DUMP_MASK_REST 0 /* dummy for now */
10599 +/* Helpers - move these to dump.h later ? */
10601 +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs);
10602 +extern int dump_ll_write(void *buf, unsigned long len);
10603 +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page);
10605 +static inline void dumper_reset(void)
10607 + dump_config.dumper->curr_buf = dump_config.dumper->dump_buf;
10608 + dump_config.dumper->curr_loc = 0;
10609 + dump_config.dumper->curr_offset = 0;
10610 + dump_config.dumper->count = 0;
10611 + dump_config.dumper->curr_pass = 0;
10615 + * May later be moulded to perform boot-time allocations so we can dump
10616 + * earlier during bootup
10618 +static inline void *dump_alloc_mem(unsigned long size)
10620 + return (void *) __get_free_pages(GFP_KERNEL, get_order(size));
10623 +static inline void dump_free_mem(void *buf)
10625 + struct page *page;
10627 + /* ignore reserved pages (e.g. post soft boot stage) */
10628 + if (buf && (page = virt_to_page(buf))) {
10629 + if (PageReserved(page))
10633 + * Allocated using __get_free_pages().
10635 + free_pages((unsigned long)buf,
10636 + get_order(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE));
10640 +#endif /* _LINUX_DUMP_METHODS_H */
10641 Index: linux-2.6.10/drivers/dump/Makefile
10642 ===================================================================
10643 --- linux-2.6.10.orig/drivers/dump/Makefile 2005-04-05 19:01:49.158500672 +0800
10644 +++ linux-2.6.10/drivers/dump/Makefile 2005-04-05 16:47:53.947204496 +0800
10647 +# Makefile for the dump device drivers.
10650 +dump-y := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o
10651 +ifeq ($(CONFIG_X86_64),)
10652 +ifeq ($(CONFIG_X86),y)
10653 +dump-$(CONFIG_X86) += dump_i386.o
10656 +dump-$(CONFIG_ARM) += dump_arm.o
10657 +dump-$(CONFIG_PPC64) += dump_ppc64.o
10658 +dump-$(CONFIG_X86_64) += dump_x8664.o
10659 +dump-$(CONFIG_IA64) += dump_ia64.o
10660 +dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o
10661 +dump-objs += $(dump-y)
10663 +obj-$(CONFIG_CRASH_DUMP) += dump.o
10664 +obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o
10665 +obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o
10666 +obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o
10667 +obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o
10668 Index: linux-2.6.10/drivers/Makefile
10669 ===================================================================
10670 --- linux-2.6.10.orig/drivers/Makefile 2004-12-25 05:36:00.000000000 +0800
10671 +++ linux-2.6.10/drivers/Makefile 2005-04-05 16:47:53.950204040 +0800
10673 obj-$(CONFIG_CPU_FREQ) += cpufreq/
10674 obj-$(CONFIG_MMC) += mmc/
10676 +obj-$(CONFIG_CRASH_DUMP) += dump/