-Index: linux-2.6.10/arch/i386/Kconfig.debug
+Index: linux-2.6.10/drivers/dump/dump_ia64.c
===================================================================
---- linux-2.6.10.orig/arch/i386/Kconfig.debug 2005-04-05 16:29:30.191000944 +0800
-+++ linux-2.6.10/arch/i386/Kconfig.debug 2005-04-05 16:47:53.904211032 +0800
-@@ -2,6 +2,63 @@
-
- source "lib/Kconfig.debug"
-
-+config CRASH_DUMP
-+ tristate "Crash dump support (EXPERIMENTAL)"
-+ depends on EXPERIMENTAL
-+ default n
-+ ---help---
-+ Say Y here to enable saving an image of system memory when a panic
-+ or other error occurs. Dumps can also be forced with the SysRq+d
-+ key if MAGIC_SYSRQ is enabled.
+--- linux-2.6.10.orig/drivers/dump/dump_ia64.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_ia64.c 2005-04-07 18:13:56.896754224 +0800
+@@ -0,0 +1,458 @@
++/*
++ * Architecture specific (ia64) functions for Linux crash dumps.
++ *
++ * Created by: Matt Robinson (yakker@sgi.com)
++ * Contributions from SGI, IBM, and others.
++ *
++ * 2.4 kernel modifications by: Matt D. Robinson (yakker@alacritech.com)
++ * ia64 kernel modifications by: Piet Delaney (piet@www.piet.net)
++ *
++ * Copyright (C) 2001 - 2002 Matt D. Robinson (yakker@alacritech.com)
++ * Copyright (C) 2002 Silicon Graphics, Inc. All rights reserved.
++ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
-+config KERNTYPES
-+ bool
-+ depends on CRASH_DUMP
-+ default y
++/*
++ * The hooks for dumping the kernel virtual memory to disk are in this
++ * file. Any time a modification is made to the virtual memory mechanism,
++ * these routines must be changed to use the new mechanisms.
++ */
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/smp.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/dump.h>
++#include "dump_methods.h"
++#include <linux/mm.h>
++#include <asm/processor.h>
++#include <asm-ia64/dump.h>
++#include <asm/hardirq.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
+
-+config CRASH_DUMP_BLOCKDEV
-+ tristate "Crash dump block device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps directly to a disk device.
++static __s32 saved_irq_count; /* saved preempt_count() flags */
+
-+config CRASH_DUMP_NETDEV
-+ tristate "Crash dump network device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps over a network device.
+
-+config CRASH_DUMP_MEMDEV
-+ bool "Crash dump staged memory driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow intermediate saving crash dumps in spare
-+ memory pages which would then be written out to disk
-+ later.
++static int alloc_dha_stack(void)
++{
++ int i;
++ void *ptr;
++
++ if (dump_header_asm.dha_stack[0])
++ {
++ return 0;
++ }
++ ptr = vmalloc(THREAD_SIZE * num_online_cpus());
++ if (!ptr) {
++ printk("vmalloc for dha_stacks failed\n");
++ return -ENOMEM;
++ }
++ bzero(ptr,THREAD_SIZE );
+
-+config CRASH_DUMP_SOFTBOOT
-+ bool "Save crash dump across a soft reboot"
-+ depends on CRASH_DUMP_MEMDEV
-+ help
-+ Say Y to allow a crash dump to be preserved in memory
-+ pages across a soft reboot and written out to disk
-+ thereafter. For this to work, CRASH_DUMP must be
-+ configured as part of the kernel (not as a module).
++ for (i = 0; i < num_online_cpus(); i++) {
++ dump_header_asm.dha_stack[i] = (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
++ }
++ return 0;
++}
+
-+config CRASH_DUMP_COMPRESS_RLE
-+ tristate "Crash dump RLE compression"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Run Length Encoding compression.
++static int free_dha_stack(void)
++{
++ if (dump_header_asm.dha_stack[0])
++ {
++ vfree((void*)dump_header_asm.dha_stack[0]);
++ dump_header_asm.dha_stack[0] = 0;
++ }
++ return 0;
++}
+
-+config CRASH_DUMP_COMPRESS_GZIP
-+ tristate "Crash dump GZIP compression"
-+ select ZLIB_INFLATE
-+ select ZLIB_DEFLATE
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Gnu Zip compression.
++/* a structure to get arguments into the following callback routine */
++struct unw_args {
++ int cpu;
++ struct task_struct *tsk;
++};
+
- config EARLY_PRINTK
- bool "Early printk" if EMBEDDED
- default y
-@@ -15,8 +72,8 @@
- with klogd/syslogd or the X server. You should normally N here,
- unless you want to debug such a crash.
-
--config DEBUG_STACKOVERFLOW
-- bool "Check for stack overflows"
-+config DEBUG_STACKOVERFLOW
-+ bool "Check for stack overflows"
- depends on DEBUG_KERNEL
-
- config KPROBES
-Index: linux-2.6.10/arch/i386/mm/init.c
-===================================================================
---- linux-2.6.10.orig/arch/i386/mm/init.c 2005-04-05 16:47:05.157621640 +0800
-+++ linux-2.6.10/arch/i386/mm/init.c 2005-04-05 16:47:53.909210272 +0800
-@@ -244,6 +244,13 @@
- return 0;
- }
-
-+/* To enable modules to check if a page is in RAM */
-+int pfn_is_ram(unsigned long pfn)
++static void
++do_save_sw(struct unw_frame_info *info, void *arg)
+{
-+ return (page_is_ram(pfn));
++ struct unw_args *uwargs = (struct unw_args *)arg;
++ int cpu = uwargs->cpu;
++ struct task_struct *tsk = uwargs->tsk;
++
++ dump_header_asm.dha_stack_ptr[cpu] = (uint64_t)info->sw;
++
++ if (tsk && dump_header_asm.dha_stack[cpu]) {
++ memcpy((void *)dump_header_asm.dha_stack[cpu],
++ STACK_START_POSITION(tsk),
++ THREAD_SIZE);
++ }
+}
+
++void
++__dump_save_context(int cpu, const struct pt_regs *regs,
++ struct task_struct *tsk)
++{
++ struct unw_args uwargs;
+
- #ifdef CONFIG_HIGHMEM
- pte_t *kmap_pte;
- pgprot_t kmap_prot;
-Index: linux-2.6.10/arch/i386/kernel/traps.c
-===================================================================
---- linux-2.6.10.orig/arch/i386/kernel/traps.c 2005-04-05 16:47:05.156621792 +0800
-+++ linux-2.6.10/arch/i386/kernel/traps.c 2005-04-05 16:47:53.906210728 +0800
-@@ -27,6 +27,7 @@
- #include <linux/ptrace.h>
- #include <linux/utsname.h>
- #include <linux/kprobes.h>
-+#include <linux/dump.h>
-
- #ifdef CONFIG_EISA
- #include <linux/ioport.h>
-@@ -382,6 +383,7 @@
- bust_spinlocks(0);
- die.lock_owner = -1;
- spin_unlock_irq(&die.lock);
-+ dump((char *)str, regs);
- if (in_interrupt())
- panic("Fatal exception in interrupt");
-
-@@ -654,6 +656,7 @@
- printk(" on CPU%d, eip %08lx, registers:\n",
- smp_processor_id(), regs->eip);
- show_registers(regs);
-+ dump((char *)msg, regs);
- printk("console shuts up ...\n");
- console_silent();
- spin_unlock(&nmi_print_lock);
-Index: linux-2.6.10/arch/i386/kernel/setup.c
-===================================================================
---- linux-2.6.10.orig/arch/i386/kernel/setup.c 2004-12-25 05:34:45.000000000 +0800
-+++ linux-2.6.10/arch/i386/kernel/setup.c 2005-04-05 16:47:53.905210880 +0800
-@@ -662,6 +662,10 @@
- */
- #define LOWMEMSIZE() (0x9f000)
-
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+unsigned long crashdump_addr = 0xdeadbeef;
-+#endif
++ dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
+
- static void __init parse_cmdline_early (char ** cmdline_p)
- {
- char c = ' ', *to = command_line, *from = saved_command_line;
-@@ -823,6 +827,11 @@
- if (c == ' ' && !memcmp(from, "vmalloc=", 8))
- __VMALLOC_RESERVE = memparse(from+8, &from);
-
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+ if (c == ' ' && !memcmp(from, "crashdump=", 10))
-+ crashdump_addr = memparse(from+10, &from);
-+#endif
++ if (regs) {
++ dump_header_asm.dha_smp_regs[cpu] = *regs;
++ }
+
- c = *(from++);
- if (!c)
- break;
-@@ -1288,6 +1297,10 @@
-
- static char * __init machine_specific_memory_setup(void);
-
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+extern void crashdump_reserve(void);
-+#endif
++ /* save a snapshot of the stack in a nice state for unwinding */
++ uwargs.cpu = cpu;
++ uwargs.tsk = tsk;
+
- /*
- * Determine if we were loaded by an EFI loader. If so, then we have also been
- * passed the efi memmap, systab, etc., so we should use these data structures
-@@ -1393,6 +1406,10 @@
- #endif
-
-
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+ crashdump_reserve(); /* Preserve crash dump state from prev boot */
-+#endif
++ unw_init_running(do_save_sw, (void *)&uwargs);
++}
+
- dmi_scan_machine();
-
- #ifdef CONFIG_X86_GENERICARCH
-Index: linux-2.6.10/arch/i386/kernel/smp.c
-===================================================================
---- linux-2.6.10.orig/arch/i386/kernel/smp.c 2005-04-05 16:47:05.154622096 +0800
-+++ linux-2.6.10/arch/i386/kernel/smp.c 2005-04-05 16:47:53.908210424 +0800
-@@ -19,6 +19,7 @@
- #include <linux/mc146818rtc.h>
- #include <linux/cache.h>
- #include <linux/interrupt.h>
-+#include <linux/dump.h>
-
- #include <asm/mtrr.h>
- #include <asm/tlbflush.h>
-@@ -143,6 +144,13 @@
- */
- cfg = __prepare_ICR(shortcut, vector);
-
-+ if (vector == DUMP_VECTOR) {
-+ /*
-+ * Setup DUMP IPI to be delivered as an NMI
-+ */
-+ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
-+ }
++#ifdef CONFIG_SMP
+
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
-@@ -220,6 +228,13 @@
- * program the ICR
- */
- cfg = __prepare_ICR(0, vector);
++extern cpumask_t irq_affinity[];
++#define irq_desc _irq_desc
++extern irq_desc_t irq_desc[];
++extern void dump_send_ipi(void);
++static cpumask_t saved_affinity[NR_IRQS];
+
-+ if (vector == DUMP_VECTOR) {
-+ /*
-+ * Setup DUMP IPI to be delivered as an NMI
-+ */
-+ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
-+ }
-
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
-@@ -506,6 +521,11 @@
-
- static struct call_data_struct * call_data;
-
-+void dump_send_ipi(void)
++/*
++ * Routine to save the old irq affinities and change affinities of all irqs to
++ * the dumping cpu.
++ */
++static void
++set_irq_affinity(void)
+{
-+ send_IPI_allbutself(DUMP_VECTOR);
-+}
++ int i;
++ cpumask_t cpu = CPU_MASK_NONE;
+
- /*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
-@@ -561,7 +581,7 @@
- return 0;
- }
-
--static void stop_this_cpu (void * dummy)
-+void stop_this_cpu (void * dummy)
- {
- /*
- * Remove this CPU:
-@@ -622,4 +642,3 @@
- atomic_inc(&call_data->finished);
- }
- }
--
-Index: linux-2.6.10/arch/i386/kernel/i386_ksyms.c
-===================================================================
---- linux-2.6.10.orig/arch/i386/kernel/i386_ksyms.c 2004-12-25 05:35:40.000000000 +0800
-+++ linux-2.6.10/arch/i386/kernel/i386_ksyms.c 2005-04-05 16:47:53.907210576 +0800
-@@ -16,6 +16,7 @@
- #include <linux/tty.h>
- #include <linux/highmem.h>
- #include <linux/time.h>
-+#include <linux/nmi.h>
-
- #include <asm/semaphore.h>
- #include <asm/processor.h>
-@@ -31,6 +32,7 @@
- #include <asm/tlbflush.h>
- #include <asm/nmi.h>
- #include <asm/ist.h>
-+#include <asm/e820.h>
- #include <asm/kdebug.h>
-
- extern void dump_thread(struct pt_regs *, struct user *);
-@@ -192,3 +194,20 @@
- #endif
-
- EXPORT_SYMBOL(csum_partial);
++ cpu_set(smp_processor_id(), cpu);
++ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
++ for (i = 0; i < NR_IRQS; i++) {
++ if (irq_desc[i].handler == NULL)
++ continue;
++ irq_affinity[i] = cpu;
++ if (irq_desc[i].handler->set_affinity != NULL)
++ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
++ }
++}
+
-+#ifdef CONFIG_CRASH_DUMP_MODULE
-+#ifdef CONFIG_SMP
-+extern irq_desc_t irq_desc[NR_IRQS];
-+extern cpumask_t irq_affinity[NR_IRQS];
-+extern void stop_this_cpu(void *);
-+EXPORT_SYMBOL(irq_desc);
-+EXPORT_SYMBOL(irq_affinity);
-+EXPORT_SYMBOL(stop_this_cpu);
-+EXPORT_SYMBOL(dump_send_ipi);
-+#endif
-+extern int pfn_is_ram(unsigned long);
-+EXPORT_SYMBOL(pfn_is_ram);
-+#ifdef ARCH_HAS_NMI_WATCHDOG
-+EXPORT_SYMBOL(touch_nmi_watchdog);
-+#endif
-+#endif
-Index: linux-2.6.10/arch/s390/Kconfig.debug
-===================================================================
---- linux-2.6.10.orig/arch/s390/Kconfig.debug 2004-12-25 05:34:31.000000000 +0800
-+++ linux-2.6.10/arch/s390/Kconfig.debug 2005-04-05 16:47:53.921208448 +0800
-@@ -2,4 +2,13 @@
-
- source "lib/Kconfig.debug"
-
-+config KERNTYPES
-+ bool "Kerntypes debugging information"
-+ default y
-+ ---help---
-+ Say Y here to save additional kernel debugging information in the
-+ file init/kerntypes.o. This information is used by crash analysis
-+ tools such as lcrash to assign structures to kernel addresses.
++/*
++ * Restore old irq affinities.
++ */
++static void
++reset_irq_affinity(void)
++{
++ int i;
+
++ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
++ for (i = 0; i < NR_IRQS; i++) {
++ if (irq_desc[i].handler == NULL)
++ continue;
++ if (irq_desc[i].handler->set_affinity != NULL)
++ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
++ }
++}
+
- endmenu
-Index: linux-2.6.10/arch/s390/boot/Makefile
-===================================================================
---- linux-2.6.10.orig/arch/s390/boot/Makefile 2004-12-25 05:35:49.000000000 +0800
-+++ linux-2.6.10/arch/s390/boot/Makefile 2005-04-05 16:47:53.922208296 +0800
-@@ -15,4 +15,4 @@
-
- install: $(CONFIGURE) $(obj)/image
- sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
-- System.map Kerntypes "$(INSTALL_PATH)"
-+ System.map init/Kerntypes "$(INSTALL_PATH)"
-Index: linux-2.6.10/arch/s390/boot/install.sh
-===================================================================
---- linux-2.6.10.orig/arch/s390/boot/install.sh 2004-12-25 05:35:01.000000000 +0800
-+++ linux-2.6.10/arch/s390/boot/install.sh 2005-04-05 16:47:53.921208448 +0800
-@@ -16,7 +16,8 @@
- # $1 - kernel version
- # $2 - kernel image file
- # $3 - kernel map file
--# $4 - default install path (blank if root directory)
-+# $4 - kernel type file
-+# $5 - default install path (blank if root directory)
- #
-
- # User may have a custom install script
-@@ -26,13 +27,13 @@
-
- # Default install - same as make zlilo
-
--if [ -f $4/vmlinuz ]; then
-- mv $4/vmlinuz $4/vmlinuz.old
-+if [ -f $5/vmlinuz ]; then
-+ mv $5/vmlinuz $5/vmlinuz.old
- fi
-
--if [ -f $4/System.map ]; then
-- mv $4/System.map $4/System.old
-+if [ -f $5/System.map ]; then
-+ mv $5/System.map $5/System.old
- fi
-
--cat $2 > $4/vmlinuz
--cp $3 $4/System.map
-+cat $2 > $5/vmlinuz
-+cp $3 $5/System.map
-Index: linux-2.6.10/arch/ia64/Kconfig.debug
-===================================================================
---- linux-2.6.10.orig/arch/ia64/Kconfig.debug 2004-12-25 05:34:32.000000000 +0800
-+++ linux-2.6.10/arch/ia64/Kconfig.debug 2005-04-05 16:47:53.917209056 +0800
-@@ -2,6 +2,65 @@
-
- source "lib/Kconfig.debug"
-
-+config CRASH_DUMP
-+ tristate "Crash dump support (EXPERIMENTAL)"
-+ depends on EXPERIMENTAL
-+ default n
-+ ---help---
-+ Say Y here to enable saving an image of system memory when a panic
-+ or other error occurs. Dumps can also be forced with the SysRq+d
-+ key if MAGIC_SYSRQ is enabled.
++#else /* !CONFIG_SMP */
++#define set_irq_affinity() do { } while (0)
++#define reset_irq_affinity() do { } while (0)
++#define save_other_cpu_states() do { } while (0)
++#endif /* !CONFIG_SMP */
+
-+config KERNTYPES
-+ bool
-+ depends on CRASH_DUMP
-+ default y
++#ifdef CONFIG_SMP
++static int dump_expect_ipi[NR_CPUS];
++static atomic_t waiting_for_dump_ipi;
++static int wait_for_dump_ipi = 2000; /* wait 2000 ms for ipi to be handled */
++extern void (*dump_trace_ptr)(struct pt_regs *);
+
-+config CRASH_DUMP_BLOCKDEV
-+ tristate "Crash dump block device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps directly to a disk device.
+
-+config CRASH_DUMP_NETDEV
-+ tristate "Crash dump network device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps over a network device.
++extern void stop_this_cpu(void);
+
-+config CRASH_DUMP_MEMDEV
-+ bool "Crash dump staged memory driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow intermediate saving crash dumps in spare
-+ memory pages which would then be written out to disk
-+ later.
++static int
++dump_nmi_callback(struct pt_regs *regs, int cpu)
++{
++ if (!dump_expect_ipi[cpu])
++ return 0;
+
-+config CRASH_DUMP_SOFTBOOT
-+ bool "Save crash dump across a soft reboot"
-+ depends on CRASH_DUMP_MEMDEV
-+ help
-+ Say Y to allow a crash dump to be preserved in memory
-+ pages across a soft reboot and written out to disk
-+ thereafter. For this to work, CRASH_DUMP must be
-+ configured as part of the kernel (not as a module).
++ dump_expect_ipi[cpu] = 0;
+
-+config CRASH_DUMP_COMPRESS_RLE
-+ tristate "Crash dump RLE compression"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Run Length Encoding compression.
++ dump_save_this_cpu(regs);
++ atomic_dec(&waiting_for_dump_ipi);
+
-+config CRASH_DUMP_COMPRESS_GZIP
-+ tristate "Crash dump GZIP compression"
-+ select ZLIB_INFLATE
-+ select ZLIB_DEFLATE
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Gnu Zip compression.
++ level_changed:
++ switch (dump_silence_level) {
++ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
++ while (dump_oncpu) {
++ barrier(); /* paranoia */
++ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
++ goto level_changed;
+
++ cpu_relax(); /* kill time nicely */
++ }
++ break;
+
++ case DUMP_HALT_CPUS: /* Execute halt */
++ stop_this_cpu();
++ break;
+
- choice
- prompt "Physical memory granularity"
- default IA64_GRANULE_64MB
-Index: linux-2.6.10/arch/ia64/kernel/traps.c
-===================================================================
---- linux-2.6.10.orig/arch/ia64/kernel/traps.c 2004-12-25 05:35:39.000000000 +0800
-+++ linux-2.6.10/arch/ia64/kernel/traps.c 2005-04-05 16:47:53.918208904 +0800
-@@ -21,6 +21,8 @@
- #include <asm/intrinsics.h>
- #include <asm/processor.h>
- #include <asm/uaccess.h>
-+#include <asm/nmi.h>
-+#include <linux/dump.h>
-
- extern spinlock_t timerlist_lock;
-
-@@ -89,6 +91,7 @@
- printk("%s[%d]: %s %ld [%d]\n",
- current->comm, current->pid, str, err, ++die_counter);
- show_regs(regs);
-+ dump((char *)str, regs);
- } else
- printk(KERN_ERR "Recursive die() failure, output suppressed\n");
-
-Index: linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c
-===================================================================
---- linux-2.6.10.orig/arch/ia64/kernel/ia64_ksyms.c 2005-04-05 16:29:27.954340968 +0800
-+++ linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c 2005-04-05 16:47:53.917209056 +0800
-@@ -7,7 +7,6 @@
-
- #include <linux/config.h>
- #include <linux/module.h>
--
- #include <linux/string.h>
- EXPORT_SYMBOL(memset);
- EXPORT_SYMBOL(memchr);
-@@ -28,6 +27,9 @@
- EXPORT_SYMBOL(strstr);
- EXPORT_SYMBOL(strpbrk);
-
-+#include <linux/syscalls.h>
-+EXPORT_SYMBOL(sys_ioctl);
++ case DUMP_SOFT_SPIN_CPUS:
++ /* Mark the task so it spins in schedule */
++ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
++ break;
++ }
+
- #include <asm/checksum.h>
- EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
-
-@@ -125,3 +127,21 @@
- # endif
- # endif
- #endif
++ return 1;
++}
+
-+#include <asm/hw_irq.h>
++int IPI_handler(struct pt_regs *regs)
++{
++ int cpu;
++ cpu = task_cpu(current);
++ return(dump_nmi_callback(regs, cpu));
++}
+
-+#ifdef CONFIG_CRASH_DUMP_MODULE
-+#ifdef CONFIG_SMP
-+extern irq_desc_t _irq_desc[NR_IRQS];
-+extern cpumask_t irq_affinity[NR_IRQS];
-+extern void stop_this_cpu(void *);
-+extern int (*dump_ipi_function_ptr)(struct pt_regs *);
-+extern void dump_send_ipi(void);
-+EXPORT_SYMBOL(_irq_desc);
-+EXPORT_SYMBOL(irq_affinity);
-+EXPORT_SYMBOL(stop_this_cpu);
-+EXPORT_SYMBOL(dump_send_ipi);
-+EXPORT_SYMBOL(dump_ipi_function_ptr);
-+#endif
-+#endif
++/* save registers on other processors */
++void
++__dump_save_other_cpus(void)
++{
++ int i, cpu = smp_processor_id();
++ int other_cpus = num_online_cpus()-1;
++ int wait_time = wait_for_dump_ipi;
+
-Index: linux-2.6.10/arch/ia64/kernel/irq.c
-===================================================================
---- linux-2.6.10.orig/arch/ia64/kernel/irq.c 2004-12-25 05:35:27.000000000 +0800
-+++ linux-2.6.10/arch/ia64/kernel/irq.c 2005-04-05 16:47:53.919208752 +0800
-@@ -933,7 +933,11 @@
-
- static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
-
-+#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
-+cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-+#else
- static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-+#endif
-
- static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
-
-Index: linux-2.6.10/arch/ia64/kernel/smp.c
-===================================================================
---- linux-2.6.10.orig/arch/ia64/kernel/smp.c 2004-12-25 05:35:40.000000000 +0800
-+++ linux-2.6.10/arch/ia64/kernel/smp.c 2005-04-05 16:47:53.920208600 +0800
-@@ -31,6 +31,10 @@
- #include <linux/efi.h>
- #include <linux/bitops.h>
-
-+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
-+#include <linux/dump.h>
-+#endif
++ if (other_cpus > 0) {
++ atomic_set(&waiting_for_dump_ipi, other_cpus);
+
- #include <asm/atomic.h>
- #include <asm/current.h>
- #include <asm/delay.h>
-@@ -67,6 +71,11 @@
- #define IPI_CALL_FUNC 0
- #define IPI_CPU_STOP 1
-
-+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
-+#define IPI_DUMP_INTERRUPT 4
-+ int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
-+#endif
++ for (i = 0; i < NR_CPUS; i++) {
++ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
++ }
+
- /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
- static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
-
-@@ -84,7 +93,9 @@
- spin_unlock_irq(&call_lock);
- }
-
--static void
++ dump_ipi_function_ptr = IPI_handler;
++
++ wmb();
+
-+/*changed static void stop_this_cpu -> void stop_this_cpu */
-+void
- stop_this_cpu (void)
- {
- /*
-@@ -155,6 +166,15 @@
- case IPI_CPU_STOP:
- stop_this_cpu();
- break;
-+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
-+ case IPI_DUMP_INTERRUPT:
-+ if( dump_ipi_function_ptr != NULL ) {
-+ if (!dump_ipi_function_ptr(regs)) {
-+ printk(KERN_ERR "(*dump_ipi_function_ptr)(): rejected IPI_DUMP_INTERRUPT\n");
-+ }
-+ }
-+ break;
++ dump_send_ipi();
++	/* Maybe we don't need to wait for the IPI to be processed.
++	 * Just write out the header at the end of dumping; if
++	 * this IPI is not processed until then, there probably
++	 * is a problem and we just fail to capture the state of
++	 * other cpus. */
++ while(wait_time-- && (atomic_read(&waiting_for_dump_ipi) > 0)) {
++ barrier();
++ mdelay(1);
++ }
++ if (wait_time <= 0) {
++ printk("dump ipi timeout, proceeding...\n");
++ }
++ }
++}
+#endif
-
- default:
- printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
-@@ -369,9 +389,17 @@
- {
- send_IPI_allbutself(IPI_CPU_STOP);
- }
-+EXPORT_SYMBOL(smp_send_stop);
-
- int __init
- setup_profiling_timer (unsigned int multiplier)
- {
- return -EINVAL;
- }
++/*
++ * Kludge - dump from interrupt context is unreliable (Fixme)
++ *
++ * We do this so that softirqs initiated for dump i/o
++ * get processed and we don't hang while waiting for i/o
++ * to complete or in any irq synchronization attempt.
++ *
++ * This is not quite legal of course, as it has the side
++ * effect of making all interrupts & softirqs triggered
++ * while dump is in progress complete before currently
++ * pending softirqs and the currently executing interrupt
++ * code.
++ */
++static inline void
++irq_bh_save(void)
++{
++ saved_irq_count = irq_count();
++ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
++}
+
-+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
-+void dump_send_ipi(void)
++static inline void
++irq_bh_restore(void)
+{
-+ send_IPI_allbutself(IPI_DUMP_INTERRUPT);
++ preempt_count() |= saved_irq_count;
+}
-+#endif
-Index: linux-2.6.10/arch/ppc64/Kconfig.debug
-===================================================================
---- linux-2.6.10.orig/arch/ppc64/Kconfig.debug 2004-12-25 05:35:27.000000000 +0800
-+++ linux-2.6.10/arch/ppc64/Kconfig.debug 2005-04-05 16:47:53.922208296 +0800
-@@ -2,6 +2,64 @@
-
- source "lib/Kconfig.debug"
-
-+config KERNTYPES
-+ bool
-+ depends on CRASH_DUMP
-+ default y
+
-+config CRASH_DUMP
-+ tristate "Crash dump support"
-+ default n
-+ ---help---
-+ Say Y here to enable saving an image of system memory when a panic
-+ or other error occurs. Dumps can also be forced with the SysRq+d
-+ key if MAGIC_SYSRQ is enabled.
++/*
++ * Name: __dump_configure_header()
++ * Func: Configure the dump header with all proper values.
++ */
++int
++__dump_configure_header(const struct pt_regs *regs)
++{
++ return (0);
++}
+
-+config CRASH_DUMP_BLOCKDEV
-+ tristate "Crash dump block device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps directly to a disk device.
-+
-+config CRASH_DUMP_NETDEV
-+ tristate "Crash dump network device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps over a network device.
+
-+config CRASH_DUMP_MEMDEV
-+ bool "Crash dump staged memory driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow intermediate saving crash dumps in spare
-+ memory pages which would then be written out to disk
-+ later. Need 'kexec' support for this to work.
-+ **** Not supported at present ****
++#define dim(x) (sizeof(x)/sizeof(*(x)))
+
-+config CRASH_DUMP_SOFTBOOT
-+ bool "Save crash dump across a soft reboot"
-+ help
-+ Say Y to allow a crash dump to be preserved in memory
-+ pages across a soft reboot and written out to disk
-+ thereafter. For this to work, CRASH_DUMP must be
-+ configured as part of the kernel (not as a module).
-+ Need 'kexec' support to use this option.
-+ **** Not supported at present ****
++/*
++ * Name: __dump_irq_enable
++ * Func: Reset system so interrupts are enabled.
++ * This is used for dump methods that require interrupts
++ * Eventually, all methods will have interrupts disabled
++ * and this code can be removed.
++ *
++ * Change irq affinities
++ * Re-enable interrupts
++ */
++int
++__dump_irq_enable(void)
++{
++ set_irq_affinity();
++ irq_bh_save();
++ ia64_srlz_d();
++ /*
++ * reduce the task priority level
++ * to get disk interrupts
++ */
++ ia64_setreg(_IA64_REG_CR_TPR, 0);
++ ia64_srlz_d();
++ local_irq_enable();
++ return 0;
++}
+
-+config CRASH_DUMP_COMPRESS_RLE
-+ tristate "Crash dump RLE compression"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Run Length Encoding compression.
++/*
++ * Name: __dump_irq_restore
++ * Func: Resume the system state in an architecture-specific way.
+
-+config CRASH_DUMP_COMPRESS_GZIP
-+ tristate "Crash dump GZIP compression"
-+ select ZLIB_INFLATE
-+ select ZLIB_DEFLATE
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Gnu Zip compression.
++ */
++void
++__dump_irq_restore(void)
++{
++ local_irq_disable();
++ reset_irq_affinity();
++ irq_bh_restore();
++}
+
- config DEBUG_STACKOVERFLOW
- bool "Check for stack overflows"
- depends on DEBUG_KERNEL
-Index: linux-2.6.10/arch/ppc64/kernel/traps.c
-===================================================================
---- linux-2.6.10.orig/arch/ppc64/kernel/traps.c 2004-12-25 05:34:47.000000000 +0800
-+++ linux-2.6.10/arch/ppc64/kernel/traps.c 2005-04-05 16:47:53.923208144 +0800
-@@ -29,6 +29,7 @@
- #include <linux/interrupt.h>
- #include <linux/init.h>
- #include <linux/module.h>
-+#include <linux/dump.h>
-
- #include <asm/pgtable.h>
- #include <asm/uaccess.h>
-@@ -116,6 +117,7 @@
- if (nl)
- printk("\n");
- show_regs(regs);
-+ dump((char *)str, regs);
- bust_spinlocks(0);
- spin_unlock_irq(&die_lock);
-
-Index: linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c
-===================================================================
---- linux-2.6.10.orig/arch/ppc64/kernel/ppc_ksyms.c 2004-12-25 05:34:26.000000000 +0800
-+++ linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c 2005-04-05 16:47:53.925207840 +0800
-@@ -159,6 +159,17 @@
- EXPORT_SYMBOL(get_wchan);
- EXPORT_SYMBOL(console_drivers);
-
-+#ifdef CONFIG_CRASH_DUMP_MODULE
-+extern int dump_page_is_ram(unsigned long);
-+EXPORT_SYMBOL(dump_page_is_ram);
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(irq_affinity);
-+extern void stop_this_cpu(void *);
-+EXPORT_SYMBOL(stop_this_cpu);
-+EXPORT_SYMBOL(dump_send_ipi);
-+#endif
-+#endif
++/*
++ * Name: __dump_page_valid()
++ * Func: Check if page is valid to dump.
++ */
++int
++__dump_page_valid(unsigned long index)
++{
++ if (!pfn_valid(index))
++ {
++ return 0;
++ }
++ return 1;
++}
+
- EXPORT_SYMBOL(tb_ticks_per_usec);
- EXPORT_SYMBOL(paca);
- EXPORT_SYMBOL(cur_cpu_spec);
-Index: linux-2.6.10/arch/ppc64/kernel/lmb.c
-===================================================================
---- linux-2.6.10.orig/arch/ppc64/kernel/lmb.c 2004-12-25 05:34:58.000000000 +0800
-+++ linux-2.6.10/arch/ppc64/kernel/lmb.c 2005-04-05 16:47:53.924207992 +0800
-@@ -344,3 +344,31 @@
-
- return pa;
- }
++/*
++ * Name: __dump_init()
++ * Func: Initialize the dumping routine process. This is in case
++ * it's necessary in the future.
++ */
++void
++__dump_init(uint64_t local_memory_start)
++{
++ return;
++}
++
++/*
++ * Name: __dump_open()
++ * Func: Open the dump device (architecture specific). This is in
++ * case it's necessary in the future.
++ */
++void
++__dump_open(void)
++{
++ alloc_dha_stack();
++ return;
++}
+
+
+/*
-+ * This is the copy of page_is_ram (mm/init.c). The difference is
-+ * it identifies all memory holes.
++ * Name: __dump_cleanup()
++ * Func: Free any architecture specific data structures. This is called
++ * when the dump module is being removed.
+ */
-+int dump_page_is_ram(unsigned long pfn)
++void
++__dump_cleanup(void)
+{
-+ int i;
-+ unsigned long paddr = (pfn << PAGE_SHIFT);
++ free_dha_stack();
+
-+ for (i=0; i < lmb.memory.cnt ;i++) {
-+ unsigned long base;
++ return;
++}
+
-+#ifdef CONFIG_MSCHUNKS
-+ base = lmb.memory.region[i].physbase;
-+#else
-+ base = lmb.memory.region[i].base;
-+#endif
-+ if ((paddr >= base) &&
-+ (paddr < (base + lmb.memory.region[i].size))) {
-+ return 1;
-+ }
++
++
++int __dump_memcpy_mc_expected = 0; /* Doesn't help yet */
++
++/*
++ * An ia64 version of memcpy() that tries to avoid machine checks.
++ *
++ * NB:
++ * By itself __dump_memcpy_mc_expected() isn't providing any
++ * protection against Machine Checks. We are looking into the
++ * possibility of adding code to the arch/ia64/kernel/mca.c function
++ * ia64_mca_ucmc_handler() to restore state so that an IA64_MCA_CORRECTED
++ * can be returned to the firmware. Currently it always returns
++ * IA64_MCA_COLD_BOOT and reboots the machine.
++ */
++/*
++void * __dump_memcpy(void * dest, const void *src, size_t count)
++{
++ void *vp;
++
++ if (__dump_memcpy_mc_expected) {
++ ia64_pal_mc_expected((u64) 1, 0);
+ }
+
-+ return 0;
++ vp = memcpy(dest, src, count);
++
++ if (__dump_memcpy_mc_expected) {
++ ia64_pal_mc_expected((u64) 0, 0);
++ }
++ return(vp);
+}
++*/
++/*
++ * Name: manual_handle_crashdump()
++ * Func: Interface for the lkcd dump command. Calls dump_execute()
++ */
++int
++manual_handle_crashdump(void) {
+
-Index: linux-2.6.10/arch/ppc64/kernel/xics.c
-===================================================================
---- linux-2.6.10.orig/arch/ppc64/kernel/xics.c 2004-12-25 05:34:58.000000000 +0800
-+++ linux-2.6.10/arch/ppc64/kernel/xics.c 2005-04-05 16:47:53.925207840 +0800
-@@ -421,7 +421,8 @@
- smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
- }
- #endif
--#ifdef CONFIG_DEBUGGER
-+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
-+ || defined(CONFIG_CRASH_DUMP_MODULE)
- if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
- &xics_ipi_message[cpu].value)) {
- mb();
-Index: linux-2.6.10/arch/ppc64/kernel/smp.c
-===================================================================
---- linux-2.6.10.orig/arch/ppc64/kernel/smp.c 2004-12-25 05:35:23.000000000 +0800
-+++ linux-2.6.10/arch/ppc64/kernel/smp.c 2005-04-05 16:47:53.926207688 +0800
-@@ -30,6 +30,7 @@
- #include <linux/spinlock.h>
- #include <linux/cache.h>
- #include <linux/err.h>
-+#include <linux/dump.h>
- #include <linux/sysdev.h>
- #include <linux/cpu.h>
-
-@@ -71,6 +72,7 @@
- struct smp_ops_t *smp_ops;
-
- static volatile unsigned int cpu_callin_map[NR_CPUS];
-+static int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
-
- extern unsigned char stab_array[];
-
-@@ -177,9 +179,16 @@
- /* spare */
- break;
- #endif
--#ifdef CONFIG_DEBUGGER
-+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
-+ || defined(CONFIG_CRASH_DUMP_MODULE)
- case PPC_MSG_DEBUGGER_BREAK:
-- debugger_ipi(regs);
-+ if (dump_ipi_function_ptr) {
-+ dump_ipi_function_ptr(regs);
-+ }
-+#ifdef CONFIG_DEBUGGER
-+ else
-+ debugger_ipi(regs);
-+#endif
- break;
- #endif
- default:
-@@ -201,7 +210,16 @@
- }
- #endif
-
--static void stop_this_cpu(void *dummy)
-+void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *))
-+{
-+ dump_ipi_function_ptr = dump_ipi_callback;
-+ if (dump_ipi_callback) {
-+ mb();
-+ smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
-+ }
++ struct pt_regs regs;
++
++ get_current_regs(&regs);
++ dump_execute("manual", &regs);
++ return 0;
+}
+
-+void stop_this_cpu(void *dummy)
- {
- local_irq_disable();
- while (1)
-Index: linux-2.6.10/arch/x86_64/Kconfig.debug
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/Kconfig.debug 2004-12-25 05:34:01.000000000 +0800
-+++ linux-2.6.10/arch/x86_64/Kconfig.debug 2005-04-05 16:47:53.909210272 +0800
-@@ -2,6 +2,66 @@
-
- source "lib/Kconfig.debug"
-
-+config CRASH_DUMP
-+ tristate "Crash dump support (EXPERIMENTAL)"
-+ depends on EXPERIMENTAL
-+ default n
-+ ---help---
-+ Say Y here to enable saving an image of system memory when a panic
-+ or other error occurs. Dumps can also be forced with the SysRq+d
-+ key if MAGIC_SYSRQ is enabled.
++/*
++ * Name: __dump_clean_irq_state()
++ * Func: Clean up from the previous IRQ handling state, such as an oops
++ * from an interrupt handler or bottom half.
++ */
++void
++__dump_clean_irq_state(void)
++{
++ unsigned long saved_tpr;
++ unsigned long TPR_MASK = 0xFFFFFFFFFFFEFF0F;
++
++
++ /* Get the processors task priority register */
++ saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
++ /* clear the mmi and mic bits of the TPR to unmask interrupts */
++ saved_tpr = saved_tpr & TPR_MASK;
++ ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
++ ia64_srlz_d();
+
-+config KERNTYPES
-+ bool
-+ depends on CRASH_DUMP
-+ default y
++ /* Tell the processor we're done with the interrupt
++ * that got us here.
++ */
++
++ ia64_eoi();
+
-+config CRASH_DUMP_BLOCKDEV
-+ tristate "Crash dump block device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps directly to a disk device.
++ /* local implementation of irq_exit(); */
++ preempt_count() -= IRQ_EXIT_OFFSET;
++ preempt_enable_no_resched();
+
-+config CRASH_DUMP_NETDEV
-+ tristate "Crash dump network device driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving crash dumps over a network device.
++ return;
++}
+
-+config CRASH_DUMP_MEMDEV
-+ bool "Crash dump staged memory driver"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow intermediate saving crash dumps in spare
-+ memory pages which would then be written out to disk
-+ later.
+Index: linux-2.6.10/drivers/dump/dump_setup.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_setup.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_setup.c 2005-04-07 18:13:56.914751488 +0800
+@@ -0,0 +1,923 @@
++/*
++ * Standard kernel function entry points for Linux crash dumps.
++ *
++ * Created by: Matt Robinson (yakker@sourceforge.net)
++ * Contributions from SGI, IBM, HP, MCL, and others.
++ *
++ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
++ * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
++ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
-+config CRASH_DUMP_SOFTBOOT
-+ bool "Save crash dump across a soft reboot"
-+ depends on CRASH_DUMP_MEMDEV
-+ help
-+ Say Y to allow a crash dump to be preserved in memory
-+ lkcd-kernpages across a soft reboot and written out to disk
-+ thereafter. For this to work, CRASH_DUMP must be
-+ configured as part of the kernel (not as a module).
++/*
++ * -----------------------------------------------------------------------
++ *
++ * DUMP HISTORY
++ *
++ * This dump code goes back to SGI's first attempts at dumping system
++ * memory on SGI systems running IRIX. A few developers at SGI needed
++ * a way to take this system dump and analyze it, and created 'icrash',
++ * or IRIX Crash. The mechanism (the dumps and 'icrash') were used
++ * by support people to generate crash reports when a system failure
++ * occurred. This was vital for large system configurations that
++ * couldn't apply patch after patch after fix just to hope that the
++ * problems would go away. So the system memory, along with the crash
++ * dump analyzer, allowed support people to quickly figure out what the
++ * problem was on the system with the crash dump.
++ *
++ * In comes Linux. SGI started moving towards the open source community,
++ * and upon doing so, SGI wanted to take its support utilities into Linux
++ * with the hopes that they would end up in the kernel and user space to
++ * be used by SGI's customers buying SGI Linux systems. One of the first
++ * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
++ * Dumps. LKCD comprises of a patch to the kernel to enable system
++ * dumping, along with 'lcrash', or Linux Crash, to analyze the system
++ * memory dump. A few additional system scripts and kernel modifications
++ * are also included to make the dump mechanism and dump data easier to
++ * process and use.
++ *
++ * As soon as LKCD was released into the open source community, a number
++ * of larger companies started to take advantage of it. Today, there are
++ * many community members that contribute to LKCD, and it continues to
++ * flourish and grow as an open source project.
++ */
+
-+config CRASH_DUMP_COMPRESS_RLE
-+ tristate "Crash dump RLE compression"
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Run Length Encoding compression.
++/*
++ * DUMP TUNABLES (read/write with ioctl, readonly with /proc)
++ *
++ * This is the list of system tunables (via /proc) that are available
++ * for Linux systems. All the read, write, etc., functions are listed
++ * here. Currently, there are a few different tunables for dumps:
++ *
++ * dump_device (used to be dumpdev):
++ * The device for dumping the memory pages out to. This
++ * may be set to the primary swap partition for disruptive dumps,
++ * and must be an unused partition for non-disruptive dumps.
++ * Todo: In the case of network dumps, this may be interpreted
++ * as the IP address of the netdump server to connect to.
++ *
++ * dump_compress (used to be dump_compress_pages):
++ * This is the flag which indicates which compression mechanism
++ * to use. This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
++ * This is the current set of values:
++ *
++ * 0: DUMP_COMPRESS_NONE -- Don't compress any pages.
++ * 1: DUMP_COMPRESS_RLE -- This uses RLE compression.
++ * 2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
++ *
++ * dump_level:
++ * The amount of effort the dump module should make to save
++ * information for post crash analysis. This value is now
++ * a BITMASK value, not an index:
++ *
++ * 0: Do nothing, no dumping. (DUMP_LEVEL_NONE)
++ *
++ * 1: Print out the dump information to the dump header, and
++ * write it out to the dump_device. (DUMP_LEVEL_HEADER)
++ *
++ * 2: Write out the dump header and all kernel memory pages.
++ * (DUMP_LEVEL_KERN)
++ *
++ * 4: Write out the dump header and all kernel and user
++ * memory pages. (DUMP_LEVEL_USED)
++ *
++ * 8: Write out the dump header and all conventional/cached
++ * memory (RAM) pages in the system (kernel, user, free).
++ * (DUMP_LEVEL_ALL_RAM)
++ *
++ * 16: Write out everything, including non-conventional memory
++ * like firmware, proms, I/O registers, uncached memory.
++ * (DUMP_LEVEL_ALL)
++ *
++ * The dump_level will default to 1.
++ *
++ * dump_flags:
++ * These are the flags to use when talking about dumps. There
++ * are lots of possibilities. This is a BITMASK value, not an index.
++ *
++ * -----------------------------------------------------------------------
++ */
+
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/fs.h>
++#include <linux/dump.h>
++#include <linux/ioctl32.h>
++#include <linux/syscalls.h>
++#include "dump_methods.h"
++#include <linux/proc_fs.h>
++#include <linux/module.h>
++#include <linux/utsname.h>
++#include <linux/highmem.h>
++#include <linux/miscdevice.h>
++#include <linux/sysrq.h>
++#include <linux/sysctl.h>
++#include <linux/nmi.h>
++#include <linux/init.h>
++#include <asm/hardirq.h>
++#include <asm/uaccess.h>
+
-+config CRASH_DUMP_COMPRESS_GZIP
-+ tristate "Crash dump GZIP compression"
-+ select ZLIB_INFLATE
-+ select ZLIB_DEFLATE
-+ depends on CRASH_DUMP
-+ help
-+ Say Y to allow saving dumps with Gnu Zip compression.
+
++/*
++ * -----------------------------------------------------------------------
++ * V A R I A B L E S
++ * -----------------------------------------------------------------------
++ */
+
++/* Dump tunables */
++struct dump_config dump_config = {
++ .level = 0,
++ .flags = 0,
++ .dump_device = 0,
++ .dump_addr = 0,
++ .dumper = NULL
++};
++#ifdef CONFIG_ARM
++static _dump_regs_t all_regs;
++#endif
+
- # !SMP for now because the context switch early causes GPF in segment reloading
- # and the GS base checking does the wrong thing then, causing a hang.
- config CHECKING
-Index: linux-2.6.10/arch/x86_64/mm/init.c
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/mm/init.c 2005-04-05 16:29:30.040023896 +0800
-+++ linux-2.6.10/arch/x86_64/mm/init.c 2005-04-05 16:47:53.916209208 +0800
-@@ -378,7 +378,7 @@
- __flush_tlb_all();
- }
-
--static inline int page_is_ram (unsigned long pagenr)
-+inline int page_is_ram (unsigned long pagenr)
- {
- int i;
-
-Index: linux-2.6.10/arch/x86_64/kernel/traps.c
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/kernel/traps.c 2004-12-25 05:33:49.000000000 +0800
-+++ linux-2.6.10/arch/x86_64/kernel/traps.c 2005-04-05 16:47:53.915209360 +0800
-@@ -27,6 +27,7 @@
- #include <linux/spinlock.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/dump.h>
- #include <linux/moduleparam.h>
-
- #include <asm/system.h>
-@@ -369,6 +370,7 @@
- printk("\n");
- notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
- show_registers(regs);
-+ dump((char *)str, regs);
- /* Executive summary in case the oops scrolled away */
- printk(KERN_ALERT "RIP ");
- printk_address(regs->rip);
-Index: linux-2.6.10/arch/x86_64/kernel/setup.c
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/kernel/setup.c 2004-12-25 05:33:50.000000000 +0800
-+++ linux-2.6.10/arch/x86_64/kernel/setup.c 2005-04-05 16:47:53.911209968 +0800
-@@ -221,6 +221,8 @@
- }
- }
-
-+unsigned long crashdump_addr = 0xdeadbeef;
++/* Global variables used in dump.h */
++/* degree of system freeze when dumping */
++enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;
+
- static __init void parse_cmdline_early (char ** cmdline_p)
- {
- char c = ' ', *to = command_line, *from = COMMAND_LINE;
-@@ -311,6 +313,9 @@
-
- if (!memcmp(from,"oops=panic", 10))
- panic_on_oops = 1;
-+
-+ if (c == ' ' && !memcmp(from, "crashdump=", 10))
-+ crashdump_addr = memparse(from+10, &from);
-
- next_char:
- c = *(from++);
-@@ -441,6 +446,10 @@
- reserve_bootmem_generic(addr, PAGE_SIZE);
- }
-
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+extern void crashdump_reserve(void);
-+#endif
++/* Other global fields */
++extern struct __dump_header dump_header;
++struct dump_dev *dump_dev = NULL; /* Active dump device */
++static int dump_compress = 0;
+
- void __init setup_arch(char **cmdline_p)
- {
- unsigned long low_mem_size;
-@@ -550,6 +559,9 @@
- }
- #endif
- paging_init();
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+ crashdump_reserve(); /* Preserve crash dump state from prev boot */
++static u32 dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
++ unsigned long loc);
++struct __dump_compress dump_none_compression = {
++ .compress_type = DUMP_COMPRESS_NONE,
++ .compress_func = dump_compress_none,
++ .compress_name = "none",
++};
++
++/* our device operations and functions */
++static int dump_ioctl(struct inode *i, struct file *f,
++ unsigned int cmd, unsigned long arg);
++
++#ifdef CONFIG_COMPAT
++static int dw_long(unsigned int, unsigned int, unsigned long, struct file*);
+#endif
-
- check_ioapic();
- #ifdef CONFIG_ACPI_BOOT
-Index: linux-2.6.10/arch/x86_64/kernel/smp.c
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/kernel/smp.c 2004-12-25 05:35:50.000000000 +0800
-+++ linux-2.6.10/arch/x86_64/kernel/smp.c 2005-04-05 16:47:53.915209360 +0800
-@@ -20,6 +20,7 @@
- #include <linux/kernel_stat.h>
- #include <linux/mc146818rtc.h>
- #include <linux/interrupt.h>
-+#include <linux/dump.h>
-
- #include <asm/mtrr.h>
- #include <asm/pgalloc.h>
-@@ -151,6 +152,13 @@
- if (!mm)
- BUG();
-
-+ if (vector == DUMP_VECTOR) {
-+ /*
-+ * Setup DUMP IPI to be delivered as an NMI
-+ */
-+ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
-+ }
+
- /*
- * I'm not happy about this global shared spinlock in the
- * MM hot path, but we'll see how contended it is.
-@@ -253,6 +261,13 @@
- send_IPI_allbutself(KDB_VECTOR);
- }
-
++static struct file_operations dump_fops = {
++ .owner = THIS_MODULE,
++ .ioctl = dump_ioctl,
++};
+
-+/* void dump_send_ipi(int (*dump_ipi_handler)(struct pt_regs *)); */
-+void dump_send_ipi(void)
-+{
-+ send_IPI_allbutself(DUMP_VECTOR);
-+}
++static struct miscdevice dump_miscdev = {
++ .minor = CRASH_DUMP_MINOR,
++ .name = "dump",
++ .fops = &dump_fops,
++};
++MODULE_ALIAS_MISCDEV(CRASH_DUMP_MINOR);
+
- /*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
-@@ -340,6 +355,18 @@
- return 0;
- }
-
-+void stop_this_cpu(void* dummy)
-+{
-+ /*
-+ * Remove this CPU:
-+ */
-+ cpu_clear(smp_processor_id(), cpu_online_map);
-+ local_irq_disable();
-+ disable_local_APIC();
-+ for (;;)
-+ asm("hlt");
-+}
++/* static variables */
++static int dump_okay = 0; /* can we dump out to disk? */
++static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
+
- void smp_stop_cpu(void)
- {
- /*
-Index: linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/kernel/x8664_ksyms.c 2004-12-25 05:34:01.000000000 +0800
-+++ linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c 2005-04-05 16:47:53.914209512 +0800
-@@ -32,6 +32,7 @@
- #include <asm/unistd.h>
- #include <asm/delay.h>
- #include <asm/tlbflush.h>
-+#include <asm/e820.h>
- #include <asm/kdebug.h>
-
- extern spinlock_t rtc_lock;
-@@ -216,6 +217,20 @@
- extern unsigned long __supported_pte_mask;
- EXPORT_SYMBOL(__supported_pte_mask);
-
-+#ifdef CONFIG_CRASH_DUMP_MODULE
-+#ifdef CONFIG_SMP
-+extern irq_desc_t irq_desc[NR_IRQS];
-+extern cpumask_t irq_affinity[NR_IRQS];
-+extern void stop_this_cpu(void *);
-+EXPORT_SYMBOL(irq_desc);
-+EXPORT_SYMBOL(irq_affinity);
-+EXPORT_SYMBOL(dump_send_ipi);
-+EXPORT_SYMBOL(stop_this_cpu);
-+#endif
-+extern int page_is_ram(unsigned long);
-+EXPORT_SYMBOL(page_is_ram);
-+#endif
++/* used for dump compressors */
++static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
+
- #ifdef CONFIG_SMP
- EXPORT_SYMBOL(flush_tlb_page);
- EXPORT_SYMBOL_GPL(flush_tlb_all);
-Index: linux-2.6.10/arch/x86_64/kernel/pci-gart.c
-===================================================================
---- linux-2.6.10.orig/arch/x86_64/kernel/pci-gart.c 2004-12-25 05:34:32.000000000 +0800
-+++ linux-2.6.10/arch/x86_64/kernel/pci-gart.c 2005-04-05 16:47:53.913209664 +0800
-@@ -34,7 +34,7 @@
- dma_addr_t bad_dma_address;
-
- unsigned long iommu_bus_base; /* GART remapping area (physical) */
--static unsigned long iommu_size; /* size of remapping area bytes */
-+unsigned long iommu_size; /* size of remapping area bytes */
- static unsigned long iommu_pages; /* .. and in pages */
-
- u32 *iommu_gatt_base; /* Remapping table */
-Index: linux-2.6.10/init/version.c
-===================================================================
---- linux-2.6.10.orig/init/version.c 2004-12-25 05:34:45.000000000 +0800
-+++ linux-2.6.10/init/version.c 2005-04-05 16:47:53.896212248 +0800
-@@ -11,6 +11,7 @@
- #include <linux/uts.h>
- #include <linux/utsname.h>
- #include <linux/version.h>
-+#include <linux/stringify.h>
-
- #define version(a) Version_ ## a
- #define version_string(a) version(a)
-@@ -31,3 +32,6 @@
- const char *linux_banner =
- "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
- LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
++/* list of registered dump targets */
++static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
++
++/* lkcd info structure -- this is used by lcrash for basic system data */
++struct __lkcdinfo lkcdinfo = {
++ .ptrsz = (sizeof(void *) * 8),
++#if defined(__LITTLE_ENDIAN)
++ .byte_order = __LITTLE_ENDIAN,
++#else
++ .byte_order = __BIG_ENDIAN,
++#endif
++ .page_shift = PAGE_SHIFT,
++ .page_size = PAGE_SIZE,
++ .page_mask = PAGE_MASK,
++ .page_offset = PAGE_OFFSET,
++};
+
-+const char *LINUX_COMPILE_VERSION_ID = __stringify(LINUX_COMPILE_VERSION_ID);
-+LINUX_COMPILE_VERSION_ID_TYPE;
-Index: linux-2.6.10/init/kerntypes.c
-===================================================================
---- linux-2.6.10.orig/init/kerntypes.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/init/kerntypes.c 2005-04-05 16:47:53.895212400 +0800
-@@ -0,0 +1,40 @@
+/*
-+ * kerntypes.c
-+ *
-+ * Copyright (C) 2000 Tom Morano (tjm@sgi.com) and
-+ * Matt D. Robinson (yakker@alacritech.com)
-+ *
-+ * Dummy module that includes headers for all kernel types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under version 2 of the GNU GPL.
++ * -----------------------------------------------------------------------
++ * / P R O C T U N A B L E F U N C T I O N S
++ * -----------------------------------------------------------------------
+ */
+
-+#include <linux/compile.h>
-+#include <linux/module.h>
-+#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <linux/config.h>
-+#include <linux/utsname.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/dump.h>
++static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
++ void __user *buffer, size_t *lenp, loff_t *ppos);
+
-+#include <asm/kerntypes.h>
++static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
++ void __user *buffer, size_t *lenp, loff_t *ppos);
++/*
++ * sysctl-tuning infrastructure.
++ */
++static ctl_table dump_table[] = {
++ { .ctl_name = CTL_DUMP_LEVEL,
++ .procname = DUMP_LEVEL_NAME,
++ .data = &dump_config.level,
++ .maxlen = sizeof(int),
++ .mode = 0444,
++ .proc_handler = proc_doulonghex, },
+
-+#ifdef LINUX_COMPILE_VERSION_ID_TYPE
-+/* Define version type for version validation of dump and kerntypes */
-+LINUX_COMPILE_VERSION_ID_TYPE;
-+#endif
-+#if defined(CONFIG_SMP) && defined(CONFIG_CRASH_DUMP)
-+extern struct runqueue runqueues;
-+struct runqueue rn;
++ { .ctl_name = CTL_DUMP_FLAGS,
++ .procname = DUMP_FLAGS_NAME,
++ .data = &dump_config.flags,
++ .maxlen = sizeof(int),
++ .mode = 0444,
++ .proc_handler = proc_doulonghex, },
++
++ { .ctl_name = CTL_DUMP_COMPRESS,
++ .procname = DUMP_COMPRESS_NAME,
++ .data = &dump_compress, /* FIXME */
++ .maxlen = sizeof(int),
++ .mode = 0444,
++ .proc_handler = proc_dointvec, },
++
++ { .ctl_name = CTL_DUMP_DEVICE,
++ .procname = DUMP_DEVICE_NAME,
++ .mode = 0444,
++ .data = &dump_config.dump_device, /* FIXME */
++ .maxlen = sizeof(int),
++ .proc_handler = proc_dump_device },
++
++#ifdef CONFIG_CRASH_DUMP_MEMDEV
++ { .ctl_name = CTL_DUMP_ADDR,
++ .procname = DUMP_ADDR_NAME,
++ .mode = 0444,
++ .data = &dump_config.dump_addr,
++ .maxlen = sizeof(unsigned long),
++ .proc_handler = proc_doulonghex },
+#endif
+
-+struct new_utsname *p;
-+void
-+kerntypes_dummy(void)
-+{
-+}
-Index: linux-2.6.10/init/main.c
-===================================================================
---- linux-2.6.10.orig/init/main.c 2005-04-05 16:29:30.028025720 +0800
-+++ linux-2.6.10/init/main.c 2005-04-05 16:47:53.897212096 +0800
-@@ -109,6 +109,16 @@
- EXPORT_SYMBOL(system_state);
-
- /*
-+ * The kernel_magic value represents the address of _end, which allows
-+ * namelist tools to "match" each other respectively. That way a tool
-+ * that looks at /dev/mem can verify that it is using the right System.map
-+ * file -- if kernel_magic doesn't equal the namelist value of _end,
-+ * something's wrong.
-+ */
-+extern unsigned long _end;
-+unsigned long *kernel_magic = &_end;
++ { 0, }
++};
+
-+/*
- * Boot command-line arguments
- */
- #define MAX_INIT_ARGS 32
-Index: linux-2.6.10/init/Makefile
-===================================================================
---- linux-2.6.10.orig/init/Makefile 2004-12-25 05:34:32.000000000 +0800
-+++ linux-2.6.10/init/Makefile 2005-04-05 16:47:53.897212096 +0800
-@@ -9,12 +9,20 @@
- mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
- mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
-
-+extra-$(CONFIG_KERNTYPES) += kerntypes.o
-+#For IA64, compile kerntypes in dwarf-2 format.
-+ifeq ($(CONFIG_IA64),y)
-+CFLAGS_kerntypes.o := -gdwarf-2
-+else
-+CFLAGS_kerntypes.o := -gstabs
-+endif
++static ctl_table dump_root[] = {
++ { .ctl_name = KERN_DUMP,
++ .procname = "dump",
++ .mode = 0555,
++ .child = dump_table },
++ { 0, }
++};
+
- # files to be removed upon make clean
- clean-files := ../include/linux/compile.h
-
- # dependencies on generated files need to be listed explicitly
-
--$(obj)/version.o: include/linux/compile.h
-+$(obj)/version.o $(obj)/kerntypes.o: include/linux/compile.h
-
- # compile.h changes depending on hostname, generation number, etc,
- # so we regenerate it always.
-@@ -24,3 +32,4 @@
- include/linux/compile.h: FORCE
- @echo ' CHK $@'
- @$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CC) $(CFLAGS)"
++static ctl_table kernel_root[] = {
++ { .ctl_name = CTL_KERN,
++ .procname = "kernel",
++ .mode = 0555,
++ .child = dump_root, },
++ { 0, }
++};
++
++static struct ctl_table_header *sysctl_header;
+
-Index: linux-2.6.10/include/asm-um/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-um/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-um/kerntypes.h 2005-04-05 16:47:53.864217112 +0800
-@@ -0,0 +1,21 @@
+/*
-+ * asm-um/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
++ * -----------------------------------------------------------------------
++ * C O M P R E S S I O N F U N C T I O N S
++ * -----------------------------------------------------------------------
+ */
+
-+/* Usermode-Linux-specific header files */
-+#ifndef _UM_KERNTYPES_H
-+#define _UM_KERNTYPES_H
++/*
++ * Name: dump_compress_none()
++ * Func: Don't do any compression, period.
++ */
++static u32
++dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
++ unsigned long loc)
++{
++ /* just return the old size */
++ return oldsize;
++}
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
+
-+#endif /* _UM_KERNTYPES_H */
-Index: linux-2.6.10/include/linux/sysctl.h
-===================================================================
---- linux-2.6.10.orig/include/linux/sysctl.h 2005-04-05 16:29:27.969338688 +0800
-+++ linux-2.6.10/include/linux/sysctl.h 2005-04-05 16:47:53.894212552 +0800
-@@ -135,6 +135,7 @@
- KERN_HZ_TIMER=65, /* int: hz timer on or off */
- KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */
- KERN_SETUID_DUMPABLE=67, /* int: behaviour of dumps for setuid core */
-+ KERN_DUMP=68, /* directory: dump parameters */
- };
-
-
-Index: linux-2.6.10/include/linux/sched.h
-===================================================================
---- linux-2.6.10.orig/include/linux/sched.h 2005-04-05 16:47:05.178618448 +0800
-+++ linux-2.6.10/include/linux/sched.h 2005-04-05 16:47:53.891213008 +0800
-@@ -94,6 +94,7 @@
- extern int nr_threads;
- extern int last_pid;
- DECLARE_PER_CPU(unsigned long, process_counts);
-+DECLARE_PER_CPU(struct runqueue, runqueues);
- extern int nr_processes(void);
- extern unsigned long nr_running(void);
- extern unsigned long nr_uninterruptible(void);
-@@ -760,6 +761,110 @@
- void yield(void);
-
- /*
-+ * These are the runqueue data structures:
++/*
++ * Name: dump_execute()
++ * Func: Execute the dumping process. This makes sure all the appropriate
++ * fields are updated correctly, and calls dump_execute_memdump(),
++ * which does the real work.
+ */
++void
++dump_execute(const char *panic_str, const struct pt_regs *regs)
++{
++ int state = -1;
++ unsigned long flags;
+
-+#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
++ /* make sure we can dump */
++ if (!dump_okay) {
++ pr_info("LKCD not yet configured, can't take dump now\n");
++ return;
++ }
+
-+typedef struct runqueue runqueue_t;
++ /* Exclude multiple dumps at the same time,
++ * and disable interrupts, some drivers may re-enable
++ * interrupts in with silence()
++ *
++ * Try and acquire spin lock. If successful, leave preempt
++ * and interrupts disabled. See spin_lock_irqsave in spinlock.h
++ */
++ local_irq_save(flags);
++ if (!spin_trylock(&dump_lock)) {
++ local_irq_restore(flags);
++ pr_info("LKCD dump already in progress\n");
++ return;
++ }
+
-+struct prio_array {
-+ unsigned int nr_active;
-+ unsigned long bitmap[BITMAP_SIZE];
-+ struct list_head queue[MAX_PRIO];
-+};
++ /* What state are interrupts really in? */
++ if (in_interrupt()){
++ if(in_irq())
++ printk(KERN_ALERT "Dumping from interrupt handler!\n");
++ else
++ printk(KERN_ALERT "Dumping from bottom half!\n");
+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ *
-+ * Locking rule: those places that want to lock multiple runqueues
-+ * (such as the load balancing or the thread migration code), lock
-+ * acquire operations must be ordered by ascending &runqueue.
-+ */
-+struct runqueue {
-+ spinlock_t lock;
++ __dump_clean_irq_state();
++ }
+
-+ /*
-+ * nr_running and cpu_load should be in the same cacheline because
-+ * remote CPUs use both these fields when doing load calculation.
-+ */
-+ unsigned long nr_running;
-+#ifdef CONFIG_SMP
-+ unsigned long cpu_load;
-+#endif
-+ unsigned long long nr_switches;
+
-+ /*
-+ * This is part of a global counter where only the total sum
-+ * over all CPUs matters. A task can increase this counter on
-+ * one CPU and if it got migrated afterwards it may decrease
-+ * it on another CPU. Always updated under the runqueue lock:
++ /* Bring system into the strictest level of quiescing for min drift
++ * dump drivers can soften this as required in dev->ops->silence()
+ */
-+ unsigned long nr_uninterruptible;
++ dump_oncpu = smp_processor_id() + 1;
++ dump_silence_level = DUMP_HARD_SPIN_CPUS;
+
-+ unsigned long expired_timestamp;
-+ unsigned long long timestamp_last_tick;
-+ task_t *curr, *idle;
-+ struct mm_struct *prev_mm;
-+ prio_array_t *active, *expired, arrays[2];
-+ int best_expired_prio;
-+ atomic_t nr_iowait;
-+
-+#ifdef CONFIG_SMP
-+ struct sched_domain *sd;
-+
-+ /* For active balancing */
-+ int active_balance;
-+ int push_cpu;
++ state = dump_generic_execute(panic_str, regs);
++
++ dump_oncpu = 0;
++ spin_unlock_irqrestore(&dump_lock, flags);
+
-+ task_t *migration_thread;
-+ struct list_head migration_queue;
-+#endif
++ if (state < 0) {
++ printk("Dump Incomplete or failed!\n");
++ } else {
++ printk("Dump Complete; %d dump pages saved.\n",
++ dump_header.dh_num_dump_pages);
++ }
++}
+
-+#ifdef CONFIG_SCHEDSTATS
-+ /* latency stats */
-+ struct sched_info rq_sched_info;
++/*
++ * Name: dump_register_compression()
++ * Func: Register a dump compression mechanism.
++ */
++void
++dump_register_compression(struct __dump_compress *item)
++{
++ if (item)
++ list_add(&(item->list), &dump_compress_list);
++}
+
-+ /* sys_sched_yield() stats */
-+ unsigned long yld_exp_empty;
-+ unsigned long yld_act_empty;
-+ unsigned long yld_both_empty;
-+ unsigned long yld_cnt;
++/*
++ * Name: dump_unregister_compression()
++ * Func: Remove a dump compression mechanism, and re-assign the dump
++ * compression pointer if necessary.
++ */
++void
++dump_unregister_compression(int compression_type)
++{
++ struct list_head *tmp;
++ struct __dump_compress *dc;
+
-+ /* schedule() stats */
-+ unsigned long sched_noswitch;
-+ unsigned long sched_switch;
-+ unsigned long sched_cnt;
-+ unsigned long sched_goidle;
++ /* let's make sure our list is valid */
++ if (compression_type != DUMP_COMPRESS_NONE) {
++ list_for_each(tmp, &dump_compress_list) {
++ dc = list_entry(tmp, struct __dump_compress, list);
++ if (dc->compress_type == compression_type) {
++ list_del(&(dc->list));
++ break;
++ }
++ }
++ }
++}
+
-+ /* pull_task() stats */
-+ unsigned long pt_gained[MAX_IDLE_TYPES];
-+ unsigned long pt_lost[MAX_IDLE_TYPES];
++/*
++ * Name: dump_compress_init()
++ * Func: Initialize (or re-initialize) compression scheme.
++ */
++static int
++dump_compress_init(int compression_type)
++{
++ struct list_head *tmp;
++ struct __dump_compress *dc;
+
-+ /* active_load_balance() stats */
-+ unsigned long alb_cnt;
-+ unsigned long alb_lost;
-+ unsigned long alb_gained;
-+ unsigned long alb_failed;
++ /* try to remove the compression item */
++ list_for_each(tmp, &dump_compress_list) {
++ dc = list_entry(tmp, struct __dump_compress, list);
++ if (dc->compress_type == compression_type) {
++ dump_config.dumper->compress = dc;
++ dump_compress = compression_type;
++ pr_debug("Dump Compress %s\n", dc->compress_name);
++ return 0;
++ }
++ }
+
-+ /* try_to_wake_up() stats */
-+ unsigned long ttwu_cnt;
-+ unsigned long ttwu_attempts;
-+ unsigned long ttwu_moved;
++ /*
++ * nothing on the list -- return ENODATA to indicate an error
++ *
++ * NB:
++ * EAGAIN: reports "Resource temporarily unavailable" which
++ * isn't very enlightening.
++ */
++ printk("compression_type:%d not found\n", compression_type);
+
-+ /* wake_up_new_task() stats */
-+ unsigned long wunt_cnt;
-+ unsigned long wunt_moved;
++ return -ENODATA;
++}
+
-+ /* sched_migrate_task() stats */
-+ unsigned long smt_cnt;
++static int
++dumper_setup(unsigned long flags, unsigned long devid)
++{
++ int ret = 0;
+
-+ /* sched_balance_exec() stats */
-+ unsigned long sbe_cnt;
++ /* unconfigure old dumper if it exists */
++ dump_okay = 0;
++ if (dump_config.dumper) {
++ pr_debug("Unconfiguring current dumper\n");
++ dump_unconfigure();
++ }
++ /* set up new dumper */
++ if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
++ printk("Configuring softboot based dump \n");
++#ifdef CONFIG_CRASH_DUMP_MEMDEV
++ dump_config.dumper = &dumper_stage1;
++#else
++ printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
++ return -1;
+#endif
-+};
-+
-+/*
- * The default (Linux) execution domain.
- */
- extern struct exec_domain default_exec_domain;
-Index: linux-2.6.10/include/linux/miscdevice.h
-===================================================================
---- linux-2.6.10.orig/include/linux/miscdevice.h 2004-12-25 05:34:58.000000000 +0800
-+++ linux-2.6.10/include/linux/miscdevice.h 2005-04-05 16:47:53.893212704 +0800
-@@ -25,6 +25,7 @@
- #define MICROCODE_MINOR 184
- #define MWAVE_MINOR 219 /* ACP/Mwave Modem */
- #define MPT_MINOR 220
-+#define CRASH_DUMP_MINOR 230 /* LKCD */
- #define MISC_DYNAMIC_MINOR 255
-
- #define TUN_MINOR 200
-Index: linux-2.6.10/include/linux/dump.h
-===================================================================
---- linux-2.6.10.orig/include/linux/dump.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/linux/dump.h 2005-04-05 16:47:53.893212704 +0800
-@@ -0,0 +1,406 @@
-+/*
-+ * Kernel header file for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
-+ * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ *
-+ * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
-+ * Copyright 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
-+ *
-+ * Most of this is the same old stuff from vmdump.h, except now we're
-+ * actually a stand-alone driver plugged into the block layer interface,
-+ * with the exception that we now allow for compression modes externally
-+ * loaded (e.g., someone can come up with their own).
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
-+
-+/* This header file includes all structure definitions for crash dumps. */
-+#ifndef _DUMP_H
-+#define _DUMP_H
++ } else {
++ dump_config.dumper = &dumper_singlestage;
++ }
++ dump_config.dumper->dev = dump_dev;
+
-+#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
++ ret = dump_configure(devid);
++ if (!ret) {
++ dump_okay = 1;
++ pr_debug("%s dumper set up for dev 0x%lx\n",
++ dump_config.dumper->name, devid);
++ dump_config.dump_device = devid;
++ } else {
++ printk("%s dumper set up failed for dev 0x%lx\n",
++ dump_config.dumper->name, devid);
++ dump_config.dumper = NULL;
++ }
++ return ret;
++}
+
-+#include <linux/list.h>
-+#include <linux/notifier.h>
-+#include <linux/dumpdev.h>
-+#include <asm/ioctl.h>
++static int
++dump_target_init(int target)
++{
++ char type[20];
++ struct list_head *tmp;
++ struct dump_dev *dev;
++
++ switch (target) {
++ case DUMP_FLAGS_DISKDUMP:
++ strcpy(type, "blockdev"); break;
++ case DUMP_FLAGS_NETDUMP:
++ strcpy(type, "networkdev"); break;
++ default:
++ return -1;
++ }
+
-+/*
-+ * Predefine default DUMP_PAGE constants, asm header may override.
-+ *
-+ * On ia64 discontinuous memory systems it's possible for the memory
-+ * banks to stop at 2**12 page alignments, the smallest possible page
-+ * size. But the system page size, PAGE_SIZE, is in fact larger.
-+ */
-+#define DUMP_PAGE_SHIFT PAGE_SHIFT
-+#define DUMP_PAGE_MASK PAGE_MASK
-+#define DUMP_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
++ /*
++ * This is a bit stupid, generating strings from flag
++ * and doing strcmp. This is done because 'struct dump_dev'
++	 * has a string 'type_name' rather than an integer 'type'.
++ */
++ list_for_each(tmp, &dump_target_list) {
++ dev = list_entry(tmp, struct dump_dev, list);
++ if (strcmp(type, dev->type_name) == 0) {
++ dump_dev = dev;
++ return 0;
++ }
++ }
++ return -1;
++}
+
+/*
-+ * Dump offset changed from 4Kb to 64Kb to support multiple PAGE_SIZE
-+ * (kernel page size). Assumption goes that 64K is the highest page size
-+ * supported
++ * Name: dump_ioctl()
++ * Func: Allow all dump tunables through a standard ioctl() mechanism.
++ * This is far better than before, where we'd go through /proc,
++ * because now this will work for multiple OS and architectures.
+ */
++static int
++dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
++{
++ /* check capabilities */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
+
-+#define DUMP_HEADER_OFFSET (1ULL << 16)
++ if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
++ /* dump device must be configured first */
++ return -ENODEV;
+
-+#define OLDMINORBITS 8
-+#define OLDMINORMASK ((1U << OLDMINORBITS) -1)
-+
-+/* Making DUMP_PAGE_SIZE = PAGE_SIZE, to support dumping on architectures
-+ * which support page sizes (PAGE_SIZE) greater than 4KB.
-+ * Will it affect ia64 discontinuous memory systems ????
-+ */
-+#define DUMP_PAGE_SIZE PAGE_SIZE
++ /*
++ * This is the main mechanism for controlling get/set data
++ * for various dump device parameters. The real trick here
++ * is setting the dump device (DIOSDUMPDEV). That's what
++ * triggers everything else.
++ */
++ switch (cmd) {
++ case DIOSDUMPDEV: /* set dump_device */
++ pr_debug("Configuring dump device\n");
++ if (!(f->f_flags & O_RDWR))
++ return -EPERM;
+
-+/* thread_info lies at the bottom of stack, (Except IA64). */
-+#define STACK_START_POSITION(tsk) (tsk->thread_info)
-+/*
-+ * Predefined default memcpy() to use when copying memory to the dump buffer.
-+ *
-+ * On ia64 there is a heads up function that can be called to let the prom
-+ * machine check monitor know that the current activity is risky and it should
-+ * ignore the fault (nofault). In this case the ia64 header will redefine this
-+ * macro to __dump_memcpy() and use it's arch specific version.
-+ */
-+#define DUMP_memcpy memcpy
-+#define bzero(a,b) memset(a, 0, b)
++ __dump_open();
++ return dumper_setup(dump_config.flags, arg);
+
-+/* necessary header files */
-+#include <asm/dump.h> /* for architecture-specific header */
++
++ case DIOGDUMPDEV: /* get dump_device */
++ return put_user((long)dump_config.dump_device, (long *)arg);
+
-+/*
-+ * Size of the buffer that's used to hold:
-+ *
-+ * 1. the dump header (padded to fill the complete buffer)
-+ * 2. the possibly compressed page headers and data
-+ *
-+ * = 256k for page size >= 64k
-+ * = 64k for page size < 64k
-+ */
-+#if (PAGE_SHIFT >= 16)
-+#define DUMP_BUFFER_SIZE (256 * 1024) /* size of dump buffer */
-+#else
-+#define DUMP_BUFFER_SIZE (64 * 1024) /* size of dump buffer */
-+#endif
++ case DIOSDUMPLEVEL: /* set dump_level */
++ if (!(f->f_flags & O_RDWR))
++ return -EPERM;
+
-+#define DUMP_HEADER_SIZE DUMP_BUFFER_SIZE
++ /* make sure we have a positive value */
++ if (arg < 0)
++ return -EINVAL;
+
-+/* standard header definitions */
-+#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
-+#define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */
-+#define DUMP_VERSION_NUMBER 0x8 /* dump version number */
-+#define DUMP_PANIC_LEN 0x100 /* dump panic string length */
++ /* Fixme: clean this up */
++ dump_config.level = 0;
++ switch ((int)arg) {
++ case DUMP_LEVEL_ALL:
++ case DUMP_LEVEL_ALL_RAM:
++ dump_config.level |= DUMP_MASK_UNUSED;
++ case DUMP_LEVEL_USED:
++ dump_config.level |= DUMP_MASK_USED;
++ case DUMP_LEVEL_KERN:
++ dump_config.level |= DUMP_MASK_KERN;
++ case DUMP_LEVEL_HEADER:
++ dump_config.level |= DUMP_MASK_HEADER;
++ case DUMP_LEVEL_NONE:
++ break;
++ default:
++ return (-EINVAL);
++ }
++ pr_debug("Dump Level 0x%lx\n", dump_config.level);
++ break;
+
-+/* dump levels - type specific stuff added later -- add as necessary */
-+#define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */
-+#define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */
-+#define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */
-+#define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */
-+#define DUMP_LEVEL_ALL_RAM 0x8 /* dump header, all RAM pages */
-+#define DUMP_LEVEL_ALL 0x10 /* dump all memory RAM and firmware */
++ case DIOGDUMPLEVEL: /* get dump_level */
++ /* fixme: handle conversion */
++ return put_user((long)dump_config.level, (long *)arg);
+
++
++ case DIOSDUMPFLAGS: /* set dump_flags */
++ /* check flags */
++ if (!(f->f_flags & O_RDWR))
++ return -EPERM;
+
-+/* dump compression options -- add as necessary */
-+#define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */
-+#define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */
-+#define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */
++ /* make sure we have a positive value */
++ if (arg < 0)
++ return -EINVAL;
++
++ if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
++ return -EINVAL; /* return proper error */
+
-+/* dump flags - any dump-type specific flags -- add as necessary */
-+#define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */
-+#define DUMP_FLAGS_SOFTBOOT 0x2 /* 2 stage soft-boot based dump */
-+#define DUMP_FLAGS_NONDISRUPT 0X1 /* non-disruptive dumping */
++ dump_config.flags = arg;
++
++ pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
++ break;
++
++ case DIOGDUMPFLAGS: /* get dump_flags */
++ return put_user((long)dump_config.flags, (long *)arg);
+
-+#define DUMP_FLAGS_TARGETMASK 0xf0000000 /* handle special case targets */
-+#define DUMP_FLAGS_DISKDUMP 0x80000000 /* dump to local disk */
-+#define DUMP_FLAGS_NETDUMP 0x40000000 /* dump over the network */
++ case DIOSDUMPCOMPRESS: /* set the dump_compress status */
++ if (!(f->f_flags & O_RDWR))
++ return -EPERM;
+
-+/* dump header flags -- add as necessary */
-+#define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */
-+#define DUMP_DH_RAW 0x1 /* raw page (no compression) */
-+#define DUMP_DH_COMPRESSED 0x2 /* page is compressed */
-+#define DUMP_DH_END 0x4 /* end marker on a full dump */
-+#define DUMP_DH_TRUNCATED 0x8 /* dump is incomplete */
-+#define DUMP_DH_TEST_PATTERN 0x10 /* dump page is a test pattern */
-+#define DUMP_DH_NOT_USED 0x20 /* 1st bit not used in flags */
++ return dump_compress_init((int)arg);
+
-+/* names for various dump parameters in /proc/kernel */
-+#define DUMP_ROOT_NAME "sys/dump"
-+#define DUMP_DEVICE_NAME "device"
-+#define DUMP_COMPRESS_NAME "compress"
-+#define DUMP_LEVEL_NAME "level"
-+#define DUMP_FLAGS_NAME "flags"
-+#define DUMP_ADDR_NAME "addr"
++ case DIOGDUMPCOMPRESS: /* get the dump_compress status */
++ return put_user((long)(dump_config.dumper ?
++ dump_config.dumper->compress->compress_type : 0),
++ (long *)arg);
++ case DIOGDUMPOKAY: /* check if dump is configured */
++ return put_user((long)dump_okay, (long *)arg);
++
++ case DIOSDUMPTAKE: /* Trigger a manual dump */
++ /* Do not proceed if lkcd not yet configured */
++ if(!dump_okay) {
++ printk("LKCD not yet configured. Cannot take manual dump\n");
++ return -ENODEV;
++ }
+
-+#define DUMP_SYSRQ_KEY 'd' /* key to use for MAGIC_SYSRQ key */
++ /* Take the dump */
++ return manual_handle_crashdump();
++
++ default:
++ /*
++ * these are network dump specific ioctls, let the
++ * module handle them.
++ */
++ return dump_dev_ioctl(cmd, arg);
++ }
++ return 0;
++}
+
-+/* CTL_DUMP names: */
-+enum
++/*
++ * Handle special cases for dump_device
++ * changing the dump device requires opening the new device
++ */
++static int
++proc_dump_device(ctl_table *ctl, int write, struct file *f,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
-+ CTL_DUMP_DEVICE=1,
-+ CTL_DUMP_COMPRESS=3,
-+ CTL_DUMP_LEVEL=3,
-+ CTL_DUMP_FLAGS=4,
-+ CTL_DUMP_ADDR=5,
-+ CTL_DUMP_TEST=6,
-+};
++ int *valp = ctl->data;
++ int oval = *valp;
++ int ret = -EPERM;
+
++ /* same permission checks as ioctl */
++ if (capable(CAP_SYS_ADMIN)) {
++ ret = proc_doulonghex(ctl, write, f, buffer, lenp, ppos);
++ if (ret == 0 && write && *valp != oval) {
++ /* need to restore old value to close properly */
++ dump_config.dump_device = (dev_t) oval;
++ __dump_open();
++ ret = dumper_setup(dump_config.flags, (dev_t) *valp);
++ }
++ }
+
-+/* page size for gzip compression -- buffered slightly beyond hardware PAGE_SIZE used by DUMP */
-+#define DUMP_DPC_PAGE_SIZE (DUMP_PAGE_SIZE + 512)
++ return ret;
++}
+
-+/* dump ioctl() control options */
-+#define DIOSDUMPDEV _IOW('p', 0xA0, unsigned int) /* set the dump device */
-+#define DIOGDUMPDEV _IOR('p', 0xA1, unsigned int) /* get the dump device */
-+#define DIOSDUMPLEVEL _IOW('p', 0xA2, unsigned int) /* set the dump level */
-+#define DIOGDUMPLEVEL _IOR('p', 0xA3, unsigned int) /* get the dump level */
-+#define DIOSDUMPFLAGS _IOW('p', 0xA4, unsigned int) /* set the dump flag parameters */
-+#define DIOGDUMPFLAGS _IOR('p', 0xA5, unsigned int) /* get the dump flag parameters */
-+#define DIOSDUMPCOMPRESS _IOW('p', 0xA6, unsigned int) /* set the dump compress level */
-+#define DIOGDUMPCOMPRESS _IOR('p', 0xA7, unsigned int) /* get the dump compress level */
++/* All for the want of a proc_do_xxx routine which prints values in hex */
++/* Write is not implemented correctly, so mode is set to 0444 above. */
++static int
++proc_doulonghex(ctl_table *ctl, int write, struct file *f,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++#define TMPBUFLEN 21
++ unsigned long *i;
++ size_t len, left;
++ char buf[TMPBUFLEN];
+
-+/* these ioctls are used only by netdump module */
-+#define DIOSTARGETIP _IOW('p', 0xA8, unsigned int) /* set the target m/c's ip */
-+#define DIOGTARGETIP _IOR('p', 0xA9, unsigned int) /* get the target m/c's ip */
-+#define DIOSTARGETPORT _IOW('p', 0xAA, unsigned int) /* set the target m/c's port */
-+#define DIOGTARGETPORT _IOR('p', 0xAB, unsigned int) /* get the target m/c's port */
-+#define DIOSSOURCEPORT _IOW('p', 0xAC, unsigned int) /* set the source m/c's port */
-+#define DIOGSOURCEPORT _IOR('p', 0xAD, unsigned int) /* get the source m/c's port */
-+#define DIOSETHADDR _IOW('p', 0xAE, unsigned int) /* set ethernet address */
-+#define DIOGETHADDR _IOR('p', 0xAF, unsigned int) /* get ethernet address */
-+#define DIOGDUMPOKAY _IOR('p', 0xB0, unsigned int) /* check if dump is configured */
-+#define DIOSDUMPTAKE _IOW('p', 0xB1, unsigned int) /* Take a manual dump */
++ if (!ctl->data || !ctl->maxlen || !*lenp || (*ppos && !write)) {
++ *lenp = 0;
++ return 0;
++ }
++
++ i = (unsigned long *) ctl->data;
++ left = *lenp;
++
++ sprintf(buf, "0x%lx\n", (*i));
++ len = strlen(buf);
++ if (len > left)
++ len = left;
++ if(copy_to_user(buffer, buf, len))
++ return -EFAULT;
++
++ left -= len;
++ *lenp -= left;
++ *ppos += *lenp;
++ return 0;
++}
+
+/*
-+ * Structure: __dump_header
-+ * Function: This is the header dumped at the top of every valid crash
-+ * dump.
++ * -----------------------------------------------------------------------
++ * I N I T F U N C T I O N S
++ * -----------------------------------------------------------------------
+ */
-+struct __dump_header {
-+ /* the dump magic number -- unique to verify dump is valid */
-+ u64 dh_magic_number;
+
-+ /* the version number of this dump */
-+ u32 dh_version;
++#ifdef CONFIG_COMPAT
++static int dw_long(unsigned int fd, unsigned int cmd, unsigned long arg,
++ struct file *f)
++{
++ mm_segment_t old_fs = get_fs();
++ int err;
++ unsigned long val;
+
-+ /* the size of this header (in case we can't read it) */
-+ u32 dh_header_size;
++ set_fs (KERNEL_DS);
++ err = sys_ioctl(fd, cmd, (u64)&val);
++ set_fs (old_fs);
++ if (!err && put_user((unsigned int) val, (u32 *)arg))
++ return -EFAULT;
++ return err;
++}
++#endif
+
-+ /* the level of this dump (just a header?) */
-+ u32 dh_dump_level;
++/*
++ * These register and unregister routines are exported for modules
++ * to register their dump drivers (like block, net etc)
++ */
++int
++dump_register_device(struct dump_dev *ddev)
++{
++ struct list_head *tmp;
++ struct dump_dev *dev;
+
-+ /*
-+ * We assume dump_page_size to be 4K in every case.
-+ * Store here the configurable system page size (4K, 8K, 16K, etc.)
-+ */
-+ u32 dh_page_size;
++ list_for_each(tmp, &dump_target_list) {
++ dev = list_entry(tmp, struct dump_dev, list);
++ if (strcmp(ddev->type_name, dev->type_name) == 0) {
++ printk("Target type %s already registered\n",
++ dev->type_name);
++ return -1; /* return proper error */
++ }
++ }
++ list_add(&(ddev->list), &dump_target_list);
++
++ return 0;
++}
+
-+ /* the size of all physical memory */
-+ u64 dh_memory_size;
++void
++dump_unregister_device(struct dump_dev *ddev)
++{
++ list_del(&(ddev->list));
++ if (ddev != dump_dev)
++ return;
+
-+ /* the start of physical memory */
-+ u64 dh_memory_start;
++ dump_okay = 0;
+
-+ /* the end of physical memory */
-+ u64 dh_memory_end;
++ if (dump_config.dumper)
++ dump_unconfigure();
+
-+ /* the number of hardware/physical pages in this dump specifically */
-+ u32 dh_num_dump_pages;
++ dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
++ dump_okay = 0;
++ dump_dev = NULL;
++ dump_config.dumper = NULL;
++}
+
-+ /* the panic string, if available */
-+ char dh_panic_string[DUMP_PANIC_LEN];
++static int panic_event(struct notifier_block *this, unsigned long event,
++ void *ptr)
++{
++#ifdef CONFIG_ARM
++ get_current_general_regs(&all_regs);
++ get_current_cp14_regs(&all_regs);
++ get_current_cp15_regs(&all_regs);
++ dump_execute((const char *)ptr, &all_regs);
++#else
++ struct pt_regs regs;
++
++	get_current_regs(&regs);
++	dump_execute((const char *)ptr, &regs);
++#endif
++ return 0;
++}
+
-+ /* timeval depends on architecture, two long values */
-+ struct {
-+ u64 tv_sec;
-+ u64 tv_usec;
-+ } dh_time; /* the time of the system crash */
++extern struct notifier_block *panic_notifier_list;
++static int panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block panic_block = {
++ .notifier_call = panic_event,
++};
+
-+ /* the NEW utsname (uname) information -- in character form */
-+ /* we do this so we don't have to include utsname.h */
-+ /* plus it helps us be more architecture independent */
-+ /* now maybe one day soon they'll make the [65] a #define! */
-+ char dh_utsname_sysname[65];
-+ char dh_utsname_nodename[65];
-+ char dh_utsname_release[65];
-+ char dh_utsname_version[65];
-+ char dh_utsname_machine[65];
-+ char dh_utsname_domainname[65];
++#ifdef CONFIG_MAGIC_SYSRQ
++/* Sysrq handler */
++static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
++ struct tty_struct *tty) {
++ if(!pt_regs) {
++ struct pt_regs regs;
++		get_current_regs(&regs);
++		dump_execute("sysrq", &regs);
+
-+ /* the address of current task (OLD = void *, NEW = u64) */
-+ u64 dh_current_task;
++ } else {
++ dump_execute("sysrq", pt_regs);
++ }
++}
+
-+ /* what type of compression we're using in this dump (if any) */
-+ u32 dh_dump_compress;
++static struct sysrq_key_op sysrq_crashdump_op = {
++ .handler = sysrq_handle_crashdump,
++ .help_msg = "Dump",
++ .action_msg = "Starting crash dump",
++};
++#endif
+
-+ /* any additional flags */
-+ u32 dh_dump_flags;
++static inline void
++dump_sysrq_register(void)
++{
++#ifdef CONFIG_MAGIC_SYSRQ
++ register_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
++#endif
++}
+
-+ /* any additional flags */
-+ u32 dh_dump_device;
-+} __attribute__((packed));
++static inline void
++dump_sysrq_unregister(void)
++{
++#ifdef CONFIG_MAGIC_SYSRQ
++ unregister_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
++#endif
++}
+
+/*
-+ * Structure: __dump_page
-+ * Function: To act as the header associated to each physical page of
-+ * memory saved in the system crash dump. This allows for
-+ * easy reassembly of each crash dump page. The address bits
-+ * are split to make things easier for 64-bit/32-bit system
-+ * conversions.
-+ *
-+ * dp_byte_offset and dp_page_index are landmarks that are helpful when
-+ * looking at a hex dump of /dev/vmdump,
++ * Name: dump_init()
++ * Func: Initialize the dump process. This will set up any architecture
++ * dependent code. The big key is we need the memory offsets before
++ * the page table is initialized, because the base memory offset
++ * is changed after paging_init() is called.
+ */
-+struct __dump_page {
-+ /* the address of this dump page */
-+ u64 dp_address;
++static int __init
++dump_init(void)
++{
++ struct sysinfo info;
++ int err;
+
-+ /* the size of this dump page */
-+ u32 dp_size;
++ /* try to create our dump device */
++ err = misc_register(&dump_miscdev);
++ if (err) {
++ printk("cannot register dump character device!\n");
++ return err;
++ }
+
-+ /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
-+ u32 dp_flags;
-+} __attribute__((packed));
++ __dump_init((u64)PAGE_OFFSET);
+
-+/*
-+ * Structure: __lkcdinfo
-+ * Function: This structure contains information needed for the lkcdutils
-+ * package (particularly lcrash) to determine what information is
-+ * associated to this kernel, specifically.
-+ */
-+struct __lkcdinfo {
-+ int arch;
-+ int ptrsz;
-+ int byte_order;
-+ int linux_release;
-+ int page_shift;
-+ int page_size;
-+ u64 page_mask;
-+ u64 page_offset;
-+ int stack_offset;
-+};
++#ifdef CONFIG_COMPAT
++ err = register_ioctl32_conversion(DIOSDUMPDEV, NULL);
++ err |= register_ioctl32_conversion(DIOGDUMPDEV, NULL);
++ err |= register_ioctl32_conversion(DIOSDUMPLEVEL, NULL);
++ err |= register_ioctl32_conversion(DIOGDUMPLEVEL, dw_long);
++ err |= register_ioctl32_conversion(DIOSDUMPFLAGS, NULL);
++ err |= register_ioctl32_conversion(DIOGDUMPFLAGS, dw_long);
++ err |= register_ioctl32_conversion(DIOSDUMPCOMPRESS, NULL);
++ err |= register_ioctl32_conversion(DIOGDUMPCOMPRESS, dw_long);
++ err |= register_ioctl32_conversion(DIOSTARGETIP, NULL);
++ err |= register_ioctl32_conversion(DIOGTARGETIP, NULL);
++ err |= register_ioctl32_conversion(DIOSTARGETPORT, NULL);
++ err |= register_ioctl32_conversion(DIOGTARGETPORT, NULL);
++ err |= register_ioctl32_conversion(DIOSSOURCEPORT, NULL);
++ err |= register_ioctl32_conversion(DIOGSOURCEPORT, NULL);
++ err |= register_ioctl32_conversion(DIOSETHADDR, NULL);
++ err |= register_ioctl32_conversion(DIOGETHADDR, NULL);
++ err |= register_ioctl32_conversion(DIOGDUMPOKAY, dw_long);
++ err |= register_ioctl32_conversion(DIOSDUMPTAKE, NULL);
++ if (err) {
++ printk(KERN_ERR "LKCD: registering ioctl32 translations failed\
++");
++ }
++#endif
++ /* set the dump_compression_list structure up */
++ dump_register_compression(&dump_none_compression);
+
-+#ifdef __KERNEL__
++ /* grab the total memory size now (not if/when we crash) */
++ si_meminfo(&info);
+
-+/*
-+ * Structure: __dump_compress
-+ * Function: This is what an individual compression mechanism can use
-+ * to plug in their own compression techniques. It's always
-+ * best to build these as individual modules so that people
-+ * can put in whatever they want.
-+ */
-+struct __dump_compress {
-+ /* the list_head structure for list storage */
-+ struct list_head list;
++ /* set the memory size */
++ dump_header.dh_memory_size = (u64)info.totalram;
+
-+ /* the type of compression to use (DUMP_COMPRESS_XXX) */
-+ int compress_type;
-+ const char *compress_name;
++ sysctl_header = register_sysctl_table(kernel_root, 0);
++ dump_sysrq_register();
+
-+ /* the compression function to call */
-+ u32 (*compress_func)(const u8 *, u32, u8 *, u32, unsigned long);
-+};
++ notifier_chain_register(&panic_notifier_list, &panic_block);
++ dump_function_ptr = dump_execute;
+
-+/* functions for dump compression registration */
-+extern void dump_register_compression(struct __dump_compress *);
-+extern void dump_unregister_compression(int);
-+
-+/*
-+ * Structure dump_mbank[]:
-+ *
-+ * For CONFIG_DISCONTIGMEM systems this array specifies the
-+ * memory banks/chunks that need to be dumped after a panic.
-+ *
-+ * For classic systems it specifies a single set of pages from
-+ * 0 to max_mapnr.
-+ */
-+struct __dump_mbank {
-+ u64 start;
-+ u64 end;
-+ int type;
-+ int pad1;
-+ long pad2;
-+};
-+
-+#define DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY 1
-+#define DUMP_MBANK_TYPE_OTHER 2
-+
-+#define MAXCHUNKS 256
-+extern int dump_mbanks;
-+extern struct __dump_mbank dump_mbank[MAXCHUNKS];
-+
-+/* notification event codes */
-+#define DUMP_BEGIN 0x0001 /* dump beginning */
-+#define DUMP_END 0x0002 /* dump ending */
-+
-+/* Scheduler soft spin control.
-+ *
-+ * 0 - no dump in progress
-+ * 1 - cpu0 is dumping, ...
-+ */
-+extern unsigned long dump_oncpu;
-+extern void dump_execute(const char *, const struct pt_regs *);
-+
-+/*
-+ * Notifier list for kernel code which wants to be called
-+ * at kernel dump.
-+ */
-+extern struct notifier_block *dump_notifier_list;
-+static inline int register_dump_notifier(struct notifier_block *nb)
-+{
-+ return notifier_chain_register(&dump_notifier_list, nb);
-+}
-+static inline int unregister_dump_notifier(struct notifier_block * nb)
-+{
-+ return notifier_chain_unregister(&dump_notifier_list, nb);
++ pr_info("Crash dump driver initialized.\n");
++ return 0;
+}
+
-+extern void (*dump_function_ptr)(const char *, const struct pt_regs *);
-+static inline void dump(char * str, struct pt_regs * regs)
++static void __exit
++dump_cleanup(void)
+{
-+ if (dump_function_ptr)
-+ dump_function_ptr(str, regs);
-+}
++ int err;
++ dump_okay = 0;
+
-+/*
-+ * Common Arch Specific Functions should be declared here.
-+ * This allows the C compiler to detect discrepancies.
-+ */
-+extern void __dump_open(void);
-+extern void __dump_cleanup(void);
-+extern void __dump_clean_irq_state(void);
-+extern void __dump_init(u64);
-+extern void __dump_save_regs(struct pt_regs *, const struct pt_regs *);
-+extern void __dump_save_context(int cpu, const struct pt_regs *, struct task_struct *tsk);
-+extern int __dump_configure_header(const struct pt_regs *);
-+extern int __dump_irq_enable(void);
-+extern void __dump_irq_restore(void);
-+extern int __dump_page_valid(unsigned long index);
-+#ifdef CONFIG_SMP
-+extern void __dump_save_other_cpus(void);
-+#else
-+#define __dump_save_other_cpus()
-+#endif
++ if (dump_config.dumper)
++ dump_unconfigure();
+
-+extern int manual_handle_crashdump(void);
++ /* arch-specific cleanup routine */
++ __dump_cleanup();
+
-+/* to track all used (compound + zero order) pages */
-+#define PageInuse(p) (PageCompound(p) || page_count(p))
++#ifdef CONFIG_COMPAT
++ err = unregister_ioctl32_conversion(DIOSDUMPDEV);
++ err |= unregister_ioctl32_conversion(DIOGDUMPDEV);
++ err |= unregister_ioctl32_conversion(DIOSDUMPLEVEL);
++ err |= unregister_ioctl32_conversion(DIOGDUMPLEVEL);
++ err |= unregister_ioctl32_conversion(DIOSDUMPFLAGS);
++ err |= unregister_ioctl32_conversion(DIOGDUMPFLAGS);
++ err |= unregister_ioctl32_conversion(DIOSDUMPCOMPRESS);
++ err |= unregister_ioctl32_conversion(DIOGDUMPCOMPRESS);
++ err |= unregister_ioctl32_conversion(DIOSTARGETIP);
++ err |= unregister_ioctl32_conversion(DIOGTARGETIP);
++ err |= unregister_ioctl32_conversion(DIOSTARGETPORT);
++ err |= unregister_ioctl32_conversion(DIOGTARGETPORT);
++ err |= unregister_ioctl32_conversion(DIOSSOURCEPORT);
++ err |= unregister_ioctl32_conversion(DIOGSOURCEPORT);
++ err |= unregister_ioctl32_conversion(DIOSETHADDR);
++ err |= unregister_ioctl32_conversion(DIOGETHADDR);
++ err |= unregister_ioctl32_conversion(DIOGDUMPOKAY);
++ err |= unregister_ioctl32_conversion(DIOSDUMPTAKE);
++ if (err) {
++ printk(KERN_ERR "LKCD: Unregistering ioctl32 translations failed\n");
++ }
++#endif
+
-+#endif /* __KERNEL__ */
++ /* ignore errors while unregistering -- since can't do anything */
++ unregister_sysctl_table(sysctl_header);
++ misc_deregister(&dump_miscdev);
++ dump_sysrq_unregister();
++ notifier_chain_unregister(&panic_notifier_list, &panic_block);
++ dump_function_ptr = NULL;
++}
+
-+#else /* !CONFIG_CRASH_DUMP */
++EXPORT_SYMBOL(dump_register_compression);
++EXPORT_SYMBOL(dump_unregister_compression);
++EXPORT_SYMBOL(dump_register_device);
++EXPORT_SYMBOL(dump_unregister_device);
++EXPORT_SYMBOL(dump_config);
++EXPORT_SYMBOL(dump_silence_level);
+
-+/* If not configured then make code disappear! */
-+#define register_dump_watchdog(x) do { } while(0)
-+#define unregister_dump_watchdog(x) do { } while(0)
-+#define register_dump_notifier(x) do { } while(0)
-+#define unregister_dump_notifier(x) do { } while(0)
-+#define dump_in_progress() 0
-+#define dump(x, y) do { } while(0)
++EXPORT_SYMBOL(__dump_irq_enable);
++EXPORT_SYMBOL(__dump_irq_restore);
+
-+#endif /* !CONFIG_CRASH_DUMP */
++MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
++MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
++MODULE_LICENSE("GPL");
+
-+#endif /* _DUMP_H */
-Index: linux-2.6.10/include/linux/dumpdev.h
++module_init(dump_init);
++module_exit(dump_cleanup);
+Index: linux-2.6.10/drivers/dump/dump_execute.c
===================================================================
---- linux-2.6.10.orig/include/linux/dumpdev.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/linux/dumpdev.h 2005-04-05 16:47:53.890213160 +0800
-@@ -0,0 +1,163 @@
+--- linux-2.6.10.orig/drivers/dump/dump_execute.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_execute.c 2005-04-07 18:13:56.898753920 +0800
+@@ -0,0 +1,144 @@
+/*
-+ * Generic dump device interfaces for flexible system dump
-+ * (Enables variation of dump target types e.g disk, network, memory)
-+ *
-+ * These interfaces have evolved based on discussions on lkcd-devel.
-+ * Eventually the intent is to support primary and secondary or
-+ * alternate targets registered at the same time, with scope for
-+ * situation based failover or multiple dump devices used for parallel
-+ * dump i/o.
++ * The file has the common/generic dump execution code
+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
++ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
++ * Split and rewrote high level dump execute code to make use
++ * of dump method interfaces.
+ *
++ * Derived from original code in dump_base.c created by
++ * Matt Robinson <yakker@sourceforge.net>)
++ *
++ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
+ * Copyright (C) 2002 International Business Machines Corp.
+ *
++ * Assumes dumper and dump config settings are in place
++ * (invokes corresponding dumper specific routines as applicable)
++ *
+ * This code is released under version 2 of the GNU GPL.
+ */
-+
-+#ifndef _LINUX_DUMPDEV_H
-+#define _LINUX_DUMPDEV_H
-+
+#include <linux/kernel.h>
-+#include <linux/wait.h>
-+#include <linux/netpoll.h>
-+#include <linux/bio.h>
++#include <linux/notifier.h>
++#include <linux/dump.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include "dump_methods.h"
+
-+/* Determined by the dump target (device) type */
++struct notifier_block *dump_notifier_list; /* dump started/ended callback */
+
-+struct dump_dev;
++extern int panic_timeout;
+
-+struct dump_dev_ops {
-+ int (*open)(struct dump_dev *, unsigned long); /* configure */
-+ int (*release)(struct dump_dev *); /* unconfigure */
-+ int (*silence)(struct dump_dev *); /* when dump starts */
-+ int (*resume)(struct dump_dev *); /* when dump is over */
-+ int (*seek)(struct dump_dev *, loff_t);
-+ /* trigger a write (async in nature typically) */
-+ int (*write)(struct dump_dev *, void *, unsigned long);
-+ /* not usually used during dump, but option available */
-+ int (*read)(struct dump_dev *, void *, unsigned long);
-+ /* use to poll for completion */
-+ int (*ready)(struct dump_dev *, void *);
-+ int (*ioctl)(struct dump_dev *, unsigned int, unsigned long);
-+};
++/* Dump progress indicator */
++void
++dump_speedo(int i)
++{
++ static const char twiddle[4] = { '|', '\\', '-', '/' };
++ printk("%c\b", twiddle[i&3]);
++}
+
-+struct dump_dev {
-+ char type_name[32]; /* block, net-poll etc */
-+ unsigned long device_id; /* interpreted differently for various types */
-+ struct dump_dev_ops *ops;
-+ struct list_head list;
-+ loff_t curr_offset;
-+ struct netpoll np;
-+};
++/* Make the device ready and write out the header */
++int dump_begin(void)
++{
++ int err = 0;
+
-+/*
-+ * dump_dev type variations:
-+ */
++ /* dump_dev = dump_config.dumper->dev; */
++ dumper_reset();
++ if ((err = dump_dev_silence())) {
++ /* quiesce failed, can't risk continuing */
++ /* Todo/Future: switch to alternate dump scheme if possible */
++ printk("dump silence dev failed ! error %d\n", err);
++ return err;
++ }
+
-+/* block */
-+struct dump_blockdev {
-+ struct dump_dev ddev;
-+ dev_t dev_id;
-+ struct block_device *bdev;
-+ struct bio *bio;
-+ loff_t start_offset;
-+ loff_t limit;
-+ int err;
-+};
++ pr_debug("Writing dump header\n");
++ if ((err = dump_update_header())) {
++ printk("dump update header failed ! error %d\n", err);
++ dump_dev_resume();
++ return err;
++ }
+
-+static inline struct dump_blockdev *DUMP_BDEV(struct dump_dev *dev)
-+{
-+ return container_of(dev, struct dump_blockdev, ddev);
++ dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
++
++ return 0;
+}
+
++/*
++ * Write the dump terminator, a final header update and let go of
++ * exclusive use of the device for dump.
++ */
++int dump_complete(void)
++{
++ int ret = 0;
+
-+/* mem - for internal use by soft-boot based dumper */
-+struct dump_memdev {
-+ struct dump_dev ddev;
-+ unsigned long indirect_map_root;
-+ unsigned long nr_free;
-+ struct page *curr_page;
-+ unsigned long *curr_map;
-+ unsigned long curr_map_offset;
-+ unsigned long last_offset;
-+ unsigned long last_used_offset;
-+ unsigned long last_bs_offset;
-+};
-+
-+static inline struct dump_memdev *DUMP_MDEV(struct dump_dev *dev)
-+{
-+ return container_of(dev, struct dump_memdev, ddev);
-+}
-+
-+/* Todo/future - meant for raw dedicated interfaces e.g. mini-ide driver */
-+struct dump_rdev {
-+ struct dump_dev ddev;
-+ char name[32];
-+ int (*reset)(struct dump_rdev *, unsigned int,
-+ unsigned long);
-+ /* ... to do ... */
-+};
-+
-+/* just to get the size right when saving config across a soft-reboot */
-+struct dump_anydev {
-+ union {
-+ struct dump_blockdev bddev;
-+ /* .. add other types here .. */
-+ };
-+};
++ if (dump_config.level != DUMP_LEVEL_HEADER) {
++ if ((ret = dump_update_end_marker())) {
++ printk("dump update end marker error %d\n", ret);
++ }
++ if ((ret = dump_update_header())) {
++ printk("dump update header error %d\n", ret);
++ }
++ }
++ ret = dump_dev_resume();
+
++ if ((panic_timeout > 0) && (!(dump_config.flags & (DUMP_FLAGS_SOFTBOOT | DUMP_FLAGS_NONDISRUPT)))) {
++ mdelay(panic_timeout * 1000);
++ machine_restart(NULL);
++ }
+
++ return ret;
++}
+
-+/* Dump device / target operation wrappers */
-+/* These assume that dump_dev is initiatized to dump_config.dumper->dev */
++/* Saves all dump data */
++int dump_execute_savedump(void)
++{
++ int ret = 0, err = 0;
+
-+extern struct dump_dev *dump_dev;
++ if ((ret = dump_begin())) {
++ return ret;
++ }
+
-+static inline int dump_dev_open(unsigned long arg)
-+{
-+ return dump_dev->ops->open(dump_dev, arg);
-+}
++ if (dump_config.level != DUMP_LEVEL_HEADER) {
++ ret = dump_sequencer();
++ }
++ if ((err = dump_complete())) {
++ printk("Dump complete failed. Error %d\n", err);
++ }
+
-+static inline int dump_dev_release(void)
-+{
-+ return dump_dev->ops->release(dump_dev);
++ return ret;
+}
+
-+static inline int dump_dev_silence(void)
-+{
-+ return dump_dev->ops->silence(dump_dev);
-+}
++extern void dump_calc_bootmap_pages(void);
+
-+static inline int dump_dev_resume(void)
++/* Does all the real work: Capture and save state */
++int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
+{
-+ return dump_dev->ops->resume(dump_dev);
-+}
++ int ret = 0;
+
-+static inline int dump_dev_seek(loff_t offset)
-+{
-+ return dump_dev->ops->seek(dump_dev, offset);
-+}
++#ifdef CONFIG_DISCONTIGMEM
++ printk(KERN_INFO "Reconfiguring memory bank information....\n");
++ printk(KERN_INFO "This may take a while....\n");
++ dump_reconfigure_mbanks();
++#endif
+
-+static inline int dump_dev_write(void *buf, unsigned long len)
-+{
-+ return dump_dev->ops->write(dump_dev, buf, len);
-+}
++ if ((ret = dump_configure_header(panic_str, regs))) {
++ printk("dump config header failed ! error %d\n", ret);
++ return ret;
++ }
+
-+static inline int dump_dev_ready(void *buf)
-+{
-+ return dump_dev->ops->ready(dump_dev, buf);
-+}
++ dump_calc_bootmap_pages();
++ /* tell interested parties that a dump is about to start */
++ notifier_call_chain(&dump_notifier_list, DUMP_BEGIN,
++ &dump_config.dump_device);
+
-+static inline int dump_dev_ioctl(unsigned int cmd, unsigned long arg)
-+{
-+ if (!dump_dev || !dump_dev->ops->ioctl)
-+ return -EINVAL;
-+ return dump_dev->ops->ioctl(dump_dev, cmd, arg);
-+}
++ if (dump_config.level != DUMP_LEVEL_NONE)
++ ret = dump_execute_savedump();
+
-+extern int dump_register_device(struct dump_dev *);
-+extern void dump_unregister_device(struct dump_dev *);
++ pr_debug("dumped %ld blocks of %d bytes each\n",
++ dump_config.dumper->count, DUMP_BUFFER_SIZE);
++
++ /* tell interested parties that a dump has completed */
++ notifier_call_chain(&dump_notifier_list, DUMP_END,
++ &dump_config.dump_device);
+
-+#endif /* _LINUX_DUMPDEV_H */
-Index: linux-2.6.10/include/linux/dump_netdev.h
++ return ret;
++}
+Index: linux-2.6.10/drivers/dump/dump_x8664.c
===================================================================
---- linux-2.6.10.orig/include/linux/dump_netdev.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/linux/dump_netdev.h 2005-04-05 16:47:53.889213312 +0800
-@@ -0,0 +1,80 @@
+--- linux-2.6.10.orig/drivers/dump/dump_x8664.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_x8664.c 2005-04-07 18:13:56.901753464 +0800
+@@ -0,0 +1,362 @@
+/*
-+ * linux/drivers/net/netconsole.h
++ * Architecture specific (x86-64) functions for Linux crash dumps.
+ *
-+ * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
++ * Created by: Matt Robinson (yakker@sgi.com)
+ *
-+ * This file contains the implementation of an IRQ-safe, crash-safe
-+ * kernel console implementation that outputs kernel messages to the
-+ * network.
++ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
+ *
-+ * Modification history:
++ * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
++ * Copyright 2000 TurboLinux, Inc. All rights reserved.
+ *
-+ * 2001-09-17 started by Ingo Molnar.
++ * x86-64 port Copyright 2002 Andi Kleen, SuSE Labs
++ * x86-64 port Sachin Sant ( sachinp@in.ibm.com )
++ * This code is released under version 2 of the GNU GPL.
+ */
+
-+/****************************************************************
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2, or (at your option)
-+ * any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-+ *
-+ ****************************************************************/
-+
-+#define NETCONSOLE_VERSION 0x03
++/*
++ * The hooks for dumping the kernel virtual memory to disk are in this
++ * file. Any time a modification is made to the virtual memory mechanism,
++ * these routines must be changed to use the new mechanisms.
++ */
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/smp.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/dump.h>
++#include "dump_methods.h"
++#include <linux/mm.h>
++#include <linux/rcupdate.h>
++#include <asm/processor.h>
++#include <asm/hardirq.h>
++#include <asm/kdebug.h>
++#include <asm/uaccess.h>
++#include <asm/nmi.h>
++#include <asm/kdebug.h>
+
-+enum netdump_commands {
-+ COMM_NONE = 0,
-+ COMM_SEND_MEM = 1,
-+ COMM_EXIT = 2,
-+ COMM_REBOOT = 3,
-+ COMM_HELLO = 4,
-+ COMM_GET_NR_PAGES = 5,
-+ COMM_GET_PAGE_SIZE = 6,
-+ COMM_START_NETDUMP_ACK = 7,
-+ COMM_GET_REGS = 8,
-+ COMM_GET_MAGIC = 9,
-+ COMM_START_WRITE_NETDUMP_ACK = 10,
-+};
++static __s32 saved_irq_count; /* saved preempt_count() flag */
+
-+typedef struct netdump_req_s {
-+ u64 magic;
-+ u32 nr;
-+ u32 command;
-+ u32 from;
-+ u32 to;
-+} req_t;
++void (*dump_trace_ptr)(struct pt_regs *);
+
-+enum netdump_replies {
-+ REPLY_NONE = 0,
-+ REPLY_ERROR = 1,
-+ REPLY_LOG = 2,
-+ REPLY_MEM = 3,
-+ REPLY_RESERVED = 4,
-+ REPLY_HELLO = 5,
-+ REPLY_NR_PAGES = 6,
-+ REPLY_PAGE_SIZE = 7,
-+ REPLY_START_NETDUMP = 8,
-+ REPLY_END_NETDUMP = 9,
-+ REPLY_REGS = 10,
-+ REPLY_MAGIC = 11,
-+ REPLY_START_WRITE_NETDUMP = 12,
-+};
++static int alloc_dha_stack(void)
++{
++ int i;
++ void *ptr;
++
++ if (dump_header_asm.dha_stack[0])
++ return 0;
+
-+typedef struct netdump_reply_s {
-+ u32 nr;
-+ u32 code;
-+ u32 info;
-+} reply_t;
++ ptr = vmalloc(THREAD_SIZE * num_online_cpus());
++ if (!ptr) {
++ printk("vmalloc for dha_stacks failed\n");
++ return -ENOMEM;
++ }
+
-+#define HEADER_LEN (1 + sizeof(reply_t))
++ for (i = 0; i < num_online_cpus(); i++) {
++ dump_header_asm.dha_stack[i] =
++ (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
++ }
++ return 0;
++}
+
++static int free_dha_stack(void)
++{
++ if (dump_header_asm.dha_stack[0]) {
++ vfree((void *)dump_header_asm.dha_stack[0]);
++ dump_header_asm.dha_stack[0] = 0;
++ }
++ return 0;
++}
+
-Index: linux-2.6.10/include/asm-parisc/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-parisc/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-parisc/kerntypes.h 2005-04-05 16:47:53.870216200 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-parisc/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++void
++__dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs)
++{
++ if (regs)
++ memcpy(dest_regs, regs, sizeof(struct pt_regs));
++}
+
-+/* PA-RISC-specific header files */
-+#ifndef _PARISC_KERNTYPES_H
-+#define _PARISC_KERNTYPES_H
++void
++__dump_save_context(int cpu, const struct pt_regs *regs,
++ struct task_struct *tsk)
++{
++ dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
++ __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ /* take a snapshot of the stack */
++ /* doing this enables us to tolerate slight drifts on this cpu */
+
-+#endif /* _PARISC_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-h8300/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-h8300/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-h8300/kerntypes.h 2005-04-05 16:47:53.880214680 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-h8300/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ if (dump_header_asm.dha_stack[cpu]) {
++ memcpy((void *)dump_header_asm.dha_stack[cpu],
++ STACK_START_POSITION(tsk),
++ THREAD_SIZE);
++ }
++ dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
++}
+
-+/* H8300-specific header files */
-+#ifndef _H8300_KERNTYPES_H
-+#define _H8300_KERNTYPES_H
++#ifdef CONFIG_SMP
++extern cpumask_t irq_affinity[];
++extern irq_desc_t irq_desc[];
++extern void dump_send_ipi(void);
++static int dump_expect_ipi[NR_CPUS];
++static atomic_t waiting_for_dump_ipi;
++static unsigned long saved_affinity[NR_IRQS];
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++extern void stop_this_cpu(void *);
+
-+#endif /* _H8300_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-ppc/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ppc/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-ppc/kerntypes.h 2005-04-05 16:47:53.882214376 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-ppc/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++static int
++dump_nmi_callback(struct pt_regs *regs, int cpu)
++{
++ if (!dump_expect_ipi[cpu]) {
++ return 0;
++ }
++
++ dump_expect_ipi[cpu] = 0;
+
-+/* PowerPC-specific header files */
-+#ifndef _PPC_KERNTYPES_H
-+#define _PPC_KERNTYPES_H
++ dump_save_this_cpu(regs);
++ atomic_dec(&waiting_for_dump_ipi);
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++level_changed:
+
-+#endif /* _PPC_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-alpha/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-alpha/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-alpha/kerntypes.h 2005-04-05 16:47:53.876215288 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-alpha/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ switch (dump_silence_level) {
++ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
++ while (dump_oncpu) {
++ barrier(); /* paranoia */
++ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
++ goto level_changed;
+
-+/* Alpha-specific header files */
-+#ifndef _ALPHA_KERNTYPES_H
-+#define _ALPHA_KERNTYPES_H
++ cpu_relax(); /* kill time nicely */
++ }
++ break;
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ case DUMP_HALT_CPUS: /* Execute halt */
++ stop_this_cpu(NULL);
++ break;
+
-+#endif /* _ALPHA_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-arm26/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-arm26/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-arm26/kerntypes.h 2005-04-05 16:47:53.865216960 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-arm26/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ case DUMP_SOFT_SPIN_CPUS:
++ /* Mark the task so it spins in schedule */
++ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
++ break;
++ }
+
-+/* ARM26-specific header files */
-+#ifndef _ARM26_KERNTYPES_H
-+#define _ARM26_KERNTYPES_H
++ return 1;
++}
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++/* save registers on other processors */
++void
++__dump_save_other_cpus(void)
++{
++ int i, cpu = smp_processor_id();
++ int other_cpus = num_online_cpus() - 1;
+
-+#endif /* _ARM26_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-sh/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-sh/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-sh/kerntypes.h 2005-04-05 16:47:53.877215136 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-sh/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ if (other_cpus > 0) {
++ atomic_set(&waiting_for_dump_ipi, other_cpus);
+
-+/* Super-H-specific header files */
-+#ifndef _SH_KERNTYPES_H
-+#define _SH_KERNTYPES_H
++ for (i = 0; i < NR_CPUS; i++)
++ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
++
++ set_nmi_callback(dump_nmi_callback);
++ wmb();
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ dump_send_ipi();
+
-+#endif /* _SH_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-ia64/nmi.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ia64/nmi.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-ia64/nmi.h 2005-04-05 16:47:53.883214224 +0800
-@@ -0,0 +1,28 @@
-+/*
-+ * linux/include/asm-ia64/nmi.h
-+ */
-+#ifndef ASM_NMI_H
-+#define ASM_NMI_H
++ /* Maybe we don't need to wait for the NMI to be processed.
++ just write out the header at the end of dumping, if
++ this IPI is not processed until then, there probably
++ is a problem and we just fail to capture state of
++ other cpus. */
++ while(atomic_read(&waiting_for_dump_ipi) > 0)
++ cpu_relax();
+
-+#include <linux/pm.h>
++ unset_nmi_callback();
++ }
++ return;
++}
+
-+struct pt_regs;
-+
-+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-+
-+/**
-+ * set_nmi_callback
-+ *
-+ * Set a handler for an NMI. Only one handler may be
-+ * set. Return 1 if the NMI was handled.
-+ */
-+void set_nmi_callback(nmi_callback_t callback);
-+
-+/**
-+ * unset_nmi_callback
-+ *
-+ * Remove the handler previously set.
-+ */
-+void unset_nmi_callback(void);
-+
-+#endif /* ASM_NMI_H */
-Index: linux-2.6.10/include/asm-ia64/dump.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ia64/dump.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-ia64/dump.h 2005-04-05 16:47:53.884214072 +0800
-@@ -0,0 +1,201 @@
+/*
-+ * Kernel header file for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
-+ *
-+ * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
++ * Routine to save the old irq affinities and change affinities of all irqs to
++ * the dumping cpu.
+ */
++static void
++set_irq_affinity(void)
++{
++ int i;
++ cpumask_t cpu = CPU_MASK_NONE;
+
-+/* This header file holds the architecture specific crash dump header */
-+#ifndef _ASM_DUMP_H
-+#define _ASM_DUMP_H
-+
-+/* definitions */
-+#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
-+#define DUMP_ASM_VERSION_NUMBER 0x4 /* version number */
++ cpu_set(smp_processor_id(), cpu);
++ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
++ for (i = 0; i < NR_IRQS; i++) {
++ if (irq_desc[i].handler == NULL)
++ continue;
++ irq_affinity[i] = cpu;
++ if (irq_desc[i].handler->set_affinity != NULL)
++ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
++ }
++}
+
-+#ifdef __KERNEL__
-+#include <linux/efi.h>
-+#include <asm/pal.h>
-+#include <asm/ptrace.h>
++/*
++ * Restore old irq affinities.
++ */
++static void
++reset_irq_affinity(void)
++{
++ int i;
++
++ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
++ for (i = 0; i < NR_IRQS; i++) {
++ if (irq_desc[i].handler == NULL)
++ continue;
++ if (irq_desc[i].handler->set_affinity != NULL)
++ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
++ }
++}
+
-+#ifdef CONFIG_SMP
-+extern cpumask_t irq_affinity[];
-+extern int (*dump_ipi_function_ptr)(struct pt_regs *);
-+extern void dump_send_ipi(void);
+#else /* !CONFIG_SMP */
-+#define dump_send_ipi() do { } while(0)
-+#endif
++#define set_irq_affinity() do { } while (0)
++#define reset_irq_affinity() do { } while (0)
++#define save_other_cpu_states() do { } while (0)
++#endif /* !CONFIG_SMP */
+
-+#else /* !__KERNEL__ */
-+/* necessary header files */
-+#include <asm/ptrace.h> /* for pt_regs */
-+#include <linux/threads.h>
-+#endif /* __KERNEL__ */
++static inline void
++irq_bh_save(void)
++{
++ saved_irq_count = irq_count();
++ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
++}
+
-+/*
-+ * mkswap.c calls getpagesize() to get the system page size,
-+ * which is not necessarily the same as the hardware page size.
-+ *
-+ * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB.
-+ *
-+ * The physical memory is layed out out in the hardware/minimal pages.
-+ * This is the size we need to use for dumping physical pages.
-+ *
-+ * Note ths hardware/minimal page size being use in;
-+ * arch/ia64/kernel/efi.c`efi_memmap_walk():
-+ * curr.end = curr.start + (md->num_pages << 12);
++static inline void
++irq_bh_restore(void)
++{
++ preempt_count() |= saved_irq_count;
++}
++
++/*
++ * Name: __dump_irq_enable
++ * Func: Reset system so interrupts are enabled.
++ * This is used for dump methods that require interrupts
++ * Eventually, all methods will have interrupts disabled
++ * and this code can be removed.
+ *
-+ * Since the system page size could change between the kernel we boot
-+ * on the the kernel that cause the core dume we may want to have something
-+ * more constant like the maximum system page size (See include/asm-ia64/page.h).
-+ */
-+/* IA64 manages the stack in differnt manner as compared to other architectures.
-+ * task_struct lies at the bottom of stack.
++ * Change irq affinities
++ * Re-enable interrupts
+ */
-+#undef STACK_START_POSITION
-+#define STACK_START_POSITION(tsk) (tsk)
-+#define DUMP_MIN_PAGE_SHIFT 12
-+#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT)
-+#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1))
-+#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK)
-+
-+#define DUMP_MAX_PAGE_SHIFT 16
-+#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT)
-+#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1))
-+#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK)
-+
-+#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT
++int
++__dump_irq_enable(void)
++{
++ set_irq_affinity();
++ irq_bh_save();
++ local_irq_enable();
++ return 0;
++}
+
-+extern int _end,_start;
++/*
++ * Name: __dump_irq_restore
++ * Func: Resume the system state in an architecture-specific way.
++ *
++ */
++void
++__dump_irq_restore(void)
++{
++ local_irq_disable();
++ reset_irq_affinity();
++ irq_bh_restore();
++}
+
+/*
-+ * Structure: dump_header_asm_t
-+ * Function: This is the header for architecture-specific stuff. It
-+ * follows right after the dump header.
++ * Name: __dump_configure_header()
++ * Func: Configure the dump header with all proper values.
+ */
-+/*typedef struct _dump_header_asm {*/
++int
++__dump_configure_header(const struct pt_regs *regs)
++{
++ /* Dummy function - return */
++ return (0);
++}
+
-+typedef struct __dump_header_asm {
++static int notify(struct notifier_block *nb, unsigned long code, void *data)
++{
++ if (code == DIE_NMI_IPI && dump_oncpu)
++ return NOTIFY_BAD;
++ return NOTIFY_DONE;
++}
+
-+ /* the dump magic number -- unique to verify dump is valid */
-+ uint64_t dha_magic_number;
++static struct notifier_block dump_notifier = {
++ .notifier_call = notify,
++};
+
-+ /* the version number of this dump */
-+ uint32_t dha_version;
++/*
++ * Name: __dump_init()
++ * Func: Initialize the dumping routine process.
++ */
++void
++__dump_init(uint64_t local_memory_start)
++{
++ notifier_chain_register(&die_chain, &dump_notifier);
++}
+
-+ /* the size of this header (in case we can't read it) */
-+ uint32_t dha_header_size;
++/*
++ * Name: __dump_open()
++ * Func: Open the dump device (architecture specific). This is in
++ * case it's necessary in the future.
++ */
++void
++__dump_open(void)
++{
++ alloc_dha_stack();
++ /* return */
++ return;
++}
+
-+ /* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */
-+ uint64_t dha_pt_regs;
++/*
++ * Name: __dump_cleanup()
++ * Func: Free any architecture specific data structures. This is called
++ * when the dump module is being removed.
++ */
++void
++__dump_cleanup(void)
++{
++ free_dha_stack();
++ notifier_chain_unregister(&die_chain, &dump_notifier);
++ synchronize_kernel();
++ return;
++}
+
-+ /* the dump registers */
-+ struct pt_regs dha_regs;
++extern int page_is_ram(unsigned long);
+
-+ /* the rnat register saved after flushrs */
-+ uint64_t dha_rnat;
++/*
++ * Name: __dump_page_valid()
++ * Func: Check if page is valid to dump.
++ */
++int
++__dump_page_valid(unsigned long index)
++{
++ if (!pfn_valid(index))
++ return 0;
+
-+ /* the pfs register saved after flushrs */
-+ uint64_t dha_pfs;
++ return page_is_ram(index);
++}
+
-+ /* the bspstore register saved after flushrs */
-+ uint64_t dha_bspstore;
++/*
++ * Name: manual_handle_crashdump()
++ * Func: Interface for the lkcd dump command. Calls dump_execute()
++ */
++int
++manual_handle_crashdump(void) {
+
-+ /* smp specific */
-+ uint32_t dha_smp_num_cpus;
-+ uint32_t dha_dumping_cpu;
-+ struct pt_regs dha_smp_regs[NR_CPUS];
-+ uint64_t dha_smp_current_task[NR_CPUS];
-+ uint64_t dha_stack[NR_CPUS];
-+ uint64_t dha_stack_ptr[NR_CPUS];
++ struct pt_regs regs;
+
-+} __attribute__((packed)) dump_header_asm_t;
++ get_current_regs(&regs);
++ dump_execute("manual", &regs);
++ return 0;
++}
+
++/*
++ * Name: __dump_clean_irq_state()
++ * Func: Clean up from the previous IRQ handling state. Such as oops from
++ * interrupt handler or bottom half.
++ */
++void
++__dump_clean_irq_state(void)
++{
++ return;
++}
+Index: linux-2.6.10/drivers/dump/dump_rle.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_rle.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_rle.c 2005-04-07 18:13:56.897754072 +0800
+@@ -0,0 +1,176 @@
++/*
++ * RLE Compression functions for kernel crash dumps.
++ *
++ * Created by: Matt Robinson (yakker@sourceforge.net)
++ * Copyright 2001 Matt D. Robinson. All rights reserved.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
-+extern struct __dump_header_asm dump_header_asm;
++/* header files */
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/init.h>
++#include <linux/dump.h>
+
-+#ifdef __KERNEL__
-+static inline void get_current_regs(struct pt_regs *regs)
-+{
-+ /*
-+ * REMIND: Looking at functions/Macros like:
-+ * DO_SAVE_SWITCH_STACK
-+ * ia64_switch_to()
-+ * ia64_save_extra()
-+ * switch_to()
-+ * to implement this new feature that Matt seem to have added
-+ * to panic.c; seems all platforms are now expected to provide
-+ * this function to dump the current registers into the pt_regs
-+ * structure.
++/*
++ * Name: dump_compress_rle()
++ * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
++ * reasonable, if possible. This is the same routine we use in IRIX.
++ */
++static u32
++dump_compress_rle(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
++ unsigned long loc)
++{
++ u16 ri, wi, count = 0;
++ u_char value = 0, cur_byte;
++
++ /*
++ * If the block should happen to "compress" to larger than the
++ * buffer size, allocate a larger one and change cur_buf_size.
+ */
-+ volatile unsigned long rsc_value;/*for storing the rsc value*/
-+ volatile unsigned long ic_value;
+
-+ __asm__ __volatile__("mov %0=b6;;":"=r"(regs->b6));
-+ __asm__ __volatile__("mov %0=b7;;":"=r"(regs->b7));
-+
-+ __asm__ __volatile__("mov %0=ar.csd;;":"=r"(regs->ar_csd));
-+ __asm__ __volatile__("mov %0=ar.ssd;;":"=r"(regs->ar_ssd));
-+ __asm__ __volatile__("mov %0=psr;;":"=r"(ic_value));
-+ if(ic_value & 0x1000)/*Within an interrupt*/
-+ {
-+ __asm__ __volatile__("mov %0=cr.ipsr;;":"=r"(regs->cr_ipsr));
-+ __asm__ __volatile__("mov %0=cr.iip;;":"=r"(regs->cr_iip));
-+ __asm__ __volatile__("mov %0=cr.ifs;;":"=r"(regs->cr_ifs));
-+ }
-+ else
-+ {
-+ regs->cr_ipsr=regs->cr_iip=regs->cr_ifs=(unsigned long)-1;
++ wi = ri = 0;
++
++ while (ri < oldsize) {
++ if (!ri) {
++ cur_byte = value = old[ri];
++ count = 0;
++ } else {
++ if (count == 255) {
++ if (wi + 3 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = 0;
++ new[wi++] = count;
++ new[wi++] = value;
++ value = cur_byte = old[ri];
++ count = 0;
++ } else {
++ if ((cur_byte = old[ri]) == value) {
++ count++;
++ } else {
++ if (count > 1) {
++ if (wi + 3 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = 0;
++ new[wi++] = count;
++ new[wi++] = value;
++ } else if (count == 1) {
++ if (value == 0) {
++ if (wi + 3 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = 0;
++ new[wi++] = 1;
++ new[wi++] = 0;
++ } else {
++ if (wi + 2 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = value;
++ new[wi++] = value;
++ }
++ } else { /* count == 0 */
++ if (value == 0) {
++ if (wi + 2 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = value;
++ new[wi++] = value;
++ } else {
++ if (wi + 1 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = value;
++ }
++ } /* if count > 1 */
++
++ value = cur_byte;
++ count = 0;
++
++ } /* if byte == value */
++
++ } /* if count == 255 */
++
++ } /* if ri == 0 */
++ ri++;
++
+ }
-+ __asm__ __volatile__("mov %0=ar.unat;;":"=r"(regs->ar_unat));
-+ __asm__ __volatile__("mov %0=ar.pfs;;":"=r"(regs->ar_pfs));
-+ __asm__ __volatile__("mov %0=ar.rsc;;":"=r"(rsc_value));
-+ regs->ar_rsc = rsc_value;
-+ /*loadrs is from 16th bit to 29th bit of rsc*/
-+ regs->loadrs = rsc_value >> 16 & (unsigned long)0x3fff;
-+ /*setting the rsc.mode value to 0 (rsc.mode is the last two bits of rsc)*/
-+ __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value & (unsigned long)(~3)));
-+ __asm__ __volatile__("mov %0=ar.rnat;;":"=r"(regs->ar_rnat));
-+ __asm__ __volatile__("mov %0=ar.bspstore;;":"=r"(regs->ar_bspstore));
-+ /*copying the original value back*/
-+ __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value));
-+ __asm__ __volatile__("mov %0=pr;;":"=r"(regs->pr));
-+ __asm__ __volatile__("mov %0=ar.fpsr;;":"=r"(regs->ar_fpsr));
-+ __asm__ __volatile__("mov %0=ar.ccv;;":"=r"(regs->ar_ccv));
++ if (count > 1) {
++ if (wi + 3 > oldsize) {
++ return oldsize;
++ }
++ new[wi++] = 0;
++ new[wi++] = count;
++ new[wi++] = value;
++ } else if (count == 1) {
++ if (value == 0) {
++ if (wi + 3 > oldsize)
++ return oldsize;
++ new[wi++] = 0;
++ new[wi++] = 1;
++ new[wi++] = 0;
++ } else {
++ if (wi + 2 > oldsize)
++ return oldsize;
++ new[wi++] = value;
++ new[wi++] = value;
++ }
++ } else { /* count == 0 */
++ if (value == 0) {
++ if (wi + 2 > oldsize)
++ return oldsize;
++ new[wi++] = value;
++ new[wi++] = value;
++ } else {
++ if (wi + 1 > oldsize)
++ return oldsize;
++ new[wi++] = value;
++ }
++ } /* if count > 1 */
+
-+ __asm__ __volatile__("mov %0=r2;;":"=r"(regs->r2));
-+ __asm__ __volatile__("mov %0=r3;;":"=r"(regs->r3));
-+ __asm__ __volatile__("mov %0=r8;;":"=r"(regs->r8));
-+ __asm__ __volatile__("mov %0=r9;;":"=r"(regs->r9));
-+ __asm__ __volatile__("mov %0=r10;;":"=r"(regs->r10));
-+ __asm__ __volatile__("mov %0=r11;;":"=r"(regs->r11));
-+ __asm__ __volatile__("mov %0=r12;;":"=r"(regs->r12));
-+ __asm__ __volatile__("mov %0=r13;;":"=r"(regs->r13));
-+ __asm__ __volatile__("mov %0=r14;;":"=r"(regs->r14));
-+ __asm__ __volatile__("mov %0=r15;;":"=r"(regs->r15));
-+ __asm__ __volatile__("mov %0=r16;;":"=r"(regs->r16));
-+ __asm__ __volatile__("mov %0=r17;;":"=r"(regs->r17));
-+ __asm__ __volatile__("mov %0=r18;;":"=r"(regs->r18));
-+ __asm__ __volatile__("mov %0=r19;;":"=r"(regs->r19));
-+ __asm__ __volatile__("mov %0=r20;;":"=r"(regs->r20));
-+ __asm__ __volatile__("mov %0=r21;;":"=r"(regs->r21));
-+ __asm__ __volatile__("mov %0=r22;;":"=r"(regs->r22));
-+ __asm__ __volatile__("mov %0=r23;;":"=r"(regs->r23));
-+ __asm__ __volatile__("mov %0=r24;;":"=r"(regs->r24));
-+ __asm__ __volatile__("mov %0=r25;;":"=r"(regs->r25));
-+ __asm__ __volatile__("mov %0=r26;;":"=r"(regs->r26));
-+ __asm__ __volatile__("mov %0=r27;;":"=r"(regs->r27));
-+ __asm__ __volatile__("mov %0=r28;;":"=r"(regs->r28));
-+ __asm__ __volatile__("mov %0=r29;;":"=r"(regs->r29));
-+ __asm__ __volatile__("mov %0=r30;;":"=r"(regs->r30));
-+ __asm__ __volatile__("mov %0=r31;;":"=r"(regs->r31));
++ value = cur_byte;
++ count = 0;
++ return wi;
+}
+
-+/* Perhaps added to Common Arch Specific Functions and moved to dump.h some day */
-+extern void * __dump_memcpy(void *, const void *, size_t);
-+#endif /* __KERNEL__ */
++/* setup the rle compression functionality */
++static struct __dump_compress dump_rle_compression = {
++ .compress_type = DUMP_COMPRESS_RLE,
++ .compress_func = dump_compress_rle,
++ .compress_name = "RLE",
++};
+
-+#endif /* _ASM_DUMP_H */
-Index: linux-2.6.10/include/asm-ia64/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ia64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-ia64/kerntypes.h 2005-04-05 16:47:53.884214072 +0800
-@@ -0,0 +1,21 @@
+/*
-+ * asm-ia64/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
++ * Name: dump_compress_rle_init()
++ * Func: Initialize rle compression for dumping.
+ */
++static int __init
++dump_compress_rle_init(void)
++{
++ dump_register_compression(&dump_rle_compression);
++ return 0;
++}
+
-+/* IA64-specific header files */
-+#ifndef _IA64_KERNTYPES_H
-+#define _IA64_KERNTYPES_H
++/*
++ * Name: dump_compress_rle_cleanup()
++ * Func: Remove rle compression for dumping.
++ */
++static void __exit
++dump_compress_rle_cleanup(void)
++{
++ dump_unregister_compression(DUMP_COMPRESS_RLE);
++}
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++/* module initialization */
++module_init(dump_compress_rle_init);
++module_exit(dump_compress_rle_cleanup);
+
-+#endif /* _IA64_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-ppc64/dump.h
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
++MODULE_DESCRIPTION("RLE compression module for crash dump driver");
+Index: linux-2.6.10/drivers/dump/dump_overlay.c
===================================================================
---- linux-2.6.10.orig/include/asm-ppc64/dump.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-ppc64/dump.h 2005-04-05 16:47:53.878214984 +0800
-@@ -0,0 +1,115 @@
+--- linux-2.6.10.orig/drivers/dump/dump_overlay.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_overlay.c 2005-04-07 18:13:56.905752856 +0800
+@@ -0,0 +1,890 @@
+/*
-+ * Kernel header file for Linux crash dumps.
-+ *
-+ * Created by: Todd Inglett <tinglett@vnet.ibm.com>
++ * Two-stage soft-boot based dump scheme methods (memory overlay
++ * with post soft-boot writeout)
+ *
-+ * Copyright 2002 - 2004 International Business Machines
++ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
-+
-+/* This header file holds the architecture specific crash dump header */
-+#ifndef _ASM_DUMP_H
-+#define _ASM_DUMP_H
-+
-+/* necessary header files */
-+#include <asm/ptrace.h> /* for pt_regs */
-+#include <asm/kmap_types.h>
-+#include <linux/threads.h>
-+
-+/* definitions */
-+#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
-+#define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */
++ * This approach of saving the dump in memory and writing it
++ * out after a softboot without clearing memory is derived from the
++ * Mission Critical Linux dump implementation. Credits and a big
++ * thanks for letting the lkcd project make use of the excellent
++ * piece of work and also for helping with clarifications and
++ * tips along the way are due to:
++ * Dave Winchell <winchell@mclx.com> (primary author of mcore)
++ * and also to
++ * Jeff Moyer <moyer@mclx.com>
++ * Josh Huber <huber@mclx.com>
++ *
++ * For those familiar with the mcore implementation, the key
++ * differences/extensions here are in allowing entire memory to be
++ * saved (in compressed form) through a careful ordering scheme
++ * on both the way down as well on the way up after boot, the latter
++ * for supporting the LKCD notion of passes in which most critical
++ * data is the first to be saved to the dump device. Also the post
++ * boot writeout happens from within the kernel rather than driven
++ * from userspace.
++ *
++ * The sequence is orchestrated through the abstraction of "dumpers",
++ * one for the first stage which then sets up the dumper for the next
++ * stage, providing for a smooth and flexible reuse of the singlestage
++ * dump scheme methods and a handle to pass dump device configuration
++ * information across the soft boot.
++ *
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
+/*
-+ * Structure: __dump_header_asm
-+ * Function: This is the header for architecture-specific stuff. It
-+ * follows right after the dump header.
++ * Disruptive dumping using the second kernel soft-boot option
++ * for issuing dump i/o operates in 2 stages:
++ *
++ * (1) - Saves the (compressed & formatted) dump in memory using a
++ * carefully ordered overlay scheme designed to capture the
++ * entire physical memory or selective portions depending on
++ * dump config settings,
++ * - Registers the stage 2 dumper and
++ * - Issues a soft reboot w/o clearing memory.
++ *
++ * The overlay scheme starts with a small bootstrap free area
++ * and follows a reverse ordering of passes wherein it
++ * compresses and saves data starting with the least critical
++ * areas first, thus freeing up the corresponding pages to
++ * serve as destination for subsequent data to be saved, and
++ * so on. With a good compression ratio, this makes it feasible
++ * to capture an entire physical memory dump without significantly
++ * reducing memory available during regular operation.
++ *
++ * (2) Post soft-reboot, runs through the saved memory dump and
++ * writes it out to disk, this time around, taking care to
++ * save the more critical data first (i.e. pages which figure
++ * in early passes for a regular dump). Finally issues a
++ * clean reboot.
++ *
++ * Since the data was saved in memory after selection/filtering
++ * and formatted as per the chosen output dump format, at this
++ * stage the filter and format actions are just dummy (or
++ * passthrough) actions, except for influence on ordering of
++ * passes.
+ */
-+struct __dump_header_asm {
-+
-+ /* the dump magic number -- unique to verify dump is valid */
-+ uint64_t dha_magic_number;
+
-+ /* the version number of this dump */
-+ uint32_t dha_version;
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/dump.h>
++#ifdef CONFIG_KEXEC
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/kexec.h>
++#endif
++#include "dump_methods.h"
+
-+ /* the size of this header (in case we can't read it) */
-+ uint32_t dha_header_size;
++extern struct list_head dumper_list_head;
++extern struct dump_memdev *dump_memdev;
++extern struct dumper dumper_stage2;
++struct dump_config_block *dump_saved_config = NULL;
++extern struct dump_blockdev *dump_blockdev;
++static struct dump_memdev *saved_dump_memdev = NULL;
++static struct dumper *saved_dumper = NULL;
+
-+ /* the dump registers */
-+ struct pt_regs dha_regs;
++#ifdef CONFIG_KEXEC
++extern int panic_timeout;
++#endif
+
-+ /* smp specific */
-+ uint32_t dha_smp_num_cpus;
-+ int dha_dumping_cpu;
-+ struct pt_regs dha_smp_regs[NR_CPUS];
-+ uint64_t dha_smp_current_task[NR_CPUS];
-+ uint64_t dha_stack[NR_CPUS];
-+ uint64_t dha_stack_ptr[NR_CPUS];
-+} __attribute__((packed));
++/* For testing
++extern void dump_display_map(struct dump_memdev *);
++*/
+
-+#ifdef __KERNEL__
-+static inline void get_current_regs(struct pt_regs *regs)
++struct dumper *dumper_by_name(char *name)
+{
-+ unsigned long tmp1, tmp2;
++#ifdef LATER
++ struct dumper *dumper;
++ list_for_each_entry(dumper, &dumper_list_head, dumper_list)
++ if (!strncmp(dumper->name, name, 32))
++ return dumper;
+
-+ __asm__ __volatile__ (
-+ "std 0,0(%2)\n"
-+ "std 1,8(%2)\n"
-+ "std 2,16(%2)\n"
-+ "std 3,24(%2)\n"
-+ "std 4,32(%2)\n"
-+ "std 5,40(%2)\n"
-+ "std 6,48(%2)\n"
-+ "std 7,56(%2)\n"
-+ "std 8,64(%2)\n"
-+ "std 9,72(%2)\n"
-+ "std 10,80(%2)\n"
-+ "std 11,88(%2)\n"
-+ "std 12,96(%2)\n"
-+ "std 13,104(%2)\n"
-+ "std 14,112(%2)\n"
-+ "std 15,120(%2)\n"
-+ "std 16,128(%2)\n"
-+ "std 17,136(%2)\n"
-+ "std 18,144(%2)\n"
-+ "std 19,152(%2)\n"
-+ "std 20,160(%2)\n"
-+ "std 21,168(%2)\n"
-+ "std 22,176(%2)\n"
-+ "std 23,184(%2)\n"
-+ "std 24,192(%2)\n"
-+ "std 25,200(%2)\n"
-+ "std 26,208(%2)\n"
-+ "std 27,216(%2)\n"
-+ "std 28,224(%2)\n"
-+ "std 29,232(%2)\n"
-+ "std 30,240(%2)\n"
-+ "std 31,248(%2)\n"
-+ "mfmsr %0\n"
-+ "std %0, 264(%2)\n"
-+ "mfctr %0\n"
-+ "std %0, 280(%2)\n"
-+ "mflr %0\n"
-+ "std %0, 288(%2)\n"
-+ "bl 1f\n"
-+ "1: mflr %1\n"
-+ "std %1, 256(%2)\n"
-+ "mtlr %0\n"
-+ "mfxer %0\n"
-+ "std %0, 296(%2)\n"
-+ : "=&r" (tmp1), "=&r" (tmp2)
-+ : "b" (regs));
++ /* not found */
++ return NULL;
++#endif
++ /* Temporary proof of concept */
++ if (!strncmp(dumper_stage2.name, name, 32))
++ return &dumper_stage2;
++ else
++ return NULL;
+}
+
-+extern struct __dump_header_asm dump_header_asm;
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++extern void dump_early_reserve_map(struct dump_memdev *);
+
-+#ifdef CONFIG_SMP
-+extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
++void crashdump_reserve(void)
++{
++ extern unsigned long crashdump_addr;
++
++ if (crashdump_addr == 0xdeadbeef)
++ return;
++
++ /* reserve dump config and saved dump pages */
++ dump_saved_config = (struct dump_config_block *)crashdump_addr;
++ /* magic verification */
++ if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
++ printk("Invalid dump magic. Ignoring dump\n");
++ dump_saved_config = NULL;
++ return;
++ }
++
++ printk("Dump may be available from previous boot\n");
++
++#ifdef CONFIG_X86_64
++ reserve_bootmem_node(NODE_DATA(0),
++ virt_to_phys((void *)crashdump_addr),
++ PAGE_ALIGN(sizeof(struct dump_config_block)));
+#else
-+#define dump_send_ipi() do { } while(0)
++ reserve_bootmem(virt_to_phys((void *)crashdump_addr),
++ PAGE_ALIGN(sizeof(struct dump_config_block)));
+#endif
-+#endif /* __KERNEL__ */
++ dump_early_reserve_map(&dump_saved_config->memdev);
+
-+#endif /* _ASM_DUMP_H */
-Index: linux-2.6.10/include/asm-ppc64/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ppc64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-ppc64/kerntypes.h 2005-04-05 16:47:53.879214832 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-ppc64/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
++}
++#endif
++
++/*
++ * Loads the dump configuration from a memory block saved across soft-boot
++ * The ops vectors need fixing up as the corresp. routines may have
++ * relocated in the new soft-booted kernel.
+ */
++int dump_load_config(struct dump_config_block *config)
++{
++ struct dumper *dumper;
++ struct dump_data_filter *filter_table, *filter;
++ struct dump_dev *dev;
++ int i;
+
-+/* PPC64-specific header files */
-+#ifndef _PPC64_KERNTYPES_H
-+#define _PPC64_KERNTYPES_H
++ if (config->magic != DUMP_MAGIC_LIVE)
++ return -ENOENT; /* not a valid config */
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ /* initialize generic config data */
++ memcpy(&dump_config, &config->config, sizeof(dump_config));
+
-+#endif /* _PPC64_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-ppc64/kmap_types.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ppc64/kmap_types.h 2004-12-25 05:34:45.000000000 +0800
-+++ linux-2.6.10/include/asm-ppc64/kmap_types.h 2005-04-05 16:47:53.878214984 +0800
-@@ -16,7 +16,8 @@
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
-- KM_TYPE_NR
-+ KM_TYPE_NR,
-+ KM_DUMP
- };
-
- #endif
-Index: linux-2.6.10/include/asm-ppc64/smp.h
-===================================================================
---- linux-2.6.10.orig/include/asm-ppc64/smp.h 2004-12-25 05:33:47.000000000 +0800
-+++ linux-2.6.10/include/asm-ppc64/smp.h 2005-04-05 16:47:53.877215136 +0800
-@@ -36,7 +36,7 @@
- extern void smp_send_debugger_break(int cpu);
- struct pt_regs;
- extern void smp_message_recv(int, struct pt_regs *);
--
-+extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
-
- #define smp_processor_id() (get_paca()->paca_index)
- #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
-Index: linux-2.6.10/include/asm-cris/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-cris/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-cris/kerntypes.h 2005-04-05 16:47:53.874215592 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-cris/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ /* initialize dumper state */
++ if (!(dumper = dumper_by_name(config->dumper.name))) {
++ printk("dumper name mismatch\n");
++ return -ENOENT; /* dumper mismatch */
++ }
++
++	/* verify and fixup scheme */
++ if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
++ printk("dumper scheme mismatch\n");
++ return -ENOENT; /* mismatch */
++ }
++ config->scheme.ops = dumper->scheme->ops;
++ config->dumper.scheme = &config->scheme;
++
++ /* verify and fixup filter operations */
++ filter_table = dumper->filter;
++ for (i = 0, filter = config->filter_table;
++ ((i < MAX_PASSES) && filter_table[i].selector);
++ i++, filter++) {
++ if (strncmp(filter_table[i].name, filter->name, 32)) {
++ printk("dump filter mismatch\n");
++ return -ENOENT; /* filter name mismatch */
++ }
++ filter->selector = filter_table[i].selector;
++ }
++ config->dumper.filter = config->filter_table;
+
-+/* CRIS-specific header files */
-+#ifndef _CRIS_KERNTYPES_H
-+#define _CRIS_KERNTYPES_H
++ /* fixup format */
++ if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
++ printk("dump format mismatch\n");
++ return -ENOENT; /* mismatch */
++ }
++ config->fmt.ops = dumper->fmt->ops;
++ config->dumper.fmt = &config->fmt;
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ /* fixup target device */
++ dev = (struct dump_dev *)(&config->dev[0]);
++ if (dumper->dev == NULL) {
++ pr_debug("Vanilla dumper - assume default\n");
++ if (dump_dev == NULL)
++ return -ENODEV;
++ dumper->dev = dump_dev;
++ }
+
-+#endif /* _CRIS_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-m68knommu/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-m68knommu/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-m68knommu/kerntypes.h 2005-04-05 16:47:53.870216200 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-m68knommu/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ if (strncmp(dumper->dev->type_name, dev->type_name, 32)) {
++ printk("dump dev type mismatch %s instead of %s\n",
++ dev->type_name, dumper->dev->type_name);
++ return -ENOENT; /* mismatch */
++ }
++ dev->ops = dumper->dev->ops;
++ config->dumper.dev = dev;
++
++ /* fixup memory device containing saved dump pages */
++ /* assume statically init'ed dump_memdev */
++ config->memdev.ddev.ops = dump_memdev->ddev.ops;
++ /* switch to memdev from prev boot */
++ saved_dump_memdev = dump_memdev; /* remember current */
++ dump_memdev = &config->memdev;
+
-+/* m68k/no-MMU-specific header files */
-+#ifndef _M68KNOMMU_KERNTYPES_H
-+#define _M68KNOMMU_KERNTYPES_H
++ /* Make this the current primary dumper */
++ dump_config.dumper = &config->dumper;
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ return 0;
++}
+
-+#endif /* _M68KNOMMU_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-v850/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-v850/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-v850/kerntypes.h 2005-04-05 16:47:53.888213464 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-v850/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++/* Saves the dump configuration in a memory block for use across a soft-boot */
++int dump_save_config(struct dump_config_block *config)
++{
++ printk("saving dump config settings\n");
+
-+/* V850-specific header files */
-+#ifndef _V850_KERNTYPES_H
-+#define _V850_KERNTYPES_H
++ /* dump config settings */
++ memcpy(&config->config, &dump_config, sizeof(dump_config));
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ /* dumper state */
++ memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
++ memcpy(&config->scheme, dump_config.dumper->scheme,
++ sizeof(struct dump_scheme));
++ memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
++ memcpy(&config->dev[0], dump_config.dumper->dev,
++ sizeof(struct dump_anydev));
++ memcpy(&config->filter_table, dump_config.dumper->filter,
++ sizeof(struct dump_data_filter)*MAX_PASSES);
+
-+#endif /* _V850_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-x86_64/dump.h
-===================================================================
---- linux-2.6.10.orig/include/asm-x86_64/dump.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-x86_64/dump.h 2005-04-05 16:47:53.868216504 +0800
-@@ -0,0 +1,93 @@
-+/*
-+ * Kernel header file for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
-+ *
-+ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
-+ * x86_64 lkcd port Sachin Sant ( sachinp@in.ibm.com)
-+ * This code is released under version 2 of the GNU GPL.
-+ */
++ /* handle to saved mem pages */
++ memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
+
-+/* This header file holds the architecture specific crash dump header */
-+#ifndef _ASM_DUMP_H
-+#define _ASM_DUMP_H
++ config->magic = DUMP_MAGIC_LIVE;
++
++ return 0;
++}
+
-+/* necessary header files */
-+#include <asm/ptrace.h> /* for pt_regs */
-+#include <linux/threads.h>
++int dump_init_stage2(struct dump_config_block *saved_config)
++{
++ int err = 0;
+
-+/* definitions */
-+#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
-+#define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */
++ pr_debug("dump_init_stage2\n");
++ /* Check if dump from previous boot exists */
++ if (saved_config) {
++ printk("loading dumper from previous boot \n");
++ /* load and configure dumper from previous boot */
++ if ((err = dump_load_config(saved_config)))
++ return err;
+
++ if (!dump_oncpu) {
++ if ((err = dump_configure(dump_config.dump_device))) {
++ printk("Stage 2 dump configure failed\n");
++ return err;
++ }
++ }
+
-+/*
-+ * Structure: dump_header_asm_t
-+ * Function: This is the header for architecture-specific stuff. It
-+ * follows right after the dump header.
-+ */
-+struct __dump_header_asm {
++ dumper_reset();
++ dump_dev = dump_config.dumper->dev;
++ /* write out the dump */
++ err = dump_generic_execute(NULL, NULL);
++
++ dump_saved_config = NULL;
+
-+ /* the dump magic number -- unique to verify dump is valid */
-+ uint64_t dha_magic_number;
++ if (!dump_oncpu) {
++ dump_unconfigure();
++ }
++
++ return err;
+
-+ /* the version number of this dump */
-+ uint32_t dha_version;
++ } else {
++ /* no dump to write out */
++ printk("no dumper from previous boot \n");
++ return 0;
++ }
++}
+
-+ /* the size of this header (in case we can't read it) */
-+ uint32_t dha_header_size;
++extern void dump_mem_markpages(struct dump_memdev *);
+
-+ /* the dump registers */
-+ struct pt_regs dha_regs;
++int dump_switchover_stage(void)
++{
++ int ret = 0;
+
-+ /* smp specific */
-+ uint32_t dha_smp_num_cpus;
-+ int dha_dumping_cpu;
-+ struct pt_regs dha_smp_regs[NR_CPUS];
-+ uint64_t dha_smp_current_task[NR_CPUS];
-+ uint64_t dha_stack[NR_CPUS];
-+ uint64_t dha_stack_ptr[NR_CPUS];
-+} __attribute__((packed));
++ /* trigger stage 2 rightaway - in real life would be after soft-boot */
++ /* dump_saved_config would be a boot param */
++ saved_dump_memdev = dump_memdev;
++ saved_dumper = dump_config.dumper;
++ ret = dump_init_stage2(dump_saved_config);
++ dump_memdev = saved_dump_memdev;
++ dump_config.dumper = saved_dumper;
++ return ret;
++}
+
-+#ifdef __KERNEL__
-+static inline void get_current_regs(struct pt_regs *regs)
++int dump_activate_softboot(void)
+{
-+ unsigned seg;
-+ __asm__ __volatile__("movq %%r15,%0" : "=m"(regs->r15));
-+ __asm__ __volatile__("movq %%r14,%0" : "=m"(regs->r14));
-+ __asm__ __volatile__("movq %%r13,%0" : "=m"(regs->r13));
-+ __asm__ __volatile__("movq %%r12,%0" : "=m"(regs->r12));
-+ __asm__ __volatile__("movq %%r11,%0" : "=m"(regs->r11));
-+ __asm__ __volatile__("movq %%r10,%0" : "=m"(regs->r10));
-+ __asm__ __volatile__("movq %%r9,%0" : "=m"(regs->r9));
-+ __asm__ __volatile__("movq %%r8,%0" : "=m"(regs->r8));
-+ __asm__ __volatile__("movq %%rbx,%0" : "=m"(regs->rbx));
-+ __asm__ __volatile__("movq %%rcx,%0" : "=m"(regs->rcx));
-+ __asm__ __volatile__("movq %%rdx,%0" : "=m"(regs->rdx));
-+ __asm__ __volatile__("movq %%rsi,%0" : "=m"(regs->rsi));
-+ __asm__ __volatile__("movq %%rdi,%0" : "=m"(regs->rdi));
-+ __asm__ __volatile__("movq %%rbp,%0" : "=m"(regs->rbp));
-+ __asm__ __volatile__("movq %%rax,%0" : "=m"(regs->rax));
-+ __asm__ __volatile__("movq %%rsp,%0" : "=m"(regs->rsp));
-+ __asm__ __volatile__("movl %%ss, %0" :"=r"(seg));
-+ regs->ss = (unsigned long)seg;
-+ __asm__ __volatile__("movl %%cs, %0" :"=r"(seg));
-+ regs->cs = (unsigned long)seg;
-+ __asm__ __volatile__("pushfq; popq %0" :"=m"(regs->eflags));
-+ regs->rip = (unsigned long)current_text_addr();
-+
-+}
-+
-+extern volatile int dump_in_progress;
-+extern struct __dump_header_asm dump_header_asm;
-+
-+#ifdef CONFIG_SMP
++ int err = 0;
++#ifdef CONFIG_KEXEC
++ int num_cpus_online = 0;
++ struct kimage *image;
++#endif
+
++ /* temporary - switchover to writeout previously saved dump */
++#ifndef CONFIG_KEXEC
++ err = dump_switchover_stage(); /* non-disruptive case */
++ if (dump_oncpu)
++ dump_config.dumper = &dumper_stage1; /* set things back */
+
-+extern void dump_send_ipi(void);
++ return err;
+#else
-+#define dump_send_ipi() do { } while(0)
-+#endif
-+#endif /* __KERNEL__ */
+
-+#endif /* _ASM_DUMP_H */
-Index: linux-2.6.10/include/asm-x86_64/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-x86_64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-x86_64/kerntypes.h 2005-04-05 16:47:53.869216352 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-x86_64/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ dump_silence_level = DUMP_HALT_CPUS;
++ /* wait till we become the only cpu */
++ /* maybe by checking for online cpus ? */
+
-+/* x86_64-specific header files */
-+#ifndef _X86_64_KERNTYPES_H
-+#define _X86_64_KERNTYPES_H
++ while((num_cpus_online = num_online_cpus()) > 1);
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ /* now call into kexec */
+
-+#endif /* _X86_64_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-x86_64/hw_irq.h
-===================================================================
---- linux-2.6.10.orig/include/asm-x86_64/hw_irq.h 2004-12-25 05:35:39.000000000 +0800
-+++ linux-2.6.10/include/asm-x86_64/hw_irq.h 2005-04-05 16:47:53.869216352 +0800
-@@ -34,7 +34,6 @@
-
- #define IA32_SYSCALL_VECTOR 0x80
-
--
- /*
- * Vectors 0x20-0x2f are used for ISA interrupts.
- */
-@@ -55,6 +54,7 @@
- #define TASK_MIGRATION_VECTOR 0xfb
- #define CALL_FUNCTION_VECTOR 0xfa
- #define KDB_VECTOR 0xf9
-+#define DUMP_VECTOR 0xf8
-
- #define THERMAL_APIC_VECTOR 0xf0
-
-Index: linux-2.6.10/include/asm-x86_64/kmap_types.h
-===================================================================
---- linux-2.6.10.orig/include/asm-x86_64/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
-+++ linux-2.6.10/include/asm-x86_64/kmap_types.h 2005-04-05 16:47:53.868216504 +0800
-@@ -13,7 +13,8 @@
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
-- KM_TYPE_NR
-+ KM_DUMP,
-+ KM_TYPE_NR,
- };
-
- #endif
-Index: linux-2.6.10/include/asm-x86_64/smp.h
-===================================================================
---- linux-2.6.10.orig/include/asm-x86_64/smp.h 2004-12-25 05:33:48.000000000 +0800
-+++ linux-2.6.10/include/asm-x86_64/smp.h 2005-04-05 16:47:53.867216656 +0800
-@@ -41,6 +41,7 @@
- extern int pic_mode;
- extern int smp_num_siblings;
- extern void smp_flush_tlb(void);
-+extern void dump_send_ipi(void);
- extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
- extern void smp_send_reschedule(int cpu);
- extern void smp_invalidate_rcv(void); /* Process an NMI */
-Index: linux-2.6.10/include/asm-s390/dump.h
-===================================================================
---- linux-2.6.10.orig/include/asm-s390/dump.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-s390/dump.h 2005-04-05 16:47:53.865216960 +0800
-@@ -0,0 +1,10 @@
-+/*
-+ * Kernel header file for Linux crash dumps.
-+ */
++ image = xchg(&kexec_image, 0);
++ if (image) {
++ mdelay(panic_timeout*1000);
++ machine_kexec(image);
++ }
+
-+/* Nothing to be done here, we have proper hardware support */
-+#ifndef _ASM_DUMP_H
-+#define _ASM_DUMP_H
+
++ /* TBD/Fixme:
++ * * should we call reboot notifiers ? inappropriate for panic ?
++ * * what about device_shutdown() ?
++ * * is explicit bus master disabling needed or can we do that
++ * * through driverfs ?
++ * */
++ return 0;
+#endif
++}
+
-Index: linux-2.6.10/include/asm-s390/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-s390/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-s390/kerntypes.h 2005-04-05 16:47:53.866216808 +0800
-@@ -0,0 +1,46 @@
-+/*
-+ * asm-s390/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++/* --- DUMP SCHEME ROUTINES --- */
+
-+/* S/390 specific header files */
-+#ifndef _S390_KERNTYPES_H
-+#define _S390_KERNTYPES_H
++static inline int dump_buf_pending(struct dumper *dumper)
++{
++ return (dumper->curr_buf - dumper->dump_buf);
++}
+
-+#include <asm/lowcore.h>
-+#include <asm/debug.h>
-+#include <asm/ccwdev.h>
-+#include <asm/ccwgroup.h>
-+#include <asm/qdio.h>
++/* Invoked during stage 1 of soft-reboot based dumping */
++int dump_overlay_sequencer(void)
++{
++ struct dump_data_filter *filter = dump_config.dumper->filter;
++ struct dump_data_filter *filter2 = dumper_stage2.filter;
++ int pass = 0, err = 0, save = 0;
++ int (*action)(unsigned long, unsigned long);
+
-+/* channel subsystem driver */
-+#include "../../drivers/s390/cio/cio.h"
-+#include "../../drivers/s390/cio/chsc.h"
-+#include "../../drivers/s390/cio/css.h"
-+#include "../../drivers/s390/cio/device.h"
-+#include "../../drivers/s390/cio/qdio.h"
++ /* Make sure gzip compression is being used */
++ if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
++ printk(" Please set GZIP compression \n");
++ return -EINVAL;
++ }
+
-+/* dasd device driver */
-+#include "../../drivers/s390/block/dasd_int.h"
-+#include "../../drivers/s390/block/dasd_diag.h"
-+#include "../../drivers/s390/block/dasd_eckd.h"
-+#include "../../drivers/s390/block/dasd_fba.h"
++ /* start filling in dump data right after the header */
++ dump_config.dumper->curr_offset =
++ PAGE_ALIGN(dump_config.dumper->header_len);
+
-+/* networking drivers */
-+#include "../../drivers/s390/net/fsm.h"
-+#include "../../drivers/s390/net/iucv.h"
-+#include "../../drivers/s390/net/lcs.h"
++ /* Locate the last pass */
++ for (;filter->selector; filter++, pass++);
++
++ /*
++ * Start from the end backwards: overlay involves a reverse
++ * ordering of passes, since less critical pages are more
++ * likely to be reusable as scratch space once we are through
++ * with them.
++ */
++ for (--pass, --filter; pass >= 0; pass--, filter--)
++ {
++ /* Assumes passes are exclusive (even across dumpers) */
++ /* Requires care when coding the selection functions */
++ if ((save = filter->level_mask & dump_config.level))
++ action = dump_save_data;
++ else
++ action = dump_skip_data;
+
-+/* zfcp device driver */
-+#include "../../drivers/s390/scsi/zfcp_def.h"
-+#include "../../drivers/s390/scsi/zfcp_fsf.h"
++ /* Remember the offset where this pass started */
++ /* The second stage dumper would use this */
++ if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
++ pr_debug("Starting pass %d with pending data\n", pass);
++ pr_debug("filling dummy data to page-align it\n");
++ dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
++ (unsigned long)dump_config.dumper->curr_buf);
++ }
++
++ filter2[pass].start[0] = dump_config.dumper->curr_offset
++ + dump_buf_pending(dump_config.dumper);
+
-+#endif /* _S390_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-sparc64/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-sparc64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-sparc64/kerntypes.h 2005-04-05 16:47:53.872215896 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-sparc64/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ err = dump_iterator(pass, action, filter);
+
-+/* SPARC64-specific header files */
-+#ifndef _SPARC64_KERNTYPES_H
-+#define _SPARC64_KERNTYPES_H
++ filter2[pass].end[0] = dump_config.dumper->curr_offset
++ + dump_buf_pending(dump_config.dumper);
++ filter2[pass].num_mbanks = 1;
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ if (err < 0) {
++ printk("dump_overlay_seq: failure %d in pass %d\n",
++ err, pass);
++ break;
++ }
++ printk("\n %d overlay pages %s of %d each in pass %d\n",
++ err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
++ }
+
-+#endif /* _SPARC64_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-mips/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-mips/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-mips/kerntypes.h 2005-04-05 16:47:53.881214528 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-mips/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
-+
-+/* MIPS-specific header files */
-+#ifndef _MIPS_KERNTYPES_H
-+#define _MIPS_KERNTYPES_H
-+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ return err;
++}
+
-+#endif /* _MIPS_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-m68k/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-m68k/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-m68k/kerntypes.h 2005-04-05 16:47:53.875215440 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-m68k/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++/* from dump_memdev.c */
++extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
++extern struct page *dump_mem_next_page(struct dump_memdev *dev);
+
-+/* m68k-specific header files */
-+#ifndef _M68K_KERNTYPES_H
-+#define _M68K_KERNTYPES_H
++static inline struct page *dump_get_saved_page(loff_t loc)
++{
++ return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
++}
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++static inline struct page *dump_next_saved_page(void)
++{
++ return (dump_mem_next_page(dump_memdev));
++}
+
-+#endif /* _M68K_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-generic/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-generic/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-generic/kerntypes.h 2005-04-05 16:47:53.871216048 +0800
-@@ -0,0 +1,20 @@
-+/*
-+ * asm-generic/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
++/*
++ * Iterates over list of saved dump pages. Invoked during second stage of
++ * soft boot dumping
+ *
-+ * This source code is released under the GNU GPL.
++ * Observation: If additional selection is desired at this stage then
++ * a different iterator could be written which would advance
++ * to the next page header every time instead of blindly picking up
++ * the data. In such a case loc would be interpreted differently.
++ * At this moment however a blind pass seems sufficient, cleaner and
++ * faster.
+ */
++int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
++ unsigned long), struct dump_data_filter *filter)
++{
++ loff_t loc, end;
++ struct page *page;
++ unsigned long count = 0;
++ int i, err = 0;
++ unsigned long sz;
+
-+/* Arch-independent header files */
-+#ifndef _GENERIC_KERNTYPES_H
-+#define _GENERIC_KERNTYPES_H
++ for (i = 0; i < filter->num_mbanks; i++) {
++ loc = filter->start[i];
++ end = filter->end[i];
++ printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
++ loc, end);
+
-+#include <linux/pci.h>
++ /* loc will get treated as logical offset into stage 1 */
++ page = dump_get_saved_page(loc);
++
++ for (; loc < end; loc += PAGE_SIZE) {
++ dump_config.dumper->curr_loc = loc;
++ if (!page) {
++ printk("no more saved data for pass %d\n",
++ pass);
++ break;
++ }
++ sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
+
-+#endif /* _GENERIC_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-i386/dump.h
-===================================================================
---- linux-2.6.10.orig/include/asm-i386/dump.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-i386/dump.h 2005-04-05 16:47:53.886213768 +0800
-@@ -0,0 +1,90 @@
-+/*
-+ * Kernel header file for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
-+ *
-+ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
++ if (page && filter->selector(pass, (unsigned long)page,
++ PAGE_SIZE)) {
++ pr_debug("mem offset 0x%llx\n", loc);
++ if ((err = action((unsigned long)page, sz)))
++ break;
++ else
++ count++;
++ /* clear the contents of page */
++ /* fixme: consider using KM_DUMP instead */
++ clear_highpage(page);
++
++ }
++ page = dump_next_saved_page();
++ }
++ }
+
-+/* This header file holds the architecture specific crash dump header */
-+#ifndef _ASM_DUMP_H
-+#define _ASM_DUMP_H
++ return err ? err : count;
++}
+
-+/* necessary header files */
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+#include <linux/threads.h>
-+#include <linux/mm.h>
++static inline int dump_overlay_pages_done(struct page *page, int nr)
++{
++ int ret=0;
+
-+/* definitions */
-+#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
-+#define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */
++ for (; nr ; page++, nr--) {
++ if (dump_check_and_free_page(dump_memdev, page))
++ ret++;
++ }
++ return ret;
++}
+
-+/*
-+ * Structure: __dump_header_asm
-+ * Function: This is the header for architecture-specific stuff. It
-+ * follows right after the dump header.
-+ */
-+struct __dump_header_asm {
-+ /* the dump magic number -- unique to verify dump is valid */
-+ u64 dha_magic_number;
++int dump_overlay_save_data(unsigned long loc, unsigned long len)
++{
++ int err = 0;
++ struct page *page = (struct page *)loc;
++ static unsigned long cnt = 0;
+
-+ /* the version number of this dump */
-+ u32 dha_version;
++ if ((err = dump_generic_save_data(loc, len)))
++ return err;
+
-+ /* the size of this header (in case we can't read it) */
-+ u32 dha_header_size;
++ if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
++ cnt++;
++ if (!(cnt & 0x7f))
++ pr_debug("released page 0x%lx\n", page_to_pfn(page));
++ }
++
++ return err;
++}
+
-+ /* the esp for i386 systems */
-+ u32 dha_esp;
+
-+ /* the eip for i386 systems */
-+ u32 dha_eip;
++int dump_overlay_skip_data(unsigned long loc, unsigned long len)
++{
++ struct page *page = (struct page *)loc;
+
-+ /* the dump registers */
-+ struct pt_regs dha_regs;
++ dump_overlay_pages_done(page, len >> PAGE_SHIFT);
++ return 0;
++}
+
-+ /* smp specific */
-+ u32 dha_smp_num_cpus;
-+ u32 dha_dumping_cpu;
-+ struct pt_regs dha_smp_regs[NR_CPUS];
-+ u32 dha_smp_current_task[NR_CPUS];
-+ u32 dha_stack[NR_CPUS];
-+ u32 dha_stack_ptr[NR_CPUS];
-+} __attribute__((packed));
++int dump_overlay_resume(void)
++{
++ int err = 0;
+
-+#ifdef __KERNEL__
++ /*
++ * switch to stage 2 dumper, save dump_config_block
++ * and then trigger a soft-boot
++ */
++ dumper_stage2.header_len = dump_config.dumper->header_len;
++ dump_config.dumper = &dumper_stage2;
++ if ((err = dump_save_config(dump_saved_config)))
++ return err;
+
-+extern struct __dump_header_asm dump_header_asm;
++ dump_dev = dump_config.dumper->dev;
+
-+#ifdef CONFIG_SMP
-+extern cpumask_t irq_affinity[];
-+extern int (*dump_ipi_function_ptr)(struct pt_regs *);
-+extern void dump_send_ipi(void);
-+#else
-+#define dump_send_ipi() do { } while(0)
++#ifdef CONFIG_KEXEC
++ /* If we are doing a disruptive dump, activate softboot now */
++ if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
++ err = dump_activate_softboot();
+#endif
-+
-+static inline void get_current_regs(struct pt_regs *regs)
-+{
-+ __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
-+ __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
-+ __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
-+ __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
-+ __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
-+ __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
-+ __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
-+ __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
-+ __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
-+ __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
-+ __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
-+ __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
-+ __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
-+ regs->eip = (unsigned long)current_text_addr();
++
++ return err;
++ err = dump_switchover_stage(); /* plugs into soft boot mechanism */
++ dump_config.dumper = &dumper_stage1; /* set things back */
++ return err;
+}
+
-+#endif /* __KERNEL__ */
++int dump_overlay_configure(unsigned long devid)
++{
++ struct dump_dev *dev;
++ struct dump_config_block *saved_config = dump_saved_config;
++ int err = 0;
+
-+#endif /* _ASM_DUMP_H */
-Index: linux-2.6.10/include/asm-i386/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-i386/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-i386/kerntypes.h 2005-04-05 16:47:53.887213616 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-i386/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ /* If there is a previously saved dump, write it out first */
++ if (saved_config) {
++ printk("Processing old dump pending writeout\n");
++ err = dump_switchover_stage();
++ if (err) {
++ printk("failed to writeout saved dump\n");
++ return err;
++ }
++ dump_free_mem(saved_config); /* testing only: not after boot */
++ }
+
-+/* ix86-specific header files */
-+#ifndef _I386_KERNTYPES_H
-+#define _I386_KERNTYPES_H
++ dev = dumper_stage2.dev = dump_config.dumper->dev;
++ /* From here on the intermediate dump target is memory-only */
++ dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
++ if ((err = dump_generic_configure(0))) {
++ printk("dump generic configure failed: err %d\n", err);
++ return err;
++ }
++ /* temporary */
++ dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ /* Sanity check on the actual target dump device */
++ if (!dev || (err = dev->ops->open(dev, devid))) {
++ return err;
++ }
++ /* TBD: should we release the target if this is soft-boot only ? */
+
-+#endif /* _I386_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-i386/kmap_types.h
-===================================================================
---- linux-2.6.10.orig/include/asm-i386/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
-+++ linux-2.6.10/include/asm-i386/kmap_types.h 2005-04-05 16:47:53.886213768 +0800
-@@ -23,7 +23,8 @@
- D(10) KM_IRQ1,
- D(11) KM_SOFTIRQ0,
- D(12) KM_SOFTIRQ1,
--D(13) KM_TYPE_NR
-+D(13) KM_DUMP,
-+D(14) KM_TYPE_NR
- };
-
- #undef D
-Index: linux-2.6.10/include/asm-i386/smp.h
-===================================================================
---- linux-2.6.10.orig/include/asm-i386/smp.h 2004-12-25 05:35:50.000000000 +0800
-+++ linux-2.6.10/include/asm-i386/smp.h 2005-04-05 16:47:53.885213920 +0800
-@@ -37,6 +37,7 @@
- extern cpumask_t cpu_sibling_map[];
-
- extern void smp_flush_tlb(void);
-+extern void dump_send_ipi(void);
- extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
- extern void smp_invalidate_rcv(void); /* Process an NMI */
- extern void (*mtrr_hook) (void);
-Index: linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h
-===================================================================
---- linux-2.6.10.orig/include/asm-i386/mach-default/irq_vectors.h 2004-12-25 05:34:26.000000000 +0800
-+++ linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h 2005-04-05 16:47:53.887213616 +0800
-@@ -48,6 +48,7 @@
- #define INVALIDATE_TLB_VECTOR 0xfd
- #define RESCHEDULE_VECTOR 0xfc
- #define CALL_FUNCTION_VECTOR 0xfb
-+#define DUMP_VECTOR 0xfa
-
- #define THERMAL_APIC_VECTOR 0xf0
- /*
-Index: linux-2.6.10/include/asm-arm/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-arm/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-arm/kerntypes.h 2005-04-05 16:47:53.873215744 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-arm/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ /* alloc a dump config block area to save across reboot */
++ if (!(dump_saved_config = dump_alloc_mem(sizeof(struct
++ dump_config_block)))) {
++ printk("dump config block alloc failed\n");
++ /* undo configure */
++ dump_generic_unconfigure();
++ return -ENOMEM;
++ }
++ dump_config.dump_addr = (unsigned long)dump_saved_config;
++ printk("Dump config block of size %d set up at 0x%lx\n",
++ sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
++ return 0;
++}
+
-+/* ARM-specific header files */
-+#ifndef _ARM_KERNTYPES_H
-+#define _ARM_KERNTYPES_H
++int dump_overlay_unconfigure(void)
++{
++ struct dump_dev *dev = dumper_stage2.dev;
++ int err = 0;
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ pr_debug("dump_overlay_unconfigure\n");
++ /* Close the secondary device */
++ dev->ops->release(dev);
++ pr_debug("released secondary device\n");
+
-+#endif /* _ARM_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-sparc/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-sparc/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-sparc/kerntypes.h 2005-04-05 16:47:53.874215592 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-sparc/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ err = dump_generic_unconfigure();
++ pr_debug("Unconfigured generic portions\n");
++ dump_free_mem(dump_saved_config);
++ dump_saved_config = NULL;
++ pr_debug("Freed saved config block\n");
++ dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
+
-+/* SPARC-specific header files */
-+#ifndef _SPARC_KERNTYPES_H
-+#define _SPARC_KERNTYPES_H
++ printk("Unconfigured overlay dumper\n");
++ return err;
++}
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++int dump_staged_unconfigure(void)
++{
++ int err = 0;
++ struct dump_config_block *saved_config = dump_saved_config;
++ struct dump_dev *dev;
+
-+#endif /* _SPARC_KERNTYPES_H */
-Index: linux-2.6.10/include/asm-mips64/kerntypes.h
-===================================================================
---- linux-2.6.10.orig/include/asm-mips64/kerntypes.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/include/asm-mips64/kerntypes.h 2005-04-05 16:47:53.881214528 +0800
-@@ -0,0 +1,21 @@
-+/*
-+ * asm-mips64/kerntypes.h
-+ *
-+ * Arch-dependent header file that includes headers for all arch-specific
-+ * types of interest.
-+ * The kernel type information is used by the lcrash utility when
-+ * analyzing system crash dumps or the live system. Using the type
-+ * information for the running system, rather than kernel header files,
-+ * makes for a more flexible and robust analysis tool.
-+ *
-+ * This source code is released under the GNU GPL.
-+ */
++ pr_debug("dump_staged_unconfigure\n");
++ err = dump_generic_unconfigure();
+
-+/* MIPS64-specific header files */
-+#ifndef _MIPS64_KERNTYPES_H
-+#define _MIPS64_KERNTYPES_H
++ /* now check if there is a saved dump waiting to be written out */
++ if (saved_config) {
++ printk("Processing saved dump pending writeout\n");
++ if ((err = dump_switchover_stage())) {
++ printk("Error in commiting saved dump at 0x%lx\n",
++ (unsigned long)saved_config);
++ printk("Old dump may hog memory\n");
++ } else {
++ dump_free_mem(saved_config);
++ pr_debug("Freed saved config block\n");
++ }
++ dump_saved_config = NULL;
++ } else {
++ dev = &dump_memdev->ddev;
++ dev->ops->release(dev);
++ }
++ printk("Unconfigured second stage dumper\n");
+
-+/* Use the default */
-+#include <asm-generic/kerntypes.h>
++ return 0;
++}
+
-+#endif /* _MIPS64_KERNTYPES_H */
-Index: linux-2.6.10/net/Kconfig
-===================================================================
---- linux-2.6.10.orig/net/Kconfig 2005-04-05 16:29:27.896349784 +0800
-+++ linux-2.6.10/net/Kconfig 2005-04-05 16:47:53.895212400 +0800
-@@ -632,7 +632,7 @@
- endmenu
-
- config NETPOLL
-- def_bool NETCONSOLE
-+ def_bool NETCONSOLE || CRASH_DUMP_NETDEV
-
- config NETPOLL_RX
- bool "Netpoll support for trapping incoming packets"
-Index: linux-2.6.10/scripts/mkcompile_h
-===================================================================
---- linux-2.6.10.orig/scripts/mkcompile_h 2004-12-25 05:35:50.000000000 +0800
-+++ linux-2.6.10/scripts/mkcompile_h 2005-04-05 16:47:53.950204040 +0800
-@@ -33,7 +33,7 @@
-
- UTS_LEN=64
- UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
--
-+LINUX_COMPILE_VERSION_ID="__linux_compile_version_id__`hostname | tr -c '[0-9A-Za-z\n]' '__'`_`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'`"
- # Generate a temporary compile.h
-
- ( echo /\* This file is auto generated, version $VERSION \*/
-@@ -55,6 +55,8 @@
- fi
-
- echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
-+ echo \#define LINUX_COMPILE_VERSION_ID $LINUX_COMPILE_VERSION_ID
-+ echo \#define LINUX_COMPILE_VERSION_ID_TYPE typedef char* "$LINUX_COMPILE_VERSION_ID""_t"
- ) > .tmpcompile
-
- # Only replace the real compile.h if the new one is different,
-Index: linux-2.6.10/mm/bootmem.c
-===================================================================
---- linux-2.6.10.orig/mm/bootmem.c 2004-12-25 05:34:30.000000000 +0800
-+++ linux-2.6.10/mm/bootmem.c 2005-04-05 16:47:53.903211184 +0800
-@@ -26,6 +26,7 @@
- */
- unsigned long max_low_pfn;
- unsigned long min_low_pfn;
-+EXPORT_SYMBOL(min_low_pfn);
- unsigned long max_pfn;
-
- EXPORT_SYMBOL(max_pfn); /* This is exported so
-@@ -284,6 +285,7 @@
- if (j + 16 < BITS_PER_LONG)
- prefetchw(page + j + 16);
- __ClearPageReserved(page + j);
-+ set_page_count(page + j, 1);
- }
- __free_pages(page, ffs(BITS_PER_LONG)-1);
- i += BITS_PER_LONG;
-Index: linux-2.6.10/mm/page_alloc.c
-===================================================================
---- linux-2.6.10.orig/mm/page_alloc.c 2005-04-05 16:29:28.218300840 +0800
-+++ linux-2.6.10/mm/page_alloc.c 2005-04-05 16:47:53.902211336 +0800
-@@ -47,6 +47,11 @@
- EXPORT_SYMBOL(totalram_pages);
- EXPORT_SYMBOL(nr_swap_pages);
-
-+#ifdef CONFIG_CRASH_DUMP_MODULE
-+/* This symbol has to be exported to use 'for_each_pgdat' macro by modules. */
-+EXPORT_SYMBOL(pgdat_list);
-+#endif
-+
- /*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
-@@ -281,8 +286,11 @@
- arch_free_page(page, order);
-
- mod_page_state(pgfree, 1 << order);
-- for (i = 0 ; i < (1 << order) ; ++i)
-+ for (i = 0 ; i < (1 << order) ; ++i){
-+ if (unlikely(i))
-+ __put_page(page + i);
- free_pages_check(__FUNCTION__, page + i);
-+ }
- list_add(&page->lru, &list);
- kernel_map_pages(page, 1<<order, 0);
- free_pages_bulk(page_zone(page), 1, &list, order);
-@@ -322,44 +330,34 @@
- return page;
- }
-
--static inline void set_page_refs(struct page *page, int order)
--{
--#ifdef CONFIG_MMU
-- set_page_count(page, 1);
--#else
-- int i;
--
-- /*
-- * We need to reference all the pages for this order, otherwise if
-- * anyone accesses one of the pages with (get/put) it will be freed.
-- */
-- for (i = 0; i < (1 << order); i++)
-- set_page_count(page+i, 1);
--#endif /* CONFIG_MMU */
--}
--
- /*
- * This page is about to be returned from the page allocator
- */
--static void prep_new_page(struct page *page, int order)
-+static void prep_new_page(struct page *_page, int order)
- {
-- if (page->mapping || page_mapped(page) ||
-- (page->flags & (
-- 1 << PG_private |
-- 1 << PG_locked |
-- 1 << PG_lru |
-- 1 << PG_active |
-- 1 << PG_dirty |
-- 1 << PG_reclaim |
-- 1 << PG_swapcache |
-- 1 << PG_writeback )))
-+ int i;
-+
-+ for(i = 0; i < (1 << order); i++){
-+ struct page *page = _page + i;
-+
-+ if (page->mapping || page_mapped(page) ||
-+ (page->flags & (
-+ 1 << PG_private |
-+ 1 << PG_locked |
-+ 1 << PG_lru |
-+ 1 << PG_active |
-+ 1 << PG_dirty |
-+ 1 << PG_reclaim |
-+ 1 << PG_swapcache |
-+ 1 << PG_writeback )))
- bad_page(__FUNCTION__, page);
-
-- page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-- 1 << PG_referenced | 1 << PG_arch_1 |
-- 1 << PG_checked | 1 << PG_mappedtodisk);
-- page->private = 0;
-- set_page_refs(page, order);
-+ page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-+ 1 << PG_referenced | 1 << PG_arch_1 |
-+ 1 << PG_checked | 1 << PG_mappedtodisk);
-+ page->private = 0;
-+ set_page_count(page, 1);
-+ }
- }
-
- /*
-Index: linux-2.6.10/kernel/sched.c
-===================================================================
---- linux-2.6.10.orig/kernel/sched.c 2005-04-05 16:29:30.335978904 +0800
-+++ linux-2.6.10/kernel/sched.c 2005-04-05 16:47:53.901211488 +0800
-@@ -54,6 +54,10 @@
- #define cpu_to_node_mask(cpu) (cpu_online_map)
- #endif
-
-+/* used to soft spin in sched while dump is in progress */
-+unsigned long dump_oncpu;
-+EXPORT_SYMBOL(dump_oncpu);
-+
- /*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
-@@ -184,109 +188,6 @@
- #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
- < (long long) (sd)->cache_hot_time)
-
--/*
-- * These are the runqueue data structures:
-- */
--
--#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
--
--typedef struct runqueue runqueue_t;
--
--struct prio_array {
-- unsigned int nr_active;
-- unsigned long bitmap[BITMAP_SIZE];
-- struct list_head queue[MAX_PRIO];
--};
--
--/*
-- * This is the main, per-CPU runqueue data structure.
-- *
-- * Locking rule: those places that want to lock multiple runqueues
-- * (such as the load balancing or the thread migration code), lock
-- * acquire operations must be ordered by ascending &runqueue.
-- */
--struct runqueue {
-- spinlock_t lock;
--
-- /*
-- * nr_running and cpu_load should be in the same cacheline because
-- * remote CPUs use both these fields when doing load calculation.
-- */
-- unsigned long nr_running;
--#ifdef CONFIG_SMP
-- unsigned long cpu_load;
--#endif
-- unsigned long long nr_switches;
--
-- /*
-- * This is part of a global counter where only the total sum
-- * over all CPUs matters. A task can increase this counter on
-- * one CPU and if it got migrated afterwards it may decrease
-- * it on another CPU. Always updated under the runqueue lock:
-- */
-- unsigned long nr_uninterruptible;
--
-- unsigned long expired_timestamp;
-- unsigned long long timestamp_last_tick;
-- task_t *curr, *idle;
-- struct mm_struct *prev_mm;
-- prio_array_t *active, *expired, arrays[2];
-- int best_expired_prio;
-- atomic_t nr_iowait;
--
--#ifdef CONFIG_SMP
-- struct sched_domain *sd;
--
-- /* For active balancing */
-- int active_balance;
-- int push_cpu;
--
-- task_t *migration_thread;
-- struct list_head migration_queue;
--#endif
--
--#ifdef CONFIG_SCHEDSTATS
-- /* latency stats */
-- struct sched_info rq_sched_info;
--
-- /* sys_sched_yield() stats */
-- unsigned long yld_exp_empty;
-- unsigned long yld_act_empty;
-- unsigned long yld_both_empty;
-- unsigned long yld_cnt;
--
-- /* schedule() stats */
-- unsigned long sched_noswitch;
-- unsigned long sched_switch;
-- unsigned long sched_cnt;
-- unsigned long sched_goidle;
--
-- /* pull_task() stats */
-- unsigned long pt_gained[MAX_IDLE_TYPES];
-- unsigned long pt_lost[MAX_IDLE_TYPES];
--
-- /* active_load_balance() stats */
-- unsigned long alb_cnt;
-- unsigned long alb_lost;
-- unsigned long alb_gained;
-- unsigned long alb_failed;
--
-- /* try_to_wake_up() stats */
-- unsigned long ttwu_cnt;
-- unsigned long ttwu_attempts;
-- unsigned long ttwu_moved;
--
-- /* wake_up_new_task() stats */
-- unsigned long wunt_cnt;
-- unsigned long wunt_moved;
--
-- /* sched_migrate_task() stats */
-- unsigned long smt_cnt;
--
-- /* sched_balance_exec() stats */
-- unsigned long sbe_cnt;
--#endif
--};
-
- static DEFINE_PER_CPU(struct runqueue, runqueues);
-
-@@ -2535,6 +2436,15 @@
- unsigned long run_time;
- int cpu, idx;
-
-+ /*
-+ * If crash dump is in progress, this other cpu's
-+ * need to wait until it completes.
-+ * NB: this code is optimized away for kernels without
-+ * dumping enabled.
-+ */
-+ if (unlikely(dump_oncpu))
-+ goto dump_scheduling_disabled;
-+
- /*
- * Test if we are atomic. Since do_exit() needs to call into
- * schedule() atomically, we ignore that path for now.
-@@ -2698,6 +2608,16 @@
- preempt_enable_no_resched();
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
- goto need_resched;
-+
-+ return;
-+
-+ dump_scheduling_disabled:
-+ /* allow scheduling only if this is the dumping cpu */
-+ if (dump_oncpu != smp_processor_id()+1) {
-+ while (dump_oncpu)
-+ cpu_relax();
-+ }
-+ return;
- }
-
- EXPORT_SYMBOL(schedule);
-Index: linux-2.6.10/kernel/panic.c
-===================================================================
---- linux-2.6.10.orig/kernel/panic.c 2004-12-25 05:35:29.000000000 +0800
-+++ linux-2.6.10/kernel/panic.c 2005-04-05 16:47:53.898211944 +0800
-@@ -18,12 +18,17 @@
- #include <linux/sysrq.h>
- #include <linux/interrupt.h>
- #include <linux/nmi.h>
-+#ifdef CONFIG_KEXEC
-+#include <linux/kexec.h>
-+#endif
-
- int panic_timeout;
- int panic_on_oops;
- int tainted;
-+void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
-
- EXPORT_SYMBOL(panic_timeout);
-+EXPORT_SYMBOL(dump_function_ptr);
-
- struct notifier_block *panic_notifier_list;
-
-@@ -71,11 +76,12 @@
- printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
- bust_spinlocks(0);
-
-+ notifier_call_chain(&panic_notifier_list, 0, buf);
-+
- #ifdef CONFIG_SMP
- smp_send_stop();
- #endif
-
-- notifier_call_chain(&panic_notifier_list, 0, buf);
-
- if (!panic_blink)
- panic_blink = no_blink;
-@@ -87,6 +93,18 @@
- * We can't use the "normal" timers since we just panicked..
- */
- printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
-+#ifdef CONFIG_KEXEC
-+{
-+ struct kimage *image;
-+ image = xchg(&kexec_image, 0);
-+ if (image) {
-+ printk(KERN_EMERG "by starting a new kernel ..\n");
-+ mdelay(panic_timeout*1000);
-+ machine_kexec(image);
-+ }
++/* ----- PASSTHRU FILTER ROUTINE --------- */
++
++/* transparent - passes everything through */
++int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
++{
++ return 1;
+}
-+#endif
+
- for (i = 0; i < panic_timeout*1000; ) {
- touch_nmi_watchdog();
- i += panic_blink(i);
-Index: linux-2.6.10/drivers/block/ll_rw_blk.c
-===================================================================
---- linux-2.6.10.orig/drivers/block/ll_rw_blk.c 2005-04-05 16:29:30.310982704 +0800
-+++ linux-2.6.10/drivers/block/ll_rw_blk.c 2005-04-05 16:47:53.949204192 +0800
-@@ -28,6 +28,7 @@
- #include <linux/slab.h>
- #include <linux/swap.h>
- #include <linux/writeback.h>
-+#include <linux/dump.h>
-
- /*
- * for max sense size
-@@ -2628,7 +2629,8 @@
- sector_t maxsector;
- int ret, nr_sectors = bio_sectors(bio);
-
-- might_sleep();
-+ if (likely(!dump_oncpu))
-+ might_sleep();
- /* Test device or partition size, when known. */
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
-Index: linux-2.6.10/drivers/dump/dump_i386.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_i386.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_i386.c 2005-04-05 16:47:53.940205560 +0800
-@@ -0,0 +1,372 @@
-+/*
-+ * Architecture specific (i386) functions for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
-+ *
-+ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
-+ *
-+ * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
-+ * Copyright 2000 TurboLinux, Inc. All rights reserved.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
++/* ----- PASSTHRU FORMAT ROUTINES ---- */
+
-+/*
-+ * The hooks for dumping the kernel virtual memory to disk are in this
-+ * file. Any time a modification is made to the virtual memory mechanism,
-+ * these routines must be changed to use the new mechanisms.
-+ */
-+#include <linux/init.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/smp.h>
-+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mm.h>
-+#include <linux/dump.h>
-+#include "dump_methods.h"
-+#include <linux/irq.h>
+
-+#include <asm/processor.h>
-+#include <asm/e820.h>
-+#include <asm/hardirq.h>
-+#include <asm/nmi.h>
++int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
++{
++ dump_config.dumper->header_dirty++;
++ return 0;
++}
+
-+static __s32 saved_irq_count; /* saved preempt_count() flags */
++/* Copies bytes of data from page(s) to the specified buffer */
++int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
++{
++ unsigned long len = 0, bytes;
++ void *addr;
+
-+static int
-+alloc_dha_stack(void)
++ while (len < sz) {
++ addr = kmap_atomic(page, KM_DUMP);
++ bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;
++ memcpy(buf, addr, bytes);
++ kunmap_atomic(addr, KM_DUMP);
++ buf += bytes;
++ len += bytes;
++ page++;
++ }
++ /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
++
++ return sz - len;
++}
++
++int dump_passthru_update_header(void)
+{
-+ int i;
-+ void *ptr;
-+
-+ if (dump_header_asm.dha_stack[0])
++ long len = dump_config.dumper->header_len;
++ struct page *page;
++ void *buf = dump_config.dumper->dump_buf;
++ int err = 0;
++
++ if (!dump_config.dumper->header_dirty)
+ return 0;
+
-+ ptr = vmalloc(THREAD_SIZE * num_online_cpus());
-+ if (!ptr) {
-+ printk("vmalloc for dha_stacks failed\n");
-+ return -ENOMEM;
++ pr_debug("Copying header of size %ld bytes from memory\n", len);
++ if (len > DUMP_BUFFER_SIZE)
++ return -E2BIG;
++
++ page = dump_mem_lookup(dump_memdev, 0);
++ for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
++ if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
++ return err;
++ page = dump_mem_next_page(dump_memdev);
++ }
++ if (len > 0) {
++ printk("Incomplete header saved in mem\n");
++ return -ENOENT;
+ }
+
-+ for (i = 0; i < num_online_cpus(); i++) {
-+ dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
-+ (i * THREAD_SIZE));
++ if ((err = dump_dev_seek(0))) {
++ printk("Unable to seek to dump header offset\n");
++ return err;
+ }
++ err = dump_ll_write(dump_config.dumper->dump_buf,
++ buf - dump_config.dumper->dump_buf);
++ if (err < dump_config.dumper->header_len)
++ return (err < 0) ? err : -ENOSPC;
++
++ dump_config.dumper->header_dirty = 0;
+ return 0;
+}
+
-+static int
-+free_dha_stack(void)
++static loff_t next_dph_offset = 0;
++
++static int dph_valid(struct __dump_page *dph)
+{
-+ if (dump_header_asm.dha_stack[0]) {
-+ vfree((void *)dump_header_asm.dha_stack[0]);
-+ dump_header_asm.dha_stack[0] = 0;
++ if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
++ > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
++ (dph->dp_size > PAGE_SIZE)) {
++ printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
++ dph->dp_address, dph->dp_size, dph->dp_flags);
++ return 0;
+ }
-+ return 0;
++ return 1;
+}
+
-+
-+void
-+__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
++int dump_verify_lcrash_data(void *buf, unsigned long sz)
+{
-+ *dest_regs = *regs;
++ struct __dump_page *dph;
+
-+ /* In case of panic dumps, we collects regs on entry to panic.
-+ * so, we shouldn't 'fix' ssesp here again. But it is hard to
-+ * tell just looking at regs whether ssesp need fixing. We make
-+ * this decision by looking at xss in regs. If we have better
-+ * means to determine that ssesp are valid (by some flag which
-+ * tells that we are here due to panic dump), then we can use
-+ * that instead of this kludge.
-+ */
-+ if (!user_mode(regs)) {
-+ if ((0xffff & regs->xss) == __KERNEL_DS)
-+ /* already fixed up */
-+ return;
-+ dest_regs->esp = (unsigned long)&(regs->esp);
-+ __asm__ __volatile__ ("movw %%ss, %%ax;"
-+ :"=a"(dest_regs->xss));
++ /* sanity check for page headers */
++ while (next_dph_offset + sizeof(*dph) < sz) {
++ dph = (struct __dump_page *)(buf + next_dph_offset);
++ if (!dph_valid(dph)) {
++ printk("Invalid page hdr at offset 0x%llx\n",
++ next_dph_offset);
++ return -EINVAL;
++ }
++ next_dph_offset += dph->dp_size + sizeof(*dph);
+ }
++
++ next_dph_offset -= sz;
++ return 0;
+}
+
-+void
-+__dump_save_context(int cpu, const struct pt_regs *regs,
-+ struct task_struct *tsk)
++/*
++ * TBD/Later: Consider avoiding the copy by using a scatter/gather
++ * vector representation for the dump buffer
++ */
++int dump_passthru_add_data(unsigned long loc, unsigned long sz)
+{
-+ dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
-+ __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
-+
-+ /* take a snapshot of the stack */
-+ /* doing this enables us to tolerate slight drifts on this cpu */
++ struct page *page = (struct page *)loc;
++ void *buf = dump_config.dumper->curr_buf;
++ int err = 0;
+
-+ if (dump_header_asm.dha_stack[cpu]) {
-+ memcpy((void *)dump_header_asm.dha_stack[cpu],
-+ STACK_START_POSITION(tsk),
-+ THREAD_SIZE);
++ if ((err = dump_copy_pages(buf, page, sz))) {
++ printk("dump_copy_pages failed");
++ return err;
+ }
-+ dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
-+}
+
-+#ifdef CONFIG_SMP
-+extern cpumask_t irq_affinity[];
-+extern irq_desc_t irq_desc[];
-+extern void dump_send_ipi(void);
++ if ((err = dump_verify_lcrash_data(buf, sz))) {
++ printk("dump_verify_lcrash_data failed\n");
++ printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
++ printk("Page flags 0x%lx\n", page->flags);
++ printk("Page count 0x%x\n", page_count(page));
++ return err;
++ }
+
-+static int dump_expect_ipi[NR_CPUS];
-+static atomic_t waiting_for_dump_ipi;
-+static cpumask_t saved_affinity[NR_IRQS];
++ dump_config.dumper->curr_buf = buf + sz;
+
-+extern void stop_this_cpu(void *); /* exported by i386 kernel */
++ return 0;
++}
+
-+static int
-+dump_nmi_callback(struct pt_regs *regs, int cpu)
-+{
-+ if (!dump_expect_ipi[cpu])
-+ return 0;
+
-+ dump_expect_ipi[cpu] = 0;
-+
-+ dump_save_this_cpu(regs);
-+ atomic_dec(&waiting_for_dump_ipi);
++/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
+
-+ level_changed:
-+ switch (dump_silence_level) {
-+ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
-+ while (dump_oncpu) {
-+ barrier(); /* paranoia */
-+ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
-+ goto level_changed;
++/* Scheme to overlay saved data in memory for writeout after a soft-boot */
++struct dump_scheme_ops dump_scheme_overlay_ops = {
++ .configure = dump_overlay_configure,
++ .unconfigure = dump_overlay_unconfigure,
++ .sequencer = dump_overlay_sequencer,
++ .iterator = dump_page_iterator,
++ .save_data = dump_overlay_save_data,
++ .skip_data = dump_overlay_skip_data,
++ .write_buffer = dump_generic_write_buffer
++};
+
-+ cpu_relax(); /* kill time nicely */
-+ }
-+ break;
++struct dump_scheme dump_scheme_overlay = {
++ .name = "overlay",
++ .ops = &dump_scheme_overlay_ops
++};
+
-+ case DUMP_HALT_CPUS: /* Execute halt */
-+ stop_this_cpu(NULL);
-+ break;
-+
-+ case DUMP_SOFT_SPIN_CPUS:
-+ /* Mark the task so it spins in schedule */
-+ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
-+ break;
-+ }
+
-+ return 1;
-+}
++/* Stage 1 must use a good compression scheme - default to gzip */
++extern struct __dump_compress dump_gzip_compression;
+
-+/* save registers on other processors */
-+void
-+__dump_save_other_cpus(void)
-+{
-+ int i, cpu = smp_processor_id();
-+ int other_cpus = num_online_cpus()-1;
-+
-+ if (other_cpus > 0) {
-+ atomic_set(&waiting_for_dump_ipi, other_cpus);
++struct dumper dumper_stage1 = {
++ .name = "stage1",
++ .scheme = &dump_scheme_overlay,
++ .fmt = &dump_fmt_lcrash,
++ .compress = &dump_none_compression, /* needs to be gzip */
++ .filter = dump_filter_table,
++ .dev = NULL,
++};
+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
-+ }
++/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
+
-+ /* short circuit normal NMI handling temporarily */
-+ set_nmi_callback(dump_nmi_callback);
-+ wmb();
++/* Formatter that transfers data as is (transparent) w/o further conversion */
++struct dump_fmt_ops dump_fmt_passthru_ops = {
++ .configure_header = dump_passthru_configure_header,
++ .update_header = dump_passthru_update_header,
++ .save_context = NULL, /* unused */
++ .add_data = dump_passthru_add_data,
++ .update_end_marker = dump_lcrash_update_end_marker
++};
+
-+ dump_send_ipi();
-+ /* may be we dont need to wait for NMI to be processed.
-+ just write out the header at the end of dumping, if
-+ this IPI is not processed until then, there probably
-+ is a problem and we just fail to capture state of
-+ other cpus. */
-+ while(atomic_read(&waiting_for_dump_ipi) > 0) {
-+ cpu_relax();
-+ }
++struct dump_fmt dump_fmt_passthru = {
++ .name = "passthru",
++ .ops = &dump_fmt_passthru_ops
++};
+
-+ unset_nmi_callback();
-+ }
-+}
++/* Filter that simply passes along any data within the range (transparent)*/
++/* Note: The start and end ranges in the table are filled in at run-time */
+
-+/*
-+ * Routine to save the old irq affinities and change affinities of all irqs to
-+ * the dumping cpu.
-+ */
-+static void
-+set_irq_affinity(void)
-+{
-+ int i;
-+ cpumask_t cpu = CPU_MASK_NONE;
++extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
+
-+ cpu_set(smp_processor_id(), cpu);
-+ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
-+ for (i = 0; i < NR_IRQS; i++) {
-+ if (irq_desc[i].handler == NULL)
-+ continue;
-+ irq_affinity[i] = cpu;
-+ if (irq_desc[i].handler->set_affinity != NULL)
-+ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
-+ }
-+}
++struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
++{.name = "passkern", .selector = dump_passthru_filter,
++ .level_mask = DUMP_MASK_KERN },
++{.name = "passuser", .selector = dump_passthru_filter,
++ .level_mask = DUMP_MASK_USED },
++{.name = "passunused", .selector = dump_passthru_filter,
++ .level_mask = DUMP_MASK_UNUSED },
++{.name = "none", .selector = dump_filter_none,
++ .level_mask = DUMP_MASK_REST }
++};
+
-+/*
-+ * Restore old irq affinities.
-+ */
-+static void
-+reset_irq_affinity(void)
-+{
-+ int i;
+
-+ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
-+ for (i = 0; i < NR_IRQS; i++) {
-+ if (irq_desc[i].handler == NULL)
-+ continue;
-+ if (irq_desc[i].handler->set_affinity != NULL)
-+ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
-+ }
-+}
++/* Scheme to handle data staged / preserved across a soft-boot */
++struct dump_scheme_ops dump_scheme_staged_ops = {
++ .configure = dump_generic_configure,
++ .unconfigure = dump_staged_unconfigure,
++ .sequencer = dump_generic_sequencer,
++ .iterator = dump_saved_data_iterator,
++ .save_data = dump_generic_save_data,
++ .skip_data = dump_generic_skip_data,
++ .write_buffer = dump_generic_write_buffer
++};
+
-+#else /* !CONFIG_SMP */
-+#define set_irq_affinity() do { } while (0)
-+#define reset_irq_affinity() do { } while (0)
-+#define save_other_cpu_states() do { } while (0)
-+#endif /* !CONFIG_SMP */
++struct dump_scheme dump_scheme_staged = {
++ .name = "staged",
++ .ops = &dump_scheme_staged_ops
++};
+
-+/*
-+ * Kludge - dump from interrupt context is unreliable (Fixme)
++/* The stage 2 dumper comprising all these */
++struct dumper dumper_stage2 = {
++ .name = "stage2",
++ .scheme = &dump_scheme_staged,
++ .fmt = &dump_fmt_passthru,
++ .compress = &dump_none_compression,
++ .filter = dump_passthru_filtertable,
++ .dev = NULL,
++};
++
+Index: linux-2.6.10/drivers/dump/dump_fmt.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_fmt.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_fmt.c 2005-04-07 18:13:56.911751944 +0800
+@@ -0,0 +1,407 @@
++/*
++ * Implements the routines which handle the format specific
++ * aspects of dump for the default dump format.
+ *
-+ * We do this so that softirqs initiated for dump i/o
-+ * get processed and we don't hang while waiting for i/o
-+ * to complete or in any irq synchronization attempt.
++ * Used in single stage dumping and stage 1 of soft-boot based dumping
++ * Saves data in LKCD (lcrash) format
+ *
-+ * This is not quite legal of course, as it has the side
-+ * effect of making all interrupts & softirqs triggered
-+ * while dump is in progress complete before currently
-+ * pending softirqs and the currently executing interrupt
-+ * code.
++ * Previously a part of dump_base.c
++ *
++ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
++ * Split off and reshuffled LKCD dump format code around generic
++ * dump method interfaces.
++ *
++ * Derived from original code created by
++ * Matt Robinson <yakker@sourceforge.net>)
++ *
++ * Contributions from SGI, IBM, HP, MCL, and others.
++ *
++ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
++ * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
++ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
+ */
-+static inline void
-+irq_bh_save(void)
-+{
-+ saved_irq_count = irq_count();
-+ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
-+}
+
-+static inline void
-+irq_bh_restore(void)
-+{
-+ preempt_count() |= saved_irq_count;
-+}
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/time.h>
++#include <linux/sched.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/dump.h>
++#include <asm/dump.h>
++#include "dump_methods.h"
+
+/*
-+ * Name: __dump_irq_enable
-+ * Func: Reset system so interrupts are enabled.
-+ * This is used for dump methods that require interrupts
-+ * Eventually, all methods will have interrupts disabled
-+ * and this code can be removed.
++ * SYSTEM DUMP LAYOUT
++ *
++ * System dumps are currently the combination of a dump header and a set
++ * of data pages which contain the system memory. The layout of the dump
++ * (for full dumps) is as follows:
+ *
-+ * Change irq affinities
-+ * Re-enable interrupts
++ * +-----------------------------+
++ * | generic dump header |
++ * +-----------------------------+
++ * | architecture dump header |
++ * +-----------------------------+
++ * | page header |
++ * +-----------------------------+
++ * | page data |
++ * +-----------------------------+
++ * | page header |
++ * +-----------------------------+
++ * | page data |
++ * +-----------------------------+
++ * | | |
++ * | | |
++ * | | |
++ * | | |
++ * | V |
++ * +-----------------------------+
++ * | PAGE_END header |
++ * +-----------------------------+
++ *
++ * There are two dump headers, the first which is architecture
++ * independent, and the other which is architecture dependent. This
++ * allows different architectures to dump different data structures
++ * which are specific to their chipset, CPU, etc.
++ *
++ * After the dump headers come a succession of dump page headers along
++ * with dump pages. The page header contains information about the page
++ * size, any flags associated with the page (whether it's compressed or
++ * not), and the address of the page. After the page header is the page
++ * data, which is either compressed (or not). Each page of data is
++ * dumped in succession, until the final dump header (PAGE_END) is
++ * placed at the end of the dump, assuming the dump device isn't out
++ * of space.
++ *
++ * This mechanism allows for multiple compression types, different
++ * types of data structures, different page ordering, etc., etc., etc.
++ * It's a very straightforward mechanism for dumping system memory.
+ */
-+int
-+__dump_irq_enable(void)
-+{
-+ set_irq_affinity();
-+ irq_bh_save();
-+ local_irq_enable();
-+ return 0;
-+}
+
-+/*
-+ * Name: __dump_irq_restore
-+ * Func: Resume the system state in an architecture-specific way.
++struct __dump_header dump_header; /* the primary dump header */
++struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
+
++/* Replace a runtime sanity check on the DUMP_BUFFER_SIZE with a
++ * compile-time check. The compile_time_assertions routine will not
++ * compile if the assertion is false.
++ *
++ * If you fail this assert you are most likely on a large machine and
++ * should use a special 6.0.0 version of LKCD or a version > 7.0.0. See
++ * the LKCD website for more information.
+ */
-+void
-+__dump_irq_restore(void)
-+{
-+ local_irq_disable();
-+ reset_irq_affinity();
-+ irq_bh_restore();
-+}
+
-+/*
-+ * Name: __dump_configure_header()
-+ * Func: Meant to fill in arch specific header fields except per-cpu state
-+ * already captured via __dump_save_context for all CPUs.
-+ */
-+int
-+__dump_configure_header(const struct pt_regs *regs)
-+{
-+ return (0);
-+}
++#define COMPILE_TIME_ASSERT(const_expr) \
++ switch(0){case 0: case (const_expr):;}
+
-+/*
-+ * Name: __dump_init()
-+ * Func: Initialize the dumping routine process.
-+ */
-+void
-+__dump_init(uint64_t local_memory_start)
++static inline void compile_time_assertions(void)
+{
-+ return;
++ COMPILE_TIME_ASSERT((sizeof(struct __dump_header) +
++ sizeof(struct __dump_header_asm)) <= DUMP_BUFFER_SIZE);
+}
+
+/*
-+ * Name: __dump_open()
-+ * Func: Open the dump device (architecture specific).
++ * Set up common header fields (mainly the arch indep section)
++ * Per-cpu state is handled by lcrash_save_context
++ * Returns the size of the header in bytes.
+ */
-+void
-+__dump_open(void)
++static int lcrash_init_dump_header(const char *panic_str)
+{
-+ alloc_dha_stack();
-+}
++ struct timeval dh_time;
++ u64 temp_memsz = dump_header.dh_memory_size;
+
-+/*
-+ * Name: __dump_cleanup()
-+ * Func: Free any architecture specific data structures. This is called
-+ * when the dump module is being removed.
-+ */
-+void
-+__dump_cleanup(void)
-+{
-+ free_dha_stack();
-+}
++ /* initialize the dump headers to zero */
++ /* save the dha_stack pointers because they may contain pointers to stacks! */
++ memset(&dump_header, 0, sizeof(dump_header));
++ memset(&dump_header_asm, 0,
++ offsetof(struct __dump_header_asm, dha_stack));
++ memset(&dump_header_asm.dha_stack+1, 0,
++ sizeof(dump_header_asm) -
++ offsetof(struct __dump_header_asm, dha_stack) -
++ sizeof(dump_header_asm.dha_stack));
++ dump_header.dh_memory_size = temp_memsz;
+
-+extern int pfn_is_ram(unsigned long);
++ /* configure dump header values */
++ dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
++ dump_header.dh_version = DUMP_VERSION_NUMBER;
++ dump_header.dh_memory_start = PAGE_OFFSET;
++ dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
++ dump_header.dh_header_size = sizeof(struct __dump_header);
++ dump_header.dh_page_size = PAGE_SIZE;
++ dump_header.dh_dump_level = dump_config.level;
++ dump_header.dh_current_task = (unsigned long) current;
++ dump_header.dh_dump_compress = dump_config.dumper->compress->
++ compress_type;
++ dump_header.dh_dump_flags = dump_config.flags;
++ dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
+
-+/*
-+ * Name: __dump_page_valid()
-+ * Func: Check if page is valid to dump.
-+ */
-+int
-+__dump_page_valid(unsigned long index)
-+{
-+ if (!pfn_valid(index))
-+ return 0;
++#if DUMP_DEBUG >= 6
++ dump_header.dh_num_bytes = 0;
++#endif
++ dump_header.dh_num_dump_pages = 0;
++ do_gettimeofday(&dh_time);
++ dump_header.dh_time.tv_sec = dh_time.tv_sec;
++ dump_header.dh_time.tv_usec = dh_time.tv_usec;
+
-+ return pfn_is_ram(index);
-+}
++ memcpy((void *)&(dump_header.dh_utsname_sysname),
++ (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
++ memcpy((void *)&(dump_header.dh_utsname_nodename),
++ (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
++ memcpy((void *)&(dump_header.dh_utsname_release),
++ (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
++ memcpy((void *)&(dump_header.dh_utsname_version),
++ (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
++ memcpy((void *)&(dump_header.dh_utsname_machine),
++ (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
++ memcpy((void *)&(dump_header.dh_utsname_domainname),
++ (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);
+
-+/*
-+ * Name: manual_handle_crashdump()
-+ * Func: Interface for the lkcd dump command. Calls dump_execute()
-+ */
-+int
-+manual_handle_crashdump(void) {
++ if (panic_str) {
++ memcpy((void *)&(dump_header.dh_panic_string),
++ (const void *)panic_str, DUMP_PANIC_LEN);
++ }
+
-+ struct pt_regs regs;
++ dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
++ dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
++ dump_header_asm.dha_header_size = sizeof(dump_header_asm);
++#ifdef CONFIG_ARM
++ dump_header_asm.dha_physaddr_start = PHYS_OFFSET;
++#endif
++
++ dump_header_asm.dha_smp_num_cpus = num_online_cpus();
++ pr_debug("smp_num_cpus in header %d\n",
++ dump_header_asm.dha_smp_num_cpus);
++
++ dump_header_asm.dha_dumping_cpu = smp_processor_id();
+
-+ get_current_regs(®s);
-+ dump_execute("manual", ®s);
-+ return 0;
++ return sizeof(dump_header) + sizeof(dump_header_asm);
+}
+
-+/*
-+ * Name: __dump_clean_irq_state()
-+ * Func: Clean up from the previous IRQ handling state. Such as oops from
-+ * interrupt handler or bottom half.
-+ */
-+void
-+__dump_clean_irq_state(void)
-+{
-+ return;
-+}
-Index: linux-2.6.10/drivers/dump/dump_ia64.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_ia64.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_ia64.c 2005-04-05 16:47:53.928207384 +0800
-@@ -0,0 +1,458 @@
-+/*
-+ * Architecture specific (ia64) functions for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
-+ * Contributions from SGI, IBM, and others.
-+ *
-+ * 2.4 kernel modifications by: Matt D. Robinson (yakker@alacritech.com)
-+ * ia64 kernel modifications by: Piet Delaney (piet@www.piet.net)
-+ *
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson (yakker@alacritech.com)
-+ * Copyright (C) 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
+
-+/*
-+ * The hooks for dumping the kernel virtual memory to disk are in this
-+ * file. Any time a modification is made to the virtual memory mechanism,
-+ * these routines must be changed to use the new mechanisms.
-+ */
-+#include <linux/init.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/smp.h>
-+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/dump.h>
-+#include "dump_methods.h"
-+#include <linux/mm.h>
-+#include <asm/processor.h>
-+#include <asm-ia64/dump.h>
-+#include <asm/hardirq.h>
-+#include <linux/irq.h>
-+#include <linux/delay.h>
++int dump_lcrash_configure_header(const char *panic_str,
++ const struct pt_regs *regs)
++{
++ int retval = 0;
+
-+static __s32 saved_irq_count; /* saved preempt_count() flags */
++ dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);
+
++ /* capture register states for all processors */
++ dump_save_this_cpu(regs);
++ __dump_save_other_cpus(); /* side effect:silence cpus */
+
-+static int alloc_dha_stack(void)
-+{
-+ int i;
-+ void *ptr;
-+
-+ if (dump_header_asm.dha_stack[0])
-+ {
-+ return 0;
-+ }
-+ ptr = vmalloc(THREAD_SIZE * num_online_cpus());
-+ if (!ptr) {
-+ printk("vmalloc for dha_stacks failed\n");
-+ return -ENOMEM;
-+ }
-+ bzero(ptr,THREAD_SIZE );
++ /* configure architecture-specific dump header values */
++ if ((retval = __dump_configure_header(regs)))
++ return retval;
+
-+ for (i = 0; i < num_online_cpus(); i++) {
-+ dump_header_asm.dha_stack[i] = (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
-+ }
++ dump_config.dumper->header_dirty++;
+ return 0;
+}
-+
-+static int free_dha_stack(void)
++/* save register and task context */
++void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
++ struct task_struct *tsk)
+{
-+ if (dump_header_asm.dha_stack[0])
-+ {
-+ vfree((void*)dump_header_asm.dha_stack[0]);
-+ dump_header_asm.dha_stack[0] = 0;
-+ }
-+ return 0;
++ /* This level of abstraction might be redundantly redundant */
++ __dump_save_context(cpu, regs, tsk);
+}
+
-+/* a structure to get arguments into the following callback routine */
-+struct unw_args {
-+ int cpu;
-+ struct task_struct *tsk;
-+};
-+
-+static void
-+do_save_sw(struct unw_frame_info *info, void *arg)
++/* write out the header */
++int dump_write_header(void)
+{
-+ struct unw_args *uwargs = (struct unw_args *)arg;
-+ int cpu = uwargs->cpu;
-+ struct task_struct *tsk = uwargs->tsk;
-+
-+ dump_header_asm.dha_stack_ptr[cpu] = (uint64_t)info->sw;
++ int retval = 0, size;
++ void *buf = dump_config.dumper->dump_buf;
+
-+ if (tsk && dump_header_asm.dha_stack[cpu]) {
-+ memcpy((void *)dump_header_asm.dha_stack[cpu],
-+ STACK_START_POSITION(tsk),
-+ THREAD_SIZE);
++ /* accounts for DUMP_HEADER_OFFSET if applicable */
++ if ((retval = dump_dev_seek(0))) {
++ printk("Unable to seek to dump header offset: %d\n",
++ retval);
++ return retval;
+ }
++
++ memcpy(buf, (void *)&dump_header, sizeof(dump_header));
++ size = sizeof(dump_header);
++ memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
++ size += sizeof(dump_header_asm);
++ size = PAGE_ALIGN(size);
++ retval = dump_ll_write(buf , size);
++
++ if (retval < size)
++ return (retval >= 0) ? -ENOSPC : retval;
++ return 0;
+}
+
-+void
-+__dump_save_context(int cpu, const struct pt_regs *regs,
-+ struct task_struct *tsk)
++int dump_generic_update_header(void)
+{
-+ struct unw_args uwargs;
-+
-+ dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
++ int err = 0;
+
-+ if (regs) {
-+ dump_header_asm.dha_smp_regs[cpu] = *regs;
++ if (dump_config.dumper->header_dirty) {
++ if ((err = dump_write_header())) {
++ printk("dump write header failed !err %d\n", err);
++ } else {
++ dump_config.dumper->header_dirty = 0;
++ }
+ }
+
-+ /* save a snapshot of the stack in a nice state for unwinding */
-+ uwargs.cpu = cpu;
-+ uwargs.tsk = tsk;
-+
-+ unw_init_running(do_save_sw, (void *)&uwargs);
++ return err;
+}
+
-+#ifdef CONFIG_SMP
-+
-+extern cpumask_t irq_affinity[];
-+#define irq_desc _irq_desc
-+extern irq_desc_t irq_desc[];
-+extern void dump_send_ipi(void);
-+static cpumask_t saved_affinity[NR_IRQS];
-+
-+/*
-+ * Routine to save the old irq affinities and change affinities of all irqs to
-+ * the dumping cpu.
-+ */
-+static void
-+set_irq_affinity(void)
++static inline int is_curr_stack_page(struct page *page, unsigned long size)
+{
-+ int i;
-+ cpumask_t cpu = CPU_MASK_NONE;
++ unsigned long thread_addr = (unsigned long)current_thread_info();
++ unsigned long addr = (unsigned long)page_address(page);
+
-+ cpu_set(smp_processor_id(), cpu);
-+ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
-+ for (i = 0; i < NR_IRQS; i++) {
-+ if (irq_desc[i].handler == NULL)
-+ continue;
-+ irq_affinity[i] = cpu;
-+ if (irq_desc[i].handler->set_affinity != NULL)
-+ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
-+ }
++ return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
++ && (addr + size > thread_addr);
+}
+
-+/*
-+ * Restore old irq affinities.
-+ */
-+static void
-+reset_irq_affinity(void)
++static inline int is_dump_page(struct page *page, unsigned long size)
+{
-+ int i;
++ unsigned long addr = (unsigned long)page_address(page);
++ unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
+
-+ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
-+ for (i = 0; i < NR_IRQS; i++) {
-+ if (irq_desc[i].handler == NULL)
-+ continue;
-+ if (irq_desc[i].handler->set_affinity != NULL)
-+ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
-+ }
++ return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
++ && (addr + size > dump_buf);
+}
+
-+#else /* !CONFIG_SMP */
-+#define set_irq_affinity() do { } while (0)
-+#define reset_irq_affinity() do { } while (0)
-+#define save_other_cpu_states() do { } while (0)
-+#endif /* !CONFIG_SMP */
-+
-+#ifdef CONFIG_SMP
-+static int dump_expect_ipi[NR_CPUS];
-+static atomic_t waiting_for_dump_ipi;
-+static int wait_for_dump_ipi = 2000; /* wait 2000 ms for ipi to be handled */
-+extern void (*dump_trace_ptr)(struct pt_regs *);
-+
-+
-+extern void stop_this_cpu(void);
++int dump_allow_compress(struct page *page, unsigned long size)
++{
++ /*
++ * Don't compress the page if any part of it overlaps
++ * with the current stack or dump buffer (since the contents
++ * in these could be changing while compression is going on)
++ */
++ return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
++}
+
-+static int
-+dump_nmi_callback(struct pt_regs *regs, int cpu)
++void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
++ unsigned long sz)
+{
-+ if (!dump_expect_ipi[cpu])
-+ return 0;
++ memset(dp, 0, sizeof(struct __dump_page));
++ dp->dp_flags = 0;
++ dp->dp_size = 0;
++ if (sz > 0)
++ dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
+
-+ dump_expect_ipi[cpu] = 0;
++#if DUMP_DEBUG > 6
++ dp->dp_page_index = dump_header.dh_num_dump_pages;
++ dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
++ + DUMP_HEADER_OFFSET; /* ?? */
++#endif /* DUMP_DEBUG */
++}
+
-+ dump_save_this_cpu(regs);
-+ atomic_dec(&waiting_for_dump_ipi);
++int dump_lcrash_add_data(unsigned long loc, unsigned long len)
++{
++ struct page *page = (struct page *)loc;
++ void *addr, *buf = dump_config.dumper->curr_buf;
++ struct __dump_page *dp = (struct __dump_page *)buf;
++ int bytes, size;
+
-+ level_changed:
-+ switch (dump_silence_level) {
-+ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
-+ while (dump_oncpu) {
-+ barrier(); /* paranoia */
-+ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
-+ goto level_changed;
++ if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
++ return -ENOMEM;
+
-+ cpu_relax(); /* kill time nicely */
-+ }
-+ break;
++ lcrash_init_pageheader(dp, page, len);
++ buf += sizeof(struct __dump_page);
+
-+ case DUMP_HALT_CPUS: /* Execute halt */
-+ stop_this_cpu();
-+ break;
++ while (len) {
++ addr = kmap_atomic(page, KM_DUMP);
++ size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
++ /* check for compression */
++ if (dump_allow_compress(page, bytes)) {
++ size = dump_compress_data((char *)addr, bytes,
++ (char *)buf, loc);
++ }
++ /* set the compressed flag if the page did compress */
++ if (size && (size < bytes)) {
++ dp->dp_flags |= DUMP_DH_COMPRESSED;
++ } else {
++ /* compression failed -- default to raw mode */
++ dp->dp_flags |= DUMP_DH_RAW;
++ memcpy(buf, addr, bytes);
++ size = bytes;
++ }
++ /* memset(buf, 'A', size); temporary: testing only !! */
++ kunmap_atomic(addr, KM_DUMP);
++ dp->dp_size += size;
++ buf += size;
++ len -= bytes;
++ page++;
++ }
+
-+ case DUMP_SOFT_SPIN_CPUS:
-+ /* Mark the task so it spins in schedule */
-+ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
-+ break;
-+ }
++ /* now update the header */
++#if DUMP_DEBUG > 6
++ dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
++#endif
++ dump_header.dh_num_dump_pages++;
++ dump_config.dumper->header_dirty++;
+
-+ return 1;
-+}
++ dump_config.dumper->curr_buf = buf;
+
-+int IPI_handler(struct pt_regs *regs)
-+{
-+ int cpu;
-+ cpu = task_cpu(current);
-+ return(dump_nmi_callback(regs, cpu));
++ return len;
+}
+
-+/* save registers on other processors */
-+void
-+__dump_save_other_cpus(void)
++int dump_lcrash_update_end_marker(void)
+{
-+ int i, cpu = smp_processor_id();
-+ int other_cpus = num_online_cpus()-1;
-+ int wait_time = wait_for_dump_ipi;
-+
-+ if (other_cpus > 0) {
-+ atomic_set(&waiting_for_dump_ipi, other_cpus);
-+
-+ for (i = 0; i < NR_CPUS; i++) {
-+ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
-+ }
-+
-+ dump_ipi_function_ptr = IPI_handler;
++ struct __dump_page *dp =
++ (struct __dump_page *)dump_config.dumper->curr_buf;
++ unsigned long left;
++ int ret = 0;
+
-+ wmb();
-+
-+ dump_send_ipi();
-+ /* may be we dont need to wait for IPI to be processed.
-+ * just write out the header at the end of dumping, if
-+ * this IPI is not processed until then, there probably
-+ * is a problem and we just fail to capture state of
-+ * other cpus. */
-+ while(wait_time-- && (atomic_read(&waiting_for_dump_ipi) > 0)) {
-+ barrier();
-+ mdelay(1);
-+ }
-+ if (wait_time <= 0) {
-+ printk("dump ipi timeout, proceeding...\n");
-+ }
-+ }
-+}
++ lcrash_init_pageheader(dp, NULL, 0);
++ dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
++
++ /* now update the header */
++#if DUMP_DEBUG > 6
++ dump_header.dh_num_bytes += sizeof(*dp);
+#endif
-+/*
-+ * Kludge - dump from interrupt context is unreliable (Fixme)
-+ *
-+ * We do this so that softirqs initiated for dump i/o
-+ * get processed and we don't hang while waiting for i/o
-+ * to complete or in any irq synchronization attempt.
-+ *
-+ * This is not quite legal of course, as it has the side
-+ * effect of making all interrupts & softirqs triggered
-+ * while dump is in progress complete before currently
-+ * pending softirqs and the currently executing interrupt
-+ * code.
-+ */
-+static inline void
-+irq_bh_save(void)
-+{
-+ saved_irq_count = irq_count();
-+ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
-+}
++ dump_config.dumper->curr_buf += sizeof(*dp);
++ left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;
+
-+static inline void
-+irq_bh_restore(void)
-+{
-+ preempt_count() |= saved_irq_count;
-+}
++ printk("\n");
+
-+/*
-+ * Name: __dump_configure_header()
-+ * Func: Configure the dump header with all proper values.
-+ */
-+int
-+__dump_configure_header(const struct pt_regs *regs)
-+{
-+ return (0);
-+}
++ while (left) {
++ if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
++ printk("Seek failed at offset 0x%llx\n",
++ dump_config.dumper->curr_offset);
++ return ret;
++ }
+
++ if (DUMP_BUFFER_SIZE > left)
++ memset(dump_config.dumper->curr_buf, 'm',
++ DUMP_BUFFER_SIZE - left);
+
-+#define dim(x) (sizeof(x)/sizeof(*(x)))
++ if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
++ DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
++ return (ret < 0) ? ret : -ENOSPC;
++ }
+
-+/*
-+ * Name: __dump_irq_enable
-+ * Func: Reset system so interrupts are enabled.
-+ * This is used for dump methods that require interrupts
-+ * Eventually, all methods will have interrupts disabled
-+ * and this code can be removed.
-+ *
-+ * Change irq affinities
-+ * Re-enable interrupts
-+ */
-+int
-+__dump_irq_enable(void)
-+{
-+ set_irq_affinity();
-+ irq_bh_save();
-+ ia64_srlz_d();
-+ /*
-+ * reduce the task priority level
-+ * to get disk interrupts
-+ */
-+ ia64_setreg(_IA64_REG_CR_TPR, 0);
-+ ia64_srlz_d();
-+ local_irq_enable();
++ dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;
++
++ if (left > DUMP_BUFFER_SIZE) {
++ left -= DUMP_BUFFER_SIZE;
++ memcpy(dump_config.dumper->dump_buf,
++ dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
++ dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
++ } else {
++ left = 0;
++ }
++ }
+ return 0;
+}
+
-+/*
-+ * Name: __dump_irq_restore
-+ * Func: Resume the system state in an architecture-specific way.
+
-+ */
-+void
-+__dump_irq_restore(void)
-+{
-+ local_irq_disable();
-+ reset_irq_affinity();
-+ irq_bh_restore();
-+}
++/* Default Formatter (lcrash) */
++struct dump_fmt_ops dump_fmt_lcrash_ops = {
++ .configure_header = dump_lcrash_configure_header,
++ .update_header = dump_generic_update_header,
++ .save_context = dump_lcrash_save_context,
++ .add_data = dump_lcrash_add_data,
++ .update_end_marker = dump_lcrash_update_end_marker
++};
+
-+/*
-+ * Name: __dump_page_valid()
-+ * Func: Check if page is valid to dump.
-+ */
-+int
-+__dump_page_valid(unsigned long index)
-+{
-+ if (!pfn_valid(index))
-+ {
-+ return 0;
-+ }
-+ return 1;
-+}
++struct dump_fmt dump_fmt_lcrash = {
++ .name = "lcrash",
++ .ops = &dump_fmt_lcrash_ops
++};
+
+Index: linux-2.6.10/drivers/dump/dump_netdev.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_netdev.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_netdev.c 2005-04-07 18:13:56.900753616 +0800
+@@ -0,0 +1,566 @@
+/*
-+ * Name: __dump_init()
-+ * Func: Initialize the dumping routine process. This is in case
-+ * it's necessary in the future.
++ * Implements the dump driver interface for saving a dump via network
++ * interface.
++ *
++ * Some of this code has been taken/adapted from Ingo Molnar's netconsole
++ * code. LKCD team expresses its thanks to Ingo.
++ *
++ * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
++ * Adapted netconsole code to implement LKCD dump over the network.
++ *
++ * Nov 2002 - Bharata B. Rao <bharata@in.ibm.com>
++ * Innumerable code cleanups, simplification and some fixes.
++ * Netdump configuration done by ioctl instead of using module parameters.
++ * Oct 2003 - Prasanna S Panchamukhi <prasanna@in.ibm.com>
++ * Netdump code modified to use Netpoll API's.
++ *
++ * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
+ */
-+void
-+__dump_init(uint64_t local_memory_start)
-+{
-+ return;
-+}
+
-+/*
-+ * Name: __dump_open()
-+ * Func: Open the dump device (architecture specific). This is in
-+ * case it's necessary in the future.
-+ */
-+void
-+__dump_open(void)
-+{
-+ alloc_dha_stack();
-+ return;
-+}
++#include <net/tcp.h>
++#include <net/udp.h>
++#include <linux/delay.h>
++#include <linux/random.h>
++#include <linux/reboot.h>
++#include <linux/module.h>
++#include <linux/dump.h>
++#include <linux/dump_netdev.h>
+
++#include <asm/unaligned.h>
++
++static int startup_handshake;
++static int page_counter;
++static unsigned long flags_global;
++static int netdump_in_progress;
+
+/*
-+ * Name: __dump_cleanup()
-+ * Func: Free any architecture specific data structures. This is called
-+ * when the dump module is being removed.
++ * security depends on the trusted path between the netconsole
++ * server and netconsole client, since none of the packets are
++ * encrypted. The random magic number protects the protocol
++ * against spoofing.
+ */
-+void
-+__dump_cleanup(void)
-+{
-+ free_dha_stack();
-+
-+ return;
-+}
-+
-+
-+
-+int __dump_memcpy_mc_expected = 0; /* Doesn't help yet */
++static u64 dump_magic;
+
+/*
-+ * An ia64 version of memcpy() that trys to avoid machine checks.
-+ *
-+ * NB:
-+ * By itself __dump_memcpy_mc_expected() ins't providing any
-+ * protection against Machine Checks. We are looking into the
-+ * possability of adding code to the arch/ia64/kernel/mca.c fuction
-+ * ia64_mca_ucmc_handler() to restore state so that a IA64_MCA_CORRECTED
-+ * can be returned to the firmware. Curently it always returns
-+ * IA64_MCA_COLD_BOOT and reboots the machine.
++ * We maintain a small pool of fully-sized skbs,
++ * to make sure the message gets out even in
++ * extreme OOM situations.
+ */
-+/*
-+void * __dump_memcpy(void * dest, const void *src, size_t count)
++
++static void rx_hook(struct netpoll *np, int port, char *msg, int size);
++int new_req = 0;
++static req_t req;
++
++static void rx_hook(struct netpoll *np, int port, char *msg, int size)
+{
-+ void *vp;
++ req_t * __req = (req_t *) msg;
++ /*
++ * First check if we are dumping or doing the startup handshake;
++ * if not, return quickly.
++ */
+
-+ if (__dump_memcpy_mc_expected) {
-+ ia64_pal_mc_expected((u64) 1, 0);
-+ }
++ if (!netdump_in_progress)
++ return ;
+
-+ vp = memcpy(dest, src, count);
++ if ((ntohl(__req->command) != COMM_GET_MAGIC) &&
++ (ntohl(__req->command) != COMM_HELLO) &&
++ (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) &&
++ (ntohl(__req->command) != COMM_START_NETDUMP_ACK) &&
++ (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0))
++ goto out;
+
-+ if (__dump_memcpy_mc_expected) {
-+ ia64_pal_mc_expected((u64) 0, 0);
-+ }
-+ return(vp);
++ req.magic = ntohl(__req->magic);
++ req.command = ntohl(__req->command);
++ req.from = ntohl(__req->from);
++ req.to = ntohl(__req->to);
++ req.nr = ntohl(__req->nr);
++ new_req = 1;
++out:
++ return ;
+}
-+*/
++static char netdump_membuf[1024 + HEADER_LEN + 1];
+/*
-+ * Name: manual_handle_crashdump()
-+ * Func: Interface for the lkcd dump command. Calls dump_execute()
++ * Fill the netdump_membuf with the header information from the reply_t
++ * structure and send it down to the netpoll_send_udp() routine.
+ */
-+int
-+manual_handle_crashdump(void) {
++static void
++netdump_send_packet(struct netpoll *np, reply_t *reply, size_t data_len) {
++ char *b;
+
-+ struct pt_regs regs;
++ b = &netdump_membuf[1];
++ netdump_membuf[0] = NETCONSOLE_VERSION;
++ put_unaligned(htonl(reply->nr), (u32 *) b);
++ put_unaligned(htonl(reply->code), (u32 *) (b + sizeof(reply->code)));
++ put_unaligned(htonl(reply->info), (u32 *) (b + sizeof(reply->code) +
++ sizeof(reply->info)));
++ netpoll_send_udp(np, netdump_membuf, data_len + HEADER_LEN);
++}
+
-+ get_current_regs(®s);
-+ dump_execute("manual", ®s);
-+ return 0;
++static void
++dump_send_mem(struct netpoll *np, req_t *req, const char* buff, size_t len)
++{
++ int i;
++
++ int nr_chunks = len/1024;
++ reply_t reply;
++
++ reply.nr = req->nr;
++ reply.code = REPLY_MEM;
++ if ( nr_chunks <= 0)
++ nr_chunks = 1;
++ for (i = 0; i < nr_chunks; i++) {
++ unsigned int offset = i*1024;
++ reply.info = offset;
++ memcpy((netdump_membuf + HEADER_LEN), (buff + offset), 1024);
++ netdump_send_packet(np, &reply, 1024);
++ }
+}
+
+/*
-+ * Name: __dump_clean_irq_state()
-+ * Func: Clean up from the previous IRQ handling state. Such as oops from
-+ * interrupt handler or bottom half.
++ * This function waits for the client to acknowledge the receipt
++ * of the netdump startup reply, with the possibility of packets
++ * getting lost. We resend the startup packet if no ACK is received,
++ * after a 1 second delay.
++ *
++ * (The client can test the success of the handshake via the HELLO
++ * command, and send ACKs until we enter netdump mode.)
+ */
-+void
-+__dump_clean_irq_state(void)
++static int
++dump_handshake(struct dump_dev *net_dev)
+{
-+ unsigned long saved_tpr;
-+ unsigned long TPR_MASK = 0xFFFFFFFFFFFEFF0F;
-+
-+
-+ /* Get the processors task priority register */
-+ saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
-+ /* clear the mmi and mic bit's of the TPR to unmask interrupts */
-+ saved_tpr = saved_tpr & TPR_MASK;
-+ ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
-+ ia64_srlz_d();
++ reply_t reply;
++ int i, j;
++ size_t str_len;
+
-+ /* Tell the processor we're done with the interrupt
-+ * that got us here.
-+ */
++ if (startup_handshake) {
++ sprintf((netdump_membuf + HEADER_LEN),
++ "NETDUMP start, waiting for start-ACK.\n");
++ reply.code = REPLY_START_NETDUMP;
++ reply.nr = 0;
++ reply.info = 0;
++ } else {
++ sprintf((netdump_membuf + HEADER_LEN),
++ "NETDUMP start, waiting for start-ACK.\n");
++ reply.code = REPLY_START_WRITE_NETDUMP;
++ reply.nr = net_dev->curr_offset;
++ reply.info = net_dev->curr_offset;
++ }
++ str_len = strlen(netdump_membuf + HEADER_LEN);
+
-+ ia64_eoi();
-+
-+ /* local implementation of irq_exit(); */
-+ preempt_count() -= IRQ_EXIT_OFFSET;
-+ preempt_enable_no_resched();
++ /* send 300 handshake packets before declaring failure */
++ for (i = 0; i < 300; i++) {
++ netdump_send_packet(&net_dev->np, &reply, str_len);
+
-+ return;
-+}
++ /* wait 1 sec */
++ for (j = 0; j < 10000; j++) {
++ udelay(100);
++ netpoll_poll(&net_dev->np);
++ if (new_req)
++ break;
++ }
+
-Index: linux-2.6.10/drivers/dump/dump_rle.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_rle.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_rle.c 2005-04-05 16:47:53.935206320 +0800
-@@ -0,0 +1,176 @@
-+/*
-+ * RLE Compression functions for kernel crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sourceforge.net)
-+ * Copyright 2001 Matt D. Robinson. All rights reserved.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
++ /*
++ * if there is no new request, try sending the handshaking
++ * packet again
++ */
++ if (!new_req)
++ continue;
+
-+/* header files */
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/init.h>
-+#include <linux/dump.h>
++ /*
++ * check if the new request is of the expected type,
++ * if so, return, else try sending the handshaking
++ * packet again
++ */
++ if (startup_handshake) {
++ if (req.command == COMM_HELLO || req.command ==
++ COMM_START_NETDUMP_ACK) {
++ return 0;
++ } else {
++ new_req = 0;
++ continue;
++ }
++ } else {
++ if (req.command == COMM_SEND_MEM) {
++ return 0;
++ } else {
++ new_req = 0;
++ continue;
++ }
++ }
++ }
++ return -1;
++}
+
-+/*
-+ * Name: dump_compress_rle()
-+ * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
-+ * reasonable, if possible. This is the same routine we use in IRIX.
-+ */
-+static u32
-+dump_compress_rle(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
-+ unsigned long loc)
++static ssize_t
++do_netdump(struct dump_dev *net_dev, const char* buff, size_t len)
+{
-+ u16 ri, wi, count = 0;
-+ u_char value = 0, cur_byte;
++ reply_t reply;
++ ssize_t ret = 0;
++ int repeatCounter, counter, total_loop;
++ size_t str_len;
++
++ netdump_in_progress = 1;
++
++ if (dump_handshake(net_dev) < 0) {
++ printk("network dump failed due to handshake failure\n");
++ goto out;
++ }
+
+ /*
-+ * If the block should happen to "compress" to larger than the
-+ * buffer size, allocate a larger one and change cur_buf_size.
++ * Ideally startup handshake should be done during dump configuration,
++ * i.e., in dump_net_open(). This will be done when I figure out
++ * the dependency between startup handshake, subsequent write and
++ * various commands wrt to net-server.
+ */
++ if (startup_handshake)
++ startup_handshake = 0;
+
-+ wi = ri = 0;
++ counter = 0;
++ repeatCounter = 0;
++ total_loop = 0;
++ while (1) {
++ if (!new_req) {
++ netpoll_poll(&net_dev->np);
++ }
++ if (!new_req) {
++ repeatCounter++;
+
-+ while (ri < oldsize) {
-+ if (!ri) {
-+ cur_byte = value = old[ri];
-+ count = 0;
-+ } else {
-+ if (count == 255) {
-+ if (wi + 3 > oldsize) {
-+ return oldsize;
++ if (repeatCounter > 5) {
++ counter++;
++ if (counter > 10000) {
++ if (total_loop >= 100000) {
++ printk("Time OUT LEAVE NOW\n");
++ goto out;
++ } else {
++ total_loop++;
++ printk("Try number %d out of "
++ "10 before Time Out\n",
++ total_loop);
++ }
+ }
-+ new[wi++] = 0;
-+ new[wi++] = count;
-+ new[wi++] = value;
-+ value = cur_byte = old[ri];
-+ count = 0;
-+ } else {
-+ if ((cur_byte = old[ri]) == value) {
-+ count++;
-+ } else {
-+ if (count > 1) {
-+ if (wi + 3 > oldsize) {
-+ return oldsize;
-+ }
-+ new[wi++] = 0;
-+ new[wi++] = count;
-+ new[wi++] = value;
-+ } else if (count == 1) {
-+ if (value == 0) {
-+ if (wi + 3 > oldsize) {
-+ return oldsize;
-+ }
-+ new[wi++] = 0;
-+ new[wi++] = 1;
-+ new[wi++] = 0;
-+ } else {
-+ if (wi + 2 > oldsize) {
-+ return oldsize;
-+ }
-+ new[wi++] = value;
-+ new[wi++] = value;
-+ }
-+ } else { /* count == 0 */
-+ if (value == 0) {
-+ if (wi + 2 > oldsize) {
-+ return oldsize;
-+ }
-+ new[wi++] = value;
-+ new[wi++] = value;
-+ } else {
-+ if (wi + 1 > oldsize) {
-+ return oldsize;
-+ }
-+ new[wi++] = value;
-+ }
-+ } /* if count > 1 */
++ mdelay(1);
++ repeatCounter = 0;
++ }
++ continue;
++ }
++ repeatCounter = 0;
++ counter = 0;
++ total_loop = 0;
++ new_req = 0;
++ switch (req.command) {
++ case COMM_NONE:
++ break;
++
++ case COMM_SEND_MEM:
++ dump_send_mem(&net_dev->np, &req, buff, len);
++ break;
++
++ case COMM_EXIT:
++ case COMM_START_WRITE_NETDUMP_ACK:
++ ret = len;
++ goto out;
++
++ case COMM_HELLO:
++ sprintf((netdump_membuf + HEADER_LEN),
++ "Hello, this is netdump version " "0.%02d\n",
++ NETCONSOLE_VERSION);
++ str_len = strlen(netdump_membuf + HEADER_LEN);
++ reply.code = REPLY_HELLO;
++ reply.nr = req.nr;
++ reply.info = net_dev->curr_offset;
++ netdump_send_packet(&net_dev->np, &reply, str_len);
++ break;
+
-+ value = cur_byte;
-+ count = 0;
++ case COMM_GET_PAGE_SIZE:
++ sprintf((netdump_membuf + HEADER_LEN),
++ "PAGE_SIZE: %ld\n", PAGE_SIZE);
++ str_len = strlen(netdump_membuf + HEADER_LEN);
++ reply.code = REPLY_PAGE_SIZE;
++ reply.nr = req.nr;
++ reply.info = PAGE_SIZE;
++ netdump_send_packet(&net_dev->np, &reply, str_len);
++ break;
+
-+ } /* if byte == value */
++ case COMM_GET_NR_PAGES:
++ reply.code = REPLY_NR_PAGES;
++ reply.nr = req.nr;
++ reply.info = num_physpages; /* FIXME: immediately overwritten below */
++ reply.info = page_counter;
++ sprintf((netdump_membuf + HEADER_LEN),
++ "Number of pages: %ld\n", num_physpages);
++ str_len = strlen(netdump_membuf + HEADER_LEN);
++ netdump_send_packet(&net_dev->np, &reply, str_len);
++ break;
+
-+ } /* if count == 255 */
++ case COMM_GET_MAGIC:
++ reply.code = REPLY_MAGIC;
++ reply.nr = req.nr;
++ reply.info = NETCONSOLE_VERSION;
++ memcpy((netdump_membuf + HEADER_LEN),
++ (char *)&dump_magic, sizeof(dump_magic));
++ str_len = sizeof(dump_magic);
++ netdump_send_packet(&net_dev->np, &reply, str_len);
++ break;
+
-+ } /* if ri == 0 */
-+ ri++;
++ default:
++ reply.code = REPLY_ERROR;
++ reply.nr = req.nr;
++ reply.info = req.command;
++ sprintf((netdump_membuf + HEADER_LEN),
++ "Got unknown command code %d!\n", req.command);
++ str_len = strlen(netdump_membuf + HEADER_LEN);
++ netdump_send_packet(&net_dev->np, &reply, str_len);
++ break;
++ }
++ }
++out:
++ netdump_in_progress = 0;
++ return ret;
++}
+
++static int
++dump_validate_config(struct netpoll *np)
++{
++ if (!np->local_ip) {
++ printk("network device %s has no local address, "
++ "aborting.\n", np->name);
++ return -1;
+ }
-+ if (count > 1) {
-+ if (wi + 3 > oldsize) {
-+ return oldsize;
-+ }
-+ new[wi++] = 0;
-+ new[wi++] = count;
-+ new[wi++] = value;
-+ } else if (count == 1) {
-+ if (value == 0) {
-+ if (wi + 3 > oldsize)
-+ return oldsize;
-+ new[wi++] = 0;
-+ new[wi++] = 1;
-+ new[wi++] = 0;
-+ } else {
-+ if (wi + 2 > oldsize)
-+ return oldsize;
-+ new[wi++] = value;
-+ new[wi++] = value;
-+ }
-+ } else { /* count == 0 */
-+ if (value == 0) {
-+ if (wi + 2 > oldsize)
-+ return oldsize;
-+ new[wi++] = value;
-+ new[wi++] = value;
-+ } else {
-+ if (wi + 1 > oldsize)
-+ return oldsize;
-+ new[wi++] = value;
-+ }
-+ } /* if count > 1 */
+
-+ value = cur_byte;
-+ count = 0;
-+ return wi;
-+}
++#define IP(x) ((unsigned char *)&np->local_ip)[x]
++ printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
++#undef IP
+
-+/* setup the rle compression functionality */
-+static struct __dump_compress dump_rle_compression = {
-+ .compress_type = DUMP_COMPRESS_RLE,
-+ .compress_func = dump_compress_rle,
-+ .compress_name = "RLE",
-+};
++ if (!np->local_port) {
++ printk("source_port parameter not specified, aborting.\n");
++ return -1;
++ }
++
++ if (!np->remote_ip) {
++ printk("target_ip parameter not specified, aborting.\n");
++ return -1;
++ }
++
++ np->remote_ip = ntohl(np->remote_ip);
++#define IP(x) ((unsigned char *)&np->remote_ip)[x]
++ printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
++#undef IP
++
++ if (!np->remote_port) {
++ printk("target_port parameter not specified, aborting.\n");
++ return -1;
++ }
++ printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x",
++ np->remote_mac[0], np->remote_mac[1], np->remote_mac[2],
++ np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]);
++
++ if ((np->remote_mac[0] & np->remote_mac[1] & np->remote_mac[2] &
++ np->remote_mac[3] & np->remote_mac[4] & np->remote_mac[5]) == 255)
++ printk("(Broadcast)");
++ printk("\n");
++ return 0;
++}
+
+/*
-+ * Name: dump_compress_rle_init()
-+ * Func: Initialize rle compression for dumping.
++ * Prepares the dump device so we can take a dump later.
++ * Validates the netdump configuration parameters.
++ *
++ * TODO: Network connectivity check should be done here.
+ */
-+static int __init
-+dump_compress_rle_init(void)
++static int
++dump_net_open(struct dump_dev *net_dev, unsigned long arg)
+{
-+ dump_register_compression(&dump_rle_compression);
-+ return 0;
++ int retval = 0;
++
++ /* get the interface name */
++ if (copy_from_user(net_dev->np.dev_name, (void *)arg, IFNAMSIZ))
++ return -EFAULT;
++ net_dev->np.rx_hook = rx_hook;
++ retval = netpoll_setup(&net_dev->np);
++
++ dump_validate_config(&net_dev->np);
++ net_dev->curr_offset = 0;
++ printk("Network device %s successfully configured for dumping\n",
++ net_dev->np.dev_name);
++ return retval;
+}
+
+/*
-+ * Name: dump_compress_rle_cleanup()
-+ * Func: Remove rle compression for dumping.
++ * Close the dump device and release associated resources
++ * Invoked when unconfiguring the dump device.
+ */
-+static void __exit
-+dump_compress_rle_cleanup(void)
++static int
++dump_net_release(struct dump_dev *net_dev)
+{
-+ dump_unregister_compression(DUMP_COMPRESS_RLE);
++ netpoll_cleanup(&net_dev->np);
++ return 0;
+}
+
-+/* module initialization */
-+module_init(dump_compress_rle_init);
-+module_exit(dump_compress_rle_cleanup);
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-+MODULE_DESCRIPTION("RLE compression module for crash dump driver");
-Index: linux-2.6.10/drivers/dump/dump_execute.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_execute.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_execute.c 2005-04-05 16:47:53.943205104 +0800
-@@ -0,0 +1,144 @@
+/*
-+ * The file has the common/generic dump execution code
-+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ * Split and rewrote high level dump execute code to make use
-+ * of dump method interfaces.
-+ *
-+ * Derived from original code in dump_base.c created by
-+ * Matt Robinson <yakker@sourceforge.net>)
-+ *
-+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 International Business Machines Corp.
-+ *
-+ * Assumes dumper and dump config settings are in place
-+ * (invokes corresponding dumper specific routines as applicable)
-+ *
-+ * This code is released under version 2 of the GNU GPL.
++ * Prepare the dump device for use (silence any ongoing activity
++ * and quiesce state) when the system crashes.
+ */
-+#include <linux/kernel.h>
-+#include <linux/notifier.h>
-+#include <linux/dump.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include "dump_methods.h"
-+
-+struct notifier_block *dump_notifier_list; /* dump started/ended callback */
-+
-+extern int panic_timeout;
-+
-+/* Dump progress indicator */
-+void
-+dump_speedo(int i)
++static int
++dump_net_silence(struct dump_dev *net_dev)
+{
-+ static const char twiddle[4] = { '|', '\\', '-', '/' };
-+ printk("%c\b", twiddle[i&3]);
++ netpoll_set_trap(1);
++ local_irq_save(flags_global);
++ startup_handshake = 1;
++ net_dev->curr_offset = 0;
++ printk("Dumping to network device %s on CPU %d ...\n", net_dev->np.name,
++ smp_processor_id());
++ return 0;
+}
+
-+/* Make the device ready and write out the header */
-+int dump_begin(void)
++/*
++ * Invoked when dumping is done. This is the time to put things back
++ * (i.e. undo the effects of dump_net_silence) so the device is
++ * available for normal use.
++ */
++static int
++dump_net_resume(struct dump_dev *net_dev)
+{
-+ int err = 0;
-+
-+ /* dump_dev = dump_config.dumper->dev; */
-+ dumper_reset();
-+ if ((err = dump_dev_silence())) {
-+ /* quiesce failed, can't risk continuing */
-+ /* Todo/Future: switch to alternate dump scheme if possible */
-+ printk("dump silence dev failed ! error %d\n", err);
-+ return err;
-+ }
++ int indx;
++ size_t str_len;
++ reply_t reply;
+
-+ pr_debug("Writing dump header\n");
-+ if ((err = dump_update_header())) {
-+ printk("dump update header failed ! error %d\n", err);
-+ dump_dev_resume();
-+ return err;
++ sprintf((netdump_membuf + HEADER_LEN), "NETDUMP end.\n");
++ str_len = strlen(netdump_membuf + HEADER_LEN);
++ for( indx = 0; indx < 6; indx++) {
++ reply.code = REPLY_END_NETDUMP;
++ reply.nr = 0;
++ reply.info = 0;
++ netdump_send_packet(&net_dev->np, &reply, str_len);
+ }
++ printk("NETDUMP END!\n");
++ local_irq_restore(flags_global);
++ netpoll_set_trap(0);
++ startup_handshake = 0;
++ return 0;
++}
+
-+ dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
-+
++/*
++ * Seek to the specified offset in the dump device.
++ * Makes sure this is a valid offset, otherwise returns an error.
++ */
++static int
++dump_net_seek(struct dump_dev *net_dev, loff_t off)
++{
++ net_dev->curr_offset = off;
+ return 0;
+}
+
-+/*
-+ * Write the dump terminator, a final header update and let go of
-+ * exclusive use of the device for dump.
++/*
++ * Write out the given buffer to the network target, one page at a time.
+ */
-+int dump_complete(void)
++static int
++dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len)
+{
-+ int ret = 0;
++ int cnt, i, off;
++ ssize_t ret;
+
-+ if (dump_config.level != DUMP_LEVEL_HEADER) {
-+ if ((ret = dump_update_end_marker())) {
-+ printk("dump update end marker error %d\n", ret);
-+ }
-+ if ((ret = dump_update_header())) {
-+ printk("dump update header error %d\n", ret);
-+ }
-+ }
-+ ret = dump_dev_resume();
++ cnt = len/ PAGE_SIZE;
+
-+ if ((panic_timeout > 0) && (!(dump_config.flags & (DUMP_FLAGS_SOFTBOOT | DUMP_FLAGS_NONDISRUPT)))) {
-+ mdelay(panic_timeout * 1000);
-+ machine_restart(NULL);
++ for (i = 0; i < cnt; i++) {
++ off = i* PAGE_SIZE;
++ ret = do_netdump(net_dev, buf+off, PAGE_SIZE);
++ if (ret <= 0)
++ return -1;
++ net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE;
+ }
-+
-+ return ret;
++ return len;
+}
+
-+/* Saves all dump data */
-+int dump_execute_savedump(void)
++/*
++ * check if the last dump i/o is over and ready for next request
++ */
++static int
++dump_net_ready(struct dump_dev *net_dev, void *buf)
+{
-+ int ret = 0, err = 0;
-+
-+ if ((ret = dump_begin())) {
-+ return ret;
-+ }
++ return 0;
++}
+
-+ if (dump_config.level != DUMP_LEVEL_HEADER) {
-+ ret = dump_sequencer();
-+ }
-+ if ((err = dump_complete())) {
-+ printk("Dump complete failed. Error %d\n", err);
++/*
++ * ioctl function used for configuring network dump
++ */
++static int
++dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg)
++{
++ switch (cmd) {
++ case DIOSTARGETIP:
++ net_dev->np.remote_ip= arg;
++ break;
++ case DIOSTARGETPORT:
++ net_dev->np.remote_port = (u16)arg;
++ break;
++ case DIOSSOURCEPORT:
++ net_dev->np.local_port = (u16)arg;
++ break;
++ case DIOSETHADDR:
++ return copy_from_user(net_dev->np.remote_mac, (void *)arg, 6);
++ break;
++ case DIOGTARGETIP:
++ case DIOGTARGETPORT:
++ case DIOGSOURCEPORT:
++ case DIOGETHADDR:
++ break;
++ default:
++ return -EINVAL;
+ }
-+
-+ return ret;
++ return 0;
+}
+
-+extern void dump_calc_bootmap_pages(void);
++struct dump_dev_ops dump_netdev_ops = {
++ .open = dump_net_open,
++ .release = dump_net_release,
++ .silence = dump_net_silence,
++ .resume = dump_net_resume,
++ .seek = dump_net_seek,
++ .write = dump_net_write,
++ /* .read not implemented */
++ .ready = dump_net_ready,
++ .ioctl = dump_net_ioctl
++};
+
-+/* Does all the real work: Capture and save state */
-+int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
-+{
-+ int ret = 0;
++static struct dump_dev default_dump_netdev = {
++ .type_name = "networkdev",
++ .ops = &dump_netdev_ops,
++ .curr_offset = 0,
++ .np.name = "netdump",
++ .np.dev_name = "eth0",
++ .np.rx_hook = rx_hook,
++ .np.local_port = 6688,
++ .np.remote_port = 6688,
++ .np.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
++};
+
-+#ifdef CONFIG_DISCONTIGMEM
-+ printk(KERN_INFO "Reconfiguring memory bank information....\n");
-+ printk(KERN_INFO "This may take a while....\n");
-+ dump_reconfigure_mbanks();
-+#endif
++static int __init
++dump_netdev_init(void)
++{
++ default_dump_netdev.curr_offset = 0;
+
-+ if ((ret = dump_configure_header(panic_str, regs))) {
-+ printk("dump config header failed ! error %d\n", ret);
-+ return ret;
++ if (dump_register_device(&default_dump_netdev) < 0) {
++ printk("network dump device driver registration failed\n");
++ return -1;
+ }
++ printk("network device driver for LKCD registered\n");
++
++ get_random_bytes(&dump_magic, sizeof(dump_magic));
++ return 0;
++}
+
-+ dump_calc_bootmap_pages();
-+ /* tell interested parties that a dump is about to start */
-+ notifier_call_chain(&dump_notifier_list, DUMP_BEGIN,
-+ &dump_config.dump_device);
-+
-+ if (dump_config.level != DUMP_LEVEL_NONE)
-+ ret = dump_execute_savedump();
++static void __exit
++dump_netdev_cleanup(void)
++{
++ dump_unregister_device(&default_dump_netdev);
++}
+
-+ pr_debug("dumped %ld blocks of %d bytes each\n",
-+ dump_config.dumper->count, DUMP_BUFFER_SIZE);
-+
-+ /* tell interested parties that a dump has completed */
-+ notifier_call_chain(&dump_notifier_list, DUMP_END,
-+ &dump_config.dump_device);
++MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
++MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)");
++MODULE_LICENSE("GPL");
+
-+ return ret;
-+}
-Index: linux-2.6.10/drivers/dump/dump_netdev.c
++module_init(dump_netdev_init);
++module_exit(dump_netdev_cleanup);
+Index: linux-2.6.10/drivers/dump/dump_methods.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_netdev.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_netdev.c 2005-04-05 16:47:53.936206168 +0800
-@@ -0,0 +1,566 @@
+--- linux-2.6.10.orig/drivers/dump/dump_methods.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_methods.h 2005-04-07 18:13:56.920750576 +0800
+@@ -0,0 +1,357 @@
+/*
-+ * Implements the dump driver interface for saving a dump via network
-+ * interface.
++ * Generic interfaces for flexible system dump
+ *
-+ * Some of this code has been taken/adapted from Ingo Molnar's netconsole
-+ * code. LKCD team expresses its thanks to Ingo.
++ * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
+ *
-+ * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
-+ * Adapted netconsole code to implement LKCD dump over the network.
++ * Copyright (C) 2002 International Business Machines Corp.
+ *
-+ * Nov 2002 - Bharata B. Rao <bharata@in.ibm.com>
-+ * Innumerable code cleanups, simplification and some fixes.
-+ * Netdump configuration done by ioctl instead of using module parameters.
-+ * Oct 2003 - Prasanna S Panchamukhi <prasanna@in.ibm.com>
-+ * Netdump code modified to use Netpoll API's.
++ * This code is released under version 2 of the GNU GPL.
++ */
++
++#ifndef _LINUX_DUMP_METHODS_H
++#define _LINUX_DUMP_METHODS_H
++
++/*
++ * Inspired by Matt Robinson's suggestion of introducing dump
++ * methods as a way to enable different crash dump facilities to
++ * coexist where each employs its own scheme or dumping policy.
+ *
-+ * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
-+ * Copyright (C) 2002 International Business Machines Corp.
++ * The code here creates a framework for flexible dump by defining
++ * a set of methods and providing associated helpers that differentiate
++ * between the underlying mechanism (how to dump), overall scheme
++ * (sequencing of stages and data dumped and associated quiescing),
++ * output format (what the dump output looks like), target type
++ * (where to save the dump; see dumpdev.h), and selection policy
++ * (state/data to dump).
++ *
++ * These sets of interfaces can be mixed and matched to build a
++ * dumper suitable for a given situation, allowing for
++ * flexibility as well as an appropriate degree of code reuse.
++ * For example all features and options of lkcd (including
++ * granular selective dumping in the near future) should be
++ * available even when say, the 2 stage soft-boot based mechanism
++ * is used for taking disruptive dumps.
+ *
-+ * This code is released under version 2 of the GNU GPL.
++ * Todo: Additionally modules or drivers may supply their own
++ * custom dumpers which extend dump with module specific
++ * information or hardware state, and can even tweak the
++ * mechanism when it comes to saving state relevant to
++ * them.
+ */
+
-+#include <net/tcp.h>
-+#include <net/udp.h>
-+#include <linux/delay.h>
-+#include <linux/random.h>
-+#include <linux/reboot.h>
-+#include <linux/module.h>
-+#include <linux/dump.h>
-+#include <linux/dump_netdev.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/dumpdev.h>
++#include <asm/page.h> /* get_order */
++
++#define MAX_PASSES 6
++#define MAX_DEVS 4
++
++
++/* To customise selection of pages to be dumped in a given pass/group */
++struct dump_data_filter{
++ char name[32];
++ int (*selector)(int, unsigned long, unsigned long);
++ ulong level_mask; /* dump level(s) for which this filter applies */
++ loff_t start[MAX_NUMNODES], end[MAX_NUMNODES]; /* location range applicable */
++ ulong num_mbanks; /* Number of memory banks. Greater than one for discontig memory (NUMA) */
++};
++
++
++/*
++ * Determined by the kind of dump mechanism and appropriate
++ * overall scheme
++ */
++struct dump_scheme_ops {
++ /* sets aside memory, inits data structures etc */
++ int (*configure)(unsigned long devid);
++ /* releases resources */
++ int (*unconfigure)(void);
++
++ /* ordering of passes, invoking iterator */
++ int (*sequencer)(void);
++ /* iterates over system data, selects and acts on data to dump */
++ int (*iterator)(int, int (*)(unsigned long, unsigned long),
++ struct dump_data_filter *);
++ /* action when data is selected for dump */
++ int (*save_data)(unsigned long, unsigned long);
++ /* action when data is to be excluded from dump */
++ int (*skip_data)(unsigned long, unsigned long);
++ /* policies for space, multiple dump devices etc */
++ int (*write_buffer)(void *, unsigned long);
++};
++
++struct dump_scheme {
++ /* the name serves as an anchor to locate the scheme after reboot */
++ char name[32];
++ struct dump_scheme_ops *ops;
++ struct list_head list;
++};
+
-+#include <asm/unaligned.h>
++/* Quiescing/Silence levels (controls IPI callback behaviour) */
++extern enum dump_silence_levels {
++ DUMP_SOFT_SPIN_CPUS = 1,
++ DUMP_HARD_SPIN_CPUS = 2,
++ DUMP_HALT_CPUS = 3,
++} dump_silence_level;
+
-+static int startup_handshake;
-+static int page_counter;
-+static unsigned long flags_global;
-+static int netdump_in_progress;
++/* determined by the dump (file) format */
++struct dump_fmt_ops {
++ /* build header */
++ int (*configure_header)(const char *, const struct pt_regs *);
++ int (*update_header)(void); /* update header and write it out */
++ /* save curr context */
++ void (*save_context)(int, const struct pt_regs *,
++ struct task_struct *);
++ /* typically called by the save_data action */
++ /* add formatted data to the dump buffer */
++ int (*add_data)(unsigned long, unsigned long);
++ int (*update_end_marker)(void);
++};
+
-+/*
-+ * security depends on the trusted path between the netconsole
-+ * server and netconsole client, since none of the packets are
-+ * encrypted. The random magic number protects the protocol
-+ * against spoofing.
-+ */
-+static u64 dump_magic;
++struct dump_fmt {
++ unsigned long magic;
++ char name[32]; /* lcrash, crash, elf-core etc */
++ struct dump_fmt_ops *ops;
++ struct list_head list;
++};
+
-+/*
-+ * We maintain a small pool of fully-sized skbs,
-+ * to make sure the message gets out even in
-+ * extreme OOM situations.
++/*
++ * Modules will be able to add their own data capture schemes by
++ * registering their own dumpers. Typically they would use the
++ * primary dumper as a template and tune it with their routines.
++ * Still Todo.
+ */
+
-+static void rx_hook(struct netpoll *np, int port, char *msg, int size);
-+int new_req = 0;
-+static req_t req;
-+
-+static void rx_hook(struct netpoll *np, int port, char *msg, int size)
-+{
-+ req_t * __req = (req_t *) msg;
-+ /*
-+ * First check if were are dumping or doing startup handshake, if
-+ * not quickly return.
-+ */
++/* The combined dumper profile (mechanism, scheme, dev, fmt) */
++struct dumper {
++ char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */
++ struct dump_scheme *scheme;
++ struct dump_fmt *fmt;
++ struct __dump_compress *compress;
++ struct dump_data_filter *filter;
++ struct dump_dev *dev;
++ /* state valid only for active dumper(s) - per instance */
++ /* run time state/context */
++ int curr_pass;
++ unsigned long count;
++ loff_t curr_offset; /* current logical offset into dump device */
++ loff_t curr_loc; /* current memory location */
++ void *curr_buf; /* current position in the dump buffer */
++ void *dump_buf; /* starting addr of dump buffer */
++ int header_dirty; /* whether the header needs to be written out */
++ int header_len;
++ struct list_head dumper_list; /* links to other dumpers */
++};
+
-+ if (!netdump_in_progress)
-+ return ;
++/* Starting point to get to the current configured state */
++struct dump_config {
++ ulong level;
++ ulong flags;
++ struct dumper *dumper;
++ unsigned long dump_device;
++ unsigned long dump_addr; /* relevant only for in-memory dumps */
++ struct list_head dump_dev_list;
++};
+
-+ if ((ntohl(__req->command) != COMM_GET_MAGIC) &&
-+ (ntohl(__req->command) != COMM_HELLO) &&
-+ (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) &&
-+ (ntohl(__req->command) != COMM_START_NETDUMP_ACK) &&
-+ (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0))
-+ goto out;
++extern struct dump_config dump_config;
+
-+ req.magic = ntohl(__req->magic);
-+ req.command = ntohl(__req->command);
-+ req.from = ntohl(__req->from);
-+ req.to = ntohl(__req->to);
-+ req.nr = ntohl(__req->nr);
-+ new_req = 1;
-+out:
-+ return ;
-+}
-+static char netdump_membuf[1024 + HEADER_LEN + 1];
-+/*
-+ * Fill the netdump_membuf with the header information from reply_t structure
-+ * and send it down to netpoll_send_udp() routine.
++/* Used to save the dump config across a reboot for 2-stage dumps:
++ *
++ * Note: The scheme, format, compression and device type should be
++ * registered at bootup, for this config to be sharable across soft-boot.
++ * The function addresses could have changed and become invalid, and
++ * need to be set up again.
+ */
-+static void
-+netdump_send_packet(struct netpoll *np, reply_t *reply, size_t data_len) {
-+ char *b;
++struct dump_config_block {
++ u64 magic; /* for a quick sanity check after reboot */
++ struct dump_memdev memdev; /* handle to dump stored in memory */
++ struct dump_config config;
++ struct dumper dumper;
++ struct dump_scheme scheme;
++ struct dump_fmt fmt;
++ struct __dump_compress compress;
++ struct dump_data_filter filter_table[MAX_PASSES];
++ struct dump_anydev dev[MAX_DEVS]; /* target dump device */
++};
+
-+ b = &netdump_membuf[1];
-+ netdump_membuf[0] = NETCONSOLE_VERSION;
-+ put_unaligned(htonl(reply->nr), (u32 *) b);
-+ put_unaligned(htonl(reply->code), (u32 *) (b + sizeof(reply->code)));
-+ put_unaligned(htonl(reply->info), (u32 *) (b + sizeof(reply->code) +
-+ sizeof(reply->info)));
-+ netpoll_send_udp(np, netdump_membuf, data_len + HEADER_LEN);
-+}
+
-+static void
-+dump_send_mem(struct netpoll *np, req_t *req, const char* buff, size_t len)
-+{
-+ int i;
++/* Wrappers that invoke the methods for the current (active) dumper */
+
-+ int nr_chunks = len/1024;
-+ reply_t reply;
++/* Scheme operations */
+
-+ reply.nr = req->nr;
-+ reply.code = REPLY_MEM;
-+ if ( nr_chunks <= 0)
-+ nr_chunks = 1;
-+ for (i = 0; i < nr_chunks; i++) {
-+ unsigned int offset = i*1024;
-+ reply.info = offset;
-+ memcpy((netdump_membuf + HEADER_LEN), (buff + offset), 1024);
-+ netdump_send_packet(np, &reply, 1024);
-+ }
++static inline int dump_sequencer(void)
++{
++ return dump_config.dumper->scheme->ops->sequencer();
+}
+
-+/*
-+ * This function waits for the client to acknowledge the receipt
-+ * of the netdump startup reply, with the possibility of packets
-+ * getting lost. We resend the startup packet if no ACK is received,
-+ * after a 1 second delay.
-+ *
-+ * (The client can test the success of the handshake via the HELLO
-+ * command, and send ACKs until we enter netdump mode.)
-+ */
-+static int
-+dump_handshake(struct dump_dev *net_dev)
++static inline int dump_iterator(int pass, int (*action)(unsigned long,
++ unsigned long), struct dump_data_filter *filter)
+{
-+ reply_t reply;
-+ int i, j;
-+ size_t str_len;
-+
-+ if (startup_handshake) {
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ "NETDUMP start, waiting for start-ACK.\n");
-+ reply.code = REPLY_START_NETDUMP;
-+ reply.nr = 0;
-+ reply.info = 0;
-+ } else {
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ "NETDUMP start, waiting for start-ACK.\n");
-+ reply.code = REPLY_START_WRITE_NETDUMP;
-+ reply.nr = net_dev->curr_offset;
-+ reply.info = net_dev->curr_offset;
-+ }
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+
-+ /* send 300 handshake packets before declaring failure */
-+ for (i = 0; i < 300; i++) {
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
++ return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
++}
+
-+ /* wait 1 sec */
-+ for (j = 0; j < 10000; j++) {
-+ udelay(100);
-+ netpoll_poll(&net_dev->np);
-+ if (new_req)
-+ break;
-+ }
++#define dump_save_data dump_config.dumper->scheme->ops->save_data
++#define dump_skip_data dump_config.dumper->scheme->ops->skip_data
+
-+ /*
-+ * if there is no new request, try sending the handshaking
-+ * packet again
-+ */
-+ if (!new_req)
-+ continue;
++static inline int dump_write_buffer(void *buf, unsigned long len)
++{
++ return dump_config.dumper->scheme->ops->write_buffer(buf, len);
++}
+
-+ /*
-+ * check if the new request is of the expected type,
-+ * if so, return, else try sending the handshaking
-+ * packet again
-+ */
-+ if (startup_handshake) {
-+ if (req.command == COMM_HELLO || req.command ==
-+ COMM_START_NETDUMP_ACK) {
-+ return 0;
-+ } else {
-+ new_req = 0;
-+ continue;
-+ }
-+ } else {
-+ if (req.command == COMM_SEND_MEM) {
-+ return 0;
-+ } else {
-+ new_req = 0;
-+ continue;
-+ }
-+ }
-+ }
-+ return -1;
++static inline int dump_configure(unsigned long devid)
++{
++ return dump_config.dumper->scheme->ops->configure(devid);
+}
+
-+static ssize_t
-+do_netdump(struct dump_dev *net_dev, const char* buff, size_t len)
++static inline int dump_unconfigure(void)
+{
-+ reply_t reply;
-+ ssize_t ret = 0;
-+ int repeatCounter, counter, total_loop;
-+ size_t str_len;
-+
-+ netdump_in_progress = 1;
++ return dump_config.dumper->scheme->ops->unconfigure();
++}
+
-+ if (dump_handshake(net_dev) < 0) {
-+ printk("network dump failed due to handshake failure\n");
-+ goto out;
-+ }
++/* Format operations */
+
-+ /*
-+ * Ideally startup handshake should be done during dump configuration,
-+ * i.e., in dump_net_open(). This will be done when I figure out
-+ * the dependency between startup handshake, subsequent write and
-+ * various commands wrt to net-server.
-+ */
-+ if (startup_handshake)
-+ startup_handshake = 0;
++static inline int dump_configure_header(const char *panic_str,
++ const struct pt_regs *regs)
++{
++ return dump_config.dumper->fmt->ops->configure_header(panic_str, regs);
++}
+
-+ counter = 0;
-+ repeatCounter = 0;
-+ total_loop = 0;
-+ while (1) {
-+ if (!new_req) {
-+ netpoll_poll(&net_dev->np);
-+ }
-+ if (!new_req) {
-+ repeatCounter++;
++static inline void dump_save_context(int cpu, const struct pt_regs *regs,
++ struct task_struct *tsk)
++{
++ dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk);
++}
+
-+ if (repeatCounter > 5) {
-+ counter++;
-+ if (counter > 10000) {
-+ if (total_loop >= 100000) {
-+ printk("Time OUT LEAVE NOW\n");
-+ goto out;
-+ } else {
-+ total_loop++;
-+ printk("Try number %d out of "
-+ "10 before Time Out\n",
-+ total_loop);
-+ }
-+ }
-+ mdelay(1);
-+ repeatCounter = 0;
-+ }
-+ continue;
-+ }
-+ repeatCounter = 0;
-+ counter = 0;
-+ total_loop = 0;
-+ new_req = 0;
-+ switch (req.command) {
-+ case COMM_NONE:
-+ break;
++static inline int dump_save_this_cpu(const struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
+
-+ case COMM_SEND_MEM:
-+ dump_send_mem(&net_dev->np, &req, buff, len);
-+ break;
++ dump_save_context(cpu, regs, current);
++ return 1;
++}
+
-+ case COMM_EXIT:
-+ case COMM_START_WRITE_NETDUMP_ACK:
-+ ret = len;
-+ goto out;
++static inline int dump_update_header(void)
++{
++ return dump_config.dumper->fmt->ops->update_header();
++}
+
-+ case COMM_HELLO:
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ "Hello, this is netdump version " "0.%02d\n",
-+ NETCONSOLE_VERSION);
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+ reply.code = REPLY_HELLO;
-+ reply.nr = req.nr;
-+ reply.info = net_dev->curr_offset;
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
-+ break;
++static inline int dump_update_end_marker(void)
++{
++ return dump_config.dumper->fmt->ops->update_end_marker();
++}
+
-+ case COMM_GET_PAGE_SIZE:
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ "PAGE_SIZE: %ld\n", PAGE_SIZE);
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+ reply.code = REPLY_PAGE_SIZE;
-+ reply.nr = req.nr;
-+ reply.info = PAGE_SIZE;
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
-+ break;
++static inline int dump_add_data(unsigned long loc, unsigned long sz)
++{
++ return dump_config.dumper->fmt->ops->add_data(loc, sz);
++}
+
-+ case COMM_GET_NR_PAGES:
-+ reply.code = REPLY_NR_PAGES;
-+ reply.nr = req.nr;
-+ reply.info = num_physpages;
-+ reply.info = page_counter;
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ "Number of pages: %ld\n", num_physpages);
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
-+ break;
++/* Compression operation */
++static inline int dump_compress_data(char *src, int slen, char *dst,
++ unsigned long loc)
++{
++ return dump_config.dumper->compress->compress_func(src, slen,
++ dst, DUMP_DPC_PAGE_SIZE, loc);
++}
+
-+ case COMM_GET_MAGIC:
-+ reply.code = REPLY_MAGIC;
-+ reply.nr = req.nr;
-+ reply.info = NETCONSOLE_VERSION;
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ (char *)&dump_magic, sizeof(dump_magic));
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
-+ break;
+
-+ default:
-+ reply.code = REPLY_ERROR;
-+ reply.nr = req.nr;
-+ reply.info = req.command;
-+ sprintf((netdump_membuf + HEADER_LEN),
-+ "Got unknown command code %d!\n", req.command);
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
-+ break;
-+ }
-+ }
-+out:
-+ netdump_in_progress = 0;
-+ return ret;
-+}
++/* Prototypes of some default implementations of dump methods */
+
-+static int
-+dump_validate_config(struct netpoll *np)
-+{
-+ if (!np->local_ip) {
-+ printk("network device %s has no local address, "
-+ "aborting.\n", np->name);
-+ return -1;
-+ }
++extern struct __dump_compress dump_none_compression;
+
-+#define IP(x) ((unsigned char *)&np->local_ip)[x]
-+ printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
-+#undef IP
++/* Default scheme methods (dump_scheme.c) */
+
-+ if (!np->local_port) {
-+ printk("source_port parameter not specified, aborting.\n");
-+ return -1;
-+ }
++extern int dump_generic_sequencer(void);
++extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned
++ long), struct dump_data_filter *filter);
++extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
++extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
++extern int dump_generic_write_buffer(void *buf, unsigned long len);
++extern int dump_generic_configure(unsigned long);
++extern int dump_generic_unconfigure(void);
++#ifdef CONFIG_DISCONTIGMEM
++extern void dump_reconfigure_mbanks(void);
++#endif
+
-+ if (!np->remote_ip) {
-+ printk("target_ip parameter not specified, aborting.\n");
-+ return -1;
-+ }
++/* Default scheme template */
++extern struct dump_scheme dump_scheme_singlestage;
+
-+ np->remote_ip = ntohl(np->remote_ip);
-+#define IP(x) ((unsigned char *)&np->remote_ip)[x]
-+ printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
-+#undef IP
++/* Default dump format methods */
+
-+ if (!np->remote_port) {
-+ printk("target_port parameter not specified, aborting.\n");
-+ return -1;
-+ }
-+ printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x",
-+ np->remote_mac[0], np->remote_mac[1], np->remote_mac[2],
-+ np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]);
++extern int dump_lcrash_configure_header(const char *panic_str,
++ const struct pt_regs *regs);
++extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
++ struct task_struct *tsk);
++extern int dump_generic_update_header(void);
++extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
++extern int dump_lcrash_update_end_marker(void);
+
-+ if ((np->remote_mac[0] & np->remote_mac[1] & np->remote_mac[2] &
-+ np->remote_mac[3] & np->remote_mac[4] & np->remote_mac[5]) == 255)
-+ printk("(Broadcast)");
-+ printk("\n");
-+ return 0;
-+}
++/* Default format (lcrash) template */
++extern struct dump_fmt dump_fmt_lcrash;
+
-+/*
-+ * Prepares the dump device so we can take a dump later.
-+ * Validates the netdump configuration parameters.
-+ *
-+ * TODO: Network connectivity check should be done here.
-+ */
-+static int
-+dump_net_open(struct dump_dev *net_dev, unsigned long arg)
-+{
-+ int retval = 0;
++/* Default dump selection filter table */
+
-+ /* get the interface name */
-+ if (copy_from_user(net_dev->np.dev_name, (void *)arg, IFNAMSIZ))
-+ return -EFAULT;
-+ net_dev->np.rx_hook = rx_hook;
-+ retval = netpoll_setup(&net_dev->np);
++/*
++ * Entries listed in order of importance and correspond to passes
++ * The last entry (with a level_mask of zero) typically reflects data that
++ * won't be dumped -- this may for example be used to identify data
++ * that will certainly be skipped, so the corresponding memory areas can be
++ * utilized as scratch space.
++ */
++extern struct dump_data_filter dump_filter_table[];
+
-+ dump_validate_config(&net_dev->np);
-+ net_dev->curr_offset = 0;
-+ printk("Network device %s successfully configured for dumping\n",
-+ net_dev->np.dev_name);
-+ return retval;
-+}
++/* Some pre-defined dumpers */
++extern struct dumper dumper_singlestage;
++extern struct dumper dumper_stage1;
++extern struct dumper dumper_stage2;
++
++/* These are temporary */
++#define DUMP_MASK_HEADER DUMP_LEVEL_HEADER
++#define DUMP_MASK_KERN DUMP_LEVEL_KERN
++#define DUMP_MASK_USED DUMP_LEVEL_USED
++#define DUMP_MASK_UNUSED DUMP_LEVEL_ALL_RAM
++#define DUMP_MASK_REST 0 /* dummy for now */
+
-+/*
-+ * Close the dump device and release associated resources
-+ * Invoked when unconfiguring the dump device.
-+ */
-+static int
-+dump_net_release(struct dump_dev *net_dev)
++/* Helpers - move these to dump.h later ? */
++
++int dump_generic_execute(const char *panic_str, const struct pt_regs *regs);
++extern int dump_ll_write(void *buf, unsigned long len);
++int dump_check_and_free_page(struct dump_memdev *dev, struct page *page);
++
++static inline void dumper_reset(void)
+{
-+ netpoll_cleanup(&net_dev->np);
-+ return 0;
++ dump_config.dumper->curr_buf = dump_config.dumper->dump_buf;
++ dump_config.dumper->curr_loc = 0;
++ dump_config.dumper->curr_offset = 0;
++ dump_config.dumper->count = 0;
++ dump_config.dumper->curr_pass = 0;
+}
+
-+/*
-+ * Prepare the dump device for use (silence any ongoing activity
-+ * and quiesce state) when the system crashes.
++/*
++ * May later be moulded to perform boot-time allocations so we can dump
++ * earlier during bootup
+ */
-+static int
-+dump_net_silence(struct dump_dev *net_dev)
++static inline void *dump_alloc_mem(unsigned long size)
+{
-+ netpoll_set_trap(1);
-+ local_irq_save(flags_global);
-+ startup_handshake = 1;
-+ net_dev->curr_offset = 0;
-+ printk("Dumping to network device %s on CPU %d ...\n", net_dev->np.name,
-+ smp_processor_id());
-+ return 0;
++ return (void *) __get_free_pages(GFP_KERNEL, get_order(size));
+}
+
-+/*
-+ * Invoked when dumping is done. This is the time to put things back
-+ * (i.e. undo the effects of dump_block_silence) so the device is
-+ * available for normal use.
-+ */
-+static int
-+dump_net_resume(struct dump_dev *net_dev)
++static inline void dump_free_mem(void *buf)
+{
-+ int indx;
-+ size_t str_len;
-+ reply_t reply;
++ struct page *page;
+
-+ sprintf((netdump_membuf + HEADER_LEN), "NETDUMP end.\n");
-+ str_len = strlen(netdump_membuf + HEADER_LEN);
-+ for( indx = 0; indx < 6; indx++) {
-+ reply.code = REPLY_END_NETDUMP;
-+ reply.nr = 0;
-+ reply.info = 0;
-+ netdump_send_packet(&net_dev->np, &reply, str_len);
++ /* ignore reserved pages (e.g. post soft boot stage) */
++ if (buf && (page = virt_to_page(buf))) {
++ if (PageReserved(page))
++ return;
+ }
-+ printk("NETDUMP END!\n");
-+ local_irq_restore(flags_global);
-+ netpoll_set_trap(0);
-+ startup_handshake = 0;
-+ return 0;
++ /*
++ * Allocated using __get_free_pages().
++ */
++ free_pages((unsigned long)buf,
++ get_order(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE));
+}
+
-+/*
-+ * Seek to the specified offset in the dump device.
-+ * Makes sure this is a valid offset, otherwise returns an error.
-+ */
-+static int
-+dump_net_seek(struct dump_dev *net_dev, loff_t off)
-+{
-+ net_dev->curr_offset = off;
-+ return 0;
-+}
+
++#endif /* _LINUX_DUMP_METHODS_H */
+Index: linux-2.6.10/drivers/dump/dump_gzip.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_gzip.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_gzip.c 2005-04-07 18:13:56.917751032 +0800
+@@ -0,0 +1,174 @@
+/*
++ * GZIP Compression functions for kernel crash dumps.
++ *
++ * Created by: Matt Robinson (yakker@sourceforge.net)
++ * Copyright 2001 Matt D. Robinson. All rights reserved.
+ *
++ * This code is released under version 2 of the GNU GPL.
+ */
-+static int
-+dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len)
-+{
-+ int cnt, i, off;
-+ ssize_t ret;
+
-+ cnt = len/ PAGE_SIZE;
++/* header files */
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/dump.h>
++#include <linux/zlib.h>
++#include <linux/vmalloc.h>
+
-+ for (i = 0; i < cnt; i++) {
-+ off = i* PAGE_SIZE;
-+ ret = do_netdump(net_dev, buf+off, PAGE_SIZE);
-+ if (ret <= 0)
-+ return -1;
-+ net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE;
-+ }
-+ return len;
-+}
++static void *deflate_workspace;
++static unsigned long workspace_paddr[2];
++
++static u8 *safety_buffer;
+
+/*
-+ * check if the last dump i/o is over and ready for next request
++ * Name: dump_compress_gzip()
++ * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the
++ * deflate functions similar to what's used in PPP).
+ */
-+static int
-+dump_net_ready(struct dump_dev *net_dev, void *buf)
++static u32
++dump_compress_gzip(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
++ unsigned long loc)
+{
-+ return 0;
++ /* error code and dump stream */
++ int err;
++ z_stream dump_stream;
++ struct page *pg = (struct page *)loc;
++ unsigned long paddr = page_to_pfn(pg) << PAGE_SHIFT;
++ static int warning = 0;
++
++ dump_stream.workspace = deflate_workspace;
++ if ((paddr == workspace_paddr[0]) || (paddr == workspace_paddr[1])) {
++ /*
++ * This page belongs to the deflate_workspace used as a temporary
++ * buffer for compression. Hence, dump it without compression.
++ */
++ return(0);
++ }
++ if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) {
++ /* fall back to RLE compression */
++ printk("dump_compress_gzip(): zlib_deflateInit() "
++ "failed (%d)!\n", err);
++ return 0;
++ }
++
++ /* copy the old page to the safety buffer */
++ if (oldsize <= DUMP_PAGE_SIZE) {
++ memcpy(safety_buffer, old, oldsize);
++ dump_stream.next_in = (u8 *) safety_buffer;
++ } else {
++ if (!warning) {
++ printk("dump_compress_gzip oversize input: %d\n",
++ oldsize);
++ warning++;
++ }
++ dump_stream.next_in = (u8 *) old;
++ }
++
++ /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */
++ dump_stream.avail_in = oldsize;
++
++ /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */
++ dump_stream.next_out = new;
++ dump_stream.avail_out = newsize;
++
++ /* deflate the page -- check for error */
++ err = zlib_deflate(&dump_stream, Z_FINISH);
++ if (err != Z_STREAM_END) {
++ /* zero is return code here */
++ (void)zlib_deflateEnd(&dump_stream);
++ printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n",
++ err);
++ return 0;
++ }
++
++ /* let's end the deflated compression stream */
++ if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) {
++ printk("dump_compress_gzip(): zlib_deflateEnd() "
++ "failed (%d)!\n", err);
++ }
++
++ /* return the compressed byte total (if it's smaller) */
++ if (dump_stream.total_out >= oldsize) {
++ return oldsize;
++ }
++ return dump_stream.total_out;
+}
+
++/* setup the gzip compression functionality */
++static struct __dump_compress dump_gzip_compression = {
++ .compress_type = DUMP_COMPRESS_GZIP,
++ .compress_func = dump_compress_gzip,
++ .compress_name = "GZIP",
++};
++
+/*
-+ * ioctl function used for configuring network dump
++ * Name: dump_compress_gzip_init()
++ * Func: Initialize gzip as a compression mechanism.
+ */
-+static int
-+dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg)
++static int __init
++dump_compress_gzip_init(void)
+{
-+ switch (cmd) {
-+ case DIOSTARGETIP:
-+ net_dev->np.remote_ip= arg;
-+ break;
-+ case DIOSTARGETPORT:
-+ net_dev->np.remote_port = (u16)arg;
-+ break;
-+ case DIOSSOURCEPORT:
-+ net_dev->np.local_port = (u16)arg;
-+ break;
-+ case DIOSETHADDR:
-+ return copy_from_user(net_dev->np.remote_mac, (void *)arg, 6);
-+ break;
-+ case DIOGTARGETIP:
-+ case DIOGTARGETPORT:
-+ case DIOGSOURCEPORT:
-+ case DIOGETHADDR:
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
++ struct page *pg;
+
-+struct dump_dev_ops dump_netdev_ops = {
-+ .open = dump_net_open,
-+ .release = dump_net_release,
-+ .silence = dump_net_silence,
-+ .resume = dump_net_resume,
-+ .seek = dump_net_seek,
-+ .write = dump_net_write,
-+ /* .read not implemented */
-+ .ready = dump_net_ready,
-+ .ioctl = dump_net_ioctl
-+};
++ deflate_workspace = vmalloc(zlib_deflate_workspacesize());
++ if (!deflate_workspace) {
++ printk("dump_compress_gzip_init(): Failed to "
++ "alloc %d bytes for deflate workspace\n",
++ zlib_deflate_workspacesize());
++ return -ENOMEM;
++ }
++ /*
++ * Need to find pages (workspace) that are used for compression.
++ * Even though zlib_deflate_workspacesize() is 64 pages (approximately)
++ * depending on the arch, we use only 2 pages. Hence, get the physical
++ * addresses of these 2 pages and use them to avoid compressing those
++ * pages.
++ */
++ pg = vmalloc_to_page(deflate_workspace);
++ workspace_paddr[0] = page_to_pfn(pg) << PAGE_SHIFT;
++ pg = vmalloc_to_page(deflate_workspace + DUMP_PAGE_SIZE);
++ workspace_paddr[1] = page_to_pfn(pg) << PAGE_SHIFT;
+
-+static struct dump_dev default_dump_netdev = {
-+ .type_name = "networkdev",
-+ .ops = &dump_netdev_ops,
-+ .curr_offset = 0,
-+ .np.name = "netdump",
-+ .np.dev_name = "eth0",
-+ .np.rx_hook = rx_hook,
-+ .np.local_port = 6688,
-+ .np.remote_port = 6688,
-+ .np.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
-+};
++ /* Eliminate the possibility of real data getting a compression
++ * failure.
++ */
+
-+static int __init
-+dump_netdev_init(void)
-+{
-+ default_dump_netdev.curr_offset = 0;
++ if (!(safety_buffer = (void *)__get_free_pages(GFP_KERNEL,
++ get_order(DUMP_PAGE_SIZE))))
++ return -ENOMEM;
+
-+ if (dump_register_device(&default_dump_netdev) < 0) {
-+ printk("network dump device driver registration failed\n");
-+ return -1;
-+ }
-+ printk("network device driver for LKCD registered\n");
-+
-+ get_random_bytes(&dump_magic, sizeof(dump_magic));
++ printk("dump gzip safety buffer: %p, %d\n", safety_buffer,
++ (int)DUMP_PAGE_SIZE);
++
++ dump_register_compression(&dump_gzip_compression);
+ return 0;
+}
+
++/*
++ * Name: dump_compress_gzip_cleanup()
++ * Func: Remove gzip as a compression mechanism.
++ */
+static void __exit
-+dump_netdev_cleanup(void)
++dump_compress_gzip_cleanup(void)
+{
-+ dump_unregister_device(&default_dump_netdev);
++ vfree(deflate_workspace);
++ if (safety_buffer) {
++ free_pages((unsigned long)safety_buffer,
++ get_order(DUMP_PAGE_SIZE));
++ safety_buffer = NULL;
++ }
++
++ dump_unregister_compression(DUMP_COMPRESS_GZIP);
+}
+
-+MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-+MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)");
-+MODULE_LICENSE("GPL");
++/* module initialization */
++module_init(dump_compress_gzip_init);
++module_exit(dump_compress_gzip_cleanup);
+
-+module_init(dump_netdev_init);
-+module_exit(dump_netdev_cleanup);
-Index: linux-2.6.10/drivers/dump/dump_x8664.c
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
++MODULE_DESCRIPTION("Gzip compression module for crash dump driver");
+Index: linux-2.6.10/drivers/dump/dump_ppc64.c
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_x8664.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_x8664.c 2005-04-05 16:47:53.932206776 +0800
-@@ -0,0 +1,362 @@
+--- linux-2.6.10.orig/drivers/dump/dump_ppc64.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_ppc64.c 2005-04-07 18:13:56.919750728 +0800
+@@ -0,0 +1,410 @@
+/*
-+ * Architecture specific (x86-64) functions for Linux crash dumps.
++ * Architecture specific (ppc64) functions for Linux crash dumps.
+ *
+ * Created by: Matt Robinson (yakker@sgi.com)
+ *
+ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
-+ *
++ *
+ * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
+ * Copyright 2000 TurboLinux, Inc. All rights reserved.
-+ *
-+ * x86-64 port Copyright 2002 Andi Kleen, SuSE Labs
-+ * x86-64 port Sachin Sant ( sachinp@in.ibm.com )
++ * Copyright 2003, 2004 IBM Corporation
++ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+ * file. Any time a modification is made to the virtual memory mechanism,
+ * these routines must be changed to use the new mechanisms.
+ */
-+#include <linux/init.h>
+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/smp.h>
+#include <linux/fs.h>
-+#include <linux/vmalloc.h>
+#include <linux/dump.h>
-+#include "dump_methods.h"
+#include <linux/mm.h>
-+#include <linux/rcupdate.h>
-+#include <asm/processor.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/syscalls.h>
+#include <asm/hardirq.h>
-+#include <asm/kdebug.h>
++#include "dump_methods.h"
++#include <linux/irq.h>
++#include <asm/machdep.h>
+#include <asm/uaccess.h>
-+#include <asm/nmi.h>
-+#include <asm/kdebug.h>
++#include <asm/irq.h>
++#include <asm/page.h>
++#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
++#include <linux/kdb.h>
++#endif
+
-+static __s32 saved_irq_count; /* saved preempt_count() flag */
++extern cpumask_t irq_affinity[];
+
-+void (*dump_trace_ptr)(struct pt_regs *);
++static cpumask_t saved_affinity[NR_IRQS];
++
++static __s32 saved_irq_count; /* saved preempt_count() flags */
+
+static int alloc_dha_stack(void)
+{
-+ int i;
-+ void *ptr;
-+
-+ if (dump_header_asm.dha_stack[0])
-+ return 0;
++ int i;
++ void *ptr;
+
-+ ptr = vmalloc(THREAD_SIZE * num_online_cpus());
-+ if (!ptr) {
-+ printk("vmalloc for dha_stacks failed\n");
-+ return -ENOMEM;
-+ }
++ if (dump_header_asm.dha_stack[0])
++ return 0;
+
-+ for (i = 0; i < num_online_cpus(); i++) {
-+ dump_header_asm.dha_stack[i] =
++ ptr = (void *)vmalloc(THREAD_SIZE * num_possible_cpus());
++ if (!ptr) {
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < num_possible_cpus(); i++) {
++ dump_header_asm.dha_stack[i] =
+ (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
+ }
+ return 0;
+}
+
-+static int free_dha_stack(void)
++static int free_dha_stack(void)
+{
-+ if (dump_header_asm.dha_stack[0]) {
-+ vfree((void *)dump_header_asm.dha_stack[0]);
++ if (dump_header_asm.dha_stack[0]) {
++ vfree((void*)dump_header_asm.dha_stack[0]);
+ dump_header_asm.dha_stack[0] = 0;
-+ }
-+ return 0;
++ }
++ return 0;
++}
++#ifdef CONFIG_SMP
++static int dump_expect_ipi[NR_CPUS];
++static atomic_t waiting_for_dump_ipi;
++
++extern void stop_this_cpu(void *);
++static int
++dump_ipi_handler(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++
++ if (!dump_expect_ipi[cpu])
++ return 0;
++ dump_save_this_cpu(regs);
++ atomic_dec(&waiting_for_dump_ipi);
++
++ level_changed:
++ switch (dump_silence_level) {
++ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
++ while (dump_oncpu) {
++ barrier(); /* paranoia */
++ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
++ goto level_changed;
++ cpu_relax(); /* kill time nicely */
++ }
++ break;
++
++ case DUMP_HALT_CPUS: /* Execute halt */
++ stop_this_cpu(NULL);
++ break;
++
++ case DUMP_SOFT_SPIN_CPUS:
++ /* Mark the task so it spins in schedule */
++ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
++ break;
++ }
++
++ return 1;
++}
++
++/* save registers on other processors
++ * If the other cpus don't respond we simply do not get their states.
++ */
++void
++__dump_save_other_cpus(void)
++{
++ int i, cpu = smp_processor_id();
++ int other_cpus = num_online_cpus()-1;
++
++ if (other_cpus > 0) {
++ atomic_set(&waiting_for_dump_ipi, other_cpus);
++ for (i = 0; i < NR_CPUS; i++)
++ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
++
++ printk(KERN_ALERT "sending IPI to other cpus...\n");
++ dump_send_ipi(dump_ipi_handler);
++ /*
++	 * Maybe we don't need to wait for the IPI to be processed:
++	 * just write out the header at the end of dumping; if
++	 * this IPI is not processed until then, there probably
++	 * is a problem and we just fail to capture the state of
++	 * the other cpus.
++	 * However, we will wait 10 secs for the other CPUs to respond.
++	 * If not, proceed with the dump even though we failed
++	 * to capture the other CPU states.
++ */
++ i = 10000; /* wait max of 10 seconds */
++ while ((atomic_read(&waiting_for_dump_ipi) > 0) && (--i > 0)) {
++ barrier();
++ mdelay(1);
++ }
++ printk(KERN_ALERT "done waiting: %d cpus not responding\n",
++ atomic_read(&waiting_for_dump_ipi));
++ dump_send_ipi(NULL); /* clear handler */
++ }
++}
++
++/*
++ * Restore old irq affinities.
++ */
++static void
++__dump_reset_irq_affinity(void)
++{
++ int i;
++ irq_desc_t *irq_d;
++
++ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
++
++ for_each_irq(i) {
++ irq_d = get_irq_desc(i);
++ if (irq_d->handler == NULL) {
++ continue;
++ }
++ if (irq_d->handler->set_affinity != NULL) {
++ irq_d->handler->set_affinity(i, saved_affinity[i]);
++ }
++ }
++}
++
++/*
++ * Routine to save the old irq affinities and change affinities of all irqs to
++ * the dumping cpu.
++ *
++ * NB: Need to be expanded to multiple nodes.
++ */
++static void
++__dump_set_irq_affinity(void)
++{
++ int i;
++ cpumask_t cpu = CPU_MASK_NONE;
++ irq_desc_t *irq_d;
++
++ cpu_set(smp_processor_id(), cpu);
++
++ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
++
++ for_each_irq(i) {
++ irq_d = get_irq_desc(i);
++ if (irq_d->handler == NULL) {
++ continue;
++ }
++ irq_affinity[i] = cpu;
++ if (irq_d->handler->set_affinity != NULL) {
++ irq_d->handler->set_affinity(i, irq_affinity[i]);
++ }
++ }
+}
++#else /* !CONFIG_SMP */
++#define __dump_save_other_cpus() do { } while (0)
++#define __dump_set_irq_affinity() do { } while (0)
++#define __dump_reset_irq_affinity() do { } while (0)
++#endif /* !CONFIG_SMP */
+
+void
-+__dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs)
++__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
+{
-+ if (regs)
++ if (regs) {
+ memcpy(dest_regs, regs, sizeof(struct pt_regs));
++ }
+}
+
+void
+ dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
+}
+
-+#ifdef CONFIG_SMP
-+extern cpumask_t irq_affinity[];
-+extern irq_desc_t irq_desc[];
-+extern void dump_send_ipi(void);
-+static int dump_expect_ipi[NR_CPUS];
-+static atomic_t waiting_for_dump_ipi;
-+static unsigned long saved_affinity[NR_IRQS];
-+
-+extern void stop_this_cpu(void *);
-+
-+static int
-+dump_nmi_callback(struct pt_regs *regs, int cpu)
++/*
++ * Name: __dump_configure_header()
++ * Func: Configure the dump header with all proper values.
++ */
++int
++__dump_configure_header(const struct pt_regs *regs)
+{
-+ if (!dump_expect_ipi[cpu]) {
-+ return 0;
-+ }
-+
-+ dump_expect_ipi[cpu] = 0;
-+
-+ dump_save_this_cpu(regs);
-+ atomic_dec(&waiting_for_dump_ipi);
-+
-+level_changed:
-+
-+ switch (dump_silence_level) {
-+ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
-+ while (dump_oncpu) {
-+ barrier(); /* paranoia */
-+ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
-+ goto level_changed;
-+
-+ cpu_relax(); /* kill time nicely */
-+ }
-+ break;
-+
-+ case DUMP_HALT_CPUS: /* Execute halt */
-+ stop_this_cpu(NULL);
-+ break;
-+
-+ case DUMP_SOFT_SPIN_CPUS:
-+ /* Mark the task so it spins in schedule */
-+ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
-+ break;
-+ }
-+
-+ return 1;
++ return (0);
+}
+
-+/* save registers on other processors */
-+void
-+__dump_save_other_cpus(void)
++#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
++int
++kdb_sysdump(int argc, const char **argv, const char **envp, struct pt_regs *regs)
+{
-+ int i, cpu = smp_processor_id();
-+ int other_cpus = num_online_cpus() - 1;
-+
-+ if (other_cpus > 0) {
-+ atomic_set(&waiting_for_dump_ipi, other_cpus);
-+
-+ for (i = 0; i < NR_CPUS; i++)
-+ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
-+
-+ set_nmi_callback(dump_nmi_callback);
-+ wmb();
-+
-+ dump_send_ipi();
++ kdb_printf("Dumping to disk...\n");
++ dump("dump from kdb", regs);
++ kdb_printf("Dump Complete\n");
++ return 0;
++}
++#endif
+
-+ /* may be we dont need to wait for NMI to be processed.
-+ just write out the header at the end of dumping, if
-+ this IPI is not processed untill then, there probably
-+ is a problem and we just fail to capture state of
-+ other cpus. */
-+ while(atomic_read(&waiting_for_dump_ipi) > 0)
-+ cpu_relax();
++/*
++ * Name: __dump_init()
++ * Func: Initialize the dumping routine process. This is in case
++ * it's necessary in the future.
++ */
++void
++__dump_init(uint64_t local_memory_start)
++{
++#if defined(FIXME) && defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
++ /* This won't currently work because interrupts are off in kdb
++ * and the dump process doesn't understand how to recover.
++ */
++ /* ToDo: add a command to query/set dump configuration */
++ kdb_register_repeat("sysdump", kdb_sysdump, "", "use lkcd to dump the system to disk (if configured)", 0, KDB_REPEAT_NONE);
++#endif
+
-+ unset_nmi_callback();
-+ }
++ /* return */
+ return;
+}
+
+/*
-+ * Routine to save the old irq affinities and change affinities of all irqs to
-+ * the dumping cpu.
++ * Name: __dump_open()
++ * Func: Open the dump device (architecture specific). This is in
++ * case it's necessary in the future.
+ */
-+static void
-+set_irq_affinity(void)
++void
++__dump_open(void)
+{
-+ int i;
-+ cpumask_t cpu = CPU_MASK_NONE;
-+
-+ cpu_set(smp_processor_id(), cpu);
-+ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
-+ for (i = 0; i < NR_IRQS; i++) {
-+ if (irq_desc[i].handler == NULL)
-+ continue;
-+ irq_affinity[i] = cpu;
-+ if (irq_desc[i].handler->set_affinity != NULL)
-+ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
-+ }
++ alloc_dha_stack();
+}
+
++
+/*
-+ * Restore old irq affinities.
++ * Name: __dump_cleanup()
++ * Func: Free any architecture specific data structures. This is called
++ * when the dump module is being removed.
+ */
-+static void
-+reset_irq_affinity(void)
++void
++__dump_cleanup(void)
+{
-+ int i;
-+
-+ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
-+ for (i = 0; i < NR_IRQS; i++) {
-+ if (irq_desc[i].handler == NULL)
-+ continue;
-+ if (irq_desc[i].handler->set_affinity != NULL)
-+ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
-+ }
++ free_dha_stack();
+}
+
-+#else /* !CONFIG_SMP */
-+#define set_irq_affinity() do { } while (0)
-+#define reset_irq_affinity() do { } while (0)
-+#define save_other_cpu_states() do { } while (0)
-+#endif /* !CONFIG_SMP */
-+
++/*
++ * Kludge - dump from interrupt context is unreliable (Fixme)
++ *
++ * We do this so that softirqs initiated for dump i/o
++ * get processed and we don't hang while waiting for i/o
++ * to complete or in any irq synchronization attempt.
++ *
++ * This is not quite legal of course, as it has the side
++ * effect of making all interrupts & softirqs triggered
++ * while dump is in progress complete before currently
++ * pending softirqs and the currently executing interrupt
++ * code.
++ */
+static inline void
+irq_bh_save(void)
+{
+/*
+ * Name: __dump_irq_enable
+ * Func: Reset system so interrupts are enabled.
-+ * This is used for dump methods that require interrupts
-+ * Eventually, all methods will have interrupts disabled
-+ * and this code can be removed.
++ * This is used for dump methods that require interrupts
++ * Eventually, all methods will have interrupts disabled
++ * and this code can be removed.
+ *
-+ * Change irq affinities
-+ * Re-enable interrupts
++ * Change irq affinities
++ * Re-enable interrupts
+ */
+int
+__dump_irq_enable(void)
+{
-+ set_irq_affinity();
-+ irq_bh_save();
-+ local_irq_enable();
++ __dump_set_irq_affinity();
++ irq_bh_save();
++ local_irq_enable();
+ return 0;
+}
+
+/*
+ * Name: __dump_irq_restore
-+ * Func: Resume the system state in an architecture-speeific way.
-+ *
++ * Func: Resume the system state in an architecture-specific way.
+ */
+void
+__dump_irq_restore(void)
+{
-+ local_irq_disable();
-+ reset_irq_affinity();
-+ irq_bh_restore();
-+}
-+
-+/*
-+ * Name: __dump_configure_header()
-+ * Func: Configure the dump header with all proper values.
-+ */
-+int
-+__dump_configure_header(const struct pt_regs *regs)
-+{
-+ /* Dummy function - return */
-+ return (0);
-+}
-+
-+static int notify(struct notifier_block *nb, unsigned long code, void *data)
-+{
-+ if (code == DIE_NMI_IPI && dump_oncpu)
-+ return NOTIFY_BAD;
-+ return NOTIFY_DONE;
-+}
-+
-+static struct notifier_block dump_notifier = {
-+ .notifier_call = notify,
-+};
-+
-+/*
-+ * Name: __dump_init()
-+ * Func: Initialize the dumping routine process.
-+ */
-+void
-+__dump_init(uint64_t local_memory_start)
-+{
-+ notifier_chain_register(&die_chain, &dump_notifier);
-+}
-+
-+/*
-+ * Name: __dump_open()
-+ * Func: Open the dump device (architecture specific). This is in
-+ * case it's necessary in the future.
-+ */
-+void
-+__dump_open(void)
-+{
-+ alloc_dha_stack();
-+ /* return */
-+ return;
++ local_irq_disable();
++ __dump_reset_irq_affinity();
++ irq_bh_restore();
+}
+
-+/*
-+ * Name: __dump_cleanup()
-+ * Func: Free any architecture specific data structures. This is called
-+ * when the dump module is being removed.
++#if 0
++/* Cheap progress hack. It estimates pages to write and
++ * assumes all pages will go -- so it may get way off.
++ * As the progress is not displayed on other architectures, this is not
++ * used at the moment.
+ */
+void
-+__dump_cleanup(void)
++__dump_progress_add_page(void)
+{
-+ free_dha_stack();
-+ notifier_chain_unregister(&die_chain, &dump_notifier);
-+ synchronize_kernel();
-+ return;
-+}
++ unsigned long total_pages = nr_free_pages() + nr_inactive_pages + nr_active_pages;
++ unsigned int percent = (dump_header.dh_num_dump_pages * 100) / total_pages;
++ char buf[30];
+
-+extern int page_is_ram(unsigned long);
++ if (percent > last_percent && percent <= 100) {
++ sprintf(buf, "Dump %3d%% ", percent);
++ ppc64_dump_msg(0x2, buf);
++ last_percent = percent;
++ }
++
++}
++#endif
+
++extern int dump_page_is_ram(unsigned long);
+/*
+ * Name: __dump_page_valid()
+ * Func: Check if page is valid to dump.
+ if (!pfn_valid(index))
+ return 0;
+
-+ return page_is_ram(index);
++ return dump_page_is_ram(index);
+}
+
+/*
+ * Func: Interface for the lkcd dump command. Calls dump_execute()
+ */
+int
-+manual_handle_crashdump(void) {
-+
-+ struct pt_regs regs;
++manual_handle_crashdump(void)
++{
++ struct pt_regs regs;
+
-+ get_current_regs(®s);
-+ dump_execute("manual", ®s);
-+ return 0;
++ get_current_regs(®s);
++ dump_execute("manual", ®s);
++ return 0;
+}
+
+/*
+{
+ return;
+}
-Index: linux-2.6.10/drivers/dump/dump_overlay.c
+Index: linux-2.6.10/drivers/dump/dump_i386.c
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_overlay.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_overlay.c 2005-04-05 16:47:53.934206472 +0800
-@@ -0,0 +1,890 @@
+--- linux-2.6.10.orig/drivers/dump/dump_i386.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_i386.c 2005-04-07 18:13:56.895754376 +0800
+@@ -0,0 +1,372 @@
+/*
-+ * Two-stage soft-boot based dump scheme methods (memory overlay
-+ * with post soft-boot writeout)
-+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ *
-+ * This approach of saving the dump in memory and writing it
-+ * out after a softboot without clearing memory is derived from the
-+ * Mission Critical Linux dump implementation. Credits and a big
-+ * thanks for letting the lkcd project make use of the excellent
-+ * piece of work and also for helping with clarifications and
-+ * tips along the way are due to:
-+ * Dave Winchell <winchell@mclx.com> (primary author of mcore)
-+ * and also to
-+ * Jeff Moyer <moyer@mclx.com>
-+ * Josh Huber <huber@mclx.com>
-+ *
-+ * For those familiar with the mcore implementation, the key
-+ * differences/extensions here are in allowing entire memory to be
-+ * saved (in compressed form) through a careful ordering scheme
-+ * on both the way down as well on the way up after boot, the latter
-+ * for supporting the LKCD notion of passes in which most critical
-+ * data is the first to be saved to the dump device. Also the post
-+ * boot writeout happens from within the kernel rather than driven
-+ * from userspace.
++ * Architecture specific (i386) functions for Linux crash dumps.
+ *
-+ * The sequence is orchestrated through the abstraction of "dumpers",
-+ * one for the first stage which then sets up the dumper for the next
-+ * stage, providing for a smooth and flexible reuse of the singlestage
-+ * dump scheme methods and a handle to pass dump device configuration
-+ * information across the soft boot.
++ * Created by: Matt Robinson (yakker@sgi.com)
+ *
-+ * Copyright (C) 2002 International Business Machines Corp.
++ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
+ *
++ * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
++ * Copyright 2000 TurboLinux, Inc. All rights reserved.
++ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+/*
-+ * Disruptive dumping using the second kernel soft-boot option
-+ * for issuing dump i/o operates in 2 stages:
-+ *
-+ * (1) - Saves the (compressed & formatted) dump in memory using a
-+ * carefully ordered overlay scheme designed to capture the
-+ * entire physical memory or selective portions depending on
-+ * dump config settings,
-+ * - Registers the stage 2 dumper and
-+ * - Issues a soft reboot w/o clearing memory.
-+ *
-+ * The overlay scheme starts with a small bootstrap free area
-+ * and follows a reverse ordering of passes wherein it
-+ * compresses and saves data starting with the least critical
-+ * areas first, thus freeing up the corresponding pages to
-+ * serve as destination for subsequent data to be saved, and
-+ * so on. With a good compression ratio, this makes it feasible
-+ * to capture an entire physical memory dump without significantly
-+ * reducing memory available during regular operation.
-+ *
-+ * (2) Post soft-reboot, runs through the saved memory dump and
-+ * writes it out to disk, this time around, taking care to
-+ * save the more critical data first (i.e. pages which figure
-+ * in early passes for a regular dump). Finally issues a
-+ * clean reboot.
-+ *
-+ * Since the data was saved in memory after selection/filtering
-+ * and formatted as per the chosen output dump format, at this
-+ * stage the filter and format actions are just dummy (or
-+ * passthrough) actions, except for influence on ordering of
-+ * passes.
++ * The hooks for dumping the kernel virtual memory to disk are in this
++ * file. Any time a modification is made to the virtual memory mechanism,
++ * these routines must be changed to use the new mechanisms.
+ */
-+
++#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
++#include <linux/smp.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
+#include <linux/dump.h>
-+#ifdef CONFIG_KEXEC
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/kexec.h>
-+#endif
+#include "dump_methods.h"
++#include <linux/irq.h>
+
-+extern struct list_head dumper_list_head;
-+extern struct dump_memdev *dump_memdev;
-+extern struct dumper dumper_stage2;
-+struct dump_config_block *dump_saved_config = NULL;
-+extern struct dump_blockdev *dump_blockdev;
-+static struct dump_memdev *saved_dump_memdev = NULL;
-+static struct dumper *saved_dumper = NULL;
-+
-+#ifdef CONFIG_KEXEC
-+extern int panic_timeout;
-+#endif
-+
-+/* For testing
-+extern void dump_display_map(struct dump_memdev *);
-+*/
-+
-+struct dumper *dumper_by_name(char *name)
-+{
-+#ifdef LATER
-+ struct dumper *dumper;
-+ list_for_each_entry(dumper, &dumper_list_head, dumper_list)
-+ if (!strncmp(dumper->name, name, 32))
-+ return dumper;
-+
-+ /* not found */
-+ return NULL;
-+#endif
-+ /* Temporary proof of concept */
-+ if (!strncmp(dumper_stage2.name, name, 32))
-+ return &dumper_stage2;
-+ else
-+ return NULL;
-+}
++#include <asm/processor.h>
++#include <asm/e820.h>
++#include <asm/hardirq.h>
++#include <asm/nmi.h>
+
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+extern void dump_early_reserve_map(struct dump_memdev *);
++static __s32 saved_irq_count; /* saved preempt_count() flags */
+
-+void crashdump_reserve(void)
++static int
++alloc_dha_stack(void)
+{
-+ extern unsigned long crashdump_addr;
-+
-+ if (crashdump_addr == 0xdeadbeef)
-+ return;
++ int i;
++ void *ptr;
++
++ if (dump_header_asm.dha_stack[0])
++ return 0;
+
-+ /* reserve dump config and saved dump pages */
-+ dump_saved_config = (struct dump_config_block *)crashdump_addr;
-+ /* magic verification */
-+ if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
-+ printk("Invalid dump magic. Ignoring dump\n");
-+ dump_saved_config = NULL;
-+ return;
++ ptr = vmalloc(THREAD_SIZE * num_online_cpus());
++ if (!ptr) {
++ printk("vmalloc for dha_stacks failed\n");
++ return -ENOMEM;
+ }
-+
-+ printk("Dump may be available from previous boot\n");
-+
-+#ifdef CONFIG_X86_64
-+ reserve_bootmem_node(NODE_DATA(0),
-+ virt_to_phys((void *)crashdump_addr),
-+ PAGE_ALIGN(sizeof(struct dump_config_block)));
-+#else
-+ reserve_bootmem(virt_to_phys((void *)crashdump_addr),
-+ PAGE_ALIGN(sizeof(struct dump_config_block)));
-+#endif
-+ dump_early_reserve_map(&dump_saved_config->memdev);
+
++ for (i = 0; i < num_online_cpus(); i++) {
++ dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
++ (i * THREAD_SIZE));
++ }
++ return 0;
+}
-+#endif
+
-+/*
-+ * Loads the dump configuration from a memory block saved across soft-boot
-+ * The ops vectors need fixing up as the corresp. routines may have
-+ * relocated in the new soft-booted kernel.
-+ */
-+int dump_load_config(struct dump_config_block *config)
++static int
++free_dha_stack(void)
+{
-+ struct dumper *dumper;
-+ struct dump_data_filter *filter_table, *filter;
-+ struct dump_dev *dev;
-+ int i;
++ if (dump_header_asm.dha_stack[0]) {
++ vfree((void *)dump_header_asm.dha_stack[0]);
++ dump_header_asm.dha_stack[0] = 0;
++ }
++ return 0;
++}
+
-+ if (config->magic != DUMP_MAGIC_LIVE)
-+ return -ENOENT; /* not a valid config */
+
-+ /* initialize generic config data */
-+ memcpy(&dump_config, &config->config, sizeof(dump_config));
++void
++__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
++{
++ *dest_regs = *regs;
+
-+ /* initialize dumper state */
-+ if (!(dumper = dumper_by_name(config->dumper.name))) {
-+ printk("dumper name mismatch\n");
-+ return -ENOENT; /* dumper mismatch */
-+ }
-+
-+ /* verify and fixup schema */
-+ if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
-+ printk("dumper scheme mismatch\n");
-+ return -ENOENT; /* mismatch */
-+ }
-+ config->scheme.ops = dumper->scheme->ops;
-+ config->dumper.scheme = &config->scheme;
-+
-+ /* verify and fixup filter operations */
-+ filter_table = dumper->filter;
-+ for (i = 0, filter = config->filter_table;
-+ ((i < MAX_PASSES) && filter_table[i].selector);
-+ i++, filter++) {
-+ if (strncmp(filter_table[i].name, filter->name, 32)) {
-+ printk("dump filter mismatch\n");
-+ return -ENOENT; /* filter name mismatch */
-+ }
-+ filter->selector = filter_table[i].selector;
++	/* In case of panic dumps, we collect regs on entry to panic,
++	 * so we shouldn't 'fix' ss/esp here again. But it is hard to
++	 * tell, just looking at regs, whether ss/esp need fixing. We make
++	 * this decision by looking at xss in regs. If we have better
++	 * means to determine that ss/esp are valid (by some flag which
++	 * tells that we are here due to panic dump), then we can use
++	 * that instead of this kludge.
++ */
++ if (!user_mode(regs)) {
++ if ((0xffff & regs->xss) == __KERNEL_DS)
++ /* already fixed up */
++ return;
++ dest_regs->esp = (unsigned long)&(regs->esp);
++ __asm__ __volatile__ ("movw %%ss, %%ax;"
++ :"=a"(dest_regs->xss));
+ }
-+ config->dumper.filter = config->filter_table;
++}
+
-+ /* fixup format */
-+ if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
-+ printk("dump format mismatch\n");
-+ return -ENOENT; /* mismatch */
-+ }
-+ config->fmt.ops = dumper->fmt->ops;
-+ config->dumper.fmt = &config->fmt;
++void
++__dump_save_context(int cpu, const struct pt_regs *regs,
++ struct task_struct *tsk)
++{
++ dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
++ __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
+
-+ /* fixup target device */
-+ dev = (struct dump_dev *)(&config->dev[0]);
-+ if (dumper->dev == NULL) {
-+ pr_debug("Vanilla dumper - assume default\n");
-+ if (dump_dev == NULL)
-+ return -ENODEV;
-+ dumper->dev = dump_dev;
-+ }
++ /* take a snapshot of the stack */
++ /* doing this enables us to tolerate slight drifts on this cpu */
+
-+ if (strncmp(dumper->dev->type_name, dev->type_name, 32)) {
-+ printk("dump dev type mismatch %s instead of %s\n",
-+ dev->type_name, dumper->dev->type_name);
-+ return -ENOENT; /* mismatch */
++ if (dump_header_asm.dha_stack[cpu]) {
++ memcpy((void *)dump_header_asm.dha_stack[cpu],
++ STACK_START_POSITION(tsk),
++ THREAD_SIZE);
+ }
-+ dev->ops = dumper->dev->ops;
-+ config->dumper.dev = dev;
-+
-+ /* fixup memory device containing saved dump pages */
-+ /* assume statically init'ed dump_memdev */
-+ config->memdev.ddev.ops = dump_memdev->ddev.ops;
-+ /* switch to memdev from prev boot */
-+ saved_dump_memdev = dump_memdev; /* remember current */
-+ dump_memdev = &config->memdev;
++ dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
++}
+
-+ /* Make this the current primary dumper */
-+ dump_config.dumper = &config->dumper;
++#ifdef CONFIG_SMP
++extern cpumask_t irq_affinity[];
++extern irq_desc_t irq_desc[];
++extern void dump_send_ipi(void);
+
-+ return 0;
-+}
++static int dump_expect_ipi[NR_CPUS];
++static atomic_t waiting_for_dump_ipi;
++static cpumask_t saved_affinity[NR_IRQS];
+
-+/* Saves the dump configuration in a memory block for use across a soft-boot */
-+int dump_save_config(struct dump_config_block *config)
++extern void stop_this_cpu(void *); /* exported by i386 kernel */
++
++static int
++dump_nmi_callback(struct pt_regs *regs, int cpu)
+{
-+ printk("saving dump config settings\n");
++ if (!dump_expect_ipi[cpu])
++ return 0;
+
-+ /* dump config settings */
-+ memcpy(&config->config, &dump_config, sizeof(dump_config));
++ dump_expect_ipi[cpu] = 0;
++
++ dump_save_this_cpu(regs);
++ atomic_dec(&waiting_for_dump_ipi);
+
-+ /* dumper state */
-+ memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
-+ memcpy(&config->scheme, dump_config.dumper->scheme,
-+ sizeof(struct dump_scheme));
-+ memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
-+ memcpy(&config->dev[0], dump_config.dumper->dev,
-+ sizeof(struct dump_anydev));
-+ memcpy(&config->filter_table, dump_config.dumper->filter,
-+ sizeof(struct dump_data_filter)*MAX_PASSES);
++ level_changed:
++ switch (dump_silence_level) {
++ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
++ while (dump_oncpu) {
++ barrier(); /* paranoia */
++ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
++ goto level_changed;
+
-+ /* handle to saved mem pages */
-+ memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
++ cpu_relax(); /* kill time nicely */
++ }
++ break;
+
-+ config->magic = DUMP_MAGIC_LIVE;
-+
-+ return 0;
++ case DUMP_HALT_CPUS: /* Execute halt */
++ stop_this_cpu(NULL);
++ break;
++
++ case DUMP_SOFT_SPIN_CPUS:
++ /* Mark the task so it spins in schedule */
++ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
++ break;
++ }
++
++ return 1;
+}
+
-+int dump_init_stage2(struct dump_config_block *saved_config)
++/* save registers on other processors */
++void
++__dump_save_other_cpus(void)
+{
-+ int err = 0;
-+
-+ pr_debug("dump_init_stage2\n");
-+ /* Check if dump from previous boot exists */
-+ if (saved_config) {
-+ printk("loading dumper from previous boot \n");
-+ /* load and configure dumper from previous boot */
-+ if ((err = dump_load_config(saved_config)))
-+ return err;
++ int i, cpu = smp_processor_id();
++ int other_cpus = num_online_cpus()-1;
++
++ if (other_cpus > 0) {
++ atomic_set(&waiting_for_dump_ipi, other_cpus);
+
-+ if (!dump_oncpu) {
-+ if ((err = dump_configure(dump_config.dump_device))) {
-+ printk("Stage 2 dump configure failed\n");
-+ return err;
-+ }
++ for (i = 0; i < NR_CPUS; i++) {
++ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
+ }
+
-+ dumper_reset();
-+ dump_dev = dump_config.dumper->dev;
-+ /* write out the dump */
-+ err = dump_generic_execute(NULL, NULL);
-+
-+ dump_saved_config = NULL;
++ /* short circuit normal NMI handling temporarily */
++ set_nmi_callback(dump_nmi_callback);
++ wmb();
+
-+ if (!dump_oncpu) {
-+ dump_unconfigure();
++ dump_send_ipi();
++		/* Maybe we don't need to wait for the NMI to be processed:
++		   just write out the header at the end of dumping; if
++		   this IPI is not processed until then, there probably
++		   is a problem and we just fail to capture the state of
++		   the other cpus. */
++ while(atomic_read(&waiting_for_dump_ipi) > 0) {
++ cpu_relax();
+ }
-+
-+ return err;
+
-+ } else {
-+ /* no dump to write out */
-+ printk("no dumper from previous boot \n");
-+ return 0;
++ unset_nmi_callback();
+ }
+}
+
-+extern void dump_mem_markpages(struct dump_memdev *);
-+
-+int dump_switchover_stage(void)
++/*
++ * Routine to save the old irq affinities and change affinities of all irqs to
++ * the dumping cpu.
++ */
++static void
++set_irq_affinity(void)
+{
-+ int ret = 0;
++ int i;
++ cpumask_t cpu = CPU_MASK_NONE;
+
-+ /* trigger stage 2 rightaway - in real life would be after soft-boot */
-+ /* dump_saved_config would be a boot param */
-+ saved_dump_memdev = dump_memdev;
-+ saved_dumper = dump_config.dumper;
-+ ret = dump_init_stage2(dump_saved_config);
-+ dump_memdev = saved_dump_memdev;
-+ dump_config.dumper = saved_dumper;
-+ return ret;
++ cpu_set(smp_processor_id(), cpu);
++ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
++ for (i = 0; i < NR_IRQS; i++) {
++ if (irq_desc[i].handler == NULL)
++ continue;
++ irq_affinity[i] = cpu;
++ if (irq_desc[i].handler->set_affinity != NULL)
++ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
++ }
+}
+
-+int dump_activate_softboot(void)
++/*
++ * Restore old irq affinities.
++ */
++static void
++reset_irq_affinity(void)
+{
-+ int err = 0;
-+#ifdef CONFIG_KEXEC
-+ int num_cpus_online = 0;
-+ struct kimage *image;
-+#endif
-+
-+ /* temporary - switchover to writeout previously saved dump */
-+#ifndef CONFIG_KEXEC
-+ err = dump_switchover_stage(); /* non-disruptive case */
-+ if (dump_oncpu)
-+ dump_config.dumper = &dumper_stage1; /* set things back */
-+
-+ return err;
-+#else
-+
-+ dump_silence_level = DUMP_HALT_CPUS;
-+ /* wait till we become the only cpu */
-+ /* maybe by checking for online cpus ? */
++ int i;
+
-+ while((num_cpus_online = num_online_cpus()) > 1);
++ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
++ for (i = 0; i < NR_IRQS; i++) {
++ if (irq_desc[i].handler == NULL)
++ continue;
++ if (irq_desc[i].handler->set_affinity != NULL)
++ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
++ }
++}
+
-+ /* now call into kexec */
++#else /* !CONFIG_SMP */
++#define set_irq_affinity() do { } while (0)
++#define reset_irq_affinity() do { } while (0)
++#define save_other_cpu_states() do { } while (0)
++#endif /* !CONFIG_SMP */
+
-+ image = xchg(&kexec_image, 0);
-+ if (image) {
-+ mdelay(panic_timeout*1000);
-+ machine_kexec(image);
-+ }
++/*
++ * Kludge - dump from interrupt context is unreliable (Fixme)
++ *
++ * We do this so that softirqs initiated for dump i/o
++ * get processed and we don't hang while waiting for i/o
++ * to complete or in any irq synchronization attempt.
++ *
++ * This is not quite legal of course, as it has the side
++ * effect of making all interrupts & softirqs triggered
++ * while dump is in progress complete before currently
++ * pending softirqs and the currently executing interrupt
++ * code.
++ */
++static inline void
++irq_bh_save(void)
++{
++ saved_irq_count = irq_count();
++ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
++}
+
++static inline void
++irq_bh_restore(void)
++{
++ preempt_count() |= saved_irq_count;
++}
+
-+ /* TBD/Fixme:
-+ * * should we call reboot notifiers ? inappropriate for panic ?
-+ * * what about device_shutdown() ?
-+ * * is explicit bus master disabling needed or can we do that
-+ * * through driverfs ?
-+ * */
-+ return 0;
-+#endif
++/*
++ * Name: __dump_irq_enable
++ * Func: Reset system so interrupts are enabled.
++ * This is used for dump methods that require interrupts
++ * Eventually, all methods will have interrupts disabled
++ * and this code can be removed.
++ *
++ * Change irq affinities
++ * Re-enable interrupts
++ */
++int
++__dump_irq_enable(void)
++{
++ set_irq_affinity();
++ irq_bh_save();
++ local_irq_enable();
++ return 0;
+}
+
-+/* --- DUMP SCHEME ROUTINES --- */
++/*
++ * Name: __dump_irq_restore
++ * Func: Resume the system state in an architecture-specific way.
+
-+static inline int dump_buf_pending(struct dumper *dumper)
++ */
++void
++__dump_irq_restore(void)
+{
-+ return (dumper->curr_buf - dumper->dump_buf);
++ local_irq_disable();
++ reset_irq_affinity();
++ irq_bh_restore();
+}
+
-+/* Invoked during stage 1 of soft-reboot based dumping */
-+int dump_overlay_sequencer(void)
++/*
++ * Name: __dump_configure_header()
++ * Func: Meant to fill in arch specific header fields except per-cpu state
++ * already captured via __dump_save_context for all CPUs.
++ */
++int
++__dump_configure_header(const struct pt_regs *regs)
+{
-+ struct dump_data_filter *filter = dump_config.dumper->filter;
-+ struct dump_data_filter *filter2 = dumper_stage2.filter;
-+ int pass = 0, err = 0, save = 0;
-+ int (*action)(unsigned long, unsigned long);
-+
-+ /* Make sure gzip compression is being used */
-+ if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
-+ printk(" Please set GZIP compression \n");
-+ return -EINVAL;
-+ }
-+
-+ /* start filling in dump data right after the header */
-+ dump_config.dumper->curr_offset =
-+ PAGE_ALIGN(dump_config.dumper->header_len);
++ return (0);
++}
+
-+ /* Locate the last pass */
-+ for (;filter->selector; filter++, pass++);
-+
-+ /*
-+ * Start from the end backwards: overlay involves a reverse
-+ * ordering of passes, since less critical pages are more
-+ * likely to be reusable as scratch space once we are through
-+ * with them.
-+ */
-+ for (--pass, --filter; pass >= 0; pass--, filter--)
-+ {
-+ /* Assumes passes are exclusive (even across dumpers) */
-+ /* Requires care when coding the selection functions */
-+ if ((save = filter->level_mask & dump_config.level))
-+ action = dump_save_data;
-+ else
-+ action = dump_skip_data;
++/*
++ * Name: __dump_init()
++ * Func: Initialize the dumping routine process.
++ */
++void
++__dump_init(uint64_t local_memory_start)
++{
++ return;
++}
+
-+ /* Remember the offset where this pass started */
-+ /* The second stage dumper would use this */
-+ if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
-+ pr_debug("Starting pass %d with pending data\n", pass);
-+ pr_debug("filling dummy data to page-align it\n");
-+ dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
-+ (unsigned long)dump_config.dumper->curr_buf);
-+ }
-+
-+ filter2[pass].start[0] = dump_config.dumper->curr_offset
-+ + dump_buf_pending(dump_config.dumper);
++/*
++ * Name: __dump_open()
++ * Func: Open the dump device (architecture specific).
++ */
++void
++__dump_open(void)
++{
++ alloc_dha_stack();
++}
+
-+ err = dump_iterator(pass, action, filter);
++/*
++ * Name: __dump_cleanup()
++ * Func: Free any architecture specific data structures. This is called
++ * when the dump module is being removed.
++ */
++void
++__dump_cleanup(void)
++{
++ free_dha_stack();
++}
+
-+ filter2[pass].end[0] = dump_config.dumper->curr_offset
-+ + dump_buf_pending(dump_config.dumper);
-+ filter2[pass].num_mbanks = 1;
++extern int pfn_is_ram(unsigned long);
+
-+ if (err < 0) {
-+ printk("dump_overlay_seq: failure %d in pass %d\n",
-+ err, pass);
-+ break;
-+ }
-+ printk("\n %d overlay pages %s of %d each in pass %d\n",
-+ err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
-+ }
++/*
++ * Name: __dump_page_valid()
++ * Func: Check if page is valid to dump.
++ */
++int
++__dump_page_valid(unsigned long index)
++{
++ if (!pfn_valid(index))
++ return 0;
+
-+ return err;
++ return pfn_is_ram(index);
+}
+
-+/* from dump_memdev.c */
-+extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
-+extern struct page *dump_mem_next_page(struct dump_memdev *dev);
++/*
++ * Name: manual_handle_crashdump()
++ * Func: Interface for the lkcd dump command. Calls dump_execute()
++ */
++int
++manual_handle_crashdump(void) {
+
-+static inline struct page *dump_get_saved_page(loff_t loc)
-+{
-+ return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
++ struct pt_regs regs;
++
++	get_current_regs(&regs);
++	dump_execute("manual", &regs);
++ return 0;
+}
+
-+static inline struct page *dump_next_saved_page(void)
++/*
++ * Name: __dump_clean_irq_state()
++ * Func: Clean up from the previous IRQ handling state. Such as oops from
++ * interrupt handler or bottom half.
++ */
++void
++__dump_clean_irq_state(void)
+{
-+ return (dump_mem_next_page(dump_memdev));
++ return;
+}
-+
-+/*
-+ * Iterates over list of saved dump pages. Invoked during second stage of
-+ * soft boot dumping
+Index: linux-2.6.10/drivers/dump/dump_filters.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_filters.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_filters.c 2005-04-07 18:13:56.917751032 +0800
+@@ -0,0 +1,143 @@
++/*
++ * Default filters to select data to dump for various passes.
+ *
-+ * Observation: If additional selection is desired at this stage then
-+ * a different iterator could be written which would advance
-+ * to the next page header everytime instead of blindly picking up
-+ * the data. In such a case loc would be interpreted differently.
-+ * At this moment however a blind pass seems sufficient, cleaner and
-+ * faster.
++ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
++ * Split and rewrote default dump selection logic to generic dump
++ * method interfaces
++ * Derived from a portion of dump_base.c created by
++ *	Matt Robinson <yakker@sourceforge.net>
++ *
++ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
++ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * Used during single-stage dumping and during stage 1 of the 2-stage scheme
++ * (Stage 2 of the 2-stage scheme uses the fully transparent filters
++ * i.e. passthru filters in dump_overlay.c)
++ *
++ * Future: Custom selective dump may involve a different set of filters.
++ *
++ * This code is released under version 2 of the GNU GPL.
+ */
-+int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
-+ unsigned long), struct dump_data_filter *filter)
-+{
-+ loff_t loc, end;
-+ struct page *page;
-+ unsigned long count = 0;
-+ int i, err = 0;
-+ unsigned long sz;
-+
-+ for (i = 0; i < filter->num_mbanks; i++) {
-+ loc = filter->start[i];
-+ end = filter->end[i];
-+ printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
-+ loc, end);
-+
-+ /* loc will get treated as logical offset into stage 1 */
-+ page = dump_get_saved_page(loc);
-+
-+ for (; loc < end; loc += PAGE_SIZE) {
-+ dump_config.dumper->curr_loc = loc;
-+ if (!page) {
-+ printk("no more saved data for pass %d\n",
-+ pass);
-+ break;
-+ }
-+ sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
+
-+ if (page && filter->selector(pass, (unsigned long)page,
-+ PAGE_SIZE)) {
-+ pr_debug("mem offset 0x%llx\n", loc);
-+ if ((err = action((unsigned long)page, sz)))
-+ break;
-+ else
-+ count++;
-+ /* clear the contents of page */
-+ /* fixme: consider using KM_DUMP instead */
-+ clear_highpage(page);
-+
-+ }
-+ page = dump_next_saved_page();
-+ }
-+ }
++#include <linux/kernel.h>
++#include <linux/bootmem.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/dump.h>
++#include "dump_methods.h"
+
-+ return err ? err : count;
-+}
++#define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */
++static unsigned long bootmap_pages;
+
-+static inline int dump_overlay_pages_done(struct page *page, int nr)
++/* Copied from mm/bootmem.c - FIXME */
++/* return the number of _pages_ that will be allocated for the boot bitmap */
++void dump_calc_bootmap_pages (void)
+{
-+ int ret=0;
++ unsigned long mapsize;
++ unsigned long pages = num_physpages;
+
-+ for (; nr ; page++, nr--) {
-+ if (dump_check_and_free_page(dump_memdev, page))
-+ ret++;
-+ }
-+ return ret;
++ mapsize = (pages+7)/8;
++ mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
++ mapsize >>= PAGE_SHIFT;
++ bootmap_pages = mapsize + DUMP_PFN_SAFETY_MARGIN + 1;
+}
+
-+int dump_overlay_save_data(unsigned long loc, unsigned long len)
-+{
-+ int err = 0;
-+ struct page *page = (struct page *)loc;
-+ static unsigned long cnt = 0;
-+
-+ if ((err = dump_generic_save_data(loc, len)))
-+ return err;
+
-+ if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
-+ cnt++;
-+ if (!(cnt & 0x7f))
-+ pr_debug("released page 0x%lx\n", page_to_pfn(page));
-+ }
-+
-+ return err;
-+}
++/* temporary */
++extern unsigned long min_low_pfn;
+
+
-+int dump_overlay_skip_data(unsigned long loc, unsigned long len)
++int dump_low_page(struct page *p)
+{
-+ struct page *page = (struct page *)loc;
-+
-+ dump_overlay_pages_done(page, len >> PAGE_SHIFT);
-+ return 0;
++ return ((page_to_pfn(p) >= min_low_pfn) &&
++ (page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
+}
+
-+int dump_overlay_resume(void)
++static inline int kernel_page(struct page *p)
+{
-+ int err = 0;
-+
-+ /*
-+ * switch to stage 2 dumper, save dump_config_block
-+ * and then trigger a soft-boot
-+ */
-+ dumper_stage2.header_len = dump_config.dumper->header_len;
-+ dump_config.dumper = &dumper_stage2;
-+ if ((err = dump_save_config(dump_saved_config)))
-+ return err;
-+
-+ dump_dev = dump_config.dumper->dev;
-+
-+#ifdef CONFIG_KEXEC
-+ /* If we are doing a disruptive dump, activate softboot now */
-+ if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
-+ err = dump_activate_softboot();
-+#endif
-+
-+ return err;
-+ err = dump_switchover_stage(); /* plugs into soft boot mechanism */
-+ dump_config.dumper = &dumper_stage1; /* set things back */
-+ return err;
++ /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
++ return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
+}
+
-+int dump_overlay_configure(unsigned long devid)
++static inline int user_page(struct page *p)
+{
-+ struct dump_dev *dev;
-+ struct dump_config_block *saved_config = dump_saved_config;
-+ int err = 0;
-+
-+ /* If there is a previously saved dump, write it out first */
-+ if (saved_config) {
-+ printk("Processing old dump pending writeout\n");
-+ err = dump_switchover_stage();
-+ if (err) {
-+ printk("failed to writeout saved dump\n");
-+ return err;
-+ }
-+ dump_free_mem(saved_config); /* testing only: not after boot */
-+ }
++ return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
++}
+
-+ dev = dumper_stage2.dev = dump_config.dumper->dev;
-+ /* From here on the intermediate dump target is memory-only */
-+ dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
-+ if ((err = dump_generic_configure(0))) {
-+ printk("dump generic configure failed: err %d\n", err);
-+ return err;
-+ }
-+ /* temporary */
-+ dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
++static inline int unreferenced_page(struct page *p)
++{
++ return !PageInuse(p) && !PageReserved(p);
++}
+
-+ /* Sanity check on the actual target dump device */
-+ if (!dev || (err = dev->ops->open(dev, devid))) {
-+ return err;
-+ }
-+ /* TBD: should we release the target if this is soft-boot only ? */
+
-+ /* alloc a dump config block area to save across reboot */
-+ if (!(dump_saved_config = dump_alloc_mem(sizeof(struct
-+ dump_config_block)))) {
-+ printk("dump config block alloc failed\n");
-+ /* undo configure */
-+ dump_generic_unconfigure();
-+ return -ENOMEM;
-+ }
-+ dump_config.dump_addr = (unsigned long)dump_saved_config;
-+ printk("Dump config block of size %d set up at 0x%lx\n",
-+ sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
++/* loc marks the beginning of a range of pages */
++int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
++{
++ struct page *page = (struct page *)loc;
++ /* if any of the pages is a kernel page, select this set */
++ while (sz) {
++ if (dump_low_page(page) || kernel_page(page))
++ return 1;
++ sz -= PAGE_SIZE;
++ page++;
++ }
+ return 0;
+}
+
-+int dump_overlay_unconfigure(void)
++
++/* loc marks the beginning of a range of pages */
++int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
+{
-+ struct dump_dev *dev = dumper_stage2.dev;
-+ int err = 0;
++ struct page *page = (struct page *)loc;
++ int ret = 0;
++ /* select if the set has any user page, and no kernel pages */
++ while (sz) {
++ if (user_page(page) && !dump_low_page(page)) {
++ ret = 1;
++ } else if (kernel_page(page) || dump_low_page(page)) {
++ return 0;
++ }
++ page++;
++ sz -= PAGE_SIZE;
++ }
++ return ret;
++}
+
-+ pr_debug("dump_overlay_unconfigure\n");
-+ /* Close the secondary device */
-+ dev->ops->release(dev);
-+ pr_debug("released secondary device\n");
+
-+ err = dump_generic_unconfigure();
-+ pr_debug("Unconfigured generic portions\n");
-+ dump_free_mem(dump_saved_config);
-+ dump_saved_config = NULL;
-+ pr_debug("Freed saved config block\n");
-+ dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
+
-+ printk("Unconfigured overlay dumper\n");
-+ return err;
++/* loc marks the beginning of a range of pages */
++int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
++{
++ struct page *page = (struct page *)loc;
++
++ /* select if the set does not have any used pages */
++ while (sz) {
++ if (!unreferenced_page(page) || dump_low_page(page)) {
++ return 0;
++ }
++ page++;
++ sz -= PAGE_SIZE;
++ }
++ return 1;
+}
+
-+int dump_staged_unconfigure(void)
++/* dummy: last (non-existent) pass */
++int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
+{
-+ int err = 0;
-+ struct dump_config_block *saved_config = dump_saved_config;
-+ struct dump_dev *dev;
++ return 0;
++}
+
-+ pr_debug("dump_staged_unconfigure\n");
-+ err = dump_generic_unconfigure();
++/* TBD: resolve level bitmask ? */
++struct dump_data_filter dump_filter_table[] = {
++ { .name = "kern", .selector = dump_filter_kernpages,
++ .level_mask = DUMP_MASK_KERN},
++ { .name = "user", .selector = dump_filter_userpages,
++ .level_mask = DUMP_MASK_USED},
++ { .name = "unused", .selector = dump_filter_unusedpages,
++ .level_mask = DUMP_MASK_UNUSED},
++ { .name = "none", .selector = dump_filter_none,
++ .level_mask = DUMP_MASK_REST},
++ { .name = "", .selector = NULL, .level_mask = 0}
++};
+
-+ /* now check if there is a saved dump waiting to be written out */
-+ if (saved_config) {
-+ printk("Processing saved dump pending writeout\n");
-+ if ((err = dump_switchover_stage())) {
-+ printk("Error in commiting saved dump at 0x%lx\n",
-+ (unsigned long)saved_config);
-+ printk("Old dump may hog memory\n");
-+ } else {
-+ dump_free_mem(saved_config);
-+ pr_debug("Freed saved config block\n");
-+ }
-+ dump_saved_config = NULL;
-+ } else {
-+ dev = &dump_memdev->ddev;
-+ dev->ops->release(dev);
-+ }
-+ printk("Unconfigured second stage dumper\n");
+Index: linux-2.6.10/drivers/dump/dump_memdev.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_memdev.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_memdev.c 2005-04-07 18:13:56.907752552 +0800
+@@ -0,0 +1,655 @@
++/*
++ * Implements the dump driver interface for saving a dump in available
++ * memory areas. The saved pages may be written out to persistent storage
++ * after a soft reboot.
++ *
++ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
++ *
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ *
++ * The approach of tracking pages containing saved dump using map pages
++ * allocated as needed has been derived from the Mission Critical Linux
++ * mcore dump implementation.
++ *
++ * Credits and a big thanks for letting the lkcd project make use of
++ * the excellent piece of work and also helping with clarifications
++ * and tips along the way are due to:
++ * Dave Winchell <winchell@mclx.com> (primary author of mcore)
++ * Jeff Moyer <moyer@mclx.com>
++ * Josh Huber <huber@mclx.com>
++ *
++ * For those familiar with the mcore code, the main differences worth
++ * noting here (besides the dump device abstraction) result from enabling
++ * "high" memory pages (pages not permanently mapped in the kernel
++ * address space) to be used for saving dump data (because of which a
++ * simple virtual address based linked list cannot be used anymore for
++ * managing free pages), an added level of indirection for faster
++ * lookups during the post-boot stage, and the idea of pages being
++ * made available as they get freed up while dump to memory progresses
++ * rather than one time before starting the dump. The last point enables
++ * a full memory snapshot to be saved starting with an initial set of
++ * bootstrap pages given a good compression ratio. (See dump_overlay.c)
++ *
++ */
+
-+ return 0;
-+}
++/*
++ * -----------------MEMORY LAYOUT ------------------
++ * The memory space consists of a set of discontiguous pages, and
++ * discontiguous map pages as well, rooted in a chain of indirect
++ * map pages (also discontiguous). Except for the indirect maps
++ * (which must be preallocated in advance), the rest of the pages
++ * could be in high memory.
++ *
++ * root
++ * | --------- -------- --------
++ * --> | . . +|--->| . +|------->| . . | indirect
++ * --|--|--- ---|---- --|-|--- maps
++ * | | | | |
++ * ------ ------ ------- ------ -------
++ * | . | | . | | . . | | . | | . . | maps
++ * --|--- --|--- --|--|-- --|--- ---|-|--
++ * page page page page page page page data
++ * pages
++ *
++ * Writes to the dump device happen sequentially in append mode.
++ * The main reason for the existence of the indirect map is
++ * to enable a quick way to lookup a specific logical offset in
++ * the saved data post-soft-boot, e.g. to writeout pages
++ * with more critical data first, even though such pages
++ * would have been compressed and copied last, being the lowest
++ * ranked candidates for reuse due to their criticality.
++ * (See dump_overlay.c)
++ */
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/dump.h>
++#include "dump_methods.h"
+
-+/* ----- PASSTHRU FILTER ROUTINE --------- */
++#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */
++#define DUMP_IND_MAP_SZ DUMP_MAP_SZ - 1 /* indirect map size */
++#define DUMP_NR_BOOTSTRAP 64 /* no of bootstrap pages */
+
-+/* transparent - passes everything through */
-+int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
-+{
-+ return 1;
-+}
++extern int dump_low_page(struct page *);
+
-+/* ----- PASSTRU FORMAT ROUTINES ---- */
++/* check if the next entry crosses a page boundary */
++static inline int is_last_map_entry(unsigned long *map)
++{
++ unsigned long addr = (unsigned long)(map + 1);
+
++ return (!(addr & (PAGE_SIZE - 1)));
++}
+
-+int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
++/* Todo: should have some validation checks */
++/* The last entry in the indirect map points to the next indirect map */
++/* Indirect maps are referred to directly by virtual address */
++static inline unsigned long *next_indirect_map(unsigned long *map)
+{
-+ dump_config.dumper->header_dirty++;
-+ return 0;
++ return (unsigned long *)map[DUMP_IND_MAP_SZ];
+}
+
-+/* Copies bytes of data from page(s) to the specified buffer */
-+int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++/* Called during early bootup - fixme: make this __init */
++void dump_early_reserve_map(struct dump_memdev *dev)
+{
-+ unsigned long len = 0, bytes;
-+ void *addr;
++ unsigned long *map1, *map2;
++ loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
++ int i, j;
++
++ printk("Reserve bootmap space holding previous dump of %lld pages\n",
++ last);
++ map1= (unsigned long *)dev->indirect_map_root;
+
-+ while (len < sz) {
-+ addr = kmap_atomic(page, KM_DUMP);
-+ bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;
-+ memcpy(buf, addr, bytes);
-+ kunmap_atomic(addr, KM_DUMP);
-+ buf += bytes;
-+ len += bytes;
-+ page++;
++ while (map1 && (off < last)) {
++#ifdef CONFIG_X86_64
++ reserve_bootmem_node(NODE_DATA(0), virt_to_phys((void *)map1),
++ PAGE_SIZE);
++#else
++ reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE);
++#endif
++ for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
++ i++, off += DUMP_MAP_SZ) {
++ pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
++ if (map1[i] >= max_low_pfn)
++ continue;
++#ifdef CONFIG_X86_64
++ reserve_bootmem_node(NODE_DATA(0),
++ map1[i] << PAGE_SHIFT, PAGE_SIZE);
++#else
++ reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE);
++#endif
++ map2 = pfn_to_kaddr(map1[i]);
++ for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
++ (off + j < last); j++) {
++ pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
++ map2[j]);
++ if (map2[j] < max_low_pfn) {
++#ifdef CONFIG_X86_64
++ reserve_bootmem_node(NODE_DATA(0),
++ map2[j] << PAGE_SHIFT,
++ PAGE_SIZE);
++#else
++ reserve_bootmem(map2[j] << PAGE_SHIFT,
++ PAGE_SIZE);
++#endif
++ }
++ }
++ }
++ map1 = next_indirect_map(map1);
+ }
-+ /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
-+
-+ return sz - len;
++ dev->nr_free = 0; /* these pages don't belong to this boot */
+}
++#endif
+
-+int dump_passthru_update_header(void)
++/* mark dump pages so that they aren't used by this kernel */
++void dump_mark_map(struct dump_memdev *dev)
+{
-+ long len = dump_config.dumper->header_len;
++ unsigned long *map1, *map2;
++ loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
+ struct page *page;
-+ void *buf = dump_config.dumper->dump_buf;
-+ int err = 0;
-+
-+ if (!dump_config.dumper->header_dirty)
-+ return 0;
-+
-+ pr_debug("Copying header of size %ld bytes from memory\n", len);
-+ if (len > DUMP_BUFFER_SIZE)
-+ return -E2BIG;
-+
-+ page = dump_mem_lookup(dump_memdev, 0);
-+ for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
-+ if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
-+ return err;
-+ page = dump_mem_next_page(dump_memdev);
-+ }
-+ if (len > 0) {
-+ printk("Incomplete header saved in mem\n");
-+ return -ENOENT;
-+ }
++ int i, j;
++
++ printk("Dump: marking pages in use by previous dump\n");
++ map1= (unsigned long *)dev->indirect_map_root;
+
-+ if ((err = dump_dev_seek(0))) {
-+ printk("Unable to seek to dump header offset\n");
-+ return err;
++ while (map1 && (off < last)) {
++ page = virt_to_page(map1);
++ set_page_count(page, 1);
++ for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
++ i++, off += DUMP_MAP_SZ) {
++ pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
++ page = pfn_to_page(map1[i]);
++ set_page_count(page, 1);
++ map2 = kmap_atomic(page, KM_DUMP);
++ for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
++ (off + j < last); j++) {
++ pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
++ map2[j]);
++ page = pfn_to_page(map2[j]);
++ set_page_count(page, 1);
++ }
++ }
++ map1 = next_indirect_map(map1);
+ }
-+ err = dump_ll_write(dump_config.dumper->dump_buf,
-+ buf - dump_config.dumper->dump_buf);
-+ if (err < dump_config.dumper->header_len)
-+ return (err < 0) ? err : -ENOSPC;
-+
-+ dump_config.dumper->header_dirty = 0;
-+ return 0;
+}
++
+
-+static loff_t next_dph_offset = 0;
-+
-+static int dph_valid(struct __dump_page *dph)
++/*
++ * Given a logical offset into the mem device lookup the
++ * corresponding page
++ * loc is specified in units of pages
++ * Note: affects curr_map (even in the case where lookup fails)
++ */
++struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc)
+{
-+ if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
-+ > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
-+ (dph->dp_size > PAGE_SIZE)) {
-+ printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
-+ dph->dp_address, dph->dp_size, dph->dp_flags);
-+ return 0;
++ unsigned long *map;
++ unsigned long i, index = loc / DUMP_MAP_SZ;
++ struct page *page = NULL;
++ unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
++
++ map = (unsigned long *)dump_mdev->indirect_map_root;
++ if (!map)
++ return NULL;
++ if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
++ return NULL;
++
++ /*
++ * first locate the right indirect map
++ * in the chain of indirect maps
++ */
++ for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) {
++ if (!(map = next_indirect_map(map)))
++ return NULL;
++ }
++ /* then the right direct map */
++ /* map entries are referred to by page index */
++ if ((curr_map = map[index - i])) {
++ page = pfn_to_page(curr_map);
++ /* update the current traversal index */
++ /* dump_mdev->curr_map = &map[index - i];*/
++ curr_map_ptr = &map[index - i];
+ }
-+ return 1;
-+}
+
-+int dump_verify_lcrash_data(void *buf, unsigned long sz)
-+{
-+ struct __dump_page *dph;
++ if (page)
++ map = kmap_atomic(page, KM_DUMP);
++ else
++ return NULL;
+
-+ /* sanity check for page headers */
-+ while (next_dph_offset + sizeof(*dph) < sz) {
-+ dph = (struct __dump_page *)(buf + next_dph_offset);
-+ if (!dph_valid(dph)) {
-+ printk("Invalid page hdr at offset 0x%llx\n",
-+ next_dph_offset);
-+ return -EINVAL;
-+ }
-+ next_dph_offset += dph->dp_size + sizeof(*dph);
++ /* and finally the right entry therein */
++ /* data pages are referred to by page index */
++ i = index * DUMP_MAP_SZ;
++ if ((curr_pfn = map[loc - i])) {
++ page = pfn_to_page(curr_pfn);
++ dump_mdev->curr_map = curr_map_ptr;
++ dump_mdev->curr_map_offset = loc - i;
++ dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT;
++ } else {
++ page = NULL;
+ }
++ kunmap_atomic(map, KM_DUMP);
+
-+ next_dph_offset -= sz;
-+ return 0;
++ return page;
+}
-+
++
+/*
-+ * TBD/Later: Consider avoiding the copy by using a scatter/gather
-+ * vector representation for the dump buffer
++ * Retrieves a pointer to the next page in the dump device
++ * Used during the lookup pass post-soft-reboot
+ */
-+int dump_passthru_add_data(unsigned long loc, unsigned long sz)
++struct page *dump_mem_next_page(struct dump_memdev *dev)
+{
-+ struct page *page = (struct page *)loc;
-+ void *buf = dump_config.dumper->curr_buf;
-+ int err = 0;
++ unsigned long i;
++ unsigned long *map;
++ struct page *page = NULL;
+
-+ if ((err = dump_copy_pages(buf, page, sz))) {
-+ printk("dump_copy_pages failed");
-+ return err;
++ if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) {
++ return NULL;
+ }
+
-+ if ((err = dump_verify_lcrash_data(buf, sz))) {
-+ printk("dump_verify_lcrash_data failed\n");
-+ printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
-+ printk("Page flags 0x%lx\n", page->flags);
-+ printk("Page count 0x%x\n", page_count(page));
-+ return err;
-+ }
++ if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) {
++ /* move to next map */
++ if (is_last_map_entry(++dev->curr_map)) {
++ /* move to the next indirect map page */
++ printk("dump_mem_next_page: go to next indirect map\n");
++ dev->curr_map = (unsigned long *)*dev->curr_map;
++ if (!dev->curr_map)
++ return NULL;
++ }
++ i = dev->curr_map_offset = 0;
++ pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n",
++ dev->curr_map, *dev->curr_map);
+
-+ dump_config.dumper->curr_buf = buf + sz;
++ };
++
++ if (*dev->curr_map) {
++ map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP);
++ if (map[i])
++ page = pfn_to_page(map[i]);
++ kunmap_atomic(map, KM_DUMP);
++ dev->ddev.curr_offset += PAGE_SIZE;
++ };
+
-+ return 0;
++ return page;
+}
+
++/* Copied from dump_filters.c */
++static inline int kernel_page(struct page *p)
++{
++ /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
++ return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
++}
+
-+/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
-+
-+/* Scheme to overlay saved data in memory for writeout after a soft-boot */
-+struct dump_scheme_ops dump_scheme_overlay_ops = {
-+ .configure = dump_overlay_configure,
-+ .unconfigure = dump_overlay_unconfigure,
-+ .sequencer = dump_overlay_sequencer,
-+ .iterator = dump_page_iterator,
-+ .save_data = dump_overlay_save_data,
-+ .skip_data = dump_overlay_skip_data,
-+ .write_buffer = dump_generic_write_buffer
-+};
-+
-+struct dump_scheme dump_scheme_overlay = {
-+ .name = "overlay",
-+ .ops = &dump_scheme_overlay_ops
-+};
-+
-+
-+/* Stage 1 must use a good compression scheme - default to gzip */
-+extern struct __dump_compress dump_gzip_compression;
-+
-+struct dumper dumper_stage1 = {
-+ .name = "stage1",
-+ .scheme = &dump_scheme_overlay,
-+ .fmt = &dump_fmt_lcrash,
-+ .compress = &dump_none_compression, /* needs to be gzip */
-+ .filter = dump_filter_table,
-+ .dev = NULL,
-+};
-+
-+/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
-+
-+/* Formatter that transfers data as is (transparent) w/o further conversion */
-+struct dump_fmt_ops dump_fmt_passthru_ops = {
-+ .configure_header = dump_passthru_configure_header,
-+ .update_header = dump_passthru_update_header,
-+ .save_context = NULL, /* unused */
-+ .add_data = dump_passthru_add_data,
-+ .update_end_marker = dump_lcrash_update_end_marker
-+};
-+
-+struct dump_fmt dump_fmt_passthru = {
-+ .name = "passthru",
-+ .ops = &dump_fmt_passthru_ops
-+};
-+
-+/* Filter that simply passes along any data within the range (transparent)*/
-+/* Note: The start and end ranges in the table are filled in at run-time */
-+
-+extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
++static inline int user_page(struct page *p)
++{
++ return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
++}
+
-+struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
-+{.name = "passkern", .selector = dump_passthru_filter,
-+ .level_mask = DUMP_MASK_KERN },
-+{.name = "passuser", .selector = dump_passthru_filter,
-+ .level_mask = DUMP_MASK_USED },
-+{.name = "passunused", .selector = dump_passthru_filter,
-+ .level_mask = DUMP_MASK_UNUSED },
-+{.name = "none", .selector = dump_filter_none,
-+ .level_mask = DUMP_MASK_REST }
-+};
++int dump_reused_by_boot(struct page *page)
++{
++ /* Todo
++ * Checks:
++ * if PageReserved
++ * if < __end + bootmem_bootmap_pages for this boot + allowance
++ * if overwritten by initrd (how to check ?)
++ * Also, add more checks in early boot code
++ * e.g. bootmem bootmap alloc verify not overwriting dump, and if
++ * so then realloc or move the dump pages out accordingly.
++ */
+
++ /* Temporary proof of concept hack, avoid overwriting kern pages */
+
-+/* Scheme to handle data staged / preserved across a soft-boot */
-+struct dump_scheme_ops dump_scheme_staged_ops = {
-+ .configure = dump_generic_configure,
-+ .unconfigure = dump_staged_unconfigure,
-+ .sequencer = dump_generic_sequencer,
-+ .iterator = dump_saved_data_iterator,
-+ .save_data = dump_generic_save_data,
-+ .skip_data = dump_generic_skip_data,
-+ .write_buffer = dump_generic_write_buffer
-+};
++ return (kernel_page(page) || dump_low_page(page) || user_page(page));
++}
+
-+struct dump_scheme dump_scheme_staged = {
-+ .name = "staged",
-+ .ops = &dump_scheme_staged_ops
-+};
+
-+/* The stage 2 dumper comprising all these */
-+struct dumper dumper_stage2 = {
-+ .name = "stage2",
-+ .scheme = &dump_scheme_staged,
-+ .fmt = &dump_fmt_passthru,
-+ .compress = &dump_none_compression,
-+ .filter = dump_passthru_filtertable,
-+ .dev = NULL,
-+};
++/* Uses the free page passed in to expand available space */
++int dump_mem_add_space(struct dump_memdev *dev, struct page *page)
++{
++ struct page *map_page;
++ unsigned long *map;
++ unsigned long i;
+
-Index: linux-2.6.10/drivers/dump/dump_memdev.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_memdev.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_memdev.c 2005-04-05 16:47:53.947204496 +0800
-@@ -0,0 +1,655 @@
-+/*
-+ * Implements the dump driver interface for saving a dump in available
-+ * memory areas. The saved pages may be written out to persistent storage
-+ * after a soft reboot.
-+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ *
-+ * Copyright (C) 2002 International Business Machines Corp.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ *
-+ * The approach of tracking pages containing saved dump using map pages
-+ * allocated as needed has been derived from the Mission Critical Linux
-+ * mcore dump implementation.
-+ *
-+ * Credits and a big thanks for letting the lkcd project make use of
-+ * the excellent piece of work and also helping with clarifications
-+ * and tips along the way are due to:
-+ * Dave Winchell <winchell@mclx.com> (primary author of mcore)
-+ * Jeff Moyer <moyer@mclx.com>
-+ * Josh Huber <huber@mclx.com>
-+ *
-+ * For those familiar with the mcore code, the main differences worth
-+ * noting here (besides the dump device abstraction) result from enabling
-+ * "high" memory pages (pages not permanently mapped in the kernel
-+ * address space) to be used for saving dump data (because of which a
-+ * simple virtual address based linked list cannot be used anymore for
-+ * managing free pages), an added level of indirection for faster
-+ * lookups during the post-boot stage, and the idea of pages being
-+ * made available as they get freed up while dump to memory progresses
-+ * rather than one time before starting the dump. The last point enables
-+ * a full memory snapshot to be saved starting with an initial set of
-+ * bootstrap pages given a good compression ratio. (See dump_overlay.c)
-+ *
-+ */
++ if (!dev->curr_map)
++ return -ENOMEM; /* must've exhausted indirect map */
+
-+/*
-+ * -----------------MEMORY LAYOUT ------------------
-+ * The memory space consists of a set of discontiguous pages, and
-+ * discontiguous map pages as well, rooted in a chain of indirect
-+ * map pages (also discontiguous). Except for the indirect maps
-+ * (which must be preallocated in advance), the rest of the pages
-+ * could be in high memory.
-+ *
-+ * root
-+ * | --------- -------- --------
-+ * --> | . . +|--->| . +|------->| . . | indirect
-+ * --|--|--- ---|---- --|-|--- maps
-+ * | | | | |
-+ * ------ ------ ------- ------ -------
-+ * | . | | . | | . . | | . | | . . | maps
-+ * --|--- --|--- --|--|-- --|--- ---|-|--
-+ * page page page page page page page data
-+ * pages
-+ *
-+ * Writes to the dump device happen sequentially in append mode.
-+ * The main reason for the existence of the indirect map is
-+ * to enable a quick way to lookup a specific logical offset in
-+ * the saved data post-soft-boot, e.g. to writeout pages
-+ * with more critical data first, even though such pages
-+ * would have been compressed and copied last, being the lowest
-+ * ranked candidates for reuse due to their criticality.
-+ * (See dump_overlay.c)
-+ */
-+#include <linux/mm.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/dump.h>
-+#include "dump_methods.h"
++ if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) {
++ /* add map space */
++ *dev->curr_map = page_to_pfn(page);
++ dev->curr_map_offset = 0;
++ return 0;
++ }
+
-+#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */
-+#define DUMP_IND_MAP_SZ DUMP_MAP_SZ - 1 /* indirect map size */
-+#define DUMP_NR_BOOTSTRAP 64 /* no of bootstrap pages */
++ /* add data space */
++ i = dev->curr_map_offset;
++ map_page = pfn_to_page(*dev->curr_map);
++ map = (unsigned long *)kmap_atomic(map_page, KM_DUMP);
++ map[i] = page_to_pfn(page);
++ kunmap_atomic(map, KM_DUMP);
++ dev->curr_map_offset = ++i;
++ dev->last_offset += PAGE_SIZE;
++ if (i >= DUMP_MAP_SZ) {
++ /* move to next map */
++ if (is_last_map_entry(++dev->curr_map)) {
++ /* move to the next indirect map page */
++			pr_debug("dump_mem_add_space: using next "
++ "indirect map\n");
++ dev->curr_map = (unsigned long *)*dev->curr_map;
++ }
++ }
++ return 0;
++}
+
-+extern int dump_low_page(struct page *);
+
-+/* check if the next entry crosses a page boundary */
-+static inline int is_last_map_entry(unsigned long *map)
++/* Caution: making a dest page invalidates existing contents of the page */
++int dump_check_and_free_page(struct dump_memdev *dev, struct page *page)
+{
-+ unsigned long addr = (unsigned long)(map + 1);
++ int err = 0;
+
-+ return (!(addr & (PAGE_SIZE - 1)));
++ /*
++ * the page can be used as a destination only if we are sure
++ * it won't get overwritten by the soft-boot, and is not
++ * critical for us right now.
++ */
++ if (dump_reused_by_boot(page))
++ return 0;
++
++ if ((err = dump_mem_add_space(dev, page))) {
++ printk("Warning: Unable to extend memdev space. Err %d\n",
++ err);
++ return 0;
++ }
++
++ dev->nr_free++;
++ return 1;
+}
+
-+/* Todo: should have some validation checks */
-+/* The last entry in the indirect map points to the next indirect map */
-+/* Indirect maps are referred to directly by virtual address */
-+static inline unsigned long *next_indirect_map(unsigned long *map)
++
++/* Set up the initial maps and bootstrap space */
++/* Must be called only after any previous dump is written out */
++int dump_mem_open(struct dump_dev *dev, unsigned long devid)
+{
-+ return (unsigned long *)map[DUMP_IND_MAP_SZ];
++ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
++ unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root;
++ void *addr;
++ struct page *page;
++ unsigned long i = 0;
++ int err = 0;
++
++ /* Todo: sanity check for unwritten previous dump */
++
++ /* allocate pages for indirect map (non highmem area) */
++ nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */
++ for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) {
++ if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) {
++ printk("Unable to alloc indirect map %ld\n",
++ i / DUMP_IND_MAP_SZ);
++ return -ENOMEM;
++ }
++ clear_page(map);
++ *prev_map = (unsigned long)map;
++ prev_map = &map[DUMP_IND_MAP_SZ];
++ };
++
++ dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root;
++ dump_mdev->curr_map_offset = 0;
++
++ /*
++ * allocate a few bootstrap pages: at least 1 map and 1 data page
++ * plus enough to save the dump header
++ */
++ i = 0;
++ do {
++ if (!(addr = dump_alloc_mem(PAGE_SIZE))) {
++ printk("Unable to alloc bootstrap page %ld\n", i);
++ return -ENOMEM;
++ }
++
++ page = virt_to_page(addr);
++ if (dump_low_page(page)) {
++ dump_free_mem(addr);
++ continue;
++ }
++
++		if ((err = dump_mem_add_space(dump_mdev, page))) {
++ printk("Warning: Unable to extend memdev "
++ "space. Err %d\n", err);
++ dump_free_mem(addr);
++ continue;
++ }
++ i++;
++ } while (i < DUMP_NR_BOOTSTRAP);
++
++ printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n",
++		nr_maps, i, (unsigned long)(dump_mdev->last_offset >> PAGE_SHIFT));
++
++ dump_mdev->last_bs_offset = dump_mdev->last_offset;
++
++ return 0;
+}
+
-+#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
-+/* Called during early bootup - fixme: make this __init */
-+void dump_early_reserve_map(struct dump_memdev *dev)
++/* Releases all pre-alloc'd pages */
++int dump_mem_release(struct dump_dev *dev)
+{
-+ unsigned long *map1, *map2;
-+ loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
-+ int i, j;
-+
-+ printk("Reserve bootmap space holding previous dump of %lld pages\n",
-+ last);
-+ map1= (unsigned long *)dev->indirect_map_root;
++ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
++ struct page *page, *map_page;
++ unsigned long *map, *prev_map;
++ void *addr;
++ int i;
+
-+ while (map1 && (off < last)) {
-+#ifdef CONFIG_X86_64
-+ reserve_bootmem_node(NODE_DATA(0), virt_to_phys((void *)map1),
-+ PAGE_SIZE);
-+#else
-+ reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE);
-+#endif
-+ for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
-+ i++, off += DUMP_MAP_SZ) {
-+ pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
-+ if (map1[i] >= max_low_pfn)
-+ continue;
-+#ifdef CONFIG_X86_64
-+ reserve_bootmem_node(NODE_DATA(0),
-+ map1[i] << PAGE_SHIFT, PAGE_SIZE);
-+#else
-+ reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE);
-+#endif
-+ map2 = pfn_to_kaddr(map1[i]);
-+ for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
-+ (off + j < last); j++) {
-+ pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
-+ map2[j]);
-+ if (map2[j] < max_low_pfn) {
-+#ifdef CONFIG_X86_64
-+ reserve_bootmem_node(NODE_DATA(0),
-+ map2[j] << PAGE_SHIFT,
-+ PAGE_SIZE);
-+#else
-+ reserve_bootmem(map2[j] << PAGE_SHIFT,
-+ PAGE_SIZE);
-+#endif
-+ }
++ if (!dump_mdev->nr_free)
++ return 0;
++
++ pr_debug("dump_mem_release\n");
++ page = dump_mem_lookup(dump_mdev, 0);
++ for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) {
++ if (PageHighMem(page))
++ break;
++ addr = page_address(page);
++ if (!addr) {
++ printk("page_address(%p) = NULL\n", page);
++ break;
++ }
++		pr_debug("Freeing page at %p\n", addr);
++ dump_free_mem(addr);
++ if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) {
++ map_page = pfn_to_page(*dump_mdev->curr_map);
++ if (PageHighMem(map_page))
++ break;
++ page = dump_mem_next_page(dump_mdev);
++ addr = page_address(map_page);
++ if (!addr) {
++ printk("page_address(%p) = NULL\n",
++ map_page);
++ break;
+ }
++			pr_debug("Freeing map page at %p\n", addr);
++ dump_free_mem(addr);
++ i++;
++ } else {
++ page = dump_mem_next_page(dump_mdev);
+ }
-+ map1 = next_indirect_map(map1);
+ }
-+ dev->nr_free = 0; /* these pages don't belong to this boot */
-+}
-+#endif
-+
-+/* mark dump pages so that they aren't used by this kernel */
-+void dump_mark_map(struct dump_memdev *dev)
-+{
-+ unsigned long *map1, *map2;
-+ loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
-+ struct page *page;
-+ int i, j;
-+
-+ printk("Dump: marking pages in use by previous dump\n");
-+ map1= (unsigned long *)dev->indirect_map_root;
+
-+ while (map1 && (off < last)) {
-+ page = virt_to_page(map1);
-+ set_page_count(page, 1);
-+ for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
-+ i++, off += DUMP_MAP_SZ) {
-+ pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
-+ page = pfn_to_page(map1[i]);
-+ set_page_count(page, 1);
-+ map2 = kmap_atomic(page, KM_DUMP);
-+ for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
-+ (off + j < last); j++) {
-+ pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
-+ map2[j]);
-+ page = pfn_to_page(map2[j]);
-+ set_page_count(page, 1);
++ /* now for the last used bootstrap page used as a map page */
++ if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) {
++ map_page = pfn_to_page(*dump_mdev->curr_map);
++ if ((map_page) && !PageHighMem(map_page)) {
++ addr = page_address(map_page);
++ if (!addr) {
++ printk("page_address(%p) = NULL\n", map_page);
++ } else {
++				pr_debug("Freeing map page at %p\n", addr);
++ dump_free_mem(addr);
++ i++;
+ }
+ }
-+ map1 = next_indirect_map(map1);
+ }
++
++ printk("Freed %d bootstrap pages\n", i);
++
++ /* free the indirect maps */
++ map = (unsigned long *)dump_mdev->indirect_map_root;
++
++ i = 0;
++ while (map) {
++ prev_map = map;
++ map = next_indirect_map(map);
++ dump_free_mem(prev_map);
++ i++;
++ }
++
++ printk("Freed %d indirect map(s)\n", i);
++
++ /* Reset the indirect map */
++ dump_mdev->indirect_map_root = 0;
++ dump_mdev->curr_map = 0;
++
++ /* Reset the free list */
++ dump_mdev->nr_free = 0;
++
++ dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0;
++ dump_mdev->last_used_offset = 0;
++ dump_mdev->curr_map = NULL;
++ dump_mdev->curr_map_offset = 0;
++ return 0;
+}
-+
+
++/*
++ * Long term:
++ * It is critical for this to be very strict. Cannot afford
++ * to have anything running and accessing memory while we overwrite
++ * memory (potential risk of data corruption).
++ * If in doubt (e.g if a cpu is hung and not responding) just give
++ * up and refuse to proceed with this scheme.
++ *
++ * Note: I/O will only happen after soft-boot/switchover, so we can
++ * safely disable interrupts and force stop other CPUs if this is
++ * going to be a disruptive dump, no matter what they
++ * are in the middle of.
++ */
+/*
-+ * Given a logical offset into the mem device lookup the
-+ * corresponding page
-+ * loc is specified in units of pages
-+ * Note: affects curr_map (even in the case where lookup fails)
++ * ATM Most of this is already taken care of in the nmi handler
++ * We may halt the cpus rightaway if we know this is going to be disruptive
++ * For now, since we've limited ourselves to overwriting free pages we
++ * aren't doing much here. Eventually, we'd have to wait to make sure other
++ * cpus aren't using memory we could be overwriting
+ */
-+struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc)
++int dump_mem_silence(struct dump_dev *dev)
+{
-+ unsigned long *map;
-+ unsigned long i, index = loc / DUMP_MAP_SZ;
-+ struct page *page = NULL;
-+ unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
-+
-+ map = (unsigned long *)dump_mdev->indirect_map_root;
-+ if (!map)
-+ return NULL;
-+ if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
-+ return NULL;
++ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
+
-+ /*
-+ * first locate the right indirect map
-+ * in the chain of indirect maps
-+ */
-+ for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) {
-+ if (!(map = next_indirect_map(map)))
-+ return NULL;
-+ }
-+ /* then the right direct map */
-+ /* map entries are referred to by page index */
-+ if ((curr_map = map[index - i])) {
-+ page = pfn_to_page(curr_map);
-+ /* update the current traversal index */
-+ /* dump_mdev->curr_map = &map[index - i];*/
-+ curr_map_ptr = &map[index - i];
++ if (dump_mdev->last_offset > dump_mdev->last_bs_offset) {
++ /* prefer to run lkcd config & start with a clean slate */
++ return -EEXIST;
+ }
++ return 0;
++}
+
-+ if (page)
-+ map = kmap_atomic(page, KM_DUMP);
-+ else
-+ return NULL;
-+
-+ /* and finally the right entry therein */
-+ /* data pages are referred to by page index */
-+ i = index * DUMP_MAP_SZ;
-+ if ((curr_pfn = map[loc - i])) {
-+ page = pfn_to_page(curr_pfn);
-+ dump_mdev->curr_map = curr_map_ptr;
-+ dump_mdev->curr_map_offset = loc - i;
-+ dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT;
-+ } else {
-+ page = NULL;
-+ }
-+ kunmap_atomic(map, KM_DUMP);
++extern int dump_overlay_resume(void);
+
-+ return page;
++/* Trigger the next stage of dumping */
++int dump_mem_resume(struct dump_dev *dev)
++{
++ dump_overlay_resume();
++ return 0;
+}
-+
++
+/*
-+ * Retrieves a pointer to the next page in the dump device
-+ * Used during the lookup pass post-soft-reboot
++ * Allocate mem dev pages as required and copy buffer contents into it.
++ * Fails if no free pages are available
++ * Keeping it simple and limited for starters (can modify this over time)
++ * Does not handle holes or a sparse layout
++ * Data must be in multiples of PAGE_SIZE
+ */
-+struct page *dump_mem_next_page(struct dump_memdev *dev)
++int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len)
+{
-+ unsigned long i;
-+ unsigned long *map;
-+ struct page *page = NULL;
++ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
++ struct page *page;
++ unsigned long n = 0;
++ void *addr;
++ unsigned long *saved_curr_map, saved_map_offset;
++ int ret = 0;
+
-+ if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) {
-+ return NULL;
++ pr_debug("dump_mem_write: offset 0x%llx, size %ld\n",
++ dev->curr_offset, len);
++
++ if (dev->curr_offset + len > dump_mdev->last_offset) {
++ printk("Out of space to write\n");
++ return -ENOSPC;
+ }
++
++ if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1)))
++ return -EINVAL; /* not aligned in units of page size */
+
-+ if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) {
-+ /* move to next map */
-+ if (is_last_map_entry(++dev->curr_map)) {
-+ /* move to the next indirect map page */
-+ printk("dump_mem_next_page: go to next indirect map\n");
-+ dev->curr_map = (unsigned long *)*dev->curr_map;
-+ if (!dev->curr_map)
-+ return NULL;
-+ }
-+ i = dev->curr_map_offset = 0;
-+ pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n",
-+ dev->curr_map, *dev->curr_map);
++ saved_curr_map = dump_mdev->curr_map;
++ saved_map_offset = dump_mdev->curr_map_offset;
++ page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT);
+
-+ };
-+
-+ if (*dev->curr_map) {
-+ map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP);
-+ if (map[i])
-+ page = pfn_to_page(map[i]);
-+ kunmap_atomic(map, KM_DUMP);
-+ dev->ddev.curr_offset += PAGE_SIZE;
-+ };
++ for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) {
++ addr = kmap_atomic(page, KM_DUMP);
++ /* memset(addr, 'x', PAGE_SIZE); */
++ memcpy(addr, buf, PAGE_SIZE);
++ kunmap_atomic(addr, KM_DUMP);
++ /* dev->curr_offset += PAGE_SIZE; */
++ page = dump_mem_next_page(dump_mdev);
++ }
+
-+ return page;
-+}
++ dump_mdev->curr_map = saved_curr_map;
++ dump_mdev->curr_map_offset = saved_map_offset;
+
-+/* Copied from dump_filters.c */
-+static inline int kernel_page(struct page *p)
-+{
-+ /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
-+ return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
++ if (dump_mdev->last_used_offset < dev->curr_offset)
++ dump_mdev->last_used_offset = dev->curr_offset;
++
++ return (len - n) ? (len - n) : ret ;
+}
+
-+static inline int user_page(struct page *p)
++/* dummy - always ready */
++int dump_mem_ready(struct dump_dev *dev, void *buf)
+{
-+ return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
++ return 0;
+}
+
-+int dump_reused_by_boot(struct page *page)
++/*
++ * Should check for availability of space to write upto the offset
++ * affects only the curr_offset; last_offset untouched
++ * Keep it simple: Only allow multiples of PAGE_SIZE for now
++ */
++int dump_mem_seek(struct dump_dev *dev, loff_t offset)
+{
-+ /* Todo
-+ * Checks:
-+ * if PageReserved
-+ * if < __end + bootmem_bootmap_pages for this boot + allowance
-+ * if overwritten by initrd (how to check ?)
-+ * Also, add more checks in early boot code
-+ * e.g. bootmem bootmap alloc verify not overwriting dump, and if
-+ * so then realloc or move the dump pages out accordingly.
-+ */
++ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
+
-+ /* Temporary proof of concept hack, avoid overwriting kern pages */
++ if (offset & (PAGE_SIZE - 1))
++ return -EINVAL; /* allow page size units only for now */
++
++ /* Are we exceeding available space ? */
++ if (offset > dump_mdev->last_offset) {
++ printk("dump_mem_seek failed for offset 0x%llx\n",
++ offset);
++ return -ENOSPC;
++ }
+
-+ return (kernel_page(page) || dump_low_page(page) || user_page(page));
++ dump_mdev->ddev.curr_offset = offset;
++ return 0;
+}
+
++struct dump_dev_ops dump_memdev_ops = {
++ .open = dump_mem_open,
++ .release = dump_mem_release,
++ .silence = dump_mem_silence,
++ .resume = dump_mem_resume,
++ .seek = dump_mem_seek,
++ .write = dump_mem_write,
++ .read = NULL, /* not implemented at the moment */
++ .ready = dump_mem_ready
++};
++
++static struct dump_memdev default_dump_memdev = {
++ .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops,
++ .device_id = 0x14}
++ /* assume the rest of the fields are zeroed by default */
++};
++
++/* may be overwritten if a previous dump exists */
++struct dump_memdev *dump_memdev = &default_dump_memdev;
+
-+/* Uses the free page passed in to expand available space */
-+int dump_mem_add_space(struct dump_memdev *dev, struct page *page)
+Index: linux-2.6.10/drivers/dump/dump_blockdev.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_blockdev.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_blockdev.c 2005-04-07 18:13:56.909752248 +0800
+@@ -0,0 +1,469 @@
++/*
++ * Implements the dump driver interface for saving a dump to
++ * a block device through the kernel's generic low level block i/o
++ * routines.
++ *
++ * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
++ * Moved original lkcd kiobuf dump i/o code from dump_base.c
++ * to use generic dump device interfaces
++ *
++ * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
++ * Convert dump i/o to directly use bio instead of kiobuf for 2.5
++ *
++ * Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
++ * Rework to new dumpdev.h structures, implement open/close/
++ * silence, misc fixes (blocknr removal, bio_add_page usage)
++ *
++ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
++ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
++
++#include <linux/types.h>
++#include <linux/proc_fs.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/blkdev.h>
++#include <linux/bio.h>
++#include <asm/hardirq.h>
++#include <linux/dump.h>
++#include "dump_methods.h"
++
++extern void *dump_page_buf;
++
++/* The end_io callback for dump i/o completion */
++static int
++dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
+{
-+ struct page *map_page;
-+ unsigned long *map;
-+ unsigned long i;
++ struct dump_blockdev *dump_bdev;
+
-+ if (!dev->curr_map)
-+ return -ENOMEM; /* must've exhausted indirect map */
++ if (bio->bi_size) {
++ /* some bytes still left to transfer */
++ return 1; /* not complete */
++ }
+
-+ if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) {
-+ /* add map space */
-+ *dev->curr_map = page_to_pfn(page);
-+ dev->curr_map_offset = 0;
-+ return 0;
++ dump_bdev = (struct dump_blockdev *)bio->bi_private;
++ if (error) {
++ printk("IO error while writing the dump, aborting\n");
+ }
+
-+ /* add data space */
-+ i = dev->curr_map_offset;
-+ map_page = pfn_to_page(*dev->curr_map);
-+ map = (unsigned long *)kmap_atomic(map_page, KM_DUMP);
-+ map[i] = page_to_pfn(page);
-+ kunmap_atomic(map, KM_DUMP);
-+ dev->curr_map_offset = ++i;
-+ dev->last_offset += PAGE_SIZE;
-+ if (i >= DUMP_MAP_SZ) {
-+ /* move to next map */
-+ if (is_last_map_entry(++dev->curr_map)) {
-+ /* move to the next indirect map page */
-+ pr_debug("dump_mem_add_space: using next"
-+ "indirect map\n");
-+ dev->curr_map = (unsigned long *)*dev->curr_map;
-+ }
-+ }
++ dump_bdev->err = error;
++
++ /* no wakeup needed, since caller polls for completion */
+ return 0;
+}
+
-+
-+/* Caution: making a dest page invalidates existing contents of the page */
-+int dump_check_and_free_page(struct dump_memdev *dev, struct page *page)
++/* Check if the dump bio is already mapped to the specified buffer */
++static int
++dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
++ int len)
+{
-+ int err = 0;
++ struct bio *bio = dev->bio;
++ unsigned long bsize = 0;
+
-+ /*
-+ * the page can be used as a destination only if we are sure
-+ * it won't get overwritten by the soft-boot, and is not
-+ * critical for us right now.
-+ */
-+ if (dump_reused_by_boot(page))
-+ return 0;
++ if (!bio->bi_vcnt)
++ return 0; /* first time, not mapped */
+
-+ if ((err = dump_mem_add_space(dev, page))) {
-+ printk("Warning: Unable to extend memdev space. Err %d\n",
-+ err);
-+ return 0;
-+ }
+
-+ dev->nr_free++;
-+ return 1;
-+}
++ if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
++ return 0; /* buffer not mapped */
+
++ bsize = bdev_hardsect_size(bio->bi_bdev);
++ if ((len & (PAGE_SIZE - 1)) || (len & bsize))
++ return 0; /* alignment checks needed */
+
-+/* Set up the initial maps and bootstrap space */
-+/* Must be called only after any previous dump is written out */
-+int dump_mem_open(struct dump_dev *dev, unsigned long devid)
++ /* quick check to decide if we need to redo bio_add_page */
++ if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
++ return 0; /* device may have other restrictions */
++
++ return 1; /* already mapped */
++}
++
++/*
++ * Set up the dump bio for i/o from the specified buffer
++ * Return value indicates whether the full buffer could be mapped or not
++ */
++static int
++dump_block_map(struct dump_blockdev *dev, void *buf, int len)
+{
-+ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
-+ unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root;
-+ void *addr;
-+ struct page *page;
-+ unsigned long i = 0;
-+ int err = 0;
++ struct page *page = virt_to_page(buf);
++ struct bio *bio = dev->bio;
++ unsigned long bsize = 0;
+
-+ /* Todo: sanity check for unwritten previous dump */
++ bio->bi_bdev = dev->bdev;
++ bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
++ bio->bi_idx = 0; /* reset index to the beginning */
+
-+ /* allocate pages for indirect map (non highmem area) */
-+ nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */
-+ for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) {
-+ if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) {
-+ printk("Unable to alloc indirect map %ld\n",
-+ i / DUMP_IND_MAP_SZ);
-+ return -ENOMEM;
-+ }
-+ clear_page(map);
-+ *prev_map = (unsigned long)map;
-+ prev_map = &map[DUMP_IND_MAP_SZ];
-+ };
-+
-+ dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root;
-+ dump_mdev->curr_map_offset = 0;
++ if (dump_block_map_valid(dev, page, len)) {
++ /* already mapped and usable rightaway */
++ bio->bi_size = len; /* reset size to the whole bio */
++ bio->bi_vcnt = (len + PAGE_SIZE - 1) / PAGE_SIZE; /* Set the proper vector cnt */
++ } else {
++ /* need to map the bio */
++ bio->bi_size = 0;
++ bio->bi_vcnt = 0;
++ bsize = bdev_hardsect_size(bio->bi_bdev);
+
-+ /*
-+ * allocate a few bootstrap pages: at least 1 map and 1 data page
-+ * plus enough to save the dump header
-+ */
-+ i = 0;
-+ do {
-+ if (!(addr = dump_alloc_mem(PAGE_SIZE))) {
-+ printk("Unable to alloc bootstrap page %ld\n", i);
-+ return -ENOMEM;
++ /* first a few sanity checks */
++ if (len < bsize) {
++ printk("map: len less than hardsect size \n");
++ return -EINVAL;
+ }
+
-+ page = virt_to_page(addr);
-+ if (dump_low_page(page)) {
-+ dump_free_mem(addr);
-+ continue;
++ if ((unsigned long)buf & bsize) {
++ printk("map: not aligned \n");
++ return -EINVAL;
+ }
+
-+ if (dump_mem_add_space(dump_mdev, page)) {
-+ printk("Warning: Unable to extend memdev "
-+ "space. Err %d\n", err);
-+ dump_free_mem(addr);
-+ continue;
++ /* assume contig. page aligned low mem buffer( no vmalloc) */
++ if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
++ printk("map: invalid buffer alignment!\n");
++ return -EINVAL;
+ }
-+ i++;
-+ } while (i < DUMP_NR_BOOTSTRAP);
++ /* finally we can go ahead and map it */
++ while (bio->bi_size < len)
++ if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
++ break;
++ }
+
-+ printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n",
-+ nr_maps, i, dump_mdev->last_offset >> PAGE_SHIFT);
-+
-+ dump_mdev->last_bs_offset = dump_mdev->last_offset;
++ bio->bi_end_io = dump_bio_end_io;
++ bio->bi_private = dev;
++ }
+
++ if (bio->bi_size != len) {
++ printk("map: bio size = %d not enough for len = %d!\n",
++ bio->bi_size, len);
++ return -E2BIG;
++ }
+ return 0;
+}
+
-+/* Releases all pre-alloc'd pages */
-+int dump_mem_release(struct dump_dev *dev)
++static void
++dump_free_bio(struct bio *bio)
+{
-+ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
-+ struct page *page, *map_page;
-+ unsigned long *map, *prev_map;
-+ void *addr;
-+ int i;
++ if (bio)
++ kfree(bio->bi_io_vec);
++ kfree(bio);
++}
+
-+ if (!dump_mdev->nr_free)
-+ return 0;
++/*
++ * Prepares the dump device so we can take a dump later.
++ * The caller is expected to have filled up the dev_id field in the
++ * block dump dev structure.
++ *
++ * At dump time when dump_block_write() is invoked it will be too
++ * late to recover, so as far as possible make sure obvious errors
++ * get caught right here and reported back to the caller.
++ */
++static int
++dump_block_open(struct dump_dev *dev, unsigned long arg)
++{
++ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
++ struct block_device *bdev;
++ int retval = 0;
++ struct bio_vec *bvec;
+
-+ pr_debug("dump_mem_release\n");
-+ page = dump_mem_lookup(dump_mdev, 0);
-+ for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) {
-+ if (PageHighMem(page))
-+ break;
-+ addr = page_address(page);
-+ if (!addr) {
-+ printk("page_address(%p) = NULL\n", page);
-+ break;
-+ }
-+ pr_debug("Freeing page at 0x%lx\n", addr);
-+ dump_free_mem(addr);
-+ if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) {
-+ map_page = pfn_to_page(*dump_mdev->curr_map);
-+ if (PageHighMem(map_page))
-+ break;
-+ page = dump_mem_next_page(dump_mdev);
-+ addr = page_address(map_page);
-+ if (!addr) {
-+ printk("page_address(%p) = NULL\n",
-+ map_page);
-+ break;
-+ }
-+ pr_debug("Freeing map page at 0x%lx\n", addr);
-+ dump_free_mem(addr);
-+ i++;
-+ } else {
-+ page = dump_mem_next_page(dump_mdev);
-+ }
++ /* make sure this is a valid block device */
++ if (!arg) {
++ retval = -EINVAL;
++ goto err;
+ }
+
-+ /* now for the last used bootstrap page used as a map page */
-+ if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) {
-+ map_page = pfn_to_page(*dump_mdev->curr_map);
-+ if ((map_page) && !PageHighMem(map_page)) {
-+ addr = page_address(map_page);
-+ if (!addr) {
-+ printk("page_address(%p) = NULL\n", map_page);
-+ } else {
-+ pr_debug("Freeing map page at 0x%lx\n", addr);
-+ dump_free_mem(addr);
-+ i++;
-+ }
-+ }
++ /* Convert it to the new dev_t format */
++ arg = MKDEV((arg >> OLDMINORBITS), (arg & OLDMINORMASK));
++
++ /* get a corresponding block_dev struct for this */
++ bdev = bdget((dev_t)arg);
++ if (!bdev) {
++ retval = -ENODEV;
++ goto err;
++ }
++
++ /* get the block device opened */
++ if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0))) {
++ goto err1;
++ }
++
++ if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
++ == NULL) {
++ printk("Cannot allocate bio\n");
++ retval = -ENOMEM;
++ goto err2;
+ }
+
-+ printk("Freed %d bootstrap pages\n", i);
++ bio_init(dump_bdev->bio);
+
-+ /* free the indirect maps */
-+ map = (unsigned long *)dump_mdev->indirect_map_root;
++ if ((bvec = kmalloc(sizeof(struct bio_vec) *
++ (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
++ retval = -ENOMEM;
++ goto err3;
++ }
+
-+ i = 0;
-+ while (map) {
-+ prev_map = map;
-+ map = next_indirect_map(map);
-+ dump_free_mem(prev_map);
-+ i++;
++ /* assign the new dump dev structure */
++ dump_bdev->dev_id = (dev_t)arg;
++ dump_bdev->bdev = bdev;
++
++ /* make a note of the limit */
++ dump_bdev->limit = bdev->bd_inode->i_size;
++
++ /* now make sure we can map the dump buffer */
++ dump_bdev->bio->bi_io_vec = bvec;
++ dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
++
++ retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
++ DUMP_BUFFER_SIZE);
++
++ if (retval) {
++ printk("open: dump_block_map failed, ret %d\n", retval);
++ goto err3;
+ }
+
-+ printk("Freed %d indirect map(s)\n", i);
++ printk("Block device (%d,%d) successfully configured for dumping\n",
++ MAJOR(dump_bdev->dev_id),
++ MINOR(dump_bdev->dev_id));
+
-+ /* Reset the indirect map */
-+ dump_mdev->indirect_map_root = 0;
-+ dump_mdev->curr_map = 0;
+
-+ /* Reset the free list */
-+ dump_mdev->nr_free = 0;
++ /* after opening the block device, return */
++ return retval;
+
-+ dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0;
-+ dump_mdev->last_used_offset = 0;
-+ dump_mdev->curr_map = NULL;
-+ dump_mdev->curr_map_offset = 0;
-+ return 0;
++err3: dump_free_bio(dump_bdev->bio);
++ dump_bdev->bio = NULL;
++err2: if (bdev) blkdev_put(bdev);
++ goto err;
++err1: if (bdev) bdput(bdev);
++ dump_bdev->bdev = NULL;
++err: return retval;
+}
+
+/*
-+ * Long term:
-+ * It is critical for this to be very strict. Cannot afford
-+ * to have anything running and accessing memory while we overwrite
-+ * memory (potential risk of data corruption).
-+ * If in doubt (e.g if a cpu is hung and not responding) just give
-+ * up and refuse to proceed with this scheme.
-+ *
-+ * Note: I/O will only happen after soft-boot/switchover, so we can
-+ * safely disable interrupts and force stop other CPUs if this is
-+ * going to be a disruptive dump, no matter what they
-+ * are in the middle of.
-+ */
-+/*
-+ * ATM Most of this is already taken care of in the nmi handler
-+ * We may halt the cpus rightaway if we know this is going to be disruptive
-+ * For now, since we've limited ourselves to overwriting free pages we
-+ * aren't doing much here. Eventually, we'd have to wait to make sure other
-+ * cpus aren't using memory we could be overwriting
++ * Close the dump device and release associated resources
++ * Invoked when unconfiguring the dump device.
+ */
-+int dump_mem_silence(struct dump_dev *dev)
++static int
++dump_block_release(struct dump_dev *dev)
+{
-+ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
++ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+
-+ if (dump_mdev->last_offset > dump_mdev->last_bs_offset) {
-+ /* prefer to run lkcd config & start with a clean slate */
-+ return -EEXIST;
++ /* release earlier bdev if present */
++ if (dump_bdev->bdev) {
++ blkdev_put(dump_bdev->bdev);
++ dump_bdev->bdev = NULL;
+ }
-+ return 0;
-+}
+
-+extern int dump_overlay_resume(void);
++ dump_free_bio(dump_bdev->bio);
++ dump_bdev->bio = NULL;
+
-+/* Trigger the next stage of dumping */
-+int dump_mem_resume(struct dump_dev *dev)
-+{
-+ dump_overlay_resume();
+ return 0;
+}
+
-+/*
-+ * Allocate mem dev pages as required and copy buffer contents into it.
-+ * Fails if the no free pages are available
-+ * Keeping it simple and limited for starters (can modify this over time)
-+ * Does not handle holes or a sparse layout
-+ * Data must be in multiples of PAGE_SIZE
++
++/*
++ * Prepare the dump device for use (silence any ongoing activity
++ * and quiesce state) when the system crashes.
+ */
-+int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len)
++static int
++dump_block_silence(struct dump_dev *dev)
+{
-+ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
-+ struct page *page;
-+ unsigned long n = 0;
-+ void *addr;
-+ unsigned long *saved_curr_map, saved_map_offset;
-+ int ret = 0;
-+
-+ pr_debug("dump_mem_write: offset 0x%llx, size %ld\n",
-+ dev->curr_offset, len);
++ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
++ struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
++ int ret;
+
-+ if (dev->curr_offset + len > dump_mdev->last_offset) {
-+ printk("Out of space to write\n");
-+ return -ENOSPC;
-+ }
-+
-+ if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1)))
-+ return -EINVAL; /* not aligned in units of page size */
++ /* If we can't get request queue lock, refuse to take the dump */
++ if (!spin_trylock(q->queue_lock))
++ return -EBUSY;
+
-+ saved_curr_map = dump_mdev->curr_map;
-+ saved_map_offset = dump_mdev->curr_map_offset;
-+ page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT);
++ ret = elv_queue_empty(q);
++ spin_unlock(q->queue_lock);
+
-+ for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) {
-+ addr = kmap_atomic(page, KM_DUMP);
-+ /* memset(addr, 'x', PAGE_SIZE); */
-+ memcpy(addr, buf, PAGE_SIZE);
-+ kunmap_atomic(addr, KM_DUMP);
-+ /* dev->curr_offset += PAGE_SIZE; */
-+ page = dump_mem_next_page(dump_mdev);
++ /* For now we assume we have the device to ourselves */
++ /* Just a quick sanity check */
++ if (!ret) {
++ /* Warn the user and move on */
++ printk(KERN_ALERT "Warning: Non-empty request queue\n");
++ printk(KERN_ALERT "I/O requests in flight at dump time\n");
+ }
+
-+ dump_mdev->curr_map = saved_curr_map;
-+ dump_mdev->curr_map_offset = saved_map_offset;
-+
-+ if (dump_mdev->last_used_offset < dev->curr_offset)
-+ dump_mdev->last_used_offset = dev->curr_offset;
++ /*
++ * Move to a softer level of silencing where no spin_lock_irqs
++ * are held on other cpus
++ */
++ dump_silence_level = DUMP_SOFT_SPIN_CPUS;
+
-+ return (len - n) ? (len - n) : ret ;
-+}
++ ret = __dump_irq_enable();
++ if (ret) {
++ return ret;
++ }
+
-+/* dummy - always ready */
-+int dump_mem_ready(struct dump_dev *dev, void *buf)
-+{
++ printk("Dumping to block device (%d,%d) on CPU %d ...\n",
++ MAJOR(dump_bdev->dev_id), MINOR(dump_bdev->dev_id),
++ smp_processor_id());
++
+ return 0;
+}
+
-+/*
-+ * Should check for availability of space to write upto the offset
-+ * affects only the curr_offset; last_offset untouched
-+ * Keep it simple: Only allow multiples of PAGE_SIZE for now
++/*
++ * Invoked when dumping is done. This is the time to put things back
++ * (i.e. undo the effects of dump_block_silence) so the device is
++ * available for normal use.
+ */
-+int dump_mem_seek(struct dump_dev *dev, loff_t offset)
++static int
++dump_block_resume(struct dump_dev *dev)
+{
-+ struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
-+
-+ if (offset & (PAGE_SIZE - 1))
-+ return -EINVAL; /* allow page size units only for now */
-+
-+ /* Are we exceeding available space ? */
-+ if (offset > dump_mdev->last_offset) {
-+ printk("dump_mem_seek failed for offset 0x%llx\n",
-+ offset);
-+ return -ENOSPC;
-+ }
-+
-+ dump_mdev->ddev.curr_offset = offset;
++ __dump_irq_restore();
+ return 0;
+}
+
-+struct dump_dev_ops dump_memdev_ops = {
-+ .open = dump_mem_open,
-+ .release = dump_mem_release,
-+ .silence = dump_mem_silence,
-+ .resume = dump_mem_resume,
-+ .seek = dump_mem_seek,
-+ .write = dump_mem_write,
-+ .read = NULL, /* not implemented at the moment */
-+ .ready = dump_mem_ready
-+};
-+
-+static struct dump_memdev default_dump_memdev = {
-+ .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops,
-+ .device_id = 0x14}
-+ /* assume the rest of the fields are zeroed by default */
-+};
-+
-+/* may be overwritten if a previous dump exists */
-+struct dump_memdev *dump_memdev = &default_dump_memdev;
+
-Index: linux-2.6.10/drivers/dump/dump_blockdev.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_blockdev.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_blockdev.c 2005-04-05 16:47:53.945204800 +0800
-@@ -0,0 +1,469 @@
+/*
-+ * Implements the dump driver interface for saving a dump to
-+ * a block device through the kernel's generic low level block i/o
-+ * routines.
-+ *
-+ * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
-+ * Moved original lkcd kiobuf dump i/o code from dump_base.c
-+ * to use generic dump device interfaces
-+ *
-+ * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
-+ * Convert dump i/o to directly use bio instead of kiobuf for 2.5
-+ *
-+ * Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ * Rework to new dumpdev.h structures, implement open/close/
-+ * silence, misc fixes (blocknr removal, bio_add_page usage)
-+ *
-+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 International Business Machines Corp.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/proc_fs.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/blkdev.h>
-+#include <linux/bio.h>
-+#include <asm/hardirq.h>
-+#include <linux/dump.h>
-+#include "dump_methods.h"
-+
-+extern void *dump_page_buf;
-+
-+/* The end_io callback for dump i/o completion */
++ * Seek to the specified offset in the dump device.
++ * Makes sure this is a valid offset, otherwise returns an error.
++ */
+static int
-+dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
++dump_block_seek(struct dump_dev *dev, loff_t off)
+{
-+ struct dump_blockdev *dump_bdev;
-+
-+ if (bio->bi_size) {
-+ /* some bytes still left to transfer */
-+ return 1; /* not complete */
++ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
++ loff_t offset = off + dump_bdev->start_offset;
++
++ if (offset & ( PAGE_SIZE - 1)) {
++ printk("seek: non-page aligned\n");
++ return -EINVAL;
+ }
+
-+ dump_bdev = (struct dump_blockdev *)bio->bi_private;
-+ if (error) {
-+ printk("IO error while writing the dump, aborting\n");
++ if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
++ printk("seek: not sector aligned \n");
++ return -EINVAL;
+ }
+
-+ dump_bdev->err = error;
-+
-+ /* no wakeup needed, since caller polls for completion */
++ if (offset > dump_bdev->limit) {
++ printk("seek: not enough space left on device!\n");
++ return -ENOSPC;
++ }
++ dev->curr_offset = off;
+ return 0;
+}
+
-+/* Check if the dump bio is already mapped to the specified buffer */
++/*
++ * Write out a buffer after checking the device limitations,
++ * sector sizes, etc. Assumes the buffer is in directly mapped
++ * kernel address space (not vmalloc'ed).
++ *
++ * Returns: number of bytes written or -ERRNO.
++ */
+static int
-+dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
-+ int len)
++dump_block_write(struct dump_dev *dev, void *buf,
++ unsigned long len)
+{
-+ struct bio *bio = dev->bio;
-+ unsigned long bsize = 0;
++ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
++ loff_t offset = dev->curr_offset + dump_bdev->start_offset;
++ int retval = -ENOSPC;
+
-+ if (!bio->bi_vcnt)
-+ return 0; /* first time, not mapped */
++ if (offset >= dump_bdev->limit) {
++ printk("write: not enough space left on device!\n");
++ goto out;
++ }
+
++ /* don't write more blocks than our max limit */
++ if (offset + len > dump_bdev->limit)
++ len = dump_bdev->limit - offset;
+
-+ if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
-+ return 0; /* buffer not mapped */
+
-+ bsize = bdev_hardsect_size(bio->bi_bdev);
-+ if ((len & (PAGE_SIZE - 1)) || (len & bsize))
-+ return 0; /* alignment checks needed */
++ retval = dump_block_map(dump_bdev, buf, len);
++ if (retval){
++ printk("write: dump_block_map failed! err %d\n", retval);
++ goto out;
++ }
+
-+ /* quick check to decide if we need to redo bio_add_page */
-+ if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
-+ return 0; /* device may have other restrictions */
++ /*
++ * Write out the data to disk.
++ * Assumes the entire buffer mapped to a single bio, which we can
++ * submit and wait for io completion. In the future, may consider
++	 * increasing the dump buffer size and submitting multiple bios
++ * for better throughput.
++ */
++ dump_bdev->err = -EAGAIN;
++ submit_bio(WRITE, dump_bdev->bio);
+
-+ return 1; /* already mapped */
++ dump_bdev->ddev.curr_offset += len;
++ retval = len;
++ out:
++ return retval;
+}
+
-+/*
-+ * Set up the dump bio for i/o from the specified buffer
-+ * Return value indicates whether the full buffer could be mapped or not
++/*
++ * Name: dump_block_ready()
++ * Func: check if the last dump i/o is over and ready for next request
+ */
+static int
-+dump_block_map(struct dump_blockdev *dev, void *buf, int len)
++dump_block_ready(struct dump_dev *dev, void *buf)
+{
-+ struct page *page = virt_to_page(buf);
-+ struct bio *bio = dev->bio;
-+ unsigned long bsize = 0;
++ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
++ request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
+
-+ bio->bi_bdev = dev->bdev;
-+ bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
-+ bio->bi_idx = 0; /* reset index to the beginning */
++ /* check for io completion */
++ if (dump_bdev->err == -EAGAIN) {
++ q->unplug_fn(q);
++ return -EAGAIN;
++ }
+
-+ if (dump_block_map_valid(dev, page, len)) {
-+ /* already mapped and usable rightaway */
-+ bio->bi_size = len; /* reset size to the whole bio */
-+ bio->bi_vcnt = (len + PAGE_SIZE - 1) / PAGE_SIZE; /* Set the proper vector cnt */
-+ } else {
-+ /* need to map the bio */
-+ bio->bi_size = 0;
-+ bio->bi_vcnt = 0;
-+ bsize = bdev_hardsect_size(bio->bi_bdev);
++ if (dump_bdev->err) {
++ printk("dump i/o err\n");
++ return dump_bdev->err;
++ }
+
-+ /* first a few sanity checks */
-+ if (len < bsize) {
-+ printk("map: len less than hardsect size \n");
-+ return -EINVAL;
-+ }
++ return 0;
++}
+
-+ if ((unsigned long)buf & bsize) {
-+ printk("map: not aligned \n");
-+ return -EINVAL;
-+ }
+
-+ /* assume contig. page aligned low mem buffer( no vmalloc) */
-+ if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
-+ printk("map: invalid buffer alignment!\n");
-+ return -EINVAL;
-+ }
-+ /* finally we can go ahead and map it */
-+ while (bio->bi_size < len)
-+ if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
-+ break;
-+ }
++struct dump_dev_ops dump_blockdev_ops = {
++ .open = dump_block_open,
++ .release = dump_block_release,
++ .silence = dump_block_silence,
++ .resume = dump_block_resume,
++ .seek = dump_block_seek,
++ .write = dump_block_write,
++ /* .read not implemented */
++ .ready = dump_block_ready
++};
+
-+ bio->bi_end_io = dump_bio_end_io;
-+ bio->bi_private = dev;
-+ }
++static struct dump_blockdev default_dump_blockdev = {
++ .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
++ .curr_offset = 0},
++ /*
++	 * leave enough room for the longest swap header possibly
++	 * written by mkswap (likely the largest page size supported by
++ * the arch
++ */
++ .start_offset = DUMP_HEADER_OFFSET,
++ .err = 0
++ /* assume the rest of the fields are zeroed by default */
++};
++
++struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
+
-+ if (bio->bi_size != len) {
-+ printk("map: bio size = %d not enough for len = %d!\n",
-+ bio->bi_size, len);
-+ return -E2BIG;
++static int __init
++dump_blockdev_init(void)
++{
++ if (dump_register_device(&dump_blockdev->ddev) < 0) {
++ printk("block device driver registration failed\n");
++ return -1;
+ }
++
++ printk("block device driver for LKCD registered\n");
+ return 0;
+}
+
-+static void
-+dump_free_bio(struct bio *bio)
-+{
-+ if (bio)
-+ kfree(bio->bi_io_vec);
-+ kfree(bio);
-+}
++static void __exit
++dump_blockdev_cleanup(void)
++{
++ dump_unregister_device(&dump_blockdev->ddev);
++ printk("block device driver for LKCD unregistered\n");
++}
++
++MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
++MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
++MODULE_LICENSE("GPL");
++
++module_init(dump_blockdev_init);
++module_exit(dump_blockdev_cleanup);
+Index: linux-2.6.10/drivers/dump/Makefile
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/Makefile 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/Makefile 2005-04-07 18:13:56.921750424 +0800
+@@ -0,0 +1,22 @@
++#
++# Makefile for the dump device drivers.
++#
++
++dump-y := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o
++ifeq ($(CONFIG_X86_64),)
++ifeq ($(CONFIG_X86),y)
++dump-$(CONFIG_X86) += dump_i386.o
++endif
++endif
++dump-$(CONFIG_ARM) += dump_arm.o
++dump-$(CONFIG_PPC64) += dump_ppc64.o
++dump-$(CONFIG_X86_64) += dump_x8664.o
++dump-$(CONFIG_IA64) += dump_ia64.o
++dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o
++dump-objs += $(dump-y)
++
++obj-$(CONFIG_CRASH_DUMP) += dump.o
++obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o
++obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o
++obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o
++obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o
+Index: linux-2.6.10/drivers/dump/dump_scheme.c
+===================================================================
+--- linux-2.6.10.orig/drivers/dump/dump_scheme.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/drivers/dump/dump_scheme.c 2005-04-07 18:13:56.916751184 +0800
+@@ -0,0 +1,430 @@
++/*
++ * Default single stage dump scheme methods
++ *
++ * Previously a part of dump_base.c
++ *
++ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
++ * Split and rewrote LKCD dump scheme to generic dump method
++ * interfaces
++ * Derived from original code created by
++ * Matt Robinson <yakker@sourceforge.net>)
++ *
++ * Contributions from SGI, IBM, HP, MCL, and others.
++ *
++ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
++ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
++
++/*
++ * Implements the default dump scheme, i.e. single-stage gathering and
++ * saving of dump data directly to the target device, which operates in
++ * a push mode, where the dumping system decides what data it saves
++ * taking into account pre-specified dump config options.
++ *
++ * Aside: The 2-stage dump scheme, where there is a soft-reset between
++ * the gathering and saving phases, also reuses some of these
++ * default routines (see dump_overlay.c)
++ */
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/nmi.h>
++#include <linux/dump.h>
++#include "dump_methods.h"
++
++extern int panic_timeout; /* time before reboot */
+
-+/*
-+ * Prepares the dump device so we can take a dump later.
-+ * The caller is expected to have filled up the dev_id field in the
-+ * block dump dev structure.
-+ *
-+ * At dump time when dump_block_write() is invoked it will be too
-+ * late to recover, so as far as possible make sure obvious errors
-+ * get caught right here and reported back to the caller.
-+ */
-+static int
-+dump_block_open(struct dump_dev *dev, unsigned long arg)
++extern void dump_speedo(int);
++
++/* Default sequencer used during single stage dumping */
++/* Also invoked during stage 2 of soft-boot based dumping */
++int dump_generic_sequencer(void)
+{
-+ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
-+ struct block_device *bdev;
-+ int retval = 0;
-+ struct bio_vec *bvec;
++ struct dump_data_filter *filter = dump_config.dumper->filter;
++ int pass = 0, err = 0, save = 0;
++ int (*action)(unsigned long, unsigned long);
+
-+ /* make sure this is a valid block device */
-+ if (!arg) {
-+ retval = -EINVAL;
-+ goto err;
-+ }
++ /*
++ * We want to save the more critical data areas first in
++ * case we run out of space, encounter i/o failures, or get
++ * interrupted otherwise and have to give up midway
++ * So, run through the passes in increasing order
++ */
++ for (;filter->selector; filter++, pass++)
++ {
++ /* Assumes passes are exclusive (even across dumpers) */
++ /* Requires care when coding the selection functions */
++ if ((save = filter->level_mask & dump_config.level))
++ action = dump_save_data;
++ else
++ action = dump_skip_data;
+
-+ /* Convert it to the new dev_t format */
-+ arg = MKDEV((arg >> OLDMINORBITS), (arg & OLDMINORMASK));
-+
-+ /* get a corresponding block_dev struct for this */
-+ bdev = bdget((dev_t)arg);
-+ if (!bdev) {
-+ retval = -ENODEV;
-+ goto err;
-+ }
++ if ((err = dump_iterator(pass, action, filter)) < 0)
++ break;
+
-+ /* get the block device opened */
-+ if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0))) {
-+ goto err1;
-+ }
++ printk("\n %d dump pages %s of %d each in pass %d\n",
++ err, save ? "saved" : "skipped", (int)DUMP_PAGE_SIZE, pass);
+
-+ if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
-+ == NULL) {
-+ printk("Cannot allocate bio\n");
-+ retval = -ENOMEM;
-+ goto err2;
+ }
+
-+ bio_init(dump_bdev->bio);
++ return (err < 0) ? err : 0;
++}
+
-+ if ((bvec = kmalloc(sizeof(struct bio_vec) *
-+ (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
-+ retval = -ENOMEM;
-+ goto err3;
-+ }
++static inline struct page *dump_get_page(loff_t loc)
++{
+
-+ /* assign the new dump dev structure */
-+ dump_bdev->dev_id = (dev_t)arg;
-+ dump_bdev->bdev = bdev;
++ unsigned long page_index = loc >> PAGE_SHIFT;
+
-+ /* make a note of the limit */
-+ dump_bdev->limit = bdev->bd_inode->i_size;
-+
-+ /* now make sure we can map the dump buffer */
-+ dump_bdev->bio->bi_io_vec = bvec;
-+ dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
++ /* todo: complete this to account for ia64/discontig mem */
++ /* todo: and to check for validity, ram page, no i/o mem etc */
++ /* need to use pfn/physaddr equiv of kern_addr_valid */
+
-+ retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
-+ DUMP_BUFFER_SIZE);
-+
-+ if (retval) {
-+ printk("open: dump_block_map failed, ret %d\n", retval);
-+ goto err3;
-+ }
++ /* Important:
++ * On ARM/XScale systems, the physical address starts from
++ * PHYS_OFFSET, and it may be the case that PHYS_OFFSET != 0.
++ * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the
++ * page index starts from PHYS_PFN_OFFSET. When configuring
++ * filter, filter->start is assigned to 0 in dump_generic_configure.
++ * Here we want to adjust it by adding PHYS_PFN_OFFSET to it!
++ */
++#ifdef CONFIG_ARM
++ page_index += PHYS_PFN_OFFSET;
++#endif
++ if (__dump_page_valid(page_index))
++ return pfn_to_page(page_index);
++ else
++ return NULL;
+
-+ printk("Block device (%d,%d) successfully configured for dumping\n",
-+ MAJOR(dump_bdev->dev_id),
-+ MINOR(dump_bdev->dev_id));
++}
++
++/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
++/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
++int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
++ struct dump_data_filter *filter)
++{
++ /* Todo : fix unit, type */
++ loff_t loc, start, end;
++ int i, count = 0, err = 0;
++ struct page *page;
+
++ /* Todo: Add membanks code */
++ /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
+
-+ /* after opening the block device, return */
-+ return retval;
++ for (i = 0; i < filter->num_mbanks; i++) {
++ start = filter->start[i];
++ end = filter->end[i];
++ for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
++ dump_config.dumper->curr_loc = loc;
++ page = dump_get_page(loc);
++ if (page && filter->selector(pass,
++ (unsigned long) page, DUMP_PAGE_SIZE)) {
++ if ((err = action((unsigned long)page,
++ DUMP_PAGE_SIZE))) {
++ printk("dump_page_iterator: err %d for "
++ "loc 0x%llx, in pass %d\n",
++ err, loc, pass);
++ return err ? err : count;
++ } else
++ count++;
++ }
++ }
++ }
+
-+err3: dump_free_bio(dump_bdev->bio);
-+ dump_bdev->bio = NULL;
-+err2: if (bdev) blkdev_put(bdev);
-+ goto err;
-+err1: if (bdev) bdput(bdev);
-+ dump_bdev->bdev = NULL;
-+err: return retval;
++ return err ? err : count;
+}
+
-+/*
-+ * Close the dump device and release associated resources
-+ * Invoked when unconfiguring the dump device.
++/*
++ * Base function that saves the selected block of data in the dump
++ * Action taken when iterator decides that data needs to be saved
+ */
-+static int
-+dump_block_release(struct dump_dev *dev)
++int dump_generic_save_data(unsigned long loc, unsigned long sz)
+{
-+ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
++ void *buf;
++ void *dump_buf = dump_config.dumper->dump_buf;
++ int left, bytes, ret;
+
-+ /* release earlier bdev if present */
-+ if (dump_bdev->bdev) {
-+ blkdev_put(dump_bdev->bdev);
-+ dump_bdev->bdev = NULL;
++ if ((ret = dump_add_data(loc, sz))) {
++ return ret;
+ }
++ buf = dump_config.dumper->curr_buf;
+
-+ dump_free_bio(dump_bdev->bio);
-+ dump_bdev->bio = NULL;
-+
-+ return 0;
-+}
-+
++ /* If we've filled up the buffer write it out */
++ if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
++ bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
++ if (bytes < DUMP_BUFFER_SIZE) {
++ printk("dump_write_buffer failed %d\n", bytes);
++ return bytes ? -ENOSPC : bytes;
++ }
+
-+/*
-+ * Prepare the dump device for use (silence any ongoing activity
-+ * and quiesce state) when the system crashes.
-+ */
-+static int
-+dump_block_silence(struct dump_dev *dev)
-+{
-+ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
-+ struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
-+ int ret;
++ left -= bytes;
++
++ /* -- A few chores to do from time to time -- */
++ dump_config.dumper->count++;
+
-+ /* If we can't get request queue lock, refuse to take the dump */
-+ if (!spin_trylock(q->queue_lock))
-+ return -EBUSY;
++ if (!(dump_config.dumper->count & 0x3f)) {
++			/* Update the header every once in a while */
++ memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
++ if ((ret = dump_update_header()) < 0) {
++ /* issue warning */
++ return ret;
++ }
++ printk(".");
+
-+ ret = elv_queue_empty(q);
-+ spin_unlock(q->queue_lock);
++ touch_nmi_watchdog();
++ } else if (!(dump_config.dumper->count & 0x7)) {
++ /* Show progress so the user knows we aren't hung */
++ dump_speedo(dump_config.dumper->count >> 3);
++ }
++ /* Todo: Touch/Refresh watchdog */
+
-+ /* For now we assume we have the device to ourselves */
-+ /* Just a quick sanity check */
-+ if (!ret) {
-+ /* Warn the user and move on */
-+ printk(KERN_ALERT "Warning: Non-empty request queue\n");
-+ printk(KERN_ALERT "I/O requests in flight at dump time\n");
-+ }
++ /* --- Done with periodic chores -- */
+
-+ /*
-+ * Move to a softer level of silencing where no spin_lock_irqs
-+ * are held on other cpus
-+ */
-+ dump_silence_level = DUMP_SOFT_SPIN_CPUS;
++ /*
++ * extra bit of copying to simplify verification
++ * in the second kernel boot based scheme
++ */
++ memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
++ DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
+
-+ ret = __dump_irq_enable();
-+ if (ret) {
-+ return ret;
++ /* now adjust the leftover bits back to the top of the page */
++ /* this case would not arise during stage 2 (passthru) */
++ memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
++ if (left) {
++ memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
++ }
++ buf -= DUMP_BUFFER_SIZE;
++ dump_config.dumper->curr_buf = buf;
+ }
-+
-+ printk("Dumping to block device (%d,%d) on CPU %d ...\n",
-+ MAJOR(dump_bdev->dev_id), MINOR(dump_bdev->dev_id),
-+ smp_processor_id());
-+
++
+ return 0;
+}
+
-+/*
-+ * Invoked when dumping is done. This is the time to put things back
-+ * (i.e. undo the effects of dump_block_silence) so the device is
-+ * available for normal use.
-+ */
-+static int
-+dump_block_resume(struct dump_dev *dev)
++int dump_generic_skip_data(unsigned long loc, unsigned long sz)
+{
-+ __dump_irq_restore();
++ /* dummy by default */
+ return 0;
+}
+
-+
-+/*
-+ * Seek to the specified offset in the dump device.
-+ * Makes sure this is a valid offset, otherwise returns an error.
++/*
++ * Common low level routine to write a buffer to current dump device
++ * Expects checks for space etc to have been taken care of by the caller
++ * Operates serially at the moment for simplicity.
++ * TBD/Todo: Consider batching for improved throughput
+ */
-+static int
-+dump_block_seek(struct dump_dev *dev, loff_t off)
++int dump_ll_write(void *buf, unsigned long len)
+{
-+ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
-+ loff_t offset = off + dump_bdev->start_offset;
-+
-+ if (offset & ( PAGE_SIZE - 1)) {
-+ printk("seek: non-page aligned\n");
-+ return -EINVAL;
-+ }
++ long transferred = 0, last_transfer = 0;
++ int ret = 0;
+
-+ if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
-+ printk("seek: not sector aligned \n");
-+ return -EINVAL;
++ /* make sure device is ready */
++ while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
++ if (ret < 0) {
++ printk("dump_dev_ready failed !err %d\n", ret);
++ return ret;
+ }
+
-+ if (offset > dump_bdev->limit) {
-+ printk("seek: not enough space left on device!\n");
-+ return -ENOSPC;
++ while (len) {
++ if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
++ ret = last_transfer;
++ printk("dump_dev_write failed !err %d\n",
++ ret);
++ break;
++ }
++ /* wait till complete */
++ while ((ret = dump_dev_ready(buf)) == -EAGAIN)
++ cpu_relax();
++
++ if (ret < 0) {
++ printk("i/o failed !err %d\n", ret);
++ break;
++ }
++
++ len -= last_transfer;
++ buf += last_transfer;
++ transferred += last_transfer;
+ }
-+ dev->curr_offset = off;
-+ return 0;
++ return (ret < 0) ? ret : transferred;
+}
+
-+/*
-+ * Write out a buffer after checking the device limitations,
-+ * sector sizes, etc. Assumes the buffer is in directly mapped
-+ * kernel address space (not vmalloc'ed).
-+ *
-+ * Returns: number of bytes written or -ERRNO.
-+ */
-+static int
-+dump_block_write(struct dump_dev *dev, void *buf,
-+ unsigned long len)
++/* default writeout routine for single dump device */
++/* writes out the dump data ensuring enough space is left for the end marker */
++int dump_generic_write_buffer(void *buf, unsigned long len)
+{
-+ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
-+ loff_t offset = dev->curr_offset + dump_bdev->start_offset;
-+ int retval = -ENOSPC;
++ long written = 0;
++ int err = 0;
+
-+ if (offset >= dump_bdev->limit) {
-+ printk("write: not enough space left on device!\n");
-+ goto out;
++ /* check for space */
++ if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
++ 2*DUMP_BUFFER_SIZE)) < 0) {
++ printk("dump_write_buffer: insuff space after offset 0x%llx\n",
++ dump_config.dumper->curr_offset);
++ return err;
+ }
++ /* alignment check would happen as a side effect of this */
++ if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
++ return err;
+
-+ /* don't write more blocks than our max limit */
-+ if (offset + len > dump_bdev->limit)
-+ len = dump_bdev->limit - offset;
-+
++ written = dump_ll_write(buf, len);
+
-+ retval = dump_block_map(dump_bdev, buf, len);
-+ if (retval){
-+ printk("write: dump_block_map failed! err %d\n", retval);
-+ goto out;
-+ }
++ /* all or none */
+
-+ /*
-+ * Write out the data to disk.
-+ * Assumes the entire buffer mapped to a single bio, which we can
-+ * submit and wait for io completion. In the future, may consider
-+ * increasing the dump buffer size and submitting multiple bio s
-+ * for better throughput.
-+ */
-+ dump_bdev->err = -EAGAIN;
-+ submit_bio(WRITE, dump_bdev->bio);
++ if (written < len)
++ written = written ? -ENOSPC : written;
++ else
++ dump_config.dumper->curr_offset += len;
+
-+ dump_bdev->ddev.curr_offset += len;
-+ retval = len;
-+ out:
-+ return retval;
++ return written;
+}
+
-+/*
-+ * Name: dump_block_ready()
-+ * Func: check if the last dump i/o is over and ready for next request
-+ */
-+static int
-+dump_block_ready(struct dump_dev *dev, void *buf)
++int dump_generic_configure(unsigned long devid)
+{
-+ struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
-+ request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
++ struct dump_dev *dev = dump_config.dumper->dev;
++ struct dump_data_filter *filter;
++ void *buf;
++ int ret = 0;
+
-+ /* check for io completion */
-+ if (dump_bdev->err == -EAGAIN) {
-+ q->unplug_fn(q);
-+ return -EAGAIN;
++ /* Allocate the dump buffer and initialize dumper state */
++ /* Assume that we get aligned addresses */
++ if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
++ return -ENOMEM;
++
++ if ((unsigned long)buf & (PAGE_SIZE - 1)) {
++ /* sanity check for page aligned address */
++ dump_free_mem(buf);
++ return -ENOMEM; /* fixme: better error code */
+ }
+
-+ if (dump_bdev->err) {
-+ printk("dump i/o err\n");
-+ return dump_bdev->err;
++ /* Initialize the rest of the fields */
++ dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
++ dumper_reset();
++
++ /* Open the dump device */
++ if (!dev)
++ return -ENODEV;
++
++ if ((ret = dev->ops->open(dev, devid))) {
++ return ret;
++ }
++
++ /* Initialise the memory ranges in the dump filter */
++ for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
++ if (!filter->start[0] && !filter->end[0]) {
++ pg_data_t *pgdat;
++ int i = 0;
++ for_each_pgdat(pgdat) {
++ filter->start[i] =
++ (loff_t)pgdat->node_start_pfn << PAGE_SHIFT;
++ filter->end[i] =
++ (loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT;
++ i++;
++ }
++ filter->num_mbanks = i;
++ }
+ }
+
+ return 0;
+}
+
++int dump_generic_unconfigure(void)
++{
++ struct dump_dev *dev = dump_config.dumper->dev;
++ void *buf = dump_config.dumper->dump_buf;
++ int ret = 0;
+
-+struct dump_dev_ops dump_blockdev_ops = {
-+ .open = dump_block_open,
-+ .release = dump_block_release,
-+ .silence = dump_block_silence,
-+ .resume = dump_block_resume,
-+ .seek = dump_block_seek,
-+ .write = dump_block_write,
-+ /* .read not implemented */
-+ .ready = dump_block_ready
-+};
++ pr_debug("Generic unconfigure\n");
++ /* Close the dump device */
++ if (dev && (ret = dev->ops->release(dev)))
++ return ret;
+
-+static struct dump_blockdev default_dump_blockdev = {
-+ .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
-+ .curr_offset = 0},
-+ /*
-+ * leave enough room for the longest swap header possibly written
-+ * written by mkswap (likely the largest page size supported by
-+ * the arch
-+ */
-+ .start_offset = DUMP_HEADER_OFFSET,
-+ .err = 0
-+ /* assume the rest of the fields are zeroed by default */
-+};
++ printk("Closed dump device\n");
+
-+struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
++ if (buf)
++ dump_free_mem((buf - DUMP_PAGE_SIZE));
++
++ dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
++ pr_debug("Released dump buffer\n");
+
-+static int __init
-+dump_blockdev_init(void)
-+{
-+ if (dump_register_device(&dump_blockdev->ddev) < 0) {
-+ printk("block device driver registration failed\n");
-+ return -1;
-+ }
-+
-+ printk("block device driver for LKCD registered\n");
+ return 0;
+}
+
-+static void __exit
-+dump_blockdev_cleanup(void)
-+{
-+ dump_unregister_device(&dump_blockdev->ddev);
-+ printk("block device driver for LKCD unregistered\n");
-+}
++#ifdef CONFIG_DISCONTIGMEM
++
++void dump_reconfigure_mbanks(void)
++{
++ pg_data_t *pgdat;
++ loff_t start, end, loc, loc_end;
++ int i=0;
++ struct dump_data_filter *filter = dump_config.dumper->filter;
++
++ for_each_pgdat(pgdat) {
++
++ start = (loff_t)(pgdat->node_start_pfn << PAGE_SHIFT);
++ end = ((loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT);
++ for(loc = start; loc < end; loc += (DUMP_PAGE_SIZE)) {
++
++ if(!(__dump_page_valid(loc >> PAGE_SHIFT)))
++ continue;
++
++ /* We found a valid page. This is the start */
++ filter->start[i] = loc;
++
++ /* Now loop here till you find the end */
++ for(loc_end = loc; loc_end < end; loc_end += (DUMP_PAGE_SIZE)) {
++
++ if(__dump_page_valid(loc_end >> PAGE_SHIFT)) {
++ /* This page could very well be the last page */
++ filter->end[i] = loc_end;
++ continue;
++ }
++ break;
++ }
++ i++;
++ loc = loc_end;
++ }
++ }
++ filter->num_mbanks = i;
+
-+MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-+MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
-+MODULE_LICENSE("GPL");
++ /* Propagate memory bank information to other filters */
++ for (filter = dump_config.dumper->filter, filter++ ;filter->selector; filter++) {
++ for(i = 0; i < dump_config.dumper->filter->num_mbanks; i++) {
++ filter->start[i] = dump_config.dumper->filter->start[i];
++ filter->end[i] = dump_config.dumper->filter->end[i];
++ filter->num_mbanks = dump_config.dumper->filter->num_mbanks;
++ }
++ }
++}
++#endif
+
-+module_init(dump_blockdev_init);
-+module_exit(dump_blockdev_cleanup);
-Index: linux-2.6.10/drivers/dump/dump_fmt.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_fmt.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_fmt.c 2005-04-05 16:47:53.941205408 +0800
-@@ -0,0 +1,407 @@
-+/*
-+ * Implements the routines which handle the format specific
-+ * aspects of dump for the default dump format.
-+ *
-+ * Used in single stage dumping and stage 1 of soft-boot based dumping
-+ * Saves data in LKCD (lcrash) format
-+ *
-+ * Previously a part of dump_base.c
-+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ * Split off and reshuffled LKCD dump format code around generic
-+ * dump method interfaces.
-+ *
-+ * Derived from original code created by
-+ * Matt Robinson <yakker@sourceforge.net>)
-+ *
-+ * Contributions from SGI, IBM, HP, MCL, and others.
-+ *
-+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 International Business Machines Corp.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
++/* Set up the default dump scheme */
+
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/time.h>
-+#include <linux/sched.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/dump.h>
-+#include <asm/dump.h>
-+#include "dump_methods.h"
++struct dump_scheme_ops dump_scheme_singlestage_ops = {
++ .configure = dump_generic_configure,
++ .unconfigure = dump_generic_unconfigure,
++ .sequencer = dump_generic_sequencer,
++ .iterator = dump_page_iterator,
++ .save_data = dump_generic_save_data,
++ .skip_data = dump_generic_skip_data,
++ .write_buffer = dump_generic_write_buffer,
++};
+
-+/*
-+ * SYSTEM DUMP LAYOUT
-+ *
-+ * System dumps are currently the combination of a dump header and a set
-+ * of data pages which contain the system memory. The layout of the dump
-+ * (for full dumps) is as follows:
-+ *
-+ * +-----------------------------+
-+ * | generic dump header |
-+ * +-----------------------------+
-+ * | architecture dump header |
-+ * +-----------------------------+
-+ * | page header |
-+ * +-----------------------------+
-+ * | page data |
-+ * +-----------------------------+
-+ * | page header |
-+ * +-----------------------------+
-+ * | page data |
-+ * +-----------------------------+
-+ * | | |
-+ * | | |
-+ * | | |
-+ * | | |
-+ * | V |
-+ * +-----------------------------+
-+ * | PAGE_END header |
-+ * +-----------------------------+
-+ *
-+ * There are two dump headers, the first which is architecture
-+ * independent, and the other which is architecture dependent. This
-+ * allows different architectures to dump different data structures
-+ * which are specific to their chipset, CPU, etc.
-+ *
-+ * After the dump headers come a succession of dump page headers along
-+ * with dump pages. The page header contains information about the page
-+ * size, any flags associated with the page (whether it's compressed or
-+ * not), and the address of the page. After the page header is the page
-+ * data, which is either compressed (or not). Each page of data is
-+ * dumped in succession, until the final dump header (PAGE_END) is
-+ * placed at the end of the dump, assuming the dump device isn't out
-+ * of space.
-+ *
-+ * This mechanism allows for multiple compression types, different
-+ * types of data structures, different page ordering, etc., etc., etc.
-+ * It's a very straightforward mechanism for dumping system memory.
-+ */
++struct dump_scheme dump_scheme_singlestage = {
++ .name = "single-stage",
++ .ops = &dump_scheme_singlestage_ops
++};
+
-+struct __dump_header dump_header; /* the primary dump header */
-+struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
++/* The single stage dumper comprising all these */
++struct dumper dumper_singlestage = {
++ .name = "single-stage",
++ .scheme = &dump_scheme_singlestage,
++ .fmt = &dump_fmt_lcrash,
++ .compress = &dump_none_compression,
++ .filter = dump_filter_table,
++ .dev = NULL,
++};
+
-+/* Replace a runtime sanity check on the DUMP_BUFFER_SIZE with a
-+ * compile-time check. The compile_time_assertions routine will not
-+ * compile if the assertion is false.
-+ *
-+ * If you fail this assert you are most likely on a large machine and
-+ * should use a special 6.0.0 version of LKCD or a version > 7.0.0. See
-+ * the LKCD website for more information.
-+ */
+Index: linux-2.6.10/drivers/Makefile
+===================================================================
+--- linux-2.6.10.orig/drivers/Makefile 2004-12-25 05:36:00.000000000 +0800
++++ linux-2.6.10/drivers/Makefile 2005-04-07 18:13:56.936748144 +0800
+@@ -60,3 +60,4 @@
+ obj-$(CONFIG_CPU_FREQ) += cpufreq/
+ obj-$(CONFIG_MMC) += mmc/
+ obj-y += firmware/
++obj-$(CONFIG_CRASH_DUMP) += dump/
+Index: linux-2.6.10/drivers/block/ll_rw_blk.c
+===================================================================
+--- linux-2.6.10.orig/drivers/block/ll_rw_blk.c 2005-04-07 14:55:41.000000000 +0800
++++ linux-2.6.10/drivers/block/ll_rw_blk.c 2005-04-07 18:17:16.782366992 +0800
+@@ -28,6 +28,7 @@
+ #include <linux/slab.h>
+ #include <linux/swap.h>
+ #include <linux/writeback.h>
++#include <linux/dump.h>
+
+ /*
+ * for max sense size
+@@ -2624,13 +2625,15 @@
+ * bi_sector for remaps as it sees fit. So the values of these fields
+ * should NOT be depended on after the call to generic_make_request.
+ */
++extern unsigned long dump_oncpu;
+ void generic_make_request(struct bio *bio)
+ {
+ request_queue_t *q;
+ sector_t maxsector;
+ int ret, nr_sectors = bio_sectors(bio);
+
+- might_sleep();
++ if (likely(!dump_oncpu))
++ might_sleep();
+ /* Test device or partition size, when known. */
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+ if (maxsector) {
+Index: linux-2.6.10/mm/bootmem.c
+===================================================================
+--- linux-2.6.10.orig/mm/bootmem.c 2004-12-25 05:34:30.000000000 +0800
++++ linux-2.6.10/mm/bootmem.c 2005-04-07 18:13:56.780771856 +0800
+@@ -26,6 +26,7 @@
+ */
+ unsigned long max_low_pfn;
+ unsigned long min_low_pfn;
++EXPORT_SYMBOL(min_low_pfn);
+ unsigned long max_pfn;
+
+ EXPORT_SYMBOL(max_pfn); /* This is exported so
+@@ -284,6 +285,7 @@
+ if (j + 16 < BITS_PER_LONG)
+ prefetchw(page + j + 16);
+ __ClearPageReserved(page + j);
++ set_page_count(page + j, 1);
+ }
+ __free_pages(page, ffs(BITS_PER_LONG)-1);
+ i += BITS_PER_LONG;
+Index: linux-2.6.10/mm/page_alloc.c
+===================================================================
+--- linux-2.6.10.orig/mm/page_alloc.c 2005-04-06 23:38:35.000000000 +0800
++++ linux-2.6.10/mm/page_alloc.c 2005-04-07 18:13:56.794769728 +0800
+@@ -47,6 +47,11 @@
+ EXPORT_SYMBOL(totalram_pages);
+ EXPORT_SYMBOL(nr_swap_pages);
+
++#ifdef CONFIG_CRASH_DUMP_MODULE
++/* Exported so that modules can use the 'for_each_pgdat' macro. */
++EXPORT_SYMBOL(pgdat_list);
++#endif
+
-+#define COMPILE_TIME_ASSERT(const_expr) \
-+ switch(0){case 0: case (const_expr):;}
+ /*
+ * Used by page_zone() to look up the address of the struct zone whose
+ * id is encoded in the upper bits of page->flags
+@@ -281,8 +286,11 @@
+ arch_free_page(page, order);
+
+ mod_page_state(pgfree, 1 << order);
+- for (i = 0 ; i < (1 << order) ; ++i)
++ for (i = 0 ; i < (1 << order) ; ++i){
++ if (unlikely(i))
++ __put_page(page + i);
+ free_pages_check(__FUNCTION__, page + i);
++ }
+ list_add(&page->lru, &list);
+ kernel_map_pages(page, 1<<order, 0);
+ free_pages_bulk(page_zone(page), 1, &list, order);
+@@ -322,44 +330,34 @@
+ return page;
+ }
+
+-static inline void set_page_refs(struct page *page, int order)
+-{
+-#ifdef CONFIG_MMU
+- set_page_count(page, 1);
+-#else
+- int i;
+-
+- /*
+- * We need to reference all the pages for this order, otherwise if
+- * anyone accesses one of the pages with (get/put) it will be freed.
+- */
+- for (i = 0; i < (1 << order); i++)
+- set_page_count(page+i, 1);
+-#endif /* CONFIG_MMU */
+-}
+-
+ /*
+ * This page is about to be returned from the page allocator
+ */
+-static void prep_new_page(struct page *page, int order)
++static void prep_new_page(struct page *_page, int order)
+ {
+- if (page->mapping || page_mapped(page) ||
+- (page->flags & (
+- 1 << PG_private |
+- 1 << PG_locked |
+- 1 << PG_lru |
+- 1 << PG_active |
+- 1 << PG_dirty |
+- 1 << PG_reclaim |
+- 1 << PG_swapcache |
+- 1 << PG_writeback )))
++ int i;
+
-+static inline void compile_time_assertions(void)
-+{
-+ COMPILE_TIME_ASSERT((sizeof(struct __dump_header) +
-+ sizeof(struct __dump_header_asm)) <= DUMP_BUFFER_SIZE);
-+}
++ for(i = 0; i < (1 << order); i++){
++ struct page *page = _page + i;
+
-+/*
-+ * Set up common header fields (mainly the arch indep section)
-+ * Per-cpu state is handled by lcrash_save_context
-+ * Returns the size of the header in bytes.
-+ */
-+static int lcrash_init_dump_header(const char *panic_str)
-+{
-+ struct timeval dh_time;
-+ u64 temp_memsz = dump_header.dh_memory_size;
++ if (page->mapping || page_mapped(page) ||
++ (page->flags & (
++ 1 << PG_private |
++ 1 << PG_locked |
++ 1 << PG_lru |
++ 1 << PG_active |
++ 1 << PG_dirty |
++ 1 << PG_reclaim |
++ 1 << PG_swapcache |
++ 1 << PG_writeback )))
+ bad_page(__FUNCTION__, page);
+
+- page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+- 1 << PG_referenced | 1 << PG_arch_1 |
+- 1 << PG_checked | 1 << PG_mappedtodisk);
+- page->private = 0;
+- set_page_refs(page, order);
++ page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
++ 1 << PG_referenced | 1 << PG_arch_1 |
++ 1 << PG_checked | 1 << PG_mappedtodisk);
++ page->private = 0;
++ set_page_count(page, 1);
++ }
+ }
+
+ /*
+Index: linux-2.6.10/arch/ia64/Kconfig.debug
+===================================================================
+--- linux-2.6.10.orig/arch/ia64/Kconfig.debug 2004-12-25 05:34:32.000000000 +0800
++++ linux-2.6.10/arch/ia64/Kconfig.debug 2005-04-07 18:13:56.459820648 +0800
+@@ -2,6 +2,65 @@
+
+ source "lib/Kconfig.debug"
+
++config CRASH_DUMP
++ tristate "Crash dump support (EXPERIMENTAL)"
++ depends on EXPERIMENTAL
++ default n
++ ---help---
++ Say Y here to enable saving an image of system memory when a panic
++ or other error occurs. Dumps can also be forced with the SysRq+d
++ key if MAGIC_SYSRQ is enabled.
+
-+ /* initialize the dump headers to zero */
-+ /* save dha_stack pointer because it may contains pointer for stack! */
-+ memset(&dump_header, 0, sizeof(dump_header));
-+ memset(&dump_header_asm, 0,
-+ offsetof(struct __dump_header_asm, dha_stack));
-+ memset(&dump_header_asm.dha_stack+1, 0,
-+ sizeof(dump_header_asm) -
-+ offsetof(struct __dump_header_asm, dha_stack) -
-+ sizeof(dump_header_asm.dha_stack));
-+ dump_header.dh_memory_size = temp_memsz;
++config KERNTYPES
++ bool
++ depends on CRASH_DUMP
++ default y
+
-+ /* configure dump header values */
-+ dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
-+ dump_header.dh_version = DUMP_VERSION_NUMBER;
-+ dump_header.dh_memory_start = PAGE_OFFSET;
-+ dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
-+ dump_header.dh_header_size = sizeof(struct __dump_header);
-+ dump_header.dh_page_size = PAGE_SIZE;
-+ dump_header.dh_dump_level = dump_config.level;
-+ dump_header.dh_current_task = (unsigned long) current;
-+ dump_header.dh_dump_compress = dump_config.dumper->compress->
-+ compress_type;
-+ dump_header.dh_dump_flags = dump_config.flags;
-+ dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
++config CRASH_DUMP_BLOCKDEV
++ tristate "Crash dump block device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps directly to a disk device.
+
-+#if DUMP_DEBUG >= 6
-+ dump_header.dh_num_bytes = 0;
-+#endif
-+ dump_header.dh_num_dump_pages = 0;
-+ do_gettimeofday(&dh_time);
-+ dump_header.dh_time.tv_sec = dh_time.tv_sec;
-+ dump_header.dh_time.tv_usec = dh_time.tv_usec;
++config CRASH_DUMP_NETDEV
++ tristate "Crash dump network device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps over a network device.
+
-+ memcpy((void *)&(dump_header.dh_utsname_sysname),
-+ (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
-+ memcpy((void *)&(dump_header.dh_utsname_nodename),
-+ (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
-+ memcpy((void *)&(dump_header.dh_utsname_release),
-+ (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
-+ memcpy((void *)&(dump_header.dh_utsname_version),
-+ (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
-+ memcpy((void *)&(dump_header.dh_utsname_machine),
-+ (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
-+ memcpy((void *)&(dump_header.dh_utsname_domainname),
-+ (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);
++config CRASH_DUMP_MEMDEV
++ bool "Crash dump staged memory driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow crash dumps to be saved temporarily in spare
++ memory pages, from which they would then be written out to
++ disk later.
+
-+ if (panic_str) {
-+ memcpy((void *)&(dump_header.dh_panic_string),
-+ (const void *)panic_str, DUMP_PANIC_LEN);
-+ }
++config CRASH_DUMP_SOFTBOOT
++ bool "Save crash dump across a soft reboot"
++ depends on CRASH_DUMP_MEMDEV
++ help
++ Say Y to allow a crash dump to be preserved in memory
++ pages across a soft reboot and written out to disk
++ thereafter. For this to work, CRASH_DUMP must be
++ configured as part of the kernel (not as a module).
+
-+ dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
-+ dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
-+ dump_header_asm.dha_header_size = sizeof(dump_header_asm);
-+#ifdef CONFIG_ARM
-+ dump_header_asm.dha_physaddr_start = PHYS_OFFSET;
-+#endif
++config CRASH_DUMP_COMPRESS_RLE
++ tristate "Crash dump RLE compression"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with Run Length Encoding compression.
+
-+ dump_header_asm.dha_smp_num_cpus = num_online_cpus();
-+ pr_debug("smp_num_cpus in header %d\n",
-+ dump_header_asm.dha_smp_num_cpus);
++config CRASH_DUMP_COMPRESS_GZIP
++ tristate "Crash dump GZIP compression"
++ select ZLIB_INFLATE
++ select ZLIB_DEFLATE
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with GNU zip (gzip) compression.
+
-+ dump_header_asm.dha_dumping_cpu = smp_processor_id();
-+
-+ return sizeof(dump_header) + sizeof(dump_header_asm);
-+}
+
+
-+int dump_lcrash_configure_header(const char *panic_str,
-+ const struct pt_regs *regs)
-+{
-+ int retval = 0;
+ choice
+ prompt "Physical memory granularity"
+ default IA64_GRANULE_64MB
+Index: linux-2.6.10/arch/ia64/kernel/irq.c
+===================================================================
+--- linux-2.6.10.orig/arch/ia64/kernel/irq.c 2004-12-25 05:35:27.000000000 +0800
++++ linux-2.6.10/arch/ia64/kernel/irq.c 2005-04-07 18:13:56.501814264 +0800
+@@ -933,7 +933,11 @@
+
+ static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
++#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
++cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
++#else
+ static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
++#endif
+
+ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
+
+Index: linux-2.6.10/arch/ia64/kernel/smp.c
+===================================================================
+--- linux-2.6.10.orig/arch/ia64/kernel/smp.c 2004-12-25 05:35:40.000000000 +0800
++++ linux-2.6.10/arch/ia64/kernel/smp.c 2005-04-07 18:13:56.504813808 +0800
+@@ -31,6 +31,10 @@
+ #include <linux/efi.h>
+ #include <linux/bitops.h>
+
++#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
++#include <linux/dump.h>
++#endif
+
-+ dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);
+ #include <asm/atomic.h>
+ #include <asm/current.h>
+ #include <asm/delay.h>
+@@ -67,6 +71,11 @@
+ #define IPI_CALL_FUNC 0
+ #define IPI_CPU_STOP 1
+
++#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
++#define IPI_DUMP_INTERRUPT 4
++ int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
++#endif
+
-+ /* capture register states for all processors */
-+ dump_save_this_cpu(regs);
-+ __dump_save_other_cpus(); /* side effect:silence cpus */
+ /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
+ static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+
+@@ -84,7 +93,9 @@
+ spin_unlock_irq(&call_lock);
+ }
+
+-static void
+
-+ /* configure architecture-specific dump header values */
-+ if ((retval = __dump_configure_header(regs)))
-+ return retval;
++/* Changed stop_this_cpu() from static to global so the dump code can call it. */
++void
+ stop_this_cpu (void)
+ {
+ /*
+@@ -155,6 +166,15 @@
+ case IPI_CPU_STOP:
+ stop_this_cpu();
+ break;
++#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
++ case IPI_DUMP_INTERRUPT:
++ if( dump_ipi_function_ptr != NULL ) {
++ if (!dump_ipi_function_ptr(regs)) {
++ printk(KERN_ERR "(*dump_ipi_function_ptr)(): rejected IPI_DUMP_INTERRUPT\n");
++ }
++ }
++ break;
++#endif
+
+ default:
+ printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+@@ -369,9 +389,17 @@
+ {
+ send_IPI_allbutself(IPI_CPU_STOP);
+ }
++EXPORT_SYMBOL(smp_send_stop);
+
+ int __init
+ setup_profiling_timer (unsigned int multiplier)
+ {
+ return -EINVAL;
+ }
+
-+ dump_config.dumper->header_dirty++;
-+ return 0;
-+}
-+/* save register and task context */
-+void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
-+ struct task_struct *tsk)
++#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
++void dump_send_ipi(void)
+{
-+ /* This level of abstraction might be redundantly redundant */
-+ __dump_save_context(cpu, regs, tsk);
++ send_IPI_allbutself(IPI_DUMP_INTERRUPT);
+}
++#endif
+Index: linux-2.6.10/arch/ia64/kernel/traps.c
+===================================================================
+--- linux-2.6.10.orig/arch/ia64/kernel/traps.c 2004-12-25 05:35:39.000000000 +0800
++++ linux-2.6.10/arch/ia64/kernel/traps.c 2005-04-07 18:13:56.475818216 +0800
+@@ -21,6 +21,8 @@
+ #include <asm/intrinsics.h>
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
++#include <asm/nmi.h>
++#include <linux/dump.h>
+
+ extern spinlock_t timerlist_lock;
+
+@@ -89,6 +91,7 @@
+ printk("%s[%d]: %s %ld [%d]\n",
+ current->comm, current->pid, str, err, ++die_counter);
+ show_regs(regs);
++ dump((char *)str, regs);
+ } else
+ printk(KERN_ERR "Recursive die() failure, output suppressed\n");
+
+Index: linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c
+===================================================================
+--- linux-2.6.10.orig/arch/ia64/kernel/ia64_ksyms.c 2005-04-06 23:38:35.000000000 +0800
++++ linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c 2005-04-07 18:13:56.485816696 +0800
+@@ -7,7 +7,6 @@
+
+ #include <linux/config.h>
+ #include <linux/module.h>
+-
+ #include <linux/string.h>
+ EXPORT_SYMBOL(memset);
+ EXPORT_SYMBOL(memchr);
+@@ -28,6 +27,9 @@
+ EXPORT_SYMBOL(strstr);
+ EXPORT_SYMBOL(strpbrk);
+
++#include <linux/syscalls.h>
++EXPORT_SYMBOL(sys_ioctl);
+
-+/* write out the header */
-+int dump_write_header(void)
-+{
-+ int retval = 0, size;
-+ void *buf = dump_config.dumper->dump_buf;
-+
-+ /* accounts for DUMP_HEADER_OFFSET if applicable */
-+ if ((retval = dump_dev_seek(0))) {
-+ printk("Unable to seek to dump header offset: %d\n",
-+ retval);
-+ return retval;
-+ }
+ #include <asm/checksum.h>
+ EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
+
+@@ -125,3 +127,21 @@
+ # endif
+ # endif
+ #endif
+
-+ memcpy(buf, (void *)&dump_header, sizeof(dump_header));
-+ size = sizeof(dump_header);
-+ memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
-+ size += sizeof(dump_header_asm);
-+ size = PAGE_ALIGN(size);
-+ retval = dump_ll_write(buf , size);
++#include <asm/hw_irq.h>
+
-+ if (retval < size)
-+ return (retval >= 0) ? ENOSPC : retval;
-+ return 0;
-+}
++#ifdef CONFIG_CRASH_DUMP_MODULE
++#ifdef CONFIG_SMP
++extern irq_desc_t _irq_desc[NR_IRQS];
++extern cpumask_t irq_affinity[NR_IRQS];
++extern void stop_this_cpu(void *);
++extern int (*dump_ipi_function_ptr)(struct pt_regs *);
++extern void dump_send_ipi(void);
++EXPORT_SYMBOL(_irq_desc);
++EXPORT_SYMBOL(irq_affinity);
++EXPORT_SYMBOL(stop_this_cpu);
++EXPORT_SYMBOL(dump_send_ipi);
++EXPORT_SYMBOL(dump_ipi_function_ptr);
++#endif
++#endif
+
-+int dump_generic_update_header(void)
+Index: linux-2.6.10/arch/i386/mm/init.c
+===================================================================
+--- linux-2.6.10.orig/arch/i386/mm/init.c 2005-04-07 18:13:54.785075248 +0800
++++ linux-2.6.10/arch/i386/mm/init.c 2005-04-07 18:13:56.405828856 +0800
+@@ -244,6 +244,13 @@
+ return 0;
+ }
+
++/* To enable modules to check if a page is in RAM */
++int pfn_is_ram(unsigned long pfn)
+{
-+ int err = 0;
-+
-+ if (dump_config.dumper->header_dirty) {
-+ if ((err = dump_write_header())) {
-+ printk("dump write header failed !err %d\n", err);
-+ } else {
-+ dump_config.dumper->header_dirty = 0;
-+ }
-+ }
-+
-+ return err;
++ return (page_is_ram(pfn));
+}
+
-+static inline int is_curr_stack_page(struct page *page, unsigned long size)
-+{
-+ unsigned long thread_addr = (unsigned long)current_thread_info();
-+ unsigned long addr = (unsigned long)page_address(page);
-+
-+ return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
-+ && (addr + size > thread_addr);
-+}
+
-+static inline int is_dump_page(struct page *page, unsigned long size)
-+{
-+ unsigned long addr = (unsigned long)page_address(page);
-+ unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
+ #ifdef CONFIG_HIGHMEM
+ pte_t *kmap_pte;
+ pgprot_t kmap_prot;
+Index: linux-2.6.10/arch/i386/Kconfig.debug
+===================================================================
+--- linux-2.6.10.orig/arch/i386/Kconfig.debug 2005-04-07 00:35:34.000000000 +0800
++++ linux-2.6.10/arch/i386/Kconfig.debug 2005-04-07 18:13:56.403829160 +0800
+@@ -2,6 +2,63 @@
+
+ source "lib/Kconfig.debug"
+
++config CRASH_DUMP
++ tristate "Crash dump support (EXPERIMENTAL)"
++ depends on EXPERIMENTAL
++ default n
++ ---help---
++ Say Y here to enable saving an image of system memory when a panic
++ or other error occurs. Dumps can also be forced with the SysRq+d
++ key if MAGIC_SYSRQ is enabled.
+
-+ return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
-+ && (addr + size > dump_buf);
-+}
++config KERNTYPES
++ bool
++ depends on CRASH_DUMP
++ default y
+
-+int dump_allow_compress(struct page *page, unsigned long size)
-+{
-+ /*
-+ * Don't compress the page if any part of it overlaps
-+ * with the current stack or dump buffer (since the contents
-+ * in these could be changing while compression is going on)
-+ */
-+ return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
-+}
++config CRASH_DUMP_BLOCKDEV
++ tristate "Crash dump block device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps directly to a disk device.
+
-+void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
-+ unsigned long sz)
-+{
-+ memset(dp, sizeof(struct __dump_page), 0);
-+ dp->dp_flags = 0;
-+ dp->dp_size = 0;
-+ if (sz > 0)
-+ dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
++config CRASH_DUMP_NETDEV
++ tristate "Crash dump network device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps over a network device.
+
-+#if DUMP_DEBUG > 6
-+ dp->dp_page_index = dump_header.dh_num_dump_pages;
-+ dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
-+ + DUMP_HEADER_OFFSET; /* ?? */
-+#endif /* DUMP_DEBUG */
-+}
++config CRASH_DUMP_MEMDEV
++ bool "Crash dump staged memory driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow crash dumps to be saved temporarily in spare
++ memory pages, from which they would then be written out to
++ disk later.
+
-+int dump_lcrash_add_data(unsigned long loc, unsigned long len)
-+{
-+ struct page *page = (struct page *)loc;
-+ void *addr, *buf = dump_config.dumper->curr_buf;
-+ struct __dump_page *dp = (struct __dump_page *)buf;
-+ int bytes, size;
++config CRASH_DUMP_SOFTBOOT
++ bool "Save crash dump across a soft reboot"
++ depends on CRASH_DUMP_MEMDEV
++ help
++ Say Y to allow a crash dump to be preserved in memory
++ pages across a soft reboot and written out to disk
++ thereafter. For this to work, CRASH_DUMP must be
++ configured as part of the kernel (not as a module).
+
-+ if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
-+ return -ENOMEM;
++config CRASH_DUMP_COMPRESS_RLE
++ tristate "Crash dump RLE compression"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with Run Length Encoding compression.
+
-+ lcrash_init_pageheader(dp, page, len);
-+ buf += sizeof(struct __dump_page);
++config CRASH_DUMP_COMPRESS_GZIP
++ tristate "Crash dump GZIP compression"
++ select ZLIB_INFLATE
++ select ZLIB_DEFLATE
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with GNU zip (gzip) compression.
+
-+ while (len) {
-+ addr = kmap_atomic(page, KM_DUMP);
-+ size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
-+ /* check for compression */
-+ if (dump_allow_compress(page, bytes)) {
-+ size = dump_compress_data((char *)addr, bytes,
-+ (char *)buf, loc);
-+ }
-+ /* set the compressed flag if the page did compress */
-+ if (size && (size < bytes)) {
-+ dp->dp_flags |= DUMP_DH_COMPRESSED;
-+ } else {
-+ /* compression failed -- default to raw mode */
-+ dp->dp_flags |= DUMP_DH_RAW;
-+ memcpy(buf, addr, bytes);
-+ size = bytes;
-+ }
-+ /* memset(buf, 'A', size); temporary: testing only !! */
-+ kunmap_atomic(addr, KM_DUMP);
-+ dp->dp_size += size;
-+ buf += size;
-+ len -= bytes;
-+ page++;
+ config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ default y
+@@ -15,8 +72,8 @@
+ with klogd/syslogd or the X server. You should normally N here,
+ unless you want to debug such a crash.
+
+-config DEBUG_STACKOVERFLOW
+- bool "Check for stack overflows"
++config DEBUG_STACKOVERFLOW
++ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+
+ config KPROBES
+Index: linux-2.6.10/arch/i386/kernel/smp.c
+===================================================================
+--- linux-2.6.10.orig/arch/i386/kernel/smp.c 2005-04-07 18:13:54.752080264 +0800
++++ linux-2.6.10/arch/i386/kernel/smp.c 2005-04-07 18:13:56.428825360 +0800
+@@ -19,6 +19,7 @@
+ #include <linux/mc146818rtc.h>
+ #include <linux/cache.h>
+ #include <linux/interrupt.h>
++#include <linux/dump.h>
+
+ #include <asm/mtrr.h>
+ #include <asm/tlbflush.h>
+@@ -143,6 +144,13 @@
+ */
+ cfg = __prepare_ICR(shortcut, vector);
+
++ if (vector == DUMP_VECTOR) {
++ /*
++ * Setup DUMP IPI to be delivered as an NMI
++ */
++ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
+ }
+
-+ /* now update the header */
-+#if DUMP_DEBUG > 6
-+ dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
-+#endif
-+ dump_header.dh_num_dump_pages++;
-+ dump_config.dumper->header_dirty++;
-+
-+ dump_config.dumper->curr_buf = buf;
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+@@ -220,6 +228,13 @@
+ * program the ICR
+ */
+ cfg = __prepare_ICR(0, vector);
+
-+ return len;
++ if (vector == DUMP_VECTOR) {
++ /*
++ * Setup DUMP IPI to be delivered as an NMI
++ */
++ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
++ }
+
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+@@ -506,6 +521,11 @@
+
+ static struct call_data_struct * call_data;
+
++void dump_send_ipi(void)
++{
++ send_IPI_allbutself(DUMP_VECTOR);
+}
+
-+int dump_lcrash_update_end_marker(void)
-+{
-+ struct __dump_page *dp =
-+ (struct __dump_page *)dump_config.dumper->curr_buf;
-+ unsigned long left;
-+ int ret = 0;
-+
-+ lcrash_init_pageheader(dp, NULL, 0);
-+ dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
-+
-+ /* now update the header */
-+#if DUMP_DEBUG > 6
-+ dump_header.dh_num_bytes += sizeof(*dp);
+ /*
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
+@@ -561,7 +581,7 @@
+ return 0;
+ }
+
+-static void stop_this_cpu (void * dummy)
++void stop_this_cpu (void * dummy)
+ {
+ /*
+ * Remove this CPU:
+@@ -622,4 +642,3 @@
+ atomic_inc(&call_data->finished);
+ }
+ }
+-
+Index: linux-2.6.10/arch/i386/kernel/traps.c
+===================================================================
+--- linux-2.6.10.orig/arch/i386/kernel/traps.c 2005-04-07 18:13:54.770077528 +0800
++++ linux-2.6.10/arch/i386/kernel/traps.c 2005-04-07 18:13:56.406828704 +0800
+@@ -27,6 +27,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/utsname.h>
+ #include <linux/kprobes.h>
++#include <linux/dump.h>
+
+ #ifdef CONFIG_EISA
+ #include <linux/ioport.h>
+@@ -382,6 +383,7 @@
+ bust_spinlocks(0);
+ die.lock_owner = -1;
+ spin_unlock_irq(&die.lock);
++ dump((char *)str, regs);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+@@ -654,6 +656,7 @@
+ printk(" on CPU%d, eip %08lx, registers:\n",
+ smp_processor_id(), regs->eip);
+ show_registers(regs);
++ dump((char *)msg, regs);
+ printk("console shuts up ...\n");
+ console_silent();
+ spin_unlock(&nmi_print_lock);
+Index: linux-2.6.10/arch/i386/kernel/setup.c
+===================================================================
+--- linux-2.6.10.orig/arch/i386/kernel/setup.c 2004-12-25 05:34:45.000000000 +0800
++++ linux-2.6.10/arch/i386/kernel/setup.c 2005-04-07 18:13:56.427825512 +0800
+@@ -662,6 +662,10 @@
+ */
+ #define LOWMEMSIZE() (0x9f000)
+
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++unsigned long crashdump_addr = 0xdeadbeef;
+#endif
-+ dump_config.dumper->curr_buf += sizeof(*dp);
-+ left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;
+
-+ printk("\n");
+ static void __init parse_cmdline_early (char ** cmdline_p)
+ {
+ char c = ' ', *to = command_line, *from = saved_command_line;
+@@ -823,6 +827,11 @@
+ if (c == ' ' && !memcmp(from, "vmalloc=", 8))
+ __VMALLOC_RESERVE = memparse(from+8, &from);
+
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++ if (c == ' ' && !memcmp(from, "crashdump=", 10))
++ crashdump_addr = memparse(from+10, &from);
++#endif
+
-+ while (left) {
-+ if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
-+ printk("Seek failed at offset 0x%llx\n",
-+ dump_config.dumper->curr_offset);
-+ return ret;
-+ }
+ c = *(from++);
+ if (!c)
+ break;
+@@ -1288,6 +1297,10 @@
+
+ static char * __init machine_specific_memory_setup(void);
+
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++extern void crashdump_reserve(void);
++#endif
+
-+ if (DUMP_BUFFER_SIZE > left)
-+ memset(dump_config.dumper->curr_buf, 'm',
-+ DUMP_BUFFER_SIZE - left);
+ /*
+ * Determine if we were loaded by an EFI loader. If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+@@ -1393,6 +1406,10 @@
+ #endif
+
+
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++ crashdump_reserve(); /* Preserve crash dump state from prev boot */
++#endif
+
-+ if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
-+ DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
-+ return (ret < 0) ? ret : -ENOSPC;
-+ }
+ dmi_scan_machine();
+
+ #ifdef CONFIG_X86_GENERICARCH
+Index: linux-2.6.10/arch/i386/kernel/i386_ksyms.c
+===================================================================
+--- linux-2.6.10.orig/arch/i386/kernel/i386_ksyms.c 2004-12-25 05:35:40.000000000 +0800
++++ linux-2.6.10/arch/i386/kernel/i386_ksyms.c 2005-04-07 18:13:56.429825208 +0800
+@@ -16,6 +16,7 @@
+ #include <linux/tty.h>
+ #include <linux/highmem.h>
+ #include <linux/time.h>
++#include <linux/nmi.h>
+
+ #include <asm/semaphore.h>
+ #include <asm/processor.h>
+@@ -31,6 +32,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/nmi.h>
+ #include <asm/ist.h>
++#include <asm/e820.h>
+ #include <asm/kdebug.h>
+
+ extern void dump_thread(struct pt_regs *, struct user *);
+@@ -192,3 +194,20 @@
+ #endif
+
+ EXPORT_SYMBOL(csum_partial);
+
-+ dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;
-+
-+ if (left > DUMP_BUFFER_SIZE) {
-+ left -= DUMP_BUFFER_SIZE;
-+ memcpy(dump_config.dumper->dump_buf,
-+ dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
-+ dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
-+ } else {
-+ left = 0;
-+ }
-+ }
-+ return 0;
-+}
++#ifdef CONFIG_CRASH_DUMP_MODULE
++#ifdef CONFIG_SMP
++extern irq_desc_t irq_desc[NR_IRQS];
++extern cpumask_t irq_affinity[NR_IRQS];
++extern void stop_this_cpu(void *);
++EXPORT_SYMBOL(irq_desc);
++EXPORT_SYMBOL(irq_affinity);
++EXPORT_SYMBOL(stop_this_cpu);
++EXPORT_SYMBOL(dump_send_ipi);
++#endif
++extern int pfn_is_ram(unsigned long);
++EXPORT_SYMBOL(pfn_is_ram);
++#ifdef ARCH_HAS_NMI_WATCHDOG
++EXPORT_SYMBOL(touch_nmi_watchdog);
++#endif
++#endif
+Index: linux-2.6.10/arch/ppc64/Kconfig.debug
+===================================================================
+--- linux-2.6.10.orig/arch/ppc64/Kconfig.debug 2004-12-25 05:35:27.000000000 +0800
++++ linux-2.6.10/arch/ppc64/Kconfig.debug 2005-04-07 18:13:56.521811224 +0800
+@@ -2,6 +2,64 @@
+
+ source "lib/Kconfig.debug"
+
++config KERNTYPES
++ bool
++ depends on CRASH_DUMP
++ default y
+
++config CRASH_DUMP
++ tristate "Crash dump support"
++ default n
++ ---help---
++ Say Y here to enable saving an image of system memory when a panic
++ or other error occurs. Dumps can also be forced with the SysRq+d
++ key if MAGIC_SYSRQ is enabled.
+
-+/* Default Formatter (lcrash) */
-+struct dump_fmt_ops dump_fmt_lcrash_ops = {
-+ .configure_header = dump_lcrash_configure_header,
-+ .update_header = dump_generic_update_header,
-+ .save_context = dump_lcrash_save_context,
-+ .add_data = dump_lcrash_add_data,
-+ .update_end_marker = dump_lcrash_update_end_marker
-+};
++config CRASH_DUMP_BLOCKDEV
++ tristate "Crash dump block device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps directly to a disk device.
++
++config CRASH_DUMP_NETDEV
++ tristate "Crash dump network device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps over a network device.
+
-+struct dump_fmt dump_fmt_lcrash = {
-+ .name = "lcrash",
-+ .ops = &dump_fmt_lcrash_ops
-+};
++config CRASH_DUMP_MEMDEV
++ bool "Crash dump staged memory driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow intermediate saving of crash dumps in spare
++ memory pages which would then be written out to disk
++ later. Need 'kexec' support for this to work.
++ **** Not supported at present ****
+
-Index: linux-2.6.10/drivers/dump/dump_setup.c
-===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_setup.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_setup.c 2005-04-05 16:47:53.939205712 +0800
-@@ -0,0 +1,923 @@
-+/*
-+ * Standard kernel function entry points for Linux crash dumps.
-+ *
-+ * Created by: Matt Robinson (yakker@sourceforge.net)
-+ * Contributions from SGI, IBM, HP, MCL, and others.
-+ *
-+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
-+ *
-+ * This code is released under version 2 of the GNU GPL.
-+ */
++config CRASH_DUMP_SOFTBOOT
++ bool "Save crash dump across a soft reboot"
++ help
++ Say Y to allow a crash dump to be preserved in memory
++ pages across a soft reboot and written out to disk
++ thereafter. For this to work, CRASH_DUMP must be
++ configured as part of the kernel (not as a module).
++ Need 'kexec' support to use this option.
++ **** Not supported at present ****
+
-+/*
-+ * -----------------------------------------------------------------------
-+ *
-+ * DUMP HISTORY
-+ *
-+ * This dump code goes back to SGI's first attempts at dumping system
-+ * memory on SGI systems running IRIX. A few developers at SGI needed
-+ * a way to take this system dump and analyze it, and created 'icrash',
-+ * or IRIX Crash. The mechanism (the dumps and 'icrash') were used
-+ * by support people to generate crash reports when a system failure
-+ * occurred. This was vital for large system configurations that
-+ * couldn't apply patch after patch after fix just to hope that the
-+ * problems would go away. So the system memory, along with the crash
-+ * dump analyzer, allowed support people to quickly figure out what the
-+ * problem was on the system with the crash dump.
-+ *
-+ * In comes Linux. SGI started moving towards the open source community,
-+ * and upon doing so, SGI wanted to take its support utilities into Linux
-+ * with the hopes that they would end up the in kernel and user space to
-+ * be used by SGI's customers buying SGI Linux systems. One of the first
-+ * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
-+ * Dumps. LKCD comprises of a patch to the kernel to enable system
-+ * dumping, along with 'lcrash', or Linux Crash, to analyze the system
-+ * memory dump. A few additional system scripts and kernel modifications
-+ * are also included to make the dump mechanism and dump data easier to
-+ * process and use.
-+ *
-+ * As soon as LKCD was released into the open source community, a number
-+ * of larger companies started to take advantage of it. Today, there are
-+ * many community members that contribute to LKCD, and it continues to
-+ * flourish and grow as an open source project.
-+ */
++config CRASH_DUMP_COMPRESS_RLE
++ tristate "Crash dump RLE compression"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with Run Length Encoding compression.
+
-+/*
-+ * DUMP TUNABLES (read/write with ioctl, readonly with /proc)
-+ *
-+ * This is the list of system tunables (via /proc) that are available
-+ * for Linux systems. All the read, write, etc., functions are listed
-+ * here. Currently, there are a few different tunables for dumps:
-+ *
-+ * dump_device (used to be dumpdev):
-+ * The device for dumping the memory pages out to. This
-+ * may be set to the primary swap partition for disruptive dumps,
-+ * and must be an unused partition for non-disruptive dumps.
-+ * Todo: In the case of network dumps, this may be interpreted
-+ * as the IP address of the netdump server to connect to.
-+ *
-+ * dump_compress (used to be dump_compress_pages):
-+ * This is the flag which indicates which compression mechanism
-+ * to use. This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
-+ * This is the current set of values:
-+ *
-+ * 0: DUMP_COMPRESS_NONE -- Don't compress any pages.
-+ * 1: DUMP_COMPRESS_RLE -- This uses RLE compression.
-+ * 2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
-+ *
-+ * dump_level:
-+ * The amount of effort the dump module should make to save
-+ * information for post crash analysis. This value is now
-+ * a BITMASK value, not an index:
-+ *
-+ * 0: Do nothing, no dumping. (DUMP_LEVEL_NONE)
-+ *
-+ * 1: Print out the dump information to the dump header, and
-+ * write it out to the dump_device. (DUMP_LEVEL_HEADER)
-+ *
-+ * 2: Write out the dump header and all kernel memory pages.
-+ * (DUMP_LEVEL_KERN)
-+ *
-+ * 4: Write out the dump header and all kernel and user
-+ * memory pages. (DUMP_LEVEL_USED)
-+ *
-+ * 8: Write out the dump header and all conventional/cached
-+ * memory (RAM) pages in the system (kernel, user, free).
-+ * (DUMP_LEVEL_ALL_RAM)
-+ *
-+ * 16: Write out everything, including non-conventional memory
-+ * like firmware, proms, I/O registers, uncached memory.
-+ * (DUMP_LEVEL_ALL)
-+ *
-+ * The dump_level will default to 1.
-+ *
-+ * dump_flags:
-+ * These are the flags to use when talking about dumps. There
-+ * are lots of possibilities. This is a BITMASK value, not an index.
-+ *
-+ * -----------------------------------------------------------------------
-+ */
++config CRASH_DUMP_COMPRESS_GZIP
++ tristate "Crash dump GZIP compression"
++ select ZLIB_INFLATE
++ select ZLIB_DEFLATE
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with Gnu Zip compression.
+
-+#include <linux/kernel.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/fs.h>
+ config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+Index: linux-2.6.10/arch/ppc64/kernel/smp.c
+===================================================================
+--- linux-2.6.10.orig/arch/ppc64/kernel/smp.c 2004-12-25 05:35:23.000000000 +0800
++++ linux-2.6.10/arch/ppc64/kernel/smp.c 2005-04-07 18:13:56.560805296 +0800
+@@ -30,6 +30,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/cache.h>
+ #include <linux/err.h>
+#include <linux/dump.h>
-+#include <linux/ioctl32.h>
-+#include <linux/syscalls.h>
-+#include "dump_methods.h"
-+#include <linux/proc_fs.h>
-+#include <linux/module.h>
-+#include <linux/utsname.h>
-+#include <linux/highmem.h>
-+#include <linux/miscdevice.h>
-+#include <linux/sysrq.h>
-+#include <linux/sysctl.h>
-+#include <linux/nmi.h>
-+#include <linux/init.h>
-+#include <asm/hardirq.h>
-+#include <asm/uaccess.h>
-+
-+
-+/*
-+ * -----------------------------------------------------------------------
-+ * V A R I A B L E S
-+ * -----------------------------------------------------------------------
-+ */
+ #include <linux/sysdev.h>
+ #include <linux/cpu.h>
+
+@@ -71,6 +72,7 @@
+ struct smp_ops_t *smp_ops;
+
+ static volatile unsigned int cpu_callin_map[NR_CPUS];
++static int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
+
+ extern unsigned char stab_array[];
+
+@@ -177,9 +179,16 @@
+ /* spare */
+ break;
+ #endif
+-#ifdef CONFIG_DEBUGGER
++#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
++ || defined(CONFIG_CRASH_DUMP_MODULE)
+ case PPC_MSG_DEBUGGER_BREAK:
+- debugger_ipi(regs);
++ if (dump_ipi_function_ptr) {
++ dump_ipi_function_ptr(regs);
++ }
++#ifdef CONFIG_DEBUGGER
++ else
++ debugger_ipi(regs);
++#endif
+ break;
+ #endif
+ default:
+@@ -201,7 +210,16 @@
+ }
+ #endif
+
+-static void stop_this_cpu(void *dummy)
++void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *))
++{
++ dump_ipi_function_ptr = dump_ipi_callback;
++ if (dump_ipi_callback) {
++ mb();
++ smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
++ }
++}
+
-+/* Dump tunables */
-+struct dump_config dump_config = {
-+ .level = 0,
-+ .flags = 0,
-+ .dump_device = 0,
-+ .dump_addr = 0,
-+ .dumper = NULL
-+};
-+#ifdef CONFIG_ARM
-+static _dump_regs_t all_regs;
++void stop_this_cpu(void *dummy)
+ {
+ local_irq_disable();
+ while (1)
+Index: linux-2.6.10/arch/ppc64/kernel/traps.c
+===================================================================
+--- linux-2.6.10.orig/arch/ppc64/kernel/traps.c 2004-12-25 05:34:47.000000000 +0800
++++ linux-2.6.10/arch/ppc64/kernel/traps.c 2005-04-07 18:13:56.534809248 +0800
+@@ -29,6 +29,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
++#include <linux/dump.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/uaccess.h>
+@@ -116,6 +117,7 @@
+ if (nl)
+ printk("\n");
+ show_regs(regs);
++ dump((char *)str, regs);
+ bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
+
+Index: linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c
+===================================================================
+--- linux-2.6.10.orig/arch/ppc64/kernel/ppc_ksyms.c 2004-12-25 05:34:26.000000000 +0800
++++ linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c 2005-04-07 18:13:56.535809096 +0800
+@@ -159,6 +159,17 @@
+ EXPORT_SYMBOL(get_wchan);
+ EXPORT_SYMBOL(console_drivers);
+
++#ifdef CONFIG_CRASH_DUMP_MODULE
++extern int dump_page_is_ram(unsigned long);
++EXPORT_SYMBOL(dump_page_is_ram);
++#ifdef CONFIG_SMP
++EXPORT_SYMBOL(irq_affinity);
++extern void stop_this_cpu(void *);
++EXPORT_SYMBOL(stop_this_cpu);
++EXPORT_SYMBOL(dump_send_ipi);
++#endif
+#endif
+
-+/* Global variables used in dump.h */
-+/* degree of system freeze when dumping */
-+enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;
+ EXPORT_SYMBOL(tb_ticks_per_usec);
+ EXPORT_SYMBOL(paca);
+ EXPORT_SYMBOL(cur_cpu_spec);
+Index: linux-2.6.10/arch/ppc64/kernel/lmb.c
+===================================================================
+--- linux-2.6.10.orig/arch/ppc64/kernel/lmb.c 2004-12-25 05:34:58.000000000 +0800
++++ linux-2.6.10/arch/ppc64/kernel/lmb.c 2005-04-07 18:13:56.546807424 +0800
+@@ -344,3 +344,31 @@
+
+ return pa;
+ }
+
-+/* Other global fields */
-+extern struct __dump_header dump_header;
-+struct dump_dev *dump_dev = NULL; /* Active dump device */
-+static int dump_compress = 0;
+
-+static u32 dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
-+ unsigned long loc);
-+struct __dump_compress dump_none_compression = {
-+ .compress_type = DUMP_COMPRESS_NONE,
-+ .compress_func = dump_compress_none,
-+ .compress_name = "none",
-+};
++/*
++ * This is the copy of page_is_ram (mm/init.c). The difference is
++ * it identifies all memory holes.
++ */
++int dump_page_is_ram(unsigned long pfn)
++{
++ int i;
++ unsigned long paddr = (pfn << PAGE_SHIFT);
+
-+/* our device operations and functions */
-+static int dump_ioctl(struct inode *i, struct file *f,
-+ unsigned int cmd, unsigned long arg);
++ for (i=0; i < lmb.memory.cnt ;i++) {
++ unsigned long base;
+
-+#ifdef CONFIG_COMPAT
-+static int dw_long(unsigned int, unsigned int, unsigned long, struct file*);
++#ifdef CONFIG_MSCHUNKS
++ base = lmb.memory.region[i].physbase;
++#else
++ base = lmb.memory.region[i].base;
+#endif
++ if ((paddr >= base) &&
++ (paddr < (base + lmb.memory.region[i].size))) {
++ return 1;
++ }
++ }
+
-+static struct file_operations dump_fops = {
-+ .owner = THIS_MODULE,
-+ .ioctl = dump_ioctl,
-+};
-+
-+static struct miscdevice dump_miscdev = {
-+ .minor = CRASH_DUMP_MINOR,
-+ .name = "dump",
-+ .fops = &dump_fops,
-+};
-+MODULE_ALIAS_MISCDEV(CRASH_DUMP_MINOR);
++ return 0;
++}
+
-+/* static variables */
-+static int dump_okay = 0; /* can we dump out to disk? */
-+static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
+Index: linux-2.6.10/arch/ppc64/kernel/xics.c
+===================================================================
+--- linux-2.6.10.orig/arch/ppc64/kernel/xics.c 2004-12-25 05:34:58.000000000 +0800
++++ linux-2.6.10/arch/ppc64/kernel/xics.c 2005-04-07 18:13:56.553806360 +0800
+@@ -421,7 +421,8 @@
+ smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
+ }
+ #endif
+-#ifdef CONFIG_DEBUGGER
++#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
++ || defined(CONFIG_CRASH_DUMP_MODULE)
+ if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
+ &xics_ipi_message[cpu].value)) {
+ mb();
+Index: linux-2.6.10/arch/s390/boot/install.sh
+===================================================================
+--- linux-2.6.10.orig/arch/s390/boot/install.sh 2004-12-25 05:35:01.000000000 +0800
++++ linux-2.6.10/arch/s390/boot/install.sh 2005-04-07 18:13:56.443823080 +0800
+@@ -16,7 +16,8 @@
+ # $1 - kernel version
+ # $2 - kernel image file
+ # $3 - kernel map file
+-# $4 - default install path (blank if root directory)
++# $4 - kernel type file
++# $5 - default install path (blank if root directory)
+ #
+
+ # User may have a custom install script
+@@ -26,13 +27,13 @@
+
+ # Default install - same as make zlilo
+
+-if [ -f $4/vmlinuz ]; then
+- mv $4/vmlinuz $4/vmlinuz.old
++if [ -f $5/vmlinuz ]; then
++ mv $5/vmlinuz $5/vmlinuz.old
+ fi
+
+-if [ -f $4/System.map ]; then
+- mv $4/System.map $4/System.old
++if [ -f $5/System.map ]; then
++ mv $5/System.map $5/System.old
+ fi
+
+-cat $2 > $4/vmlinuz
+-cp $3 $4/System.map
++cat $2 > $5/vmlinuz
++cp $3 $5/System.map
+Index: linux-2.6.10/arch/s390/boot/Makefile
+===================================================================
+--- linux-2.6.10.orig/arch/s390/boot/Makefile 2004-12-25 05:35:49.000000000 +0800
++++ linux-2.6.10/arch/s390/boot/Makefile 2005-04-07 18:13:56.442823232 +0800
+@@ -15,4 +15,4 @@
+
+ install: $(CONFIGURE) $(obj)/image
+ sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
+- System.map Kerntypes "$(INSTALL_PATH)"
++ System.map init/Kerntypes "$(INSTALL_PATH)"
+Index: linux-2.6.10/arch/s390/Kconfig.debug
+===================================================================
+--- linux-2.6.10.orig/arch/s390/Kconfig.debug 2004-12-25 05:34:31.000000000 +0800
++++ linux-2.6.10/arch/s390/Kconfig.debug 2005-04-07 18:13:56.436824144 +0800
+@@ -2,4 +2,13 @@
+
+ source "lib/Kconfig.debug"
+
++config KERNTYPES
++ bool "Kerntypes debugging information"
++ default y
++ ---help---
++ Say Y here to save additional kernel debugging information in the
++ file init/kerntypes.o. This information is used by crash analysis
++ tools such as lcrash to assign structures to kernel addresses.
+
-+/* used for dump compressors */
-+static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
+
-+/* list of registered dump targets */
-+static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
+ endmenu
+Index: linux-2.6.10/arch/x86_64/mm/init.c
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/mm/init.c 2005-04-06 23:38:35.000000000 +0800
++++ linux-2.6.10/arch/x86_64/mm/init.c 2005-04-07 18:13:56.582801952 +0800
+@@ -378,7 +378,7 @@
+ __flush_tlb_all();
+ }
+
+-static inline int page_is_ram (unsigned long pagenr)
++inline int page_is_ram (unsigned long pagenr)
+ {
+ int i;
+
+Index: linux-2.6.10/arch/x86_64/Kconfig.debug
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/Kconfig.debug 2004-12-25 05:34:01.000000000 +0800
++++ linux-2.6.10/arch/x86_64/Kconfig.debug 2005-04-07 18:13:56.572803472 +0800
+@@ -2,6 +2,66 @@
+
+ source "lib/Kconfig.debug"
+
++config CRASH_DUMP
++ tristate "Crash dump support (EXPERIMENTAL)"
++ depends on EXPERIMENTAL
++ default n
++ ---help---
++ Say Y here to enable saving an image of system memory when a panic
++ or other error occurs. Dumps can also be forced with the SysRq+d
++ key if MAGIC_SYSRQ is enabled.
+
-+/* lkcd info structure -- this is used by lcrash for basic system data */
-+struct __lkcdinfo lkcdinfo = {
-+ .ptrsz = (sizeof(void *) * 8),
-+#if defined(__LITTLE_ENDIAN)
-+ .byte_order = __LITTLE_ENDIAN,
-+#else
-+ .byte_order = __BIG_ENDIAN,
-+#endif
-+ .page_shift = PAGE_SHIFT,
-+ .page_size = PAGE_SIZE,
-+ .page_mask = PAGE_MASK,
-+ .page_offset = PAGE_OFFSET,
-+};
++config KERNTYPES
++ bool
++ depends on CRASH_DUMP
++ default y
+
-+/*
-+ * -----------------------------------------------------------------------
-+ * / P R O C T U N A B L E F U N C T I O N S
-+ * -----------------------------------------------------------------------
-+ */
++config CRASH_DUMP_BLOCKDEV
++ tristate "Crash dump block device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps directly to a disk device.
+
-+static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
-+ void __user *buffer, size_t *lenp, loff_t *ppos);
++config CRASH_DUMP_NETDEV
++ tristate "Crash dump network device driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving crash dumps over a network device.
+
-+static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
-+ void __user *buffer, size_t *lenp, loff_t *ppos);
-+/*
-+ * sysctl-tuning infrastructure.
-+ */
-+static ctl_table dump_table[] = {
-+ { .ctl_name = CTL_DUMP_LEVEL,
-+ .procname = DUMP_LEVEL_NAME,
-+ .data = &dump_config.level,
-+ .maxlen = sizeof(int),
-+ .mode = 0444,
-+ .proc_handler = proc_doulonghex, },
++config CRASH_DUMP_MEMDEV
++ bool "Crash dump staged memory driver"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow intermediate saving of crash dumps in spare
++ memory pages which would then be written out to disk
++ later.
+
-+ { .ctl_name = CTL_DUMP_FLAGS,
-+ .procname = DUMP_FLAGS_NAME,
-+ .data = &dump_config.flags,
-+ .maxlen = sizeof(int),
-+ .mode = 0444,
-+ .proc_handler = proc_doulonghex, },
++config CRASH_DUMP_SOFTBOOT
++ bool "Save crash dump across a soft reboot"
++ depends on CRASH_DUMP_MEMDEV
++ help
++ Say Y to allow a crash dump to be preserved in memory
++ pages across a soft reboot and written out to disk
++ thereafter. For this to work, CRASH_DUMP must be
++ configured as part of the kernel (not as a module).
+
-+ { .ctl_name = CTL_DUMP_COMPRESS,
-+ .procname = DUMP_COMPRESS_NAME,
-+ .data = &dump_compress, /* FIXME */
-+ .maxlen = sizeof(int),
-+ .mode = 0444,
-+ .proc_handler = proc_dointvec, },
-+
-+ { .ctl_name = CTL_DUMP_DEVICE,
-+ .procname = DUMP_DEVICE_NAME,
-+ .mode = 0444,
-+ .data = &dump_config.dump_device, /* FIXME */
-+ .maxlen = sizeof(int),
-+ .proc_handler = proc_dump_device },
++config CRASH_DUMP_COMPRESS_RLE
++ tristate "Crash dump RLE compression"
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with Run Length Encoding compression.
+
-+#ifdef CONFIG_CRASH_DUMP_MEMDEV
-+ { .ctl_name = CTL_DUMP_ADDR,
-+ .procname = DUMP_ADDR_NAME,
-+ .mode = 0444,
-+ .data = &dump_config.dump_addr,
-+ .maxlen = sizeof(unsigned long),
-+ .proc_handler = proc_doulonghex },
-+#endif
+
-+ { 0, }
-+};
++config CRASH_DUMP_COMPRESS_GZIP
++ tristate "Crash dump GZIP compression"
++ select ZLIB_INFLATE
++ select ZLIB_DEFLATE
++ depends on CRASH_DUMP
++ help
++ Say Y to allow saving dumps with Gnu Zip compression.
+
-+static ctl_table dump_root[] = {
-+ { .ctl_name = KERN_DUMP,
-+ .procname = "dump",
-+ .mode = 0555,
-+ .child = dump_table },
-+ { 0, }
-+};
+
-+static ctl_table kernel_root[] = {
-+ { .ctl_name = CTL_KERN,
-+ .procname = "kernel",
-+ .mode = 0555,
-+ .child = dump_root, },
-+ { 0, }
-+};
+
-+static struct ctl_table_header *sysctl_header;
+ # !SMP for now because the context switch early causes GPF in segment reloading
+ # and the GS base checking does the wrong thing then, causing a hang.
+ config CHECKING
+Index: linux-2.6.10/arch/x86_64/kernel/smp.c
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/kernel/smp.c 2004-12-25 05:35:50.000000000 +0800
++++ linux-2.6.10/arch/x86_64/kernel/smp.c 2005-04-07 18:13:56.609797848 +0800
+@@ -20,6 +20,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/interrupt.h>
++#include <linux/dump.h>
+
+ #include <asm/mtrr.h>
+ #include <asm/pgalloc.h>
+@@ -151,6 +152,13 @@
+ if (!mm)
+ BUG();
+
++ if (vector == DUMP_VECTOR) {
++ /*
++ * Setup DUMP IPI to be delivered as an NMI
++ */
++ cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
++ }
+
-+/*
-+ * -----------------------------------------------------------------------
-+ * C O M P R E S S I O N F U N C T I O N S
-+ * -----------------------------------------------------------------------
-+ */
+ /*
+ * I'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+@@ -253,6 +261,13 @@
+ send_IPI_allbutself(KDB_VECTOR);
+ }
+
+
-+/*
-+ * Name: dump_compress_none()
-+ * Func: Don't do any compression, period.
-+ */
-+static u32
-+dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
-+ unsigned long loc)
++/* void dump_send_ipi(int (*dump_ipi_handler)(struct pt_regs *)); */
++void dump_send_ipi(void)
+{
-+ /* just return the old size */
-+ return oldsize;
++ send_IPI_allbutself(DUMP_VECTOR);
+}
+
-+
-+/*
-+ * Name: dump_execute()
-+ * Func: Execute the dumping process. This makes sure all the appropriate
-+ * fields are updated correctly, and calls dump_execute_memdump(),
-+ * which does the real work.
-+ */
-+void
-+dump_execute(const char *panic_str, const struct pt_regs *regs)
+ /*
+ * this function sends a 'reschedule' IPI to another CPU.
+ * it goes straight through and wastes no time serializing
+@@ -340,6 +355,18 @@
+ return 0;
+ }
+
++void stop_this_cpu(void* dummy)
+{
-+ int state = -1;
-+ unsigned long flags;
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_disable();
++ disable_local_APIC();
++ for (;;)
++ asm("hlt");
++}
+
-+ /* make sure we can dump */
-+ if (!dump_okay) {
-+ pr_info("LKCD not yet configured, can't take dump now\n");
-+ return;
-+ }
+ void smp_stop_cpu(void)
+ {
+ /*
+Index: linux-2.6.10/arch/x86_64/kernel/traps.c
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/kernel/traps.c 2004-12-25 05:33:49.000000000 +0800
++++ linux-2.6.10/arch/x86_64/kernel/traps.c 2005-04-07 18:13:56.595799976 +0800
+@@ -27,6 +27,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/dump.h>
+ #include <linux/moduleparam.h>
+
+ #include <asm/system.h>
+@@ -369,6 +370,7 @@
+ printk("\n");
+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ show_registers(regs);
++ dump((char *)str, regs);
+ /* Executive summary in case the oops scrolled away */
+ printk(KERN_ALERT "RIP ");
+ printk_address(regs->rip);
+Index: linux-2.6.10/arch/x86_64/kernel/pci-gart.c
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/kernel/pci-gart.c 2004-12-25 05:34:32.000000000 +0800
++++ linux-2.6.10/arch/x86_64/kernel/pci-gart.c 2005-04-07 18:13:56.627795112 +0800
+@@ -34,7 +34,7 @@
+ dma_addr_t bad_dma_address;
+
+ unsigned long iommu_bus_base; /* GART remapping area (physical) */
+-static unsigned long iommu_size; /* size of remapping area bytes */
++unsigned long iommu_size; /* size of remapping area bytes */
+ static unsigned long iommu_pages; /* .. and in pages */
+
+ u32 *iommu_gatt_base; /* Remapping table */
+Index: linux-2.6.10/arch/x86_64/kernel/setup.c
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/kernel/setup.c 2004-12-25 05:33:50.000000000 +0800
++++ linux-2.6.10/arch/x86_64/kernel/setup.c 2005-04-07 18:13:56.608798000 +0800
+@@ -221,6 +221,8 @@
+ }
+ }
+
++unsigned long crashdump_addr = 0xdeadbeef;
+
-+ /* Exclude multiple dumps at the same time,
-+ * and disable interrupts, some drivers may re-enable
-+ * interrupts in with silence()
-+ *
-+ * Try and acquire spin lock. If successful, leave preempt
-+ * and interrupts disabled. See spin_lock_irqsave in spinlock.h
-+ */
-+ local_irq_save(flags);
-+ if (!spin_trylock(&dump_lock)) {
-+ local_irq_restore(flags);
-+ pr_info("LKCD dump already in progress\n");
-+ return;
-+ }
+ static __init void parse_cmdline_early (char ** cmdline_p)
+ {
+ char c = ' ', *to = command_line, *from = COMMAND_LINE;
+@@ -311,6 +313,9 @@
+
+ if (!memcmp(from,"oops=panic", 10))
+ panic_on_oops = 1;
++
++ if (c == ' ' && !memcmp(from, "crashdump=", 10))
++ crashdump_addr = memparse(from+10, &from);
+
+ next_char:
+ c = *(from++);
+@@ -441,6 +446,10 @@
+ reserve_bootmem_generic(addr, PAGE_SIZE);
+ }
+
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++extern void crashdump_reserve(void);
++#endif
+
-+ /* What state are interrupts really in? */
-+ if (in_interrupt()){
-+ if(in_irq())
-+ printk(KERN_ALERT "Dumping from interrupt handler!\n");
-+ else
-+ printk(KERN_ALERT "Dumping from bottom half!\n");
+ void __init setup_arch(char **cmdline_p)
+ {
+ unsigned long low_mem_size;
+@@ -550,6 +559,9 @@
+ }
+ #endif
+ paging_init();
++#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
++ crashdump_reserve(); /* Preserve crash dump state from prev boot */
++#endif
+
+ check_ioapic();
+ #ifdef CONFIG_ACPI_BOOT
+Index: linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c
+===================================================================
+--- linux-2.6.10.orig/arch/x86_64/kernel/x8664_ksyms.c 2004-12-25 05:34:01.000000000 +0800
++++ linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c 2005-04-07 18:13:56.625795416 +0800
+@@ -32,6 +32,7 @@
+ #include <asm/unistd.h>
+ #include <asm/delay.h>
+ #include <asm/tlbflush.h>
++#include <asm/e820.h>
+ #include <asm/kdebug.h>
+
+ extern spinlock_t rtc_lock;
+@@ -216,6 +217,20 @@
+ extern unsigned long __supported_pte_mask;
+ EXPORT_SYMBOL(__supported_pte_mask);
+
++#ifdef CONFIG_CRASH_DUMP_MODULE
++#ifdef CONFIG_SMP
++extern irq_desc_t irq_desc[NR_IRQS];
++extern cpumask_t irq_affinity[NR_IRQS];
++extern void stop_this_cpu(void *);
++EXPORT_SYMBOL(irq_desc);
++EXPORT_SYMBOL(irq_affinity);
++EXPORT_SYMBOL(dump_send_ipi);
++EXPORT_SYMBOL(stop_this_cpu);
++#endif
++extern int page_is_ram(unsigned long);
++EXPORT_SYMBOL(page_is_ram);
++#endif
+
-+ __dump_clean_irq_state();
-+ }
+ #ifdef CONFIG_SMP
+ EXPORT_SYMBOL(flush_tlb_page);
+ EXPORT_SYMBOL_GPL(flush_tlb_all);
+Index: linux-2.6.10/scripts/mkcompile_h
+===================================================================
+--- linux-2.6.10.orig/scripts/mkcompile_h 2004-12-25 05:35:50.000000000 +0800
++++ linux-2.6.10/scripts/mkcompile_h 2005-04-07 18:13:56.778772160 +0800
+@@ -33,7 +33,7 @@
+
+ UTS_LEN=64
+ UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
+-
++LINUX_COMPILE_VERSION_ID="__linux_compile_version_id__`hostname | tr -c '[0-9A-Za-z\n]' '__'`_`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'`"
+ # Generate a temporary compile.h
+
+ ( echo /\* This file is auto generated, version $VERSION \*/
+@@ -55,6 +55,8 @@
+ fi
+
+ echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
++ echo \#define LINUX_COMPILE_VERSION_ID $LINUX_COMPILE_VERSION_ID
++ echo \#define LINUX_COMPILE_VERSION_ID_TYPE typedef char* "$LINUX_COMPILE_VERSION_ID""_t"
+ ) > .tmpcompile
+
+ # Only replace the real compile.h if the new one is different,
+Index: linux-2.6.10/init/main.c
+===================================================================
+--- linux-2.6.10.orig/init/main.c 2005-04-06 23:38:35.000000000 +0800
++++ linux-2.6.10/init/main.c 2005-04-07 18:13:56.635793896 +0800
+@@ -109,6 +109,16 @@
+ EXPORT_SYMBOL(system_state);
+
+ /*
++ * The kernel_magic value represents the address of _end, which allows
++ * namelist tools to "match" each other respectively. That way a tool
++ * that looks at /dev/mem can verify that it is using the right System.map
++ * file -- if kernel_magic doesn't equal the namelist value of _end,
++ * something's wrong.
++ */
++extern unsigned long _end;
++unsigned long *kernel_magic = &_end;
+
++/*
+ * Boot command-line arguments
+ */
+ #define MAX_INIT_ARGS 32
+Index: linux-2.6.10/init/kerntypes.c
+===================================================================
+--- linux-2.6.10.orig/init/kerntypes.c 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/init/kerntypes.c 2005-04-07 18:13:56.634794048 +0800
+@@ -0,0 +1,40 @@
++/*
++ * kerntypes.c
++ *
++ * Copyright (C) 2000 Tom Morano (tjm@sgi.com) and
++ * Matt D. Robinson (yakker@alacritech.com)
++ *
++ * Dummy module that includes headers for all kernel types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under version 2 of the GNU GPL.
++ */
+
-+ /* Bring system into the strictest level of quiescing for min drift
-+ * dump drivers can soften this as required in dev->ops->silence()
-+ */
-+ dump_oncpu = smp_processor_id() + 1;
-+ dump_silence_level = DUMP_HARD_SPIN_CPUS;
++#include <linux/compile.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <linux/config.h>
++#include <linux/utsname.h>
++#include <linux/kernel_stat.h>
++#include <linux/dump.h>
+
-+ state = dump_generic_execute(panic_str, regs);
-+
-+ dump_oncpu = 0;
-+ spin_unlock_irqrestore(&dump_lock, flags);
++#include <asm/kerntypes.h>
+
-+ if (state < 0) {
-+ printk("Dump Incomplete or failed!\n");
-+ } else {
-+ printk("Dump Complete; %d dump pages saved.\n",
-+ dump_header.dh_num_dump_pages);
-+ }
-+}
++#ifdef LINUX_COMPILE_VERSION_ID_TYPE
++/* Define version type for version validation of dump and kerntypes */
++LINUX_COMPILE_VERSION_ID_TYPE;
++#endif
++#if defined(CONFIG_SMP) && defined(CONFIG_CRASH_DUMP)
++extern struct runqueue runqueues;
++struct runqueue rn;
++#endif
+
-+/*
-+ * Name: dump_register_compression()
-+ * Func: Register a dump compression mechanism.
-+ */
++struct new_utsname *p;
+void
-+dump_register_compression(struct __dump_compress *item)
++kerntypes_dummy(void)
+{
-+ if (item)
-+ list_add(&(item->list), &dump_compress_list);
+}
+Index: linux-2.6.10/init/version.c
+===================================================================
+--- linux-2.6.10.orig/init/version.c 2004-12-25 05:34:45.000000000 +0800
++++ linux-2.6.10/init/version.c 2005-04-07 18:13:56.633794200 +0800
+@@ -11,6 +11,7 @@
+ #include <linux/uts.h>
+ #include <linux/utsname.h>
+ #include <linux/version.h>
++#include <linux/stringify.h>
+
+ #define version(a) Version_ ## a
+ #define version_string(a) version(a)
+@@ -31,3 +32,6 @@
+ const char *linux_banner =
+ "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
+
-+/*
-+ * Name: dump_unregister_compression()
-+ * Func: Remove a dump compression mechanism, and re-assign the dump
-+ * compression pointer if necessary.
-+ */
-+void
-+dump_unregister_compression(int compression_type)
-+{
-+ struct list_head *tmp;
-+ struct __dump_compress *dc;
-+
-+ /* let's make sure our list is valid */
-+ if (compression_type != DUMP_COMPRESS_NONE) {
-+ list_for_each(tmp, &dump_compress_list) {
-+ dc = list_entry(tmp, struct __dump_compress, list);
-+ if (dc->compress_type == compression_type) {
-+ list_del(&(dc->list));
-+ break;
-+ }
-+ }
-+ }
-+}
++const char *LINUX_COMPILE_VERSION_ID = __stringify(LINUX_COMPILE_VERSION_ID);
++LINUX_COMPILE_VERSION_ID_TYPE;
+Index: linux-2.6.10/init/Makefile
+===================================================================
+--- linux-2.6.10.orig/init/Makefile 2004-12-25 05:34:32.000000000 +0800
++++ linux-2.6.10/init/Makefile 2005-04-07 18:13:56.636793744 +0800
+@@ -9,12 +9,20 @@
+ mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
+ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
+
++extra-$(CONFIG_KERNTYPES) += kerntypes.o
++# For IA64, compile kerntypes in DWARF-2 format.
++ifeq ($(CONFIG_IA64),y)
++CFLAGS_kerntypes.o := -gdwarf-2
++else
++CFLAGS_kerntypes.o := -gstabs
++endif
+
-+/*
-+ * Name: dump_compress_init()
-+ * Func: Initialize (or re-initialize) compression scheme.
-+ */
-+static int
-+dump_compress_init(int compression_type)
-+{
-+ struct list_head *tmp;
-+ struct __dump_compress *dc;
+ # files to be removed upon make clean
+ clean-files := ../include/linux/compile.h
+
+ # dependencies on generated files need to be listed explicitly
+
+-$(obj)/version.o: include/linux/compile.h
++$(obj)/version.o $(obj)/kerntypes.o: include/linux/compile.h
+
+ # compile.h changes depending on hostname, generation number, etc,
+ # so we regenerate it always.
+@@ -24,3 +32,4 @@
+ include/linux/compile.h: FORCE
+ @echo ' CHK $@'
+ @$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CC) $(CFLAGS)"
+
-+ /* try to remove the compression item */
-+ list_for_each(tmp, &dump_compress_list) {
-+ dc = list_entry(tmp, struct __dump_compress, list);
-+ if (dc->compress_type == compression_type) {
-+ dump_config.dumper->compress = dc;
-+ dump_compress = compression_type;
-+ pr_debug("Dump Compress %s\n", dc->compress_name);
-+ return 0;
-+ }
-+ }
+Index: linux-2.6.10/net/Kconfig
+===================================================================
+--- linux-2.6.10.orig/net/Kconfig 2005-04-06 23:38:35.000000000 +0800
++++ linux-2.6.10/net/Kconfig 2005-04-07 18:13:56.760774896 +0800
+@@ -632,7 +632,7 @@
+ endmenu
+
+ config NETPOLL
+- def_bool NETCONSOLE
++ def_bool NETCONSOLE || CRASH_DUMP_NETDEV
+
+ config NETPOLL_RX
+ bool "Netpoll support for trapping incoming packets"
+Index: linux-2.6.10/kernel/sched.c
+===================================================================
+--- linux-2.6.10.orig/kernel/sched.c 2005-04-07 14:55:26.000000000 +0800
++++ linux-2.6.10/kernel/sched.c 2005-04-07 18:13:56.850761216 +0800
+@@ -54,6 +54,10 @@
+ #define cpu_to_node_mask(cpu) (cpu_online_map)
+ #endif
+
++/* used to soft spin in sched while dump is in progress */
++unsigned long dump_oncpu;
++EXPORT_SYMBOL(dump_oncpu);
+
-+ /*
-+ * nothing on the list -- return ENODATA to indicate an error
-+ *
-+ * NB:
-+ * EAGAIN: reports "Resource temporarily unavailable" which
-+ * isn't very enlightening.
+ /*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+@@ -184,109 +188,6 @@
+ #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
+ < (long long) (sd)->cache_hot_time)
+
+-/*
+- * These are the runqueue data structures:
+- */
+-
+-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+-
+-typedef struct runqueue runqueue_t;
+-
+-struct prio_array {
+- unsigned int nr_active;
+- unsigned long bitmap[BITMAP_SIZE];
+- struct list_head queue[MAX_PRIO];
+-};
+-
+-/*
+- * This is the main, per-CPU runqueue data structure.
+- *
+- * Locking rule: those places that want to lock multiple runqueues
+- * (such as the load balancing or the thread migration code), lock
+- * acquire operations must be ordered by ascending &runqueue.
+- */
+-struct runqueue {
+- spinlock_t lock;
+-
+- /*
+- * nr_running and cpu_load should be in the same cacheline because
+- * remote CPUs use both these fields when doing load calculation.
+- */
+- unsigned long nr_running;
+-#ifdef CONFIG_SMP
+- unsigned long cpu_load;
+-#endif
+- unsigned long long nr_switches;
+-
+- /*
+- * This is part of a global counter where only the total sum
+- * over all CPUs matters. A task can increase this counter on
+- * one CPU and if it got migrated afterwards it may decrease
+- * it on another CPU. Always updated under the runqueue lock:
+- */
+- unsigned long nr_uninterruptible;
+-
+- unsigned long expired_timestamp;
+- unsigned long long timestamp_last_tick;
+- task_t *curr, *idle;
+- struct mm_struct *prev_mm;
+- prio_array_t *active, *expired, arrays[2];
+- int best_expired_prio;
+- atomic_t nr_iowait;
+-
+-#ifdef CONFIG_SMP
+- struct sched_domain *sd;
+-
+- /* For active balancing */
+- int active_balance;
+- int push_cpu;
+-
+- task_t *migration_thread;
+- struct list_head migration_queue;
+-#endif
+-
+-#ifdef CONFIG_SCHEDSTATS
+- /* latency stats */
+- struct sched_info rq_sched_info;
+-
+- /* sys_sched_yield() stats */
+- unsigned long yld_exp_empty;
+- unsigned long yld_act_empty;
+- unsigned long yld_both_empty;
+- unsigned long yld_cnt;
+-
+- /* schedule() stats */
+- unsigned long sched_noswitch;
+- unsigned long sched_switch;
+- unsigned long sched_cnt;
+- unsigned long sched_goidle;
+-
+- /* pull_task() stats */
+- unsigned long pt_gained[MAX_IDLE_TYPES];
+- unsigned long pt_lost[MAX_IDLE_TYPES];
+-
+- /* active_load_balance() stats */
+- unsigned long alb_cnt;
+- unsigned long alb_lost;
+- unsigned long alb_gained;
+- unsigned long alb_failed;
+-
+- /* try_to_wake_up() stats */
+- unsigned long ttwu_cnt;
+- unsigned long ttwu_attempts;
+- unsigned long ttwu_moved;
+-
+- /* wake_up_new_task() stats */
+- unsigned long wunt_cnt;
+- unsigned long wunt_moved;
+-
+- /* sched_migrate_task() stats */
+- unsigned long smt_cnt;
+-
+- /* sched_balance_exec() stats */
+- unsigned long sbe_cnt;
+-#endif
+-};
+
+ static DEFINE_PER_CPU(struct runqueue, runqueues);
+
+@@ -2535,6 +2436,15 @@
+ unsigned long run_time;
+ int cpu, idx;
+
++ /*
++ * If a crash dump is in progress, the other CPUs
++ * need to wait until it completes.
++ * NB: this code is optimized away for kernels without
++ * dumping enabled.
+ */
-+ printk("compression_type:%d not found\n", compression_type);
++ if (unlikely(dump_oncpu))
++ goto dump_scheduling_disabled;
+
-+ return -ENODATA;
-+}
+ /*
+ * Test if we are atomic. Since do_exit() needs to call into
+ * schedule() atomically, we ignore that path for now.
+@@ -2698,6 +2608,16 @@
+ preempt_enable_no_resched();
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+ goto need_resched;
+
-+static int
-+dumper_setup(unsigned long flags, unsigned long devid)
-+{
-+ int ret = 0;
++ return;
+
-+ /* unconfigure old dumper if it exists */
-+ dump_okay = 0;
-+ if (dump_config.dumper) {
-+ pr_debug("Unconfiguring current dumper\n");
-+ dump_unconfigure();
++ dump_scheduling_disabled:
++ /* allow scheduling only if this is the dumping cpu */
++ if (dump_oncpu != smp_processor_id()+1) {
++ while (dump_oncpu)
++ cpu_relax();
+ }
-+ /* set up new dumper */
-+ if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
-+ printk("Configuring softboot based dump \n");
-+#ifdef CONFIG_CRASH_DUMP_MEMDEV
-+ dump_config.dumper = &dumper_stage1;
-+#else
-+ printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
-+ return -1;
++ return;
+ }
+
+ EXPORT_SYMBOL(schedule);
+Index: linux-2.6.10/kernel/panic.c
+===================================================================
+--- linux-2.6.10.orig/kernel/panic.c 2004-12-25 05:35:29.000000000 +0800
++++ linux-2.6.10/kernel/panic.c 2005-04-07 18:13:56.860759696 +0800
+@@ -18,12 +18,17 @@
+ #include <linux/sysrq.h>
+ #include <linux/interrupt.h>
+ #include <linux/nmi.h>
++#ifdef CONFIG_KEXEC
++#include <linux/kexec.h>
+#endif
-+ } else {
-+ dump_config.dumper = &dumper_singlestage;
-+ }
-+ dump_config.dumper->dev = dump_dev;
-+
-+ ret = dump_configure(devid);
-+ if (!ret) {
-+ dump_okay = 1;
-+ pr_debug("%s dumper set up for dev 0x%lx\n",
-+ dump_config.dumper->name, devid);
-+ dump_config.dump_device = devid;
-+ } else {
-+ printk("%s dumper set up failed for dev 0x%lx\n",
-+ dump_config.dumper->name, devid);
-+ dump_config.dumper = NULL;
-+ }
-+ return ret;
-+}
-+
-+static int
-+dump_target_init(int target)
-+{
-+ char type[20];
-+ struct list_head *tmp;
-+ struct dump_dev *dev;
-+
-+ switch (target) {
-+ case DUMP_FLAGS_DISKDUMP:
-+ strcpy(type, "blockdev"); break;
-+ case DUMP_FLAGS_NETDUMP:
-+ strcpy(type, "networkdev"); break;
-+ default:
-+ return -1;
-+ }
-+
-+ /*
-+ * This is a bit stupid, generating strings from flag
-+ * and doing strcmp. This is done because 'struct dump_dev'
-+ * has string 'type_name' and not interger 'type'.
-+ */
-+ list_for_each(tmp, &dump_target_list) {
-+ dev = list_entry(tmp, struct dump_dev, list);
-+ if (strcmp(type, dev->type_name) == 0) {
-+ dump_dev = dev;
-+ return 0;
-+ }
-+ }
-+ return -1;
+
+ int panic_timeout;
+ int panic_on_oops;
+ int tainted;
++void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
+
+ EXPORT_SYMBOL(panic_timeout);
++EXPORT_SYMBOL(dump_function_ptr);
+
+ struct notifier_block *panic_notifier_list;
+
+@@ -71,11 +76,12 @@
+ printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+ bust_spinlocks(0);
+
++ notifier_call_chain(&panic_notifier_list, 0, buf);
++
+ #ifdef CONFIG_SMP
+ smp_send_stop();
+ #endif
+
+- notifier_call_chain(&panic_notifier_list, 0, buf);
+
+ if (!panic_blink)
+ panic_blink = no_blink;
+@@ -87,6 +93,18 @@
+ * We can't use the "normal" timers since we just panicked..
+ */
+ printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
++#ifdef CONFIG_KEXEC
++{
++ struct kimage *image;
++ image = xchg(&kexec_image, 0);
++ if (image) {
++ printk(KERN_EMERG "by starting a new kernel ..\n");
++ mdelay(panic_timeout*1000);
++ machine_kexec(image);
++ }
+}
++#endif
+
+ for (i = 0; i < panic_timeout*1000; ) {
+ touch_nmi_watchdog();
+ i += panic_blink(i);
+Index: linux-2.6.10/include/linux/sysctl.h
+===================================================================
+--- linux-2.6.10.orig/include/linux/sysctl.h 2005-04-06 23:38:35.000000000 +0800
++++ linux-2.6.10/include/linux/sysctl.h 2005-04-07 18:13:56.651791464 +0800
+@@ -135,6 +135,7 @@
+ KERN_HZ_TIMER=65, /* int: hz timer on or off */
+ KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */
+ KERN_SETUID_DUMPABLE=67, /* int: behaviour of dumps for setuid core */
++ KERN_DUMP=68, /* directory: dump parameters */
+ };
+
+
+Index: linux-2.6.10/include/linux/dump_netdev.h
+===================================================================
+--- linux-2.6.10.orig/include/linux/dump_netdev.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/linux/dump_netdev.h 2005-04-07 18:13:56.663789640 +0800
+@@ -0,0 +1,80 @@
+/*
-+ * Name: dump_ioctl()
-+ * Func: Allow all dump tunables through a standard ioctl() mechanism.
-+ * This is far better than before, where we'd go through /proc,
-+ * because now this will work for multiple OS and architectures.
++ * linux/drivers/net/netconsole.h
++ *
++ * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
++ *
++ * This file contains the implementation of an IRQ-safe, crash-safe
++ * kernel console implementation that outputs kernel messages to the
++ * network.
++ *
++ * Modification history:
++ *
++ * 2001-09-17 started by Ingo Molnar.
+ */
-+static int
-+dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
-+{
-+ /* check capabilities */
-+ if (!capable(CAP_SYS_ADMIN))
-+ return -EPERM;
+
-+ if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
-+ /* dump device must be configured first */
-+ return -ENODEV;
++/****************************************************************
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ ****************************************************************/
++
++#define NETCONSOLE_VERSION 0x03
++
++enum netdump_commands {
++ COMM_NONE = 0,
++ COMM_SEND_MEM = 1,
++ COMM_EXIT = 2,
++ COMM_REBOOT = 3,
++ COMM_HELLO = 4,
++ COMM_GET_NR_PAGES = 5,
++ COMM_GET_PAGE_SIZE = 6,
++ COMM_START_NETDUMP_ACK = 7,
++ COMM_GET_REGS = 8,
++ COMM_GET_MAGIC = 9,
++ COMM_START_WRITE_NETDUMP_ACK = 10,
++};
+
-+ /*
-+ * This is the main mechanism for controlling get/set data
-+ * for various dump device parameters. The real trick here
-+ * is setting the dump device (DIOSDUMPDEV). That's what
-+ * triggers everything else.
-+ */
-+ switch (cmd) {
-+ case DIOSDUMPDEV: /* set dump_device */
-+ pr_debug("Configuring dump device\n");
-+ if (!(f->f_flags & O_RDWR))
-+ return -EPERM;
++typedef struct netdump_req_s {
++ u64 magic;
++ u32 nr;
++ u32 command;
++ u32 from;
++ u32 to;
++} req_t;
+
-+ __dump_open();
-+ return dumper_setup(dump_config.flags, arg);
++enum netdump_replies {
++ REPLY_NONE = 0,
++ REPLY_ERROR = 1,
++ REPLY_LOG = 2,
++ REPLY_MEM = 3,
++ REPLY_RESERVED = 4,
++ REPLY_HELLO = 5,
++ REPLY_NR_PAGES = 6,
++ REPLY_PAGE_SIZE = 7,
++ REPLY_START_NETDUMP = 8,
++ REPLY_END_NETDUMP = 9,
++ REPLY_REGS = 10,
++ REPLY_MAGIC = 11,
++ REPLY_START_WRITE_NETDUMP = 12,
++};
+
-+
-+ case DIOGDUMPDEV: /* get dump_device */
-+ return put_user((long)dump_config.dump_device, (long *)arg);
++typedef struct netdump_reply_s {
++ u32 nr;
++ u32 code;
++ u32 info;
++} reply_t;
+
-+ case DIOSDUMPLEVEL: /* set dump_level */
-+ if (!(f->f_flags & O_RDWR))
-+ return -EPERM;
++#define HEADER_LEN (1 + sizeof(reply_t))
+
-+ /* make sure we have a positive value */
-+ if (arg < 0)
-+ return -EINVAL;
+
-+ /* Fixme: clean this up */
-+ dump_config.level = 0;
-+ switch ((int)arg) {
-+ case DUMP_LEVEL_ALL:
-+ case DUMP_LEVEL_ALL_RAM:
-+ dump_config.level |= DUMP_MASK_UNUSED;
-+ case DUMP_LEVEL_USED:
-+ dump_config.level |= DUMP_MASK_USED;
-+ case DUMP_LEVEL_KERN:
-+ dump_config.level |= DUMP_MASK_KERN;
-+ case DUMP_LEVEL_HEADER:
-+ dump_config.level |= DUMP_MASK_HEADER;
-+ case DUMP_LEVEL_NONE:
-+ break;
-+ default:
-+ return (-EINVAL);
-+ }
-+ pr_debug("Dump Level 0x%lx\n", dump_config.level);
-+ break;
+Index: linux-2.6.10/include/linux/sched.h
+===================================================================
+--- linux-2.6.10.orig/include/linux/sched.h 2005-04-07 18:13:55.080030408 +0800
++++ linux-2.6.10/include/linux/sched.h 2005-04-07 18:13:56.653791160 +0800
+@@ -94,6 +94,7 @@
+ extern int nr_threads;
+ extern int last_pid;
+ DECLARE_PER_CPU(unsigned long, process_counts);
++DECLARE_PER_CPU(struct runqueue, runqueues);
+ extern int nr_processes(void);
+ extern unsigned long nr_running(void);
+ extern unsigned long nr_uninterruptible(void);
+@@ -760,6 +761,110 @@
+ void yield(void);
+
+ /*
++ * These are the runqueue data structures:
++ */
+
-+ case DIOGDUMPLEVEL: /* get dump_level */
-+ /* fixme: handle conversion */
-+ return put_user((long)dump_config.level, (long *)arg);
++#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+
-+
-+ case DIOSDUMPFLAGS: /* set dump_flags */
-+ /* check flags */
-+ if (!(f->f_flags & O_RDWR))
-+ return -EPERM;
++typedef struct runqueue runqueue_t;
+
-+ /* make sure we have a positive value */
-+ if (arg < 0)
-+ return -EINVAL;
-+
-+ if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
-+ return -EINVAL; /* return proper error */
++struct prio_array {
++ unsigned int nr_active;
++ unsigned long bitmap[BITMAP_SIZE];
++ struct list_head queue[MAX_PRIO];
++};
+
-+ dump_config.flags = arg;
-+
-+ pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
-+ break;
-+
-+ case DIOGDUMPFLAGS: /* get dump_flags */
-+ return put_user((long)dump_config.flags, (long *)arg);
++/*
++ * This is the main, per-CPU runqueue data structure.
++ *
++ * Locking rule: those places that want to lock multiple runqueues
++ * (such as the load balancing or the thread migration code), lock
++ * acquire operations must be ordered by ascending &runqueue.
++ */
++struct runqueue {
++ spinlock_t lock;
+
-+ case DIOSDUMPCOMPRESS: /* set the dump_compress status */
-+ if (!(f->f_flags & O_RDWR))
-+ return -EPERM;
++ /*
++ * nr_running and cpu_load should be in the same cacheline because
++ * remote CPUs use both these fields when doing load calculation.
++ */
++ unsigned long nr_running;
++#ifdef CONFIG_SMP
++ unsigned long cpu_load;
++#endif
++ unsigned long long nr_switches;
+
-+ return dump_compress_init((int)arg);
++ /*
++ * This is part of a global counter where only the total sum
++ * over all CPUs matters. A task can increase this counter on
++ * one CPU and if it got migrated afterwards it may decrease
++ * it on another CPU. Always updated under the runqueue lock:
++ */
++ unsigned long nr_uninterruptible;
+
-+ case DIOGDUMPCOMPRESS: /* get the dump_compress status */
-+ return put_user((long)(dump_config.dumper ?
-+ dump_config.dumper->compress->compress_type : 0),
-+ (long *)arg);
-+ case DIOGDUMPOKAY: /* check if dump is configured */
-+ return put_user((long)dump_okay, (long *)arg);
-+
-+ case DIOSDUMPTAKE: /* Trigger a manual dump */
-+ /* Do not proceed if lkcd not yet configured */
-+ if(!dump_okay) {
-+ printk("LKCD not yet configured. Cannot take manual dump\n");
-+ return -ENODEV;
-+ }
++ unsigned long expired_timestamp;
++ unsigned long long timestamp_last_tick;
++ task_t *curr, *idle;
++ struct mm_struct *prev_mm;
++ prio_array_t *active, *expired, arrays[2];
++ int best_expired_prio;
++ atomic_t nr_iowait;
+
-+ /* Take the dump */
-+ return manual_handle_crashdump();
-+
-+ default:
-+ /*
-+ * these are network dump specific ioctls, let the
-+ * module handle them.
-+ */
-+ return dump_dev_ioctl(cmd, arg);
-+ }
-+ return 0;
-+}
++#ifdef CONFIG_SMP
++ struct sched_domain *sd;
+
-+/*
-+ * Handle special cases for dump_device
-+ * changing dump device requires doing an opening the device
-+ */
-+static int
-+proc_dump_device(ctl_table *ctl, int write, struct file *f,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ int *valp = ctl->data;
-+ int oval = *valp;
-+ int ret = -EPERM;
++ /* For active balancing */
++ int active_balance;
++ int push_cpu;
+
-+ /* same permission checks as ioctl */
-+ if (capable(CAP_SYS_ADMIN)) {
-+ ret = proc_doulonghex(ctl, write, f, buffer, lenp, ppos);
-+ if (ret == 0 && write && *valp != oval) {
-+ /* need to restore old value to close properly */
-+ dump_config.dump_device = (dev_t) oval;
-+ __dump_open();
-+ ret = dumper_setup(dump_config.flags, (dev_t) *valp);
-+ }
-+ }
++ task_t *migration_thread;
++ struct list_head migration_queue;
++#endif
+
-+ return ret;
-+}
++#ifdef CONFIG_SCHEDSTATS
++ /* latency stats */
++ struct sched_info rq_sched_info;
+
-+/* All for the want of a proc_do_xxx routine which prints values in hex */
-+/* Write is not implemented correctly, so mode is set to 0444 above. */
-+static int
-+proc_doulonghex(ctl_table *ctl, int write, struct file *f,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+#define TMPBUFLEN 21
-+ unsigned long *i;
-+ size_t len, left;
-+ char buf[TMPBUFLEN];
++ /* sys_sched_yield() stats */
++ unsigned long yld_exp_empty;
++ unsigned long yld_act_empty;
++ unsigned long yld_both_empty;
++ unsigned long yld_cnt;
+
-+ if (!ctl->data || !ctl->maxlen || !*lenp || (*ppos && !write)) {
-+ *lenp = 0;
-+ return 0;
-+ }
-+
-+ i = (unsigned long *) ctl->data;
-+ left = *lenp;
-+
-+ sprintf(buf, "0x%lx\n", (*i));
-+ len = strlen(buf);
-+ if (len > left)
-+ len = left;
-+ if(copy_to_user(buffer, buf, len))
-+ return -EFAULT;
-+
-+ left -= len;
-+ *lenp -= left;
-+ *ppos += *lenp;
-+ return 0;
-+}
++ /* schedule() stats */
++ unsigned long sched_noswitch;
++ unsigned long sched_switch;
++ unsigned long sched_cnt;
++ unsigned long sched_goidle;
+
-+/*
-+ * -----------------------------------------------------------------------
-+ * I N I T F U N C T I O N S
-+ * -----------------------------------------------------------------------
-+ */
++ /* pull_task() stats */
++ unsigned long pt_gained[MAX_IDLE_TYPES];
++ unsigned long pt_lost[MAX_IDLE_TYPES];
+
-+#ifdef CONFIG_COMPAT
-+static int dw_long(unsigned int fd, unsigned int cmd, unsigned long arg,
-+ struct file *f)
-+{
-+ mm_segment_t old_fs = get_fs();
-+ int err;
-+ unsigned long val;
++ /* active_load_balance() stats */
++ unsigned long alb_cnt;
++ unsigned long alb_lost;
++ unsigned long alb_gained;
++ unsigned long alb_failed;
+
-+ set_fs (KERNEL_DS);
-+ err = sys_ioctl(fd, cmd, (u64)&val);
-+ set_fs (old_fs);
-+ if (!err && put_user((unsigned int) val, (u32 *)arg))
-+ return -EFAULT;
-+ return err;
-+}
++ /* try_to_wake_up() stats */
++ unsigned long ttwu_cnt;
++ unsigned long ttwu_attempts;
++ unsigned long ttwu_moved;
++
++ /* wake_up_new_task() stats */
++ unsigned long wunt_cnt;
++ unsigned long wunt_moved;
++
++ /* sched_migrate_task() stats */
++ unsigned long smt_cnt;
++
++ /* sched_balance_exec() stats */
++ unsigned long sbe_cnt;
+#endif
++};
+
+/*
-+ * These register and unregister routines are exported for modules
-+ * to register their dump drivers (like block, net etc)
+ * The default (Linux) execution domain.
+ */
+ extern struct exec_domain default_exec_domain;
+Index: linux-2.6.10/include/linux/dumpdev.h
+===================================================================
+--- linux-2.6.10.orig/include/linux/dumpdev.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/linux/dumpdev.h 2005-04-07 18:13:56.663789640 +0800
+@@ -0,0 +1,163 @@
++/*
++ * Generic dump device interfaces for flexible system dump
++ * (Enables variation of dump target types, e.g. disk, network, memory)
++ *
++ * These interfaces have evolved based on discussions on lkcd-devel.
++ * Eventually the intent is to support primary and secondary or
++ * alternate targets registered at the same time, with scope for
++ * situation based failover or multiple dump devices used for parallel
++ * dump i/o.
++ *
++ * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
++ *
++ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 International Business Machines Corp.
++ *
++ * This code is released under version 2 of the GNU GPL.
+ */
-+int
-+dump_register_device(struct dump_dev *ddev)
-+{
-+ struct list_head *tmp;
-+ struct dump_dev *dev;
-+
-+ list_for_each(tmp, &dump_target_list) {
-+ dev = list_entry(tmp, struct dump_dev, list);
-+ if (strcmp(ddev->type_name, dev->type_name) == 0) {
-+ printk("Target type %s already registered\n",
-+ dev->type_name);
-+ return -1; /* return proper error */
-+ }
-+ }
-+ list_add(&(ddev->list), &dump_target_list);
-+
-+ return 0;
-+}
+
-+void
-+dump_unregister_device(struct dump_dev *ddev)
-+{
-+ list_del(&(ddev->list));
-+ if (ddev != dump_dev)
-+ return;
-+
-+ dump_okay = 0;
++#ifndef _LINUX_DUMPDEV_H
++#define _LINUX_DUMPDEV_H
+
-+ if (dump_config.dumper)
-+ dump_unconfigure();
++#include <linux/kernel.h>
++#include <linux/wait.h>
++#include <linux/netpoll.h>
++#include <linux/bio.h>
+
-+ dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
-+ dump_okay = 0;
-+ dump_dev = NULL;
-+ dump_config.dumper = NULL;
-+}
++/* Determined by the dump target (device) type */
+
-+static int panic_event(struct notifier_block *this, unsigned long event,
-+ void *ptr)
-+{
-+#ifdef CONFIG_ARM
-+ get_current_general_regs(&all_regs);
-+ get_current_cp14_regs(&all_regs);
-+ get_current_cp15_regs(&all_regs);
-+ dump_execute((const char *)ptr, &all_regs);
-+#else
-+ struct pt_regs regs;
-+
-+ get_current_regs(®s);
-+ dump_execute((const char *)ptr, ®s);
-+#endif
-+ return 0;
-+}
++struct dump_dev;
+
-+extern struct notifier_block *panic_notifier_list;
-+static int panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block panic_block = {
-+ .notifier_call = panic_event,
++struct dump_dev_ops {
++ int (*open)(struct dump_dev *, unsigned long); /* configure */
++ int (*release)(struct dump_dev *); /* unconfigure */
++ int (*silence)(struct dump_dev *); /* when dump starts */
++ int (*resume)(struct dump_dev *); /* when dump is over */
++ int (*seek)(struct dump_dev *, loff_t);
++ /* trigger a write (async in nature typically) */
++ int (*write)(struct dump_dev *, void *, unsigned long);
++ /* not usually used during dump, but option available */
++ int (*read)(struct dump_dev *, void *, unsigned long);
++ /* use to poll for completion */
++ int (*ready)(struct dump_dev *, void *);
++ int (*ioctl)(struct dump_dev *, unsigned int, unsigned long);
+};
+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+/* Sysrq handler */
-+static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
-+ struct tty_struct *tty) {
-+ if(!pt_regs) {
-+ struct pt_regs regs;
-+ get_current_regs(®s);
-+ dump_execute("sysrq", ®s);
++struct dump_dev {
++ char type_name[32]; /* block, net-poll etc */
++ unsigned long device_id; /* interpreted differently for various types */
++ struct dump_dev_ops *ops;
++ struct list_head list;
++ loff_t curr_offset;
++ struct netpoll np;
++};
+
-+ } else {
-+ dump_execute("sysrq", pt_regs);
-+ }
-+}
++/*
++ * dump_dev type variations:
++ */
+
-+static struct sysrq_key_op sysrq_crashdump_op = {
-+ .handler = sysrq_handle_crashdump,
-+ .help_msg = "Dump",
-+ .action_msg = "Starting crash dump",
++/* block */
++struct dump_blockdev {
++ struct dump_dev ddev;
++ dev_t dev_id;
++ struct block_device *bdev;
++ struct bio *bio;
++ loff_t start_offset;
++ loff_t limit;
++ int err;
+};
-+#endif
-+
-+static inline void
-+dump_sysrq_register(void)
-+{
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ register_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
-+#endif
-+}
+
-+static inline void
-+dump_sysrq_unregister(void)
++static inline struct dump_blockdev *DUMP_BDEV(struct dump_dev *dev)
+{
-+#ifdef CONFIG_MAGIC_SYSRQ
-+ unregister_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
-+#endif
++ return container_of(dev, struct dump_blockdev, ddev);
+}
+
-+/*
-+ * Name: dump_init()
-+ * Func: Initialize the dump process. This will set up any architecture
-+ * dependent code. The big key is we need the memory offsets before
-+ * the page table is initialized, because the base memory offset
-+ * is changed after paging_init() is called.
-+ */
-+static int __init
-+dump_init(void)
-+{
-+ struct sysinfo info;
-+ int err;
-+
-+ /* try to create our dump device */
-+ err = misc_register(&dump_miscdev);
-+ if (err) {
-+ printk("cannot register dump character device!\n");
-+ return err;
-+ }
+
-+ __dump_init((u64)PAGE_OFFSET);
++/* mem - for internal use by soft-boot based dumper */
++struct dump_memdev {
++ struct dump_dev ddev;
++ unsigned long indirect_map_root;
++ unsigned long nr_free;
++ struct page *curr_page;
++ unsigned long *curr_map;
++ unsigned long curr_map_offset;
++ unsigned long last_offset;
++ unsigned long last_used_offset;
++ unsigned long last_bs_offset;
++};
+
-+#ifdef CONFIG_COMPAT
-+ err = register_ioctl32_conversion(DIOSDUMPDEV, NULL);
-+ err |= register_ioctl32_conversion(DIOGDUMPDEV, NULL);
-+ err |= register_ioctl32_conversion(DIOSDUMPLEVEL, NULL);
-+ err |= register_ioctl32_conversion(DIOGDUMPLEVEL, dw_long);
-+ err |= register_ioctl32_conversion(DIOSDUMPFLAGS, NULL);
-+ err |= register_ioctl32_conversion(DIOGDUMPFLAGS, dw_long);
-+ err |= register_ioctl32_conversion(DIOSDUMPCOMPRESS, NULL);
-+ err |= register_ioctl32_conversion(DIOGDUMPCOMPRESS, dw_long);
-+ err |= register_ioctl32_conversion(DIOSTARGETIP, NULL);
-+ err |= register_ioctl32_conversion(DIOGTARGETIP, NULL);
-+ err |= register_ioctl32_conversion(DIOSTARGETPORT, NULL);
-+ err |= register_ioctl32_conversion(DIOGTARGETPORT, NULL);
-+ err |= register_ioctl32_conversion(DIOSSOURCEPORT, NULL);
-+ err |= register_ioctl32_conversion(DIOGSOURCEPORT, NULL);
-+ err |= register_ioctl32_conversion(DIOSETHADDR, NULL);
-+ err |= register_ioctl32_conversion(DIOGETHADDR, NULL);
-+ err |= register_ioctl32_conversion(DIOGDUMPOKAY, dw_long);
-+ err |= register_ioctl32_conversion(DIOSDUMPTAKE, NULL);
-+ if (err) {
-+ printk(KERN_ERR "LKCD: registering ioctl32 translations failed\
-+");
-+ }
-+#endif
-+ /* set the dump_compression_list structure up */
-+ dump_register_compression(&dump_none_compression);
++static inline struct dump_memdev *DUMP_MDEV(struct dump_dev *dev)
++{
++ return container_of(dev, struct dump_memdev, ddev);
++}
+
-+ /* grab the total memory size now (not if/when we crash) */
-+ si_meminfo(&info);
++/* Todo/future - meant for raw dedicated interfaces e.g. mini-ide driver */
++struct dump_rdev {
++ struct dump_dev ddev;
++ char name[32];
++ int (*reset)(struct dump_rdev *, unsigned int,
++ unsigned long);
++ /* ... to do ... */
++};
+
-+ /* set the memory size */
-+ dump_header.dh_memory_size = (u64)info.totalram;
++/* just to get the size right when saving config across a soft-reboot */
++struct dump_anydev {
++ union {
++ struct dump_blockdev bddev;
++ /* .. add other types here .. */
++ };
++};
+
-+ sysctl_header = register_sysctl_table(kernel_root, 0);
-+ dump_sysrq_register();
+
-+ notifier_chain_register(&panic_notifier_list, &panic_block);
-+ dump_function_ptr = dump_execute;
+
-+ pr_info("Crash dump driver initialized.\n");
-+ return 0;
++/* Dump device / target operation wrappers */
++/* These assume that dump_dev is initiatized to dump_config.dumper->dev */
++
++extern struct dump_dev *dump_dev;
++
++static inline int dump_dev_open(unsigned long arg)
++{
++ return dump_dev->ops->open(dump_dev, arg);
+}
+
-+static void __exit
-+dump_cleanup(void)
++static inline int dump_dev_release(void)
+{
-+ int err;
-+ dump_okay = 0;
++ return dump_dev->ops->release(dump_dev);
++}
+
-+ if (dump_config.dumper)
-+ dump_unconfigure();
++static inline int dump_dev_silence(void)
++{
++ return dump_dev->ops->silence(dump_dev);
++}
+
-+ /* arch-specific cleanup routine */
-+ __dump_cleanup();
++static inline int dump_dev_resume(void)
++{
++ return dump_dev->ops->resume(dump_dev);
++}
+
-+#ifdef CONFIG_COMPAT
-+ err = unregister_ioctl32_conversion(DIOSDUMPDEV);
-+ err |= unregister_ioctl32_conversion(DIOGDUMPDEV);
-+ err |= unregister_ioctl32_conversion(DIOSDUMPLEVEL);
-+ err |= unregister_ioctl32_conversion(DIOGDUMPLEVEL);
-+ err |= unregister_ioctl32_conversion(DIOSDUMPFLAGS);
-+ err |= unregister_ioctl32_conversion(DIOGDUMPFLAGS);
-+ err |= unregister_ioctl32_conversion(DIOSDUMPCOMPRESS);
-+ err |= unregister_ioctl32_conversion(DIOGDUMPCOMPRESS);
-+ err |= unregister_ioctl32_conversion(DIOSTARGETIP);
-+ err |= unregister_ioctl32_conversion(DIOGTARGETIP);
-+ err |= unregister_ioctl32_conversion(DIOSTARGETPORT);
-+ err |= unregister_ioctl32_conversion(DIOGTARGETPORT);
-+ err |= unregister_ioctl32_conversion(DIOSSOURCEPORT);
-+ err |= unregister_ioctl32_conversion(DIOGSOURCEPORT);
-+ err |= unregister_ioctl32_conversion(DIOSETHADDR);
-+ err |= unregister_ioctl32_conversion(DIOGETHADDR);
-+ err |= unregister_ioctl32_conversion(DIOGDUMPOKAY);
-+ err |= unregister_ioctl32_conversion(DIOSDUMPTAKE);
-+ if (err) {
-+ printk(KERN_ERR "LKCD: Unregistering ioctl32 translations failed\n");
-+ }
-+#endif
++static inline int dump_dev_seek(loff_t offset)
++{
++ return dump_dev->ops->seek(dump_dev, offset);
++}
+
-+ /* ignore errors while unregistering -- since can't do anything */
-+ unregister_sysctl_table(sysctl_header);
-+ misc_deregister(&dump_miscdev);
-+ dump_sysrq_unregister();
-+ notifier_chain_unregister(&panic_notifier_list, &panic_block);
-+ dump_function_ptr = NULL;
++static inline int dump_dev_write(void *buf, unsigned long len)
++{
++ return dump_dev->ops->write(dump_dev, buf, len);
+}
+
-+EXPORT_SYMBOL(dump_register_compression);
-+EXPORT_SYMBOL(dump_unregister_compression);
-+EXPORT_SYMBOL(dump_register_device);
-+EXPORT_SYMBOL(dump_unregister_device);
-+EXPORT_SYMBOL(dump_config);
-+EXPORT_SYMBOL(dump_silence_level);
++static inline int dump_dev_ready(void *buf)
++{
++ return dump_dev->ops->ready(dump_dev, buf);
++}
+
-+EXPORT_SYMBOL(__dump_irq_enable);
-+EXPORT_SYMBOL(__dump_irq_restore);
++static inline int dump_dev_ioctl(unsigned int cmd, unsigned long arg)
++{
++ if (!dump_dev || !dump_dev->ops->ioctl)
++ return -EINVAL;
++ return dump_dev->ops->ioctl(dump_dev, cmd, arg);
++}
+
-+MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
-+MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
-+MODULE_LICENSE("GPL");
++extern int dump_register_device(struct dump_dev *);
++extern void dump_unregister_device(struct dump_dev *);
+
-+module_init(dump_init);
-+module_exit(dump_cleanup);
-Index: linux-2.6.10/drivers/dump/dump_scheme.c
++#endif /* _LINUX_DUMPDEV_H */
+Index: linux-2.6.10/include/linux/dump.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_scheme.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_scheme.c 2005-04-05 16:47:53.944204952 +0800
-@@ -0,0 +1,430 @@
-+/*
-+ * Default single stage dump scheme methods
-+ *
-+ * Previously a part of dump_base.c
+--- linux-2.6.10.orig/include/linux/dump.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/linux/dump.h 2005-04-07 18:13:56.662789792 +0800
+@@ -0,0 +1,406 @@
++/*
++ * Kernel header file for Linux crash dumps.
+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ * Split and rewrote LKCD dump scheme to generic dump method
-+ * interfaces
-+ * Derived from original code created by
-+ * Matt Robinson <yakker@sourceforge.net>)
++ * Created by: Matt Robinson (yakker@sgi.com)
++ * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ *
-+ * Contributions from SGI, IBM, HP, MCL, and others.
++ * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
++ * Copyright 2001 - 2002 Matt D. Robinson. All rights reserved.
++ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
+ *
-+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 International Business Machines Corp.
++ * Most of this is the same old stuff from vmdump.h, except now we're
++ * actually a stand-alone driver plugged into the block layer interface,
++ * with the exception that we now allow for compression modes externally
++ * loaded (e.g., someone can come up with their own).
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
-+/*
-+ * Implements the default dump scheme, i.e. single-stage gathering and
-+ * saving of dump data directly to the target device, which operates in
-+ * a push mode, where the dumping system decides what data it saves
-+ * taking into account pre-specified dump config options.
++/* This header file includes all structure definitions for crash dumps. */
++#ifndef _DUMP_H
++#define _DUMP_H
++
++#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
++
++#include <linux/list.h>
++#include <linux/notifier.h>
++#include <linux/dumpdev.h>
++#include <asm/ioctl.h>
++
++/*
++ * Predefine default DUMP_PAGE constants, asm header may override.
+ *
-+ * Aside: The 2-stage dump scheme, where there is a soft-reset between
-+ * the gathering and saving phases, also reuses some of these
-+ * default routines (see dump_overlay.c)
-+ */
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/nmi.h>
-+#include <linux/dump.h>
-+#include "dump_methods.h"
++ * On ia64 discontinuous memory systems it's possible for the memory
++ * banks to stop at 2**12 page alignments, the smallest possible page
++ * size. But the system page size, PAGE_SIZE, is in fact larger.
++ */
++#define DUMP_PAGE_SHIFT PAGE_SHIFT
++#define DUMP_PAGE_MASK PAGE_MASK
++#define DUMP_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
+
-+extern int panic_timeout; /* time before reboot */
++/*
++ * Dump offset changed from 4Kb to 64Kb to support multiple PAGE_SIZE
++ * (kernel page size). Assumption goes that 64K is the highest page size
++ * supported
++ */
+
-+extern void dump_speedo(int);
++#define DUMP_HEADER_OFFSET (1ULL << 16)
+
-+/* Default sequencer used during single stage dumping */
-+/* Also invoked during stage 2 of soft-boot based dumping */
-+int dump_generic_sequencer(void)
-+{
-+ struct dump_data_filter *filter = dump_config.dumper->filter;
-+ int pass = 0, err = 0, save = 0;
-+ int (*action)(unsigned long, unsigned long);
++#define OLDMINORBITS 8
++#define OLDMINORMASK ((1U << OLDMINORBITS) -1)
+
-+ /*
-+ * We want to save the more critical data areas first in
-+ * case we run out of space, encounter i/o failures, or get
-+ * interrupted otherwise and have to give up midway
-+ * So, run through the passes in increasing order
-+ */
-+ for (;filter->selector; filter++, pass++)
-+ {
-+ /* Assumes passes are exclusive (even across dumpers) */
-+ /* Requires care when coding the selection functions */
-+ if ((save = filter->level_mask & dump_config.level))
-+ action = dump_save_data;
-+ else
-+ action = dump_skip_data;
++/* Making DUMP_PAGE_SIZE = PAGE_SIZE, to support dumping on architectures
++ * which support page sizes (PAGE_SIZE) greater than 4KB.
++ * Will it affect ia64 discontinuous memory systems ????
++ */
++#define DUMP_PAGE_SIZE PAGE_SIZE
+
-+ if ((err = dump_iterator(pass, action, filter)) < 0)
-+ break;
++/* thread_info lies at the bottom of the stack (except IA64). */
++#define STACK_START_POSITION(tsk)	((tsk)->thread_info)
++/*
++ * Predefined default memcpy() to use when copying memory to the dump buffer.
++ *
++ * On ia64 there is a heads up function that can be called to let the prom
++ * machine check monitor know that the current activity is risky and it should
++ * ignore the fault (nofault). In this case the ia64 header will redefine this
++ * macro to __dump_memcpy() and use its arch-specific version.
++ */
++#define DUMP_memcpy		memcpy
++#define bzero(a,b)		memset((a), 0, (b))
++
++/* necessary header files */
++#include <asm/dump.h> /* for architecture-specific header */
++
++/*
++ * Size of the buffer that's used to hold:
++ *
++ * 1. the dump header (padded to fill the complete buffer)
++ * 2. the possibly compressed page headers and data
++ *
++ * = 256k for page size >= 64k
++ * = 64k for page size < 64k
++ */
++#if (PAGE_SHIFT >= 16)
++#define DUMP_BUFFER_SIZE (256 * 1024) /* size of dump buffer */
++#else
++#define DUMP_BUFFER_SIZE (64 * 1024) /* size of dump buffer */
++#endif
++
++#define DUMP_HEADER_SIZE DUMP_BUFFER_SIZE
++
++/* standard header definitions */
++#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
++#define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */
++#define DUMP_VERSION_NUMBER 0x8 /* dump version number */
++#define DUMP_PANIC_LEN 0x100 /* dump panic string length */
+
-+ printk("\n %d dump pages %s of %d each in pass %d\n",
-+ err, save ? "saved" : "skipped", (int)DUMP_PAGE_SIZE, pass);
++/* dump levels - type specific stuff added later -- add as necessary */
++#define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */
++#define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */
++#define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */
++#define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */
++#define DUMP_LEVEL_ALL_RAM 0x8 /* dump header, all RAM pages */
++#define DUMP_LEVEL_ALL 0x10 /* dump all memory RAM and firmware */
+
-+ }
+
-+ return (err < 0) ? err : 0;
-+}
++/* dump compression options -- add as necessary */
++#define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */
++#define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */
++#define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */
+
-+static inline struct page *dump_get_page(loff_t loc)
-+{
++/* dump flags - any dump-type specific flags -- add as necessary */
++#define DUMP_FLAGS_NONE		0x0	/* no flags are set for this dump */
++#define DUMP_FLAGS_SOFTBOOT	0x2	/* 2 stage soft-boot based dump */
++#define DUMP_FLAGS_NONDISRUPT	0x1	/* non-disruptive dumping */
+
-+ unsigned long page_index = loc >> PAGE_SHIFT;
++#define DUMP_FLAGS_TARGETMASK 0xf0000000 /* handle special case targets */
++#define DUMP_FLAGS_DISKDUMP 0x80000000 /* dump to local disk */
++#define DUMP_FLAGS_NETDUMP 0x40000000 /* dump over the network */
+
-+ /* todo: complete this to account for ia64/discontig mem */
-+ /* todo: and to check for validity, ram page, no i/o mem etc */
-+ /* need to use pfn/physaddr equiv of kern_addr_valid */
++/* dump header flags -- add as necessary */
++#define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */
++#define DUMP_DH_RAW 0x1 /* raw page (no compression) */
++#define DUMP_DH_COMPRESSED 0x2 /* page is compressed */
++#define DUMP_DH_END 0x4 /* end marker on a full dump */
++#define DUMP_DH_TRUNCATED 0x8 /* dump is incomplete */
++#define DUMP_DH_TEST_PATTERN 0x10 /* dump page is a test pattern */
++#define DUMP_DH_NOT_USED 0x20 /* 1st bit not used in flags */
+
-+ /* Important:
-+ * On ARM/XScale system, the physical address starts from
-+ * PHYS_OFFSET, and it maybe the situation that PHYS_OFFSET != 0.
-+ * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the
-+ * page index starts from PHYS_PFN_OFFSET. When configuring
-+ * filter, filter->start is assigned to 0 in dump_generic_configure.
-+ * Here we want to adjust it by adding PHYS_PFN_OFFSET to it!
-+ */
-+#ifdef CONFIG_ARM
-+ page_index += PHYS_PFN_OFFSET;
-+#endif
-+ if (__dump_page_valid(page_index))
-+ return pfn_to_page(page_index);
-+ else
-+ return NULL;
++/* names for various dump parameters in /proc/kernel */
++#define DUMP_ROOT_NAME "sys/dump"
++#define DUMP_DEVICE_NAME "device"
++#define DUMP_COMPRESS_NAME "compress"
++#define DUMP_LEVEL_NAME "level"
++#define DUMP_FLAGS_NAME "flags"
++#define DUMP_ADDR_NAME "addr"
+
-+}
++#define DUMP_SYSRQ_KEY 'd' /* key to use for MAGIC_SYSRQ key */
+
-+/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
-+/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
-+int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
-+ struct dump_data_filter *filter)
++/* CTL_DUMP names: */
++enum
+{
-+	/* Todo : fix unit, type */
-+	loff_t loc, start, end;
-+	int i, count = 0, err = 0;
-+	struct page *page;
++	CTL_DUMP_DEVICE=1,
++	CTL_DUMP_COMPRESS=2,
++	CTL_DUMP_LEVEL=3,
++	CTL_DUMP_FLAGS=4,
++	CTL_DUMP_ADDR=5,
++	CTL_DUMP_TEST=6,
++};
+
-+ /* Todo: Add membanks code */
-+ /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
+
-+ for (i = 0; i < filter->num_mbanks; i++) {
-+ start = filter->start[i];
-+ end = filter->end[i];
-+ for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
-+ dump_config.dumper->curr_loc = loc;
-+ page = dump_get_page(loc);
-+ if (page && filter->selector(pass,
-+ (unsigned long) page, DUMP_PAGE_SIZE)) {
-+ if ((err = action((unsigned long)page,
-+ DUMP_PAGE_SIZE))) {
-+ printk("dump_page_iterator: err %d for "
-+ "loc 0x%llx, in pass %d\n",
-+ err, loc, pass);
-+ return err ? err : count;
-+ } else
-+ count++;
-+ }
-+ }
-+ }
++/* page size for gzip compression -- buffered slightly beyond hardware PAGE_SIZE used by DUMP */
++#define DUMP_DPC_PAGE_SIZE (DUMP_PAGE_SIZE + 512)
+
-+ return err ? err : count;
-+}
++/* dump ioctl() control options */
++#define DIOSDUMPDEV _IOW('p', 0xA0, unsigned int) /* set the dump device */
++#define DIOGDUMPDEV _IOR('p', 0xA1, unsigned int) /* get the dump device */
++#define DIOSDUMPLEVEL _IOW('p', 0xA2, unsigned int) /* set the dump level */
++#define DIOGDUMPLEVEL _IOR('p', 0xA3, unsigned int) /* get the dump level */
++#define DIOSDUMPFLAGS _IOW('p', 0xA4, unsigned int) /* set the dump flag parameters */
++#define DIOGDUMPFLAGS _IOR('p', 0xA5, unsigned int) /* get the dump flag parameters */
++#define DIOSDUMPCOMPRESS _IOW('p', 0xA6, unsigned int) /* set the dump compress level */
++#define DIOGDUMPCOMPRESS _IOR('p', 0xA7, unsigned int) /* get the dump compress level */
+
-+/*
-+ * Base function that saves the selected block of data in the dump
-+ * Action taken when iterator decides that data needs to be saved
-+ */
-+int dump_generic_save_data(unsigned long loc, unsigned long sz)
-+{
-+ void *buf;
-+ void *dump_buf = dump_config.dumper->dump_buf;
-+ int left, bytes, ret;
++/* these ioctls are used only by netdump module */
++#define DIOSTARGETIP _IOW('p', 0xA8, unsigned int) /* set the target m/c's ip */
++#define DIOGTARGETIP _IOR('p', 0xA9, unsigned int) /* get the target m/c's ip */
++#define DIOSTARGETPORT _IOW('p', 0xAA, unsigned int) /* set the target m/c's port */
++#define DIOGTARGETPORT _IOR('p', 0xAB, unsigned int) /* get the target m/c's port */
++#define DIOSSOURCEPORT _IOW('p', 0xAC, unsigned int) /* set the source m/c's port */
++#define DIOGSOURCEPORT _IOR('p', 0xAD, unsigned int) /* get the source m/c's port */
++#define DIOSETHADDR _IOW('p', 0xAE, unsigned int) /* set ethernet address */
++#define DIOGETHADDR _IOR('p', 0xAF, unsigned int) /* get ethernet address */
++#define DIOGDUMPOKAY _IOR('p', 0xB0, unsigned int) /* check if dump is configured */
++#define DIOSDUMPTAKE _IOW('p', 0xB1, unsigned int) /* Take a manual dump */
+
-+ if ((ret = dump_add_data(loc, sz))) {
-+ return ret;
-+ }
-+ buf = dump_config.dumper->curr_buf;
++/*
++ * Structure: __dump_header
++ * Function: This is the header dumped at the top of every valid crash
++ * dump.
++ */
++struct __dump_header {
++ /* the dump magic number -- unique to verify dump is valid */
++ u64 dh_magic_number;
+
-+ /* If we've filled up the buffer write it out */
-+ if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
-+ bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
-+ if (bytes < DUMP_BUFFER_SIZE) {
-+ printk("dump_write_buffer failed %d\n", bytes);
-+ return bytes ? -ENOSPC : bytes;
-+ }
++ /* the version number of this dump */
++ u32 dh_version;
+
-+ left -= bytes;
-+
-+ /* -- A few chores to do from time to time -- */
-+ dump_config.dumper->count++;
++ /* the size of this header (in case we can't read it) */
++ u32 dh_header_size;
+
-+ if (!(dump_config.dumper->count & 0x3f)) {
-+ /* Update the header every one in a while */
-+ memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
-+ if ((ret = dump_update_header()) < 0) {
-+ /* issue warning */
-+ return ret;
-+ }
-+ printk(".");
++ /* the level of this dump (just a header?) */
++ u32 dh_dump_level;
+
-+ touch_nmi_watchdog();
-+ } else if (!(dump_config.dumper->count & 0x7)) {
-+ /* Show progress so the user knows we aren't hung */
-+ dump_speedo(dump_config.dumper->count >> 3);
-+ }
-+ /* Todo: Touch/Refresh watchdog */
++ /*
++ * We assume dump_page_size to be 4K in every case.
++ * Store here the configurable system page size (4K, 8K, 16K, etc.)
++ */
++ u32 dh_page_size;
+
-+ /* --- Done with periodic chores -- */
++ /* the size of all physical memory */
++ u64 dh_memory_size;
+
-+ /*
-+ * extra bit of copying to simplify verification
-+ * in the second kernel boot based scheme
-+ */
-+ memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
-+ DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
++ /* the start of physical memory */
++ u64 dh_memory_start;
+
-+ /* now adjust the leftover bits back to the top of the page */
-+ /* this case would not arise during stage 2 (passthru) */
-+ memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
-+ if (left) {
-+ memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
-+ }
-+ buf -= DUMP_BUFFER_SIZE;
-+ dump_config.dumper->curr_buf = buf;
-+ }
-+
-+ return 0;
-+}
++ /* the end of physical memory */
++ u64 dh_memory_end;
+
-+int dump_generic_skip_data(unsigned long loc, unsigned long sz)
-+{
-+ /* dummy by default */
-+ return 0;
-+}
++ /* the number of hardware/physical pages in this dump specifically */
++ u32 dh_num_dump_pages;
+
-+/*
-+ * Common low level routine to write a buffer to current dump device
-+ * Expects checks for space etc to have been taken care of by the caller
-+ * Operates serially at the moment for simplicity.
-+ * TBD/Todo: Consider batching for improved throughput
-+ */
-+int dump_ll_write(void *buf, unsigned long len)
-+{
-+ long transferred = 0, last_transfer = 0;
-+ int ret = 0;
++ /* the panic string, if available */
++ char dh_panic_string[DUMP_PANIC_LEN];
+
-+ /* make sure device is ready */
-+ while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
-+ if (ret < 0) {
-+ printk("dump_dev_ready failed !err %d\n", ret);
-+ return ret;
-+ }
++ /* timeval depends on architecture, two long values */
++ struct {
++ u64 tv_sec;
++ u64 tv_usec;
++ } dh_time; /* the time of the system crash */
+
-+ while (len) {
-+ if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
-+ ret = last_transfer;
-+ printk("dump_dev_write failed !err %d\n",
-+ ret);
-+ break;
-+ }
-+ /* wait till complete */
-+ while ((ret = dump_dev_ready(buf)) == -EAGAIN)
-+ cpu_relax();
++ /* the NEW utsname (uname) information -- in character form */
++ /* we do this so we don't have to include utsname.h */
++ /* plus it helps us be more architecture independent */
++ /* now maybe one day soon they'll make the [65] a #define! */
++ char dh_utsname_sysname[65];
++ char dh_utsname_nodename[65];
++ char dh_utsname_release[65];
++ char dh_utsname_version[65];
++ char dh_utsname_machine[65];
++ char dh_utsname_domainname[65];
+
-+ if (ret < 0) {
-+ printk("i/o failed !err %d\n", ret);
-+ break;
-+ }
++ /* the address of current task (OLD = void *, NEW = u64) */
++ u64 dh_current_task;
+
-+ len -= last_transfer;
-+ buf += last_transfer;
-+ transferred += last_transfer;
-+ }
-+ return (ret < 0) ? ret : transferred;
-+}
++ /* what type of compression we're using in this dump (if any) */
++ u32 dh_dump_compress;
+
-+/* default writeout routine for single dump device */
-+/* writes out the dump data ensuring enough space is left for the end marker */
-+int dump_generic_write_buffer(void *buf, unsigned long len)
-+{
-+ long written = 0;
-+ int err = 0;
++ /* any additional flags */
++ u32 dh_dump_flags;
+
-+ /* check for space */
-+ if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
-+ 2*DUMP_BUFFER_SIZE)) < 0) {
-+ printk("dump_write_buffer: insuff space after offset 0x%llx\n",
-+ dump_config.dumper->curr_offset);
-+ return err;
-+ }
-+ /* alignment check would happen as a side effect of this */
-+ if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
-+ return err;
++	/* the device this dump was (or will be) written to */
++ u32 dh_dump_device;
++} __attribute__((packed));
+
-+ written = dump_ll_write(buf, len);
++/*
++ * Structure: __dump_page
++ * Function: To act as the header associated to each physical page of
++ * memory saved in the system crash dump. This allows for
++ * easy reassembly of each crash dump page. The address bits
++ * are split to make things easier for 64-bit/32-bit system
++ * conversions.
++ *
++ * dp_byte_offset and dp_page_index are landmarks that are helpful when
++ * looking at a hex dump of /dev/vmdump.
++ */
++struct __dump_page {
++ /* the address of this dump page */
++ u64 dp_address;
+
-+ /* all or none */
++ /* the size of this dump page */
++ u32 dp_size;
+
-+ if (written < len)
-+ written = written ? -ENOSPC : written;
-+ else
-+ dump_config.dumper->curr_offset += len;
++ /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
++ u32 dp_flags;
++} __attribute__((packed));
+
-+ return written;
-+}
++/*
++ * Structure: __lkcdinfo
++ * Function: This structure contains information needed for the lkcdutils
++ * package (particularly lcrash) to determine what information is
++ * associated to this kernel, specifically.
++ */
++struct __lkcdinfo {
++ int arch;
++ int ptrsz;
++ int byte_order;
++ int linux_release;
++ int page_shift;
++ int page_size;
++ u64 page_mask;
++ u64 page_offset;
++ int stack_offset;
++};
+
-+int dump_generic_configure(unsigned long devid)
-+{
-+ struct dump_dev *dev = dump_config.dumper->dev;
-+ struct dump_data_filter *filter;
-+ void *buf;
-+ int ret = 0;
++#ifdef __KERNEL__
+
-+ /* Allocate the dump buffer and initialize dumper state */
-+ /* Assume that we get aligned addresses */
-+ if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
-+ return -ENOMEM;
++/*
++ * Structure: __dump_compress
++ * Function: This is what an individual compression mechanism can use
++ * to plug in their own compression techniques. It's always
++ * best to build these as individual modules so that people
++ * can put in whatever they want.
++ */
++struct __dump_compress {
++ /* the list_head structure for list storage */
++ struct list_head list;
+
-+ if ((unsigned long)buf & (PAGE_SIZE - 1)) {
-+ /* sanity check for page aligned address */
-+ dump_free_mem(buf);
-+ return -ENOMEM; /* fixme: better error code */
-+ }
++ /* the type of compression to use (DUMP_COMPRESS_XXX) */
++ int compress_type;
++ const char *compress_name;
+
-+ /* Initialize the rest of the fields */
-+ dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
-+ dumper_reset();
++ /* the compression function to call */
++ u32 (*compress_func)(const u8 *, u32, u8 *, u32, unsigned long);
++};
+
-+ /* Open the dump device */
-+ if (!dev)
-+ return -ENODEV;
++/* functions for dump compression registration */
++extern void dump_register_compression(struct __dump_compress *);
++extern void dump_unregister_compression(int);
+
-+ if ((ret = dev->ops->open(dev, devid))) {
-+ return ret;
-+ }
++/*
++ * Structure dump_mbank[]:
++ *
++ * For CONFIG_DISCONTIGMEM systems this array specifies the
++ * memory banks/chunks that need to be dumped after a panic.
++ *
++ * For classic systems it specifies a single set of pages from
++ * 0 to max_mapnr.
++ */
++struct __dump_mbank {
++ u64 start;
++ u64 end;
++ int type;
++ int pad1;
++ long pad2;
++};
+
-+ /* Initialise the memory ranges in the dump filter */
-+ for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
-+ if (!filter->start[0] && !filter->end[0]) {
-+ pg_data_t *pgdat;
-+ int i = 0;
-+ for_each_pgdat(pgdat) {
-+ filter->start[i] =
-+ (loff_t)pgdat->node_start_pfn << PAGE_SHIFT;
-+ filter->end[i] =
-+ (loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT;
-+ i++;
-+ }
-+ filter->num_mbanks = i;
-+ }
-+ }
++#define DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY 1
++#define DUMP_MBANK_TYPE_OTHER 2
+
-+ return 0;
++#define MAXCHUNKS 256
++extern int dump_mbanks;
++extern struct __dump_mbank dump_mbank[MAXCHUNKS];
++
++/* notification event codes */
++#define DUMP_BEGIN 0x0001 /* dump beginning */
++#define DUMP_END 0x0002 /* dump ending */
++
++/* Scheduler soft spin control.
++ *
++ * 0 - no dump in progress
++ * 1 - cpu0 is dumping, ...
++ */
++extern unsigned long dump_oncpu;
++extern void dump_execute(const char *, const struct pt_regs *);
++
++/*
++ * Notifier list for kernel code which wants to be called
++ * at kernel dump.
++ */
++extern struct notifier_block *dump_notifier_list;
++static inline int register_dump_notifier(struct notifier_block *nb)
++{
++ return notifier_chain_register(&dump_notifier_list, nb);
++}
++static inline int unregister_dump_notifier(struct notifier_block * nb)
++{
++ return notifier_chain_unregister(&dump_notifier_list, nb);
+}
+
-+int dump_generic_unconfigure(void)
++extern void (*dump_function_ptr)(const char *, const struct pt_regs *);
++static inline void dump(char * str, struct pt_regs * regs)
+{
-+ struct dump_dev *dev = dump_config.dumper->dev;
-+ void *buf = dump_config.dumper->dump_buf;
-+ int ret = 0;
++ if (dump_function_ptr)
++ dump_function_ptr(str, regs);
++}
+
-+ pr_debug("Generic unconfigure\n");
-+ /* Close the dump device */
-+ if (dev && (ret = dev->ops->release(dev)))
-+ return ret;
++/*
++ * Common Arch Specific Functions should be declared here.
++ * This allows the C compiler to detect discrepancies.
++ */
++extern void __dump_open(void);
++extern void __dump_cleanup(void);
++extern void __dump_clean_irq_state(void);
++extern void __dump_init(u64);
++extern void __dump_save_regs(struct pt_regs *, const struct pt_regs *);
++extern void __dump_save_context(int cpu, const struct pt_regs *, struct task_struct *tsk);
++extern int __dump_configure_header(const struct pt_regs *);
++extern int __dump_irq_enable(void);
++extern void __dump_irq_restore(void);
++extern int __dump_page_valid(unsigned long index);
++#ifdef CONFIG_SMP
++extern void __dump_save_other_cpus(void);
++#else
++#define __dump_save_other_cpus()
++#endif
+
-+ printk("Closed dump device\n");
-+
-+ if (buf)
-+ dump_free_mem((buf - DUMP_PAGE_SIZE));
++extern int manual_handle_crashdump(void);
+
-+ dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
-+ pr_debug("Released dump buffer\n");
++/* to track all used (compound + zero order) pages */
++#define PageInuse(p) (PageCompound(p) || page_count(p))
+
-+ return 0;
-+}
++#endif /* __KERNEL__ */
+
-+#ifdef CONFIG_DISCONTIGMEM
++#else /* !CONFIG_CRASH_DUMP */
++
++/* If not configured then make code disappear! */
++#define register_dump_watchdog(x) do { } while(0)
++#define unregister_dump_watchdog(x) do { } while(0)
++#define register_dump_notifier(x) do { } while(0)
++#define unregister_dump_notifier(x) do { } while(0)
++#define dump_in_progress() 0
++#define dump(x, y) do { } while(0)
++
++#endif /* !CONFIG_CRASH_DUMP */
++
++#endif /* _DUMP_H */
+Index: linux-2.6.10/include/linux/miscdevice.h
+===================================================================
+--- linux-2.6.10.orig/include/linux/miscdevice.h 2004-12-25 05:34:58.000000000 +0800
++++ linux-2.6.10/include/linux/miscdevice.h 2005-04-07 18:13:56.660790096 +0800
+@@ -25,6 +25,7 @@
+ #define MICROCODE_MINOR 184
+ #define MWAVE_MINOR 219 /* ACP/Mwave Modem */
+ #define MPT_MINOR 220
++#define CRASH_DUMP_MINOR 230 /* LKCD */
+ #define MISC_DYNAMIC_MINOR 255
+
+ #define TUN_MINOR 200
+Index: linux-2.6.10/include/asm-um/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-um/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-um/kerntypes.h 2005-04-07 18:13:56.636793744 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-um/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+void dump_reconfigure_mbanks(void)
-+{
-+ pg_data_t *pgdat;
-+ loff_t start, end, loc, loc_end;
-+ int i=0;
-+ struct dump_data_filter *filter = dump_config.dumper->filter;
++/* Usermode-Linux-specific header files */
++#ifndef _UM_KERNTYPES_H
++#define _UM_KERNTYPES_H
+
-+ for_each_pgdat(pgdat) {
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+ start = (loff_t)(pgdat->node_start_pfn << PAGE_SHIFT);
-+ end = ((loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT);
-+ for(loc = start; loc < end; loc += (DUMP_PAGE_SIZE)) {
++#endif /* _UM_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-generic/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-generic/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-generic/kerntypes.h 2005-04-07 18:13:56.716781584 +0800
+@@ -0,0 +1,20 @@
++/*
++ * asm-generic/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+ if(!(__dump_page_valid(loc >> PAGE_SHIFT)))
-+ continue;
++/* Arch-independent header files */
++#ifndef _GENERIC_KERNTYPES_H
++#define _GENERIC_KERNTYPES_H
+
-+ /* We found a valid page. This is the start */
-+ filter->start[i] = loc;
++#include <linux/pci.h>
+
-+ /* Now loop here till you find the end */
-+ for(loc_end = loc; loc_end < end; loc_end += (DUMP_PAGE_SIZE)) {
-+
-+ if(__dump_page_valid(loc_end >> PAGE_SHIFT)) {
-+ /* This page could very well be the last page */
-+ filter->end[i] = loc_end;
-+ continue;
-+ }
-+ break;
-+ }
-+ i++;
-+ loc = loc_end;
-+ }
-+ }
-+ filter->num_mbanks = i;
++#endif /* _GENERIC_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-sparc/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-sparc/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-sparc/kerntypes.h 2005-04-07 18:13:56.739778088 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-sparc/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+ /* Propagate memory bank information to other filters */
-+ for (filter = dump_config.dumper->filter, filter++ ;filter->selector; filter++) {
-+ for(i = 0; i < dump_config.dumper->filter->num_mbanks; i++) {
-+ filter->start[i] = dump_config.dumper->filter->start[i];
-+ filter->end[i] = dump_config.dumper->filter->end[i];
-+ filter->num_mbanks = dump_config.dumper->filter->num_mbanks;
-+ }
-+ }
-+}
-+#endif
++/* SPARC-specific header files */
++#ifndef _SPARC_KERNTYPES_H
++#define _SPARC_KERNTYPES_H
+
-+/* Set up the default dump scheme */
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+struct dump_scheme_ops dump_scheme_singlestage_ops = {
-+ .configure = dump_generic_configure,
-+ .unconfigure = dump_generic_unconfigure,
-+ .sequencer = dump_generic_sequencer,
-+ .iterator = dump_page_iterator,
-+ .save_data = dump_generic_save_data,
-+ .skip_data = dump_generic_skip_data,
-+ .write_buffer = dump_generic_write_buffer,
-+};
++#endif /* _SPARC_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-arm/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-arm/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-arm/kerntypes.h 2005-04-07 18:13:56.739778088 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-arm/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+struct dump_scheme dump_scheme_singlestage = {
-+ .name = "single-stage",
-+ .ops = &dump_scheme_singlestage_ops
-+};
++/* ARM-specific header files */
++#ifndef _ARM_KERNTYPES_H
++#define _ARM_KERNTYPES_H
+
-+/* The single stage dumper comprising all these */
-+struct dumper dumper_singlestage = {
-+ .name = "single-stage",
-+ .scheme = &dump_scheme_singlestage,
-+ .fmt = &dump_fmt_lcrash,
-+ .compress = &dump_none_compression,
-+ .filter = dump_filter_table,
-+ .dev = NULL,
-+};
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-Index: linux-2.6.10/drivers/dump/dump_gzip.c
++#endif /* _ARM_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-sparc64/kerntypes.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_gzip.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_gzip.c 2005-04-05 16:47:53.937206016 +0800
-@@ -0,0 +1,174 @@
+--- linux-2.6.10.orig/include/asm-sparc64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-sparc64/kerntypes.h 2005-04-07 18:13:56.714781888 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * GZIP Compression functions for kernel crash dumps.
++ * asm-sparc64/kerntypes.h
+ *
-+ * Created by: Matt Robinson (yakker@sourceforge.net)
-+ * Copyright 2001 Matt D. Robinson. All rights reserved.
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
+ *
-+ * This code is released under version 2 of the GNU GPL.
++ * This source code is released under the GNU GPL.
+ */
+
-+/* header files */
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/dump.h>
-+#include <linux/zlib.h>
-+#include <linux/vmalloc.h>
-+
-+static void *deflate_workspace;
-+static unsigned long workspace_paddr[2];
++/* SPARC64-specific header files */
++#ifndef _SPARC64_KERNTYPES_H
++#define _SPARC64_KERNTYPES_H
+
-+static u8 *safety_buffer;
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
++#endif /* _SPARC64_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-mips64/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-mips64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-mips64/kerntypes.h 2005-04-07 18:13:56.740777936 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * Name: dump_compress_gzip()
-+ * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the.
-+ * deflate functions similar to what's used in PPP).
++ * asm-mips64/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
+ */
-+static u32
-+dump_compress_gzip(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
-+ unsigned long loc)
-+{
-+ /* error code and dump stream */
-+ int err;
-+ z_stream dump_stream;
-+ struct page *pg = (struct page *)loc;
-+ unsigned long paddr = page_to_pfn(pg) << PAGE_SHIFT;
-+ static int warning = 0;
+
-+ dump_stream.workspace = deflate_workspace;
-+ if ((paddr == workspace_paddr[0]) || (paddr == workspace_paddr[1])) {
-+ /*
-+ * This page belongs to deflate_workspace used as temporary
-+ * buffer for compression. Hence, dump them without compression.
-+ */
-+ return(0);
-+ }
-+ if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) {
-+ /* fall back to RLE compression */
-+ printk("dump_compress_gzip(): zlib_deflateInit() "
-+ "failed (%d)!\n", err);
-+ return 0;
-+ }
++/* MIPS64-specific header files */
++#ifndef _MIPS64_KERNTYPES_H
++#define _MIPS64_KERNTYPES_H
+
-+ /* copy the old page to the safety buffer */
-+ if (oldsize <= DUMP_PAGE_SIZE) {
-+ memcpy(safety_buffer, old, oldsize);
-+ dump_stream.next_in = (u8 *) safety_buffer;
-+ } else {
-+ if (!warning) {
-+ printk("dump_compress_gzip oversize input: %d\n",
-+ oldsize);
-+ warning++;
-+ }
-+ dump_stream.next_in = (u8 *) old;
-+ }
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+ /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */
-+ dump_stream.avail_in = oldsize;
++#endif /* _MIPS64_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-v850/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-v850/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-v850/kerntypes.h 2005-04-07 18:13:56.695784776 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-v850/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+ /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */
-+ dump_stream.next_out = new;
-+ dump_stream.avail_out = newsize;
++/* V850-specific header files */
++#ifndef _V850_KERNTYPES_H
++#define _V850_KERNTYPES_H
+
-+ /* deflate the page -- check for error */
-+ err = zlib_deflate(&dump_stream, Z_FINISH);
-+ if (err != Z_STREAM_END) {
-+ /* zero is return code here */
-+ (void)zlib_deflateEnd(&dump_stream);
-+ printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n",
-+ err);
-+ return 0;
-+ }
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+ /* let's end the deflated compression stream */
-+ if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) {
-+ printk("dump_compress_gzip(): zlib_deflateEnd() "
-+ "failed (%d)!\n", err);
-+ }
++#endif /* _V850_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-sh/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-sh/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-sh/kerntypes.h 2005-04-07 18:13:56.667789032 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-sh/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+ /* return the compressed byte total (if it's smaller) */
-+ if (dump_stream.total_out >= oldsize) {
-+ return oldsize;
-+ }
-+ return dump_stream.total_out;
-+}
++/* Super-H-specific header files */
++#ifndef _SH_KERNTYPES_H
++#define _SH_KERNTYPES_H
+
-+/* setup the gzip compression functionality */
-+static struct __dump_compress dump_gzip_compression = {
-+ .compress_type = DUMP_COMPRESS_GZIP,
-+ .compress_func = dump_compress_gzip,
-+ .compress_name = "GZIP",
-+};
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
++#endif /* _SH_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-alpha/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-alpha/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-alpha/kerntypes.h 2005-04-07 18:13:56.666789184 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * Name: dump_compress_gzip_init()
-+ * Func: Initialize gzip as a compression mechanism.
++ * asm-alpha/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
+ */
-+static int __init
-+dump_compress_gzip_init(void)
-+{
-+ struct page *pg;
+
-+ deflate_workspace = vmalloc(zlib_deflate_workspacesize());
-+ if (!deflate_workspace) {
-+ printk("dump_compress_gzip_init(): Failed to "
-+ "alloc %d bytes for deflate workspace\n",
-+ zlib_deflate_workspacesize());
-+ return -ENOMEM;
-+ }
-+ /*
-+ * Need to find pages (workspace) that are used for compression.
-+ * Even though zlib_deflate_workspacesize() is 64 pages (approximately)
-+ * depends on the arch, we used only 2 pages. Hence, get the physical
-+ * addresses for these 2 pages and used them to not to compress those
-+ * pages.
-+ */
-+ pg = vmalloc_to_page(deflate_workspace);
-+ workspace_paddr[0] = page_to_pfn(pg) << PAGE_SHIFT;
-+ pg = vmalloc_to_page(deflate_workspace + DUMP_PAGE_SIZE);
-+ workspace_paddr[1] = page_to_pfn(pg) << PAGE_SHIFT;
++/* Alpha-specific header files */
++#ifndef _ALPHA_KERNTYPES_H
++#define _ALPHA_KERNTYPES_H
+
-+ /* Eliminate the possibility of real data getting a compression
-+ * failure.
-+ */
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+ if (!(safety_buffer = (void *)__get_free_pages(GFP_KERNEL,
-+ get_order(DUMP_PAGE_SIZE))))
-+ return -ENOMEM;
++#endif /* _ALPHA_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-ppc/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ppc/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-ppc/kerntypes.h 2005-04-07 18:13:56.665789336 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-ppc/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+ printk("dump gzip safety buffer: %p, %d\n", safety_buffer,
-+ (int)DUMP_PAGE_SIZE);
++/* PowerPC-specific header files */
++#ifndef _PPC_KERNTYPES_H
++#define _PPC_KERNTYPES_H
+
-+ dump_register_compression(&dump_gzip_compression);
-+ return 0;
-+}
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
++#endif /* _PPC_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-m68knommu/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-m68knommu/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-m68knommu/kerntypes.h 2005-04-07 18:13:56.694784928 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * Name: dump_compress_gzip_cleanup()
-+ * Func: Remove gzip as a compression mechanism.
++ * asm-m68knommu/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
+ */
-+static void __exit
-+dump_compress_gzip_cleanup(void)
-+{
-+ vfree(deflate_workspace);
-+ if (safety_buffer) {
-+ free_pages((unsigned long)safety_buffer,
-+ get_order(DUMP_PAGE_SIZE));
-+ safety_buffer = NULL;
-+ }
+
-+ dump_unregister_compression(DUMP_COMPRESS_GZIP);
-+}
++/* m68k/no-MMU-specific header files */
++#ifndef _M68KNOMMU_KERNTYPES_H
++#define _M68KNOMMU_KERNTYPES_H
+
-+/* module initialization */
-+module_init(dump_compress_gzip_init);
-+module_exit(dump_compress_gzip_cleanup);
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
-+MODULE_DESCRIPTION("Gzip compression module for crash dump driver");
-Index: linux-2.6.10/drivers/dump/dump_filters.c
++#endif /* _M68KNOMMU_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-x86_64/hw_irq.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_filters.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_filters.c 2005-04-05 16:47:53.942205256 +0800
-@@ -0,0 +1,143 @@
+--- linux-2.6.10.orig/include/asm-x86_64/hw_irq.h 2004-12-25 05:35:39.000000000 +0800
++++ linux-2.6.10/include/asm-x86_64/hw_irq.h 2005-04-07 18:13:56.705783256 +0800
+@@ -34,7 +34,6 @@
+
+ #define IA32_SYSCALL_VECTOR 0x80
+
+-
+ /*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+@@ -55,6 +54,7 @@
+ #define TASK_MIGRATION_VECTOR 0xfb
+ #define CALL_FUNCTION_VECTOR 0xfa
+ #define KDB_VECTOR 0xf9
++#define DUMP_VECTOR 0xf8
+
+ #define THERMAL_APIC_VECTOR 0xf0
+
+Index: linux-2.6.10/include/asm-x86_64/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-x86_64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-x86_64/kerntypes.h 2005-04-07 18:13:56.696784624 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * Default filters to select data to dump for various passes.
-+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
-+ * Split and rewrote default dump selection logic to generic dump
-+ * method interfaces
-+ * Derived from a portion of dump_base.c created by
-+ * Matt Robinson <yakker@sourceforge.net>)
++ * asm-x86_64/kerntypes.h
+ *
-+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
-+ * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
-+ * Copyright (C) 2002 International Business Machines Corp.
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
+ *
-+ * Used during single-stage dumping and during stage 1 of the 2-stage scheme
-+ * (Stage 2 of the 2-stage scheme uses the fully transparent filters
-+ * i.e. passthru filters in dump_overlay.c)
++ * This source code is released under the GNU GPL.
++ */
++
++/* x86_64-specific header files */
++#ifndef _X86_64_KERNTYPES_H
++#define _X86_64_KERNTYPES_H
++
++/* Use the default */
++#include <asm-generic/kerntypes.h>
++
++#endif /* _X86_64_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-x86_64/dump.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-x86_64/dump.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-x86_64/dump.h 2005-04-07 18:13:56.696784624 +0800
+@@ -0,0 +1,93 @@
++/*
++ * Kernel header file for Linux crash dumps.
+ *
-+ * Future: Custom selective dump may involve a different set of filters.
++ * Created by: Matt Robinson (yakker@sgi.com)
+ *
++ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
++ * x86_64 lkcd port Sachin Sant ( sachinp@in.ibm.com)
+ * This code is released under version 2 of the GNU GPL.
+ */
+
-+#include <linux/kernel.h>
-+#include <linux/bootmem.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/dump.h>
-+#include "dump_methods.h"
++/* This header file holds the architecture specific crash dump header */
++#ifndef _ASM_DUMP_H
++#define _ASM_DUMP_H
+
-+#define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */
-+static unsigned long bootmap_pages;
++/* necessary header files */
++#include <asm/ptrace.h> /* for pt_regs */
++#include <linux/threads.h>
++
++/* definitions */
++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
++#define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */
++
++
++/*
++ * Structure: dump_header_asm_t
++ * Function: This is the header for architecture-specific stuff. It
++ * follows right after the dump header.
++ */
++struct __dump_header_asm {
+
-+/* Copied from mm/bootmem.c - FIXME */
-+/* return the number of _pages_ that will be allocated for the boot bitmap */
-+void dump_calc_bootmap_pages (void)
-+{
-+ unsigned long mapsize;
-+ unsigned long pages = num_physpages;
++ /* the dump magic number -- unique to verify dump is valid */
++ uint64_t dha_magic_number;
+
-+ mapsize = (pages+7)/8;
-+ mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
-+ mapsize >>= PAGE_SHIFT;
-+ bootmap_pages = mapsize + DUMP_PFN_SAFETY_MARGIN + 1;
-+}
++ /* the version number of this dump */
++ uint32_t dha_version;
+
++ /* the size of this header (in case we can't read it) */
++ uint32_t dha_header_size;
+
-+/* temporary */
-+extern unsigned long min_low_pfn;
++ /* the dump registers */
++ struct pt_regs dha_regs;
+
++ /* smp specific */
++ uint32_t dha_smp_num_cpus;
++ int dha_dumping_cpu;
++ struct pt_regs dha_smp_regs[NR_CPUS];
++ uint64_t dha_smp_current_task[NR_CPUS];
++ uint64_t dha_stack[NR_CPUS];
++ uint64_t dha_stack_ptr[NR_CPUS];
++} __attribute__((packed));
+
-+int dump_low_page(struct page *p)
++#ifdef __KERNEL__
++static inline void get_current_regs(struct pt_regs *regs)
+{
-+ return ((page_to_pfn(p) >= min_low_pfn) &&
-+ (page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
++ unsigned seg;
++ __asm__ __volatile__("movq %%r15,%0" : "=m"(regs->r15));
++ __asm__ __volatile__("movq %%r14,%0" : "=m"(regs->r14));
++ __asm__ __volatile__("movq %%r13,%0" : "=m"(regs->r13));
++ __asm__ __volatile__("movq %%r12,%0" : "=m"(regs->r12));
++ __asm__ __volatile__("movq %%r11,%0" : "=m"(regs->r11));
++ __asm__ __volatile__("movq %%r10,%0" : "=m"(regs->r10));
++ __asm__ __volatile__("movq %%r9,%0" : "=m"(regs->r9));
++ __asm__ __volatile__("movq %%r8,%0" : "=m"(regs->r8));
++ __asm__ __volatile__("movq %%rbx,%0" : "=m"(regs->rbx));
++ __asm__ __volatile__("movq %%rcx,%0" : "=m"(regs->rcx));
++ __asm__ __volatile__("movq %%rdx,%0" : "=m"(regs->rdx));
++ __asm__ __volatile__("movq %%rsi,%0" : "=m"(regs->rsi));
++ __asm__ __volatile__("movq %%rdi,%0" : "=m"(regs->rdi));
++ __asm__ __volatile__("movq %%rbp,%0" : "=m"(regs->rbp));
++ __asm__ __volatile__("movq %%rax,%0" : "=m"(regs->rax));
++ __asm__ __volatile__("movq %%rsp,%0" : "=m"(regs->rsp));
++ __asm__ __volatile__("movl %%ss, %0" :"=r"(seg));
++ regs->ss = (unsigned long)seg;
++ __asm__ __volatile__("movl %%cs, %0" :"=r"(seg));
++ regs->cs = (unsigned long)seg;
++ __asm__ __volatile__("pushfq; popq %0" :"=m"(regs->eflags));
++ regs->rip = (unsigned long)current_text_addr();
++
+}
+
-+static inline int kernel_page(struct page *p)
-+{
-+ /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
-+ return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
-+}
++extern volatile int dump_in_progress;
++extern struct __dump_header_asm dump_header_asm;
+
-+static inline int user_page(struct page *p)
-+{
-+ return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
-+}
++#ifdef CONFIG_SMP
+
-+static inline int unreferenced_page(struct page *p)
-+{
-+ return !PageInuse(p) && !PageReserved(p);
-+}
+
++extern void dump_send_ipi(void);
++#else
++#define dump_send_ipi() do { } while(0)
++#endif
++#endif /* __KERNEL__ */
+
-+/* loc marks the beginning of a range of pages */
-+int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
-+{
-+ struct page *page = (struct page *)loc;
-+ /* if any of the pages is a kernel page, select this set */
-+ while (sz) {
-+ if (dump_low_page(page) || kernel_page(page))
-+ return 1;
-+ sz -= PAGE_SIZE;
-+ page++;
-+ }
-+ return 0;
-+}
++#endif /* _ASM_DUMP_H */
+Index: linux-2.6.10/include/asm-x86_64/kmap_types.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-x86_64/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
++++ linux-2.6.10/include/asm-x86_64/kmap_types.h 2005-04-07 18:13:56.710782496 +0800
+@@ -13,7 +13,8 @@
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+- KM_TYPE_NR
++ KM_DUMP,
++ KM_TYPE_NR,
+ };
+
+ #endif
+Index: linux-2.6.10/include/asm-x86_64/smp.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-x86_64/smp.h 2004-12-25 05:33:48.000000000 +0800
++++ linux-2.6.10/include/asm-x86_64/smp.h 2005-04-07 18:13:56.712782192 +0800
+@@ -41,6 +41,7 @@
+ extern int pic_mode;
+ extern int smp_num_siblings;
+ extern void smp_flush_tlb(void);
++extern void dump_send_ipi(void);
+ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+ extern void smp_send_reschedule(int cpu);
+ extern void smp_invalidate_rcv(void); /* Process an NMI */
+Index: linux-2.6.10/include/asm-h8300/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-h8300/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-h8300/kerntypes.h 2005-04-07 18:13:56.665789336 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-h8300/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
++/* H8300-specific header files */
++#ifndef _H8300_KERNTYPES_H
++#define _H8300_KERNTYPES_H
+
-+/* loc marks the beginning of a range of pages */
-+int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
-+{
-+ struct page *page = (struct page *)loc;
-+ int ret = 0;
-+ /* select if the set has any user page, and no kernel pages */
-+ while (sz) {
-+ if (user_page(page) && !dump_low_page(page)) {
-+ ret = 1;
-+ } else if (kernel_page(page) || dump_low_page(page)) {
-+ return 0;
-+ }
-+ page++;
-+ sz -= PAGE_SIZE;
-+ }
-+ return ret;
-+}
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
++#endif /* _H8300_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-cris/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-cris/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-cris/kerntypes.h 2005-04-07 18:13:56.694784928 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-cris/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
++/* CRIS-specific header files */
++#ifndef _CRIS_KERNTYPES_H
++#define _CRIS_KERNTYPES_H
+
-+/* loc marks the beginning of a range of pages */
-+int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
-+{
-+ struct page *page = (struct page *)loc;
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+ /* select if the set does not have any used pages */
-+ while (sz) {
-+ if (!unreferenced_page(page) || dump_low_page(page)) {
-+ return 0;
-+ }
-+ page++;
-+ sz -= PAGE_SIZE;
-+ }
-+ return 1;
-+}
++#endif /* _CRIS_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-mips/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-mips/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-mips/kerntypes.h 2005-04-07 18:13:56.714781888 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-mips/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+/* dummy: last (non-existent) pass */
-+int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
-+{
-+ return 0;
-+}
++/* MIPS-specific header files */
++#ifndef _MIPS_KERNTYPES_H
++#define _MIPS_KERNTYPES_H
+
-+/* TBD: resolve level bitmask ? */
-+struct dump_data_filter dump_filter_table[] = {
-+ { .name = "kern", .selector = dump_filter_kernpages,
-+ .level_mask = DUMP_MASK_KERN},
-+ { .name = "user", .selector = dump_filter_userpages,
-+ .level_mask = DUMP_MASK_USED},
-+ { .name = "unused", .selector = dump_filter_unusedpages,
-+ .level_mask = DUMP_MASK_UNUSED},
-+ { .name = "none", .selector = dump_filter_none,
-+ .level_mask = DUMP_MASK_REST},
-+ { .name = "", .selector = NULL, .level_mask = 0}
-+};
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-Index: linux-2.6.10/drivers/dump/dump_ppc64.c
++#endif /* _MIPS_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-arm26/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-arm26/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-arm26/kerntypes.h 2005-04-07 18:13:56.666789184 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-arm26/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
++
++/* ARM26-specific header files */
++#ifndef _ARM26_KERNTYPES_H
++#define _ARM26_KERNTYPES_H
++
++/* Use the default */
++#include <asm-generic/kerntypes.h>
++
++#endif /* _ARM26_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-parisc/kerntypes.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_ppc64.c 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_ppc64.c 2005-04-05 16:47:53.931206928 +0800
-@@ -0,0 +1,410 @@
+--- linux-2.6.10.orig/include/asm-parisc/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-parisc/kerntypes.h 2005-04-07 18:13:56.664789488 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * Architecture specific (ppc64) functions for Linux crash dumps.
++ * asm-parisc/kerntypes.h
+ *
-+ * Created by: Matt Robinson (yakker@sgi.com)
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
+ *
-+ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
-+ *
-+ * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
-+ * Copyright 2000 TurboLinux, Inc. All rights reserved.
-+ * Copyright 2003, 2004 IBM Corporation
-+ *
-+ * This code is released under version 2 of the GNU GPL.
++ * This source code is released under the GNU GPL.
+ */
+
++/* PA-RISC-specific header files */
++#ifndef _PARISC_KERNTYPES_H
++#define _PARISC_KERNTYPES_H
++
++/* Use the default */
++#include <asm-generic/kerntypes.h>
++
++#endif /* _PARISC_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-ia64/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ia64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-ia64/kerntypes.h 2005-04-07 18:13:56.681786904 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * The hooks for dumping the kernel virtual memory to disk are in this
-+ * file. Any time a modification is made to the virtual memory mechanism,
-+ * these routines must be changed to use the new mechanisms.
++ * asm-ia64/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
+ */
-+#include <linux/types.h>
-+#include <linux/fs.h>
-+#include <linux/dump.h>
-+#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <linux/delay.h>
-+#include <linux/syscalls.h>
-+#include <asm/hardirq.h>
-+#include "dump_methods.h"
-+#include <linux/irq.h>
-+#include <asm/machdep.h>
-+#include <asm/uaccess.h>
-+#include <asm/irq.h>
-+#include <asm/page.h>
-+#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
-+#include <linux/kdb.h>
-+#endif
+
-+extern cpumask_t irq_affinity[];
-+
-+static cpumask_t saved_affinity[NR_IRQS];
++/* IA64-specific header files */
++#ifndef _IA64_KERNTYPES_H
++#define _IA64_KERNTYPES_H
+
-+static __s32 saved_irq_count; /* saved preempt_count() flags */
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+static int alloc_dha_stack(void)
-+{
-+ int i;
-+ void *ptr;
++#endif /* _IA64_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-ia64/dump.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ia64/dump.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-ia64/dump.h 2005-04-07 18:13:56.680787056 +0800
+@@ -0,0 +1,201 @@
++/*
++ * Kernel header file for Linux crash dumps.
++ *
++ * Created by: Matt Robinson (yakker@sgi.com)
++ *
++ * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
-+ if (dump_header_asm.dha_stack[0])
-+ return 0;
++/* This header file holds the architecture specific crash dump header */
++#ifndef _ASM_DUMP_H
++#define _ASM_DUMP_H
+
-+ ptr = (void *)vmalloc(THREAD_SIZE * num_possible_cpus());
-+ if (!ptr) {
-+ return -ENOMEM;
-+ }
++/* definitions */
++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
++#define DUMP_ASM_VERSION_NUMBER 0x4 /* version number */
+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ dump_header_asm.dha_stack[i] =
-+ (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
-+ }
-+ return 0;
-+}
++#ifdef __KERNEL__
++#include <linux/efi.h>
++#include <asm/pal.h>
++#include <asm/ptrace.h>
+
-+static int free_dha_stack(void)
-+{
-+ if (dump_header_asm.dha_stack[0]) {
-+ vfree((void*)dump_header_asm.dha_stack[0]);
-+ dump_header_asm.dha_stack[0] = 0;
-+ }
-+ return 0;
-+}
+#ifdef CONFIG_SMP
-+static int dump_expect_ipi[NR_CPUS];
-+static atomic_t waiting_for_dump_ipi;
-+
-+extern void stop_this_cpu(void *);
-+static int
-+dump_ipi_handler(struct pt_regs *regs)
-+{
-+ int cpu = smp_processor_id();
-+
-+ if (!dump_expect_ipi[cpu])
-+ return 0;
-+ dump_save_this_cpu(regs);
-+ atomic_dec(&waiting_for_dump_ipi);
-+
-+ level_changed:
-+ switch (dump_silence_level) {
-+ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
-+ while (dump_oncpu) {
-+ barrier(); /* paranoia */
-+ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
-+ goto level_changed;
-+ cpu_relax(); /* kill time nicely */
-+ }
-+ break;
-+
-+ case DUMP_HALT_CPUS: /* Execute halt */
-+ stop_this_cpu(NULL);
-+ break;
-+
-+ case DUMP_SOFT_SPIN_CPUS:
-+ /* Mark the task so it spins in schedule */
-+ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
-+ break;
-+ }
++extern cpumask_t irq_affinity[];
++extern int (*dump_ipi_function_ptr)(struct pt_regs *);
++extern void dump_send_ipi(void);
++#else /* !CONFIG_SMP */
++#define dump_send_ipi() do { } while(0)
++#endif
+
-+ return 1;
-+}
++#else /* !__KERNEL__ */
++/* necessary header files */
++#include <asm/ptrace.h> /* for pt_regs */
++#include <linux/threads.h>
++#endif /* __KERNEL__ */
+
-+/* save registers on other processors
-+ * If the other cpus don't respond we simply do not get their states.
++/*
++ * mkswap.c calls getpagesize() to get the system page size,
++ * which is not necessarily the same as the hardware page size.
++ *
++ * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB.
++ *
++ * The physical memory is laid out in the hardware/minimal pages.
++ * This is the size we need to use for dumping physical pages.
++ *
++ * Note the hardware/minimal page size being used in:
++ * arch/ia64/kernel/efi.c`efi_memmap_walk():
++ * curr.end = curr.start + (md->num_pages << 12);
++ *
++ * Since the system page size could change between the kernel we boot
++ * on and the kernel that caused the core dump, we may want to have something
++ * more constant like the maximum system page size (See include/asm-ia64/page.h).
+ */
-+void
-+__dump_save_other_cpus(void)
-+{
-+ int i, cpu = smp_processor_id();
-+ int other_cpus = num_online_cpus()-1;
-+
-+ if (other_cpus > 0) {
-+ atomic_set(&waiting_for_dump_ipi, other_cpus);
-+ for (i = 0; i < NR_CPUS; i++)
-+ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
-+
-+ printk(KERN_ALERT "sending IPI to other cpus...\n");
-+ dump_send_ipi(dump_ipi_handler);
-+ /*
-+ * may be we dont need to wait for IPI to be processed.
-+ * just write out the header at the end of dumping, if
-+ * this IPI is not processed until then, there probably
-+ * is a problem and we just fail to capture state of
-+ * other cpus.
-+ * However, we will wait 10 secs for other CPUs to respond.
-+ * If not, proceed the dump process even though we failed
-+ * to capture other CPU states.
-+ */
-+ i = 10000; /* wait max of 10 seconds */
-+ while ((atomic_read(&waiting_for_dump_ipi) > 0) && (--i > 0)) {
-+ barrier();
-+ mdelay(1);
-+ }
-+ printk(KERN_ALERT "done waiting: %d cpus not responding\n",
-+ atomic_read(&waiting_for_dump_ipi));
-+ dump_send_ipi(NULL); /* clear handler */
-+ }
-+}
-+
-+/*
-+ * Restore old irq affinities.
++/* IA64 manages the stack in a different manner as compared to other architectures.
++ * task_struct lies at the bottom of stack.
+ */
-+static void
-+__dump_reset_irq_affinity(void)
-+{
-+ int i;
-+ irq_desc_t *irq_d;
-+
-+ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
-+
-+ for_each_irq(i) {
-+ irq_d = get_irq_desc(i);
-+ if (irq_d->handler == NULL) {
-+ continue;
-+ }
-+ if (irq_d->handler->set_affinity != NULL) {
-+ irq_d->handler->set_affinity(i, saved_affinity[i]);
-+ }
-+ }
-+}
++#undef STACK_START_POSITION
++#define STACK_START_POSITION(tsk) (tsk)
++#define DUMP_MIN_PAGE_SHIFT 12
++#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT)
++#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1))
++#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK)
+
-+/*
-+ * Routine to save the old irq affinities and change affinities of all irqs to
-+ * the dumping cpu.
-+ *
-+ * NB: Need to be expanded to multiple nodes.
-+ */
-+static void
-+__dump_set_irq_affinity(void)
-+{
-+ int i;
-+ cpumask_t cpu = CPU_MASK_NONE;
-+ irq_desc_t *irq_d;
++#define DUMP_MAX_PAGE_SHIFT 16
++#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT)
++#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1))
++#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK)
+
-+ cpu_set(smp_processor_id(), cpu);
++#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT
+
-+ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
++extern int _end,_start;
+
-+ for_each_irq(i) {
-+ irq_d = get_irq_desc(i);
-+ if (irq_d->handler == NULL) {
-+ continue;
-+ }
-+ irq_affinity[i] = cpu;
-+ if (irq_d->handler->set_affinity != NULL) {
-+ irq_d->handler->set_affinity(i, irq_affinity[i]);
-+ }
-+ }
-+}
-+#else /* !CONFIG_SMP */
-+#define __dump_save_other_cpus() do { } while (0)
-+#define __dump_set_irq_affinity() do { } while (0)
-+#define __dump_reset_irq_affinity() do { } while (0)
-+#endif /* !CONFIG_SMP */
++/*
++ * Structure: dump_header_asm_t
++ * Function: This is the header for architecture-specific stuff. It
++ * follows right after the dump header.
++ */
++/*typedef struct _dump_header_asm {*/
+
-+void
-+__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
-+{
-+ if (regs) {
-+ memcpy(dest_regs, regs, sizeof(struct pt_regs));
-+ }
-+}
++typedef struct __dump_header_asm {
+
-+void
-+__dump_save_context(int cpu, const struct pt_regs *regs,
-+ struct task_struct *tsk)
-+{
-+ dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
-+ __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
++ /* the dump magic number -- unique to verify dump is valid */
++ uint64_t dha_magic_number;
+
-+ /* take a snapshot of the stack */
-+ /* doing this enables us to tolerate slight drifts on this cpu */
++ /* the version number of this dump */
++ uint32_t dha_version;
+
-+ if (dump_header_asm.dha_stack[cpu]) {
-+ memcpy((void *)dump_header_asm.dha_stack[cpu],
-+ STACK_START_POSITION(tsk),
-+ THREAD_SIZE);
-+ }
-+ dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
-+}
++ /* the size of this header (in case we can't read it) */
++ uint32_t dha_header_size;
+
-+/*
-+ * Name: __dump_configure_header()
-+ * Func: Configure the dump header with all proper values.
-+ */
-+int
-+__dump_configure_header(const struct pt_regs *regs)
-+{
-+ return (0);
-+}
++	/* pointer to pt_regs (OLD: struct pt_regs *, NEW: uint64_t) */
++ uint64_t dha_pt_regs;
+
-+#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
-+int
-+kdb_sysdump(int argc, const char **argv, const char **envp, struct pt_regs *regs)
-+{
-+ kdb_printf("Dumping to disk...\n");
-+ dump("dump from kdb", regs);
-+ kdb_printf("Dump Complete\n");
-+ return 0;
-+}
-+#endif
++ /* the dump registers */
++ struct pt_regs dha_regs;
+
-+/*
-+ * Name: __dump_init()
-+ * Func: Initialize the dumping routine process. This is in case
-+ * it's necessary in the future.
-+ */
-+void
-+__dump_init(uint64_t local_memory_start)
-+{
-+#if defined(FIXME) && defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
-+ /* This won't currently work because interrupts are off in kdb
-+ * and the dump process doesn't understand how to recover.
-+ */
-+ /* ToDo: add a command to query/set dump configuration */
-+ kdb_register_repeat("sysdump", kdb_sysdump, "", "use lkcd to dump the system to disk (if configured)", 0, KDB_REPEAT_NONE);
-+#endif
++ /* the rnat register saved after flushrs */
++ uint64_t dha_rnat;
+
-+ /* return */
-+ return;
-+}
++ /* the pfs register saved after flushrs */
++ uint64_t dha_pfs;
+
-+/*
-+ * Name: __dump_open()
-+ * Func: Open the dump device (architecture specific). This is in
-+ * case it's necessary in the future.
-+ */
-+void
-+__dump_open(void)
-+{
-+ alloc_dha_stack();
-+}
++ /* the bspstore register saved after flushrs */
++ uint64_t dha_bspstore;
+
++ /* smp specific */
++ uint32_t dha_smp_num_cpus;
++ uint32_t dha_dumping_cpu;
++ struct pt_regs dha_smp_regs[NR_CPUS];
++ uint64_t dha_smp_current_task[NR_CPUS];
++ uint64_t dha_stack[NR_CPUS];
++ uint64_t dha_stack_ptr[NR_CPUS];
+
-+/*
-+ * Name: __dump_cleanup()
-+ * Func: Free any architecture specific data structures. This is called
-+ * when the dump module is being removed.
-+ */
-+void
-+__dump_cleanup(void)
-+{
-+ free_dha_stack();
-+}
++} __attribute__((packed)) dump_header_asm_t;
+
-+/*
-+ * Kludge - dump from interrupt context is unreliable (Fixme)
-+ *
-+ * We do this so that softirqs initiated for dump i/o
-+ * get processed and we don't hang while waiting for i/o
-+ * to complete or in any irq synchronization attempt.
-+ *
-+ * This is not quite legal of course, as it has the side
-+ * effect of making all interrupts & softirqs triggered
-+ * while dump is in progress complete before currently
-+ * pending softirqs and the currently executing interrupt
-+ * code.
-+ */
-+static inline void
-+irq_bh_save(void)
-+{
-+ saved_irq_count = irq_count();
-+ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
-+}
+
-+static inline void
-+irq_bh_restore(void)
-+{
-+ preempt_count() |= saved_irq_count;
-+}
++extern struct __dump_header_asm dump_header_asm;
+
-+/*
-+ * Name: __dump_irq_enable
-+ * Func: Reset system so interrupts are enabled.
-+ * This is used for dump methods that require interrupts
-+ * Eventually, all methods will have interrupts disabled
-+ * and this code can be removed.
-+ *
-+ * Change irq affinities
-+ * Re-enable interrupts
-+ */
-+int
-+__dump_irq_enable(void)
++#ifdef __KERNEL__
++static inline void get_current_regs(struct pt_regs *regs)
+{
-+ __dump_set_irq_affinity();
-+ irq_bh_save();
-+ local_irq_enable();
-+ return 0;
-+}
++ /*
++ * REMIND: Looking at functions/Macros like:
++ * DO_SAVE_SWITCH_STACK
++ * ia64_switch_to()
++ * ia64_save_extra()
++ * switch_to()
++	 * to implement this new feature that Matt seems to have added
++ * to panic.c; seems all platforms are now expected to provide
++ * this function to dump the current registers into the pt_regs
++ * structure.
++ */
++ volatile unsigned long rsc_value;/*for storing the rsc value*/
++ volatile unsigned long ic_value;
+
-+/*
-+ * Name: __dump_irq_restore
-+ * Func: Resume the system state in an architecture-specific way.
-+ */
-+void
-+__dump_irq_restore(void)
-+{
-+ local_irq_disable();
-+ __dump_reset_irq_affinity();
-+ irq_bh_restore();
++ __asm__ __volatile__("mov %0=b6;;":"=r"(regs->b6));
++ __asm__ __volatile__("mov %0=b7;;":"=r"(regs->b7));
++
++ __asm__ __volatile__("mov %0=ar.csd;;":"=r"(regs->ar_csd));
++ __asm__ __volatile__("mov %0=ar.ssd;;":"=r"(regs->ar_ssd));
++ __asm__ __volatile__("mov %0=psr;;":"=r"(ic_value));
++ if(ic_value & 0x1000)/*Within an interrupt*/
++ {
++ __asm__ __volatile__("mov %0=cr.ipsr;;":"=r"(regs->cr_ipsr));
++ __asm__ __volatile__("mov %0=cr.iip;;":"=r"(regs->cr_iip));
++ __asm__ __volatile__("mov %0=cr.ifs;;":"=r"(regs->cr_ifs));
++ }
++ else
++ {
++ regs->cr_ipsr=regs->cr_iip=regs->cr_ifs=(unsigned long)-1;
++ }
++ __asm__ __volatile__("mov %0=ar.unat;;":"=r"(regs->ar_unat));
++ __asm__ __volatile__("mov %0=ar.pfs;;":"=r"(regs->ar_pfs));
++ __asm__ __volatile__("mov %0=ar.rsc;;":"=r"(rsc_value));
++ regs->ar_rsc = rsc_value;
++ /*loadrs is from 16th bit to 29th bit of rsc*/
++ regs->loadrs = rsc_value >> 16 & (unsigned long)0x3fff;
++ /*setting the rsc.mode value to 0 (rsc.mode is the last two bits of rsc)*/
++ __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value & (unsigned long)(~3)));
++ __asm__ __volatile__("mov %0=ar.rnat;;":"=r"(regs->ar_rnat));
++ __asm__ __volatile__("mov %0=ar.bspstore;;":"=r"(regs->ar_bspstore));
++ /*copying the original value back*/
++ __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value));
++ __asm__ __volatile__("mov %0=pr;;":"=r"(regs->pr));
++ __asm__ __volatile__("mov %0=ar.fpsr;;":"=r"(regs->ar_fpsr));
++ __asm__ __volatile__("mov %0=ar.ccv;;":"=r"(regs->ar_ccv));
++
++ __asm__ __volatile__("mov %0=r2;;":"=r"(regs->r2));
++ __asm__ __volatile__("mov %0=r3;;":"=r"(regs->r3));
++ __asm__ __volatile__("mov %0=r8;;":"=r"(regs->r8));
++ __asm__ __volatile__("mov %0=r9;;":"=r"(regs->r9));
++ __asm__ __volatile__("mov %0=r10;;":"=r"(regs->r10));
++ __asm__ __volatile__("mov %0=r11;;":"=r"(regs->r11));
++ __asm__ __volatile__("mov %0=r12;;":"=r"(regs->r12));
++ __asm__ __volatile__("mov %0=r13;;":"=r"(regs->r13));
++ __asm__ __volatile__("mov %0=r14;;":"=r"(regs->r14));
++ __asm__ __volatile__("mov %0=r15;;":"=r"(regs->r15));
++ __asm__ __volatile__("mov %0=r16;;":"=r"(regs->r16));
++ __asm__ __volatile__("mov %0=r17;;":"=r"(regs->r17));
++ __asm__ __volatile__("mov %0=r18;;":"=r"(regs->r18));
++ __asm__ __volatile__("mov %0=r19;;":"=r"(regs->r19));
++ __asm__ __volatile__("mov %0=r20;;":"=r"(regs->r20));
++ __asm__ __volatile__("mov %0=r21;;":"=r"(regs->r21));
++ __asm__ __volatile__("mov %0=r22;;":"=r"(regs->r22));
++ __asm__ __volatile__("mov %0=r23;;":"=r"(regs->r23));
++ __asm__ __volatile__("mov %0=r24;;":"=r"(regs->r24));
++ __asm__ __volatile__("mov %0=r25;;":"=r"(regs->r25));
++ __asm__ __volatile__("mov %0=r26;;":"=r"(regs->r26));
++ __asm__ __volatile__("mov %0=r27;;":"=r"(regs->r27));
++ __asm__ __volatile__("mov %0=r28;;":"=r"(regs->r28));
++ __asm__ __volatile__("mov %0=r29;;":"=r"(regs->r29));
++ __asm__ __volatile__("mov %0=r30;;":"=r"(regs->r30));
++ __asm__ __volatile__("mov %0=r31;;":"=r"(regs->r31));
+}
+
-+#if 0
-+/* Cheap progress hack. It estimates pages to write and
-+ * assumes all pages will go -- so it may get way off.
-+ * As the progress is not displayed for other architectures, not used at this
-+ * moment.
-+ */
-+void
-+__dump_progress_add_page(void)
-+{
-+ unsigned long total_pages = nr_free_pages() + nr_inactive_pages + nr_active_pages;
-+ unsigned int percent = (dump_header.dh_num_dump_pages * 100) / total_pages;
-+ char buf[30];
-+
-+ if (percent > last_percent && percent <= 100) {
-+ sprintf(buf, "Dump %3d%% ", percent);
-+ ppc64_dump_msg(0x2, buf);
-+ last_percent = percent;
-+ }
-+
-+}
-+#endif
++/* Perhaps added to Common Arch Specific Functions and moved to dump.h some day */
++extern void * __dump_memcpy(void *, const void *, size_t);
++#endif /* __KERNEL__ */
+
-+extern int dump_page_is_ram(unsigned long);
++#endif /* _ASM_DUMP_H */
+Index: linux-2.6.10/include/asm-ia64/nmi.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ia64/nmi.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-ia64/nmi.h 2005-04-07 18:13:56.679787208 +0800
+@@ -0,0 +1,28 @@
+/*
-+ * Name: __dump_page_valid()
-+ * Func: Check if page is valid to dump.
++ * linux/include/asm-ia64/nmi.h
+ */
-+int
-+__dump_page_valid(unsigned long index)
-+{
-+ if (!pfn_valid(index))
-+ return 0;
++#ifndef ASM_NMI_H
++#define ASM_NMI_H
+
-+ return dump_page_is_ram(index);
-+}
++#include <linux/pm.h>
+
-+/*
-+ * Name: manual_handle_crashdump()
-+ * Func: Interface for the lkcd dump command. Calls dump_execute()
++struct pt_regs;
++
++typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
++
++/**
++ * set_nmi_callback
++ *
++ * Set a handler for an NMI. Only one handler may be
++ * set. Return 1 if the NMI was handled.
+ */
-+int
-+manual_handle_crashdump(void)
-+{
-+ struct pt_regs regs;
-+
-+	get_current_regs(&regs);
-+	dump_execute("manual", &regs);
-+ return 0;
-+}
-+
-+/*
-+ * Name: __dump_clean_irq_state()
-+ * Func: Clean up from the previous IRQ handling state. Such as oops from
-+ * interrupt handler or bottom half.
++void set_nmi_callback(nmi_callback_t callback);
++
++/**
++ * unset_nmi_callback
++ *
++ * Remove the handler previously set.
+ */
-+void
-+__dump_clean_irq_state(void)
-+{
-+ return;
-+}
-Index: linux-2.6.10/drivers/dump/dump_methods.h
++void unset_nmi_callback(void);
++
++#endif /* ASM_NMI_H */
+Index: linux-2.6.10/include/asm-ppc64/kerntypes.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/dump_methods.h 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/dump_methods.h 2005-04-05 16:47:53.930207080 +0800
-@@ -0,0 +1,357 @@
+--- linux-2.6.10.orig/include/asm-ppc64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-ppc64/kerntypes.h 2005-04-07 18:13:56.682786752 +0800
+@@ -0,0 +1,21 @@
+/*
-+ * Generic interfaces for flexible system dump
-+ *
-+ * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
++ * asm-ppc64/kerntypes.h
+ *
-+ * Copyright (C) 2002 International Business Machines Corp.
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
+ *
-+ * This code is released under version 2 of the GNU GPL.
++ * This source code is released under the GNU GPL.
+ */
+
-+#ifndef _LINUX_DUMP_METHODS_H
-+#define _LINUX_DUMP_METHODS_H
++/* PPC64-specific header files */
++#ifndef _PPC64_KERNTYPES_H
++#define _PPC64_KERNTYPES_H
++
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
++#endif /* _PPC64_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-ppc64/dump.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ppc64/dump.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-ppc64/dump.h 2005-04-07 18:13:56.681786904 +0800
+@@ -0,0 +1,115 @@
+/*
-+ * Inspired by Matt Robinson's suggestion of introducing dump
-+ * methods as a way to enable different crash dump facilities to
-+ * coexist where each employs its own scheme or dumping policy.
++ * Kernel header file for Linux crash dumps.
+ *
-+ * The code here creates a framework for flexible dump by defining
-+ * a set of methods and providing associated helpers that differentiate
-+ * between the underlying mechanism (how to dump), overall scheme
-+ * (sequencing of stages and data dumped and associated quiescing),
-+ * output format (what the dump output looks like), target type
-+ * (where to save the dump; see dumpdev.h), and selection policy
-+ * (state/data to dump).
-+ *
-+ * These sets of interfaces can be mixed and matched to build a
-+ * dumper suitable for a given situation, allowing for
-+ * flexibility as well appropriate degree of code reuse.
-+ * For example all features and options of lkcd (including
-+ * granular selective dumping in the near future) should be
-+ * available even when say, the 2 stage soft-boot based mechanism
-+ * is used for taking disruptive dumps.
++ * Created by: Todd Inglett <tinglett@vnet.ibm.com>
+ *
-+ * Todo: Additionally modules or drivers may supply their own
-+ * custom dumpers which extend dump with module specific
-+ * information or hardware state, and can even tweak the
-+ * mechanism when it comes to saving state relevant to
-+ * them.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/highmem.h>
-+#include <linux/dumpdev.h>
-+#include <asm/page.h> /* get_order */
-+
-+#define MAX_PASSES 6
-+#define MAX_DEVS 4
-+
-+
-+/* To customise selection of pages to be dumped in a given pass/group */
-+struct dump_data_filter{
-+ char name[32];
-+ int (*selector)(int, unsigned long, unsigned long);
-+ ulong level_mask; /* dump level(s) for which this filter applies */
-+ loff_t start[MAX_NUMNODES], end[MAX_NUMNODES]; /* location range applicable */
-+ ulong num_mbanks; /* Number of memory banks. Greater than one for discontig memory (NUMA) */
-+};
-+
-+
-+/*
-+ * Determined by the kind of dump mechanism and appropriate
-+ * overall scheme
-+ */
-+struct dump_scheme_ops {
-+ /* sets aside memory, inits data structures etc */
-+ int (*configure)(unsigned long devid);
-+ /* releases resources */
-+ int (*unconfigure)(void);
-+
-+ /* ordering of passes, invoking iterator */
-+ int (*sequencer)(void);
-+ /* iterates over system data, selects and acts on data to dump */
-+ int (*iterator)(int, int (*)(unsigned long, unsigned long),
-+ struct dump_data_filter *);
-+ /* action when data is selected for dump */
-+ int (*save_data)(unsigned long, unsigned long);
-+ /* action when data is to be excluded from dump */
-+ int (*skip_data)(unsigned long, unsigned long);
-+ /* policies for space, multiple dump devices etc */
-+ int (*write_buffer)(void *, unsigned long);
-+};
-+
-+struct dump_scheme {
-+ /* the name serves as an anchor to locate the scheme after reboot */
-+ char name[32];
-+ struct dump_scheme_ops *ops;
-+ struct list_head list;
-+};
-+
-+/* Quiescing/Silence levels (controls IPI callback behaviour) */
-+extern enum dump_silence_levels {
-+ DUMP_SOFT_SPIN_CPUS = 1,
-+ DUMP_HARD_SPIN_CPUS = 2,
-+ DUMP_HALT_CPUS = 3,
-+} dump_silence_level;
-+
-+/* determined by the dump (file) format */
-+struct dump_fmt_ops {
-+ /* build header */
-+ int (*configure_header)(const char *, const struct pt_regs *);
-+ int (*update_header)(void); /* update header and write it out */
-+ /* save curr context */
-+ void (*save_context)(int, const struct pt_regs *,
-+ struct task_struct *);
-+ /* typically called by the save_data action */
-+ /* add formatted data to the dump buffer */
-+ int (*add_data)(unsigned long, unsigned long);
-+ int (*update_end_marker)(void);
-+};
-+
-+struct dump_fmt {
-+ unsigned long magic;
-+ char name[32]; /* lcrash, crash, elf-core etc */
-+ struct dump_fmt_ops *ops;
-+ struct list_head list;
-+};
-+
-+/*
-+ * Modules will be able add their own data capture schemes by
-+ * registering their own dumpers. Typically they would use the
-+ * primary dumper as a template and tune it with their routines.
-+ * Still Todo.
-+ */
-+
-+/* The combined dumper profile (mechanism, scheme, dev, fmt) */
-+struct dumper {
-+ char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */
-+ struct dump_scheme *scheme;
-+ struct dump_fmt *fmt;
-+ struct __dump_compress *compress;
-+ struct dump_data_filter *filter;
-+ struct dump_dev *dev;
-+ /* state valid only for active dumper(s) - per instance */
-+ /* run time state/context */
-+ int curr_pass;
-+ unsigned long count;
-+ loff_t curr_offset; /* current logical offset into dump device */
-+ loff_t curr_loc; /* current memory location */
-+ void *curr_buf; /* current position in the dump buffer */
-+ void *dump_buf; /* starting addr of dump buffer */
-+ int header_dirty; /* whether the header needs to be written out */
-+ int header_len;
-+ struct list_head dumper_list; /* links to other dumpers */
-+};
-+
-+/* Starting point to get to the current configured state */
-+struct dump_config {
-+ ulong level;
-+ ulong flags;
-+ struct dumper *dumper;
-+ unsigned long dump_device;
-+ unsigned long dump_addr; /* relevant only for in-memory dumps */
-+ struct list_head dump_dev_list;
-+};
++ * Copyright 2002 - 2004 International Business Machines
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
-+extern struct dump_config dump_config;
++/* This header file holds the architecture specific crash dump header */
++#ifndef _ASM_DUMP_H
++#define _ASM_DUMP_H
+
-+/* Used to save the dump config across a reboot for 2-stage dumps:
-+ *
-+ * Note: The scheme, format, compression and device type should be
-+ * registered at bootup, for this config to be sharable across soft-boot.
-+ * The function addresses could have changed and become invalid, and
-+ * need to be set up again.
-+ */
-+struct dump_config_block {
-+ u64 magic; /* for a quick sanity check after reboot */
-+ struct dump_memdev memdev; /* handle to dump stored in memory */
-+ struct dump_config config;
-+ struct dumper dumper;
-+ struct dump_scheme scheme;
-+ struct dump_fmt fmt;
-+ struct __dump_compress compress;
-+ struct dump_data_filter filter_table[MAX_PASSES];
-+ struct dump_anydev dev[MAX_DEVS]; /* target dump device */
-+};
++/* necessary header files */
++#include <asm/ptrace.h> /* for pt_regs */
++#include <asm/kmap_types.h>
++#include <linux/threads.h>
+
++/* definitions */
++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
++#define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */
+
-+/* Wrappers that invoke the methods for the current (active) dumper */
++/*
++ * Structure: __dump_header_asm
++ * Function: This is the header for architecture-specific stuff. It
++ * follows right after the dump header.
++ */
++struct __dump_header_asm {
+
-+/* Scheme operations */
++ /* the dump magic number -- unique to verify dump is valid */
++ uint64_t dha_magic_number;
+
-+static inline int dump_sequencer(void)
-+{
-+ return dump_config.dumper->scheme->ops->sequencer();
-+}
++ /* the version number of this dump */
++ uint32_t dha_version;
+
-+static inline int dump_iterator(int pass, int (*action)(unsigned long,
-+ unsigned long), struct dump_data_filter *filter)
-+{
-+ return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
-+}
++ /* the size of this header (in case we can't read it) */
++ uint32_t dha_header_size;
+
-+#define dump_save_data dump_config.dumper->scheme->ops->save_data
-+#define dump_skip_data dump_config.dumper->scheme->ops->skip_data
++ /* the dump registers */
++ struct pt_regs dha_regs;
+
-+static inline int dump_write_buffer(void *buf, unsigned long len)
-+{
-+ return dump_config.dumper->scheme->ops->write_buffer(buf, len);
-+}
++ /* smp specific */
++ uint32_t dha_smp_num_cpus;
++ int dha_dumping_cpu;
++ struct pt_regs dha_smp_regs[NR_CPUS];
++ uint64_t dha_smp_current_task[NR_CPUS];
++ uint64_t dha_stack[NR_CPUS];
++ uint64_t dha_stack_ptr[NR_CPUS];
++} __attribute__((packed));
+
-+static inline int dump_configure(unsigned long devid)
++#ifdef __KERNEL__
++static inline void get_current_regs(struct pt_regs *regs)
+{
-+ return dump_config.dumper->scheme->ops->configure(devid);
-+}
++ unsigned long tmp1, tmp2;
+
-+static inline int dump_unconfigure(void)
-+{
-+ return dump_config.dumper->scheme->ops->unconfigure();
++ __asm__ __volatile__ (
++ "std 0,0(%2)\n"
++ "std 1,8(%2)\n"
++ "std 2,16(%2)\n"
++ "std 3,24(%2)\n"
++ "std 4,32(%2)\n"
++ "std 5,40(%2)\n"
++ "std 6,48(%2)\n"
++ "std 7,56(%2)\n"
++ "std 8,64(%2)\n"
++ "std 9,72(%2)\n"
++ "std 10,80(%2)\n"
++ "std 11,88(%2)\n"
++ "std 12,96(%2)\n"
++ "std 13,104(%2)\n"
++ "std 14,112(%2)\n"
++ "std 15,120(%2)\n"
++ "std 16,128(%2)\n"
++ "std 17,136(%2)\n"
++ "std 18,144(%2)\n"
++ "std 19,152(%2)\n"
++ "std 20,160(%2)\n"
++ "std 21,168(%2)\n"
++ "std 22,176(%2)\n"
++ "std 23,184(%2)\n"
++ "std 24,192(%2)\n"
++ "std 25,200(%2)\n"
++ "std 26,208(%2)\n"
++ "std 27,216(%2)\n"
++ "std 28,224(%2)\n"
++ "std 29,232(%2)\n"
++ "std 30,240(%2)\n"
++ "std 31,248(%2)\n"
++ "mfmsr %0\n"
++ "std %0, 264(%2)\n"
++ "mfctr %0\n"
++ "std %0, 280(%2)\n"
++ "mflr %0\n"
++ "std %0, 288(%2)\n"
++ "bl 1f\n"
++ "1: mflr %1\n"
++ "std %1, 256(%2)\n"
++ "mtlr %0\n"
++ "mfxer %0\n"
++ "std %0, 296(%2)\n"
++ : "=&r" (tmp1), "=&r" (tmp2)
++ : "b" (regs));
+}
+
-+/* Format operations */
++extern struct __dump_header_asm dump_header_asm;
+
-+static inline int dump_configure_header(const char *panic_str,
-+ const struct pt_regs *regs)
-+{
-+ return dump_config.dumper->fmt->ops->configure_header(panic_str, regs);
-+}
++#ifdef CONFIG_SMP
++extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
++#else
++#define dump_send_ipi() do { } while(0)
++#endif
++#endif /* __KERNEL__ */
+
-+static inline void dump_save_context(int cpu, const struct pt_regs *regs,
-+ struct task_struct *tsk)
-+{
-+ dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk);
-+}
++#endif /* _ASM_DUMP_H */
+Index: linux-2.6.10/include/asm-ppc64/kmap_types.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ppc64/kmap_types.h 2004-12-25 05:34:45.000000000 +0800
++++ linux-2.6.10/include/asm-ppc64/kmap_types.h 2005-04-07 18:13:56.692785232 +0800
+@@ -16,7 +16,8 @@
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+- KM_TYPE_NR
++ KM_TYPE_NR,
++ KM_DUMP
+ };
+
+ #endif
+Index: linux-2.6.10/include/asm-ppc64/smp.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-ppc64/smp.h 2004-12-25 05:33:47.000000000 +0800
++++ linux-2.6.10/include/asm-ppc64/smp.h 2005-04-07 18:13:56.693785080 +0800
+@@ -36,7 +36,7 @@
+ extern void smp_send_debugger_break(int cpu);
+ struct pt_regs;
+ extern void smp_message_recv(int, struct pt_regs *);
+-
++extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
+
+ #define smp_processor_id() (get_paca()->paca_index)
+ #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+Index: linux-2.6.10/include/asm-s390/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-s390/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-s390/kerntypes.h 2005-04-07 18:13:56.713782040 +0800
+@@ -0,0 +1,46 @@
++/*
++ * asm-s390/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+static inline int dump_save_this_cpu(const struct pt_regs *regs)
-+{
-+ int cpu = smp_processor_id();
++/* S/390 specific header files */
++#ifndef _S390_KERNTYPES_H
++#define _S390_KERNTYPES_H
+
-+ dump_save_context(cpu, regs, current);
-+ return 1;
-+}
++#include <asm/lowcore.h>
++#include <asm/debug.h>
++#include <asm/ccwdev.h>
++#include <asm/ccwgroup.h>
++#include <asm/qdio.h>
+
-+static inline int dump_update_header(void)
-+{
-+ return dump_config.dumper->fmt->ops->update_header();
-+}
++/* channel subsystem driver */
++#include "../../drivers/s390/cio/cio.h"
++#include "../../drivers/s390/cio/chsc.h"
++#include "../../drivers/s390/cio/css.h"
++#include "../../drivers/s390/cio/device.h"
++#include "../../drivers/s390/cio/qdio.h"
+
-+static inline int dump_update_end_marker(void)
-+{
-+ return dump_config.dumper->fmt->ops->update_end_marker();
-+}
++/* dasd device driver */
++#include "../../drivers/s390/block/dasd_int.h"
++#include "../../drivers/s390/block/dasd_diag.h"
++#include "../../drivers/s390/block/dasd_eckd.h"
++#include "../../drivers/s390/block/dasd_fba.h"
+
-+static inline int dump_add_data(unsigned long loc, unsigned long sz)
-+{
-+ return dump_config.dumper->fmt->ops->add_data(loc, sz);
-+}
++/* networking drivers */
++#include "../../drivers/s390/net/fsm.h"
++#include "../../drivers/s390/net/iucv.h"
++#include "../../drivers/s390/net/lcs.h"
+
-+/* Compression operation */
-+static inline int dump_compress_data(char *src, int slen, char *dst,
-+ unsigned long loc)
-+{
-+ return dump_config.dumper->compress->compress_func(src, slen,
-+ dst, DUMP_DPC_PAGE_SIZE, loc);
-+}
++/* zfcp device driver */
++#include "../../drivers/s390/scsi/zfcp_def.h"
++#include "../../drivers/s390/scsi/zfcp_fsf.h"
++
++#endif /* _S390_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-s390/dump.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-s390/dump.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-s390/dump.h 2005-04-07 18:13:56.713782040 +0800
+@@ -0,0 +1,10 @@
++/*
++ * Kernel header file for Linux crash dumps.
++ */
++
++/* Nothing to be done here, we have proper hardware support */
++#ifndef _ASM_DUMP_H
++#define _ASM_DUMP_H
++
++#endif
+
+Index: linux-2.6.10/include/asm-i386/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-i386/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-i386/kerntypes.h 2005-04-07 18:13:56.717781432 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-i386/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+/* Prototypes of some default implementations of dump methods */
++/* ix86-specific header files */
++#ifndef _I386_KERNTYPES_H
++#define _I386_KERNTYPES_H
+
-+extern struct __dump_compress dump_none_compression;
++/* Use the default */
++#include <asm-generic/kerntypes.h>
+
-+/* Default scheme methods (dump_scheme.c) */
++#endif /* _I386_KERNTYPES_H */
+Index: linux-2.6.10/include/asm-i386/dump.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-i386/dump.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-i386/dump.h 2005-04-07 18:13:56.716781584 +0800
+@@ -0,0 +1,90 @@
++/*
++ * Kernel header file for Linux crash dumps.
++ *
++ * Created by: Matt Robinson (yakker@sgi.com)
++ *
++ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
++ *
++ * This code is released under version 2 of the GNU GPL.
++ */
+
-+extern int dump_generic_sequencer(void);
-+extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned
-+ long), struct dump_data_filter *filter);
-+extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
-+extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
-+extern int dump_generic_write_buffer(void *buf, unsigned long len);
-+extern int dump_generic_configure(unsigned long);
-+extern int dump_generic_unconfigure(void);
-+#ifdef CONFIG_DISCONTIGMEM
-+extern void dump_reconfigure_mbanks(void);
-+#endif
++/* This header file holds the architecture specific crash dump header */
++#ifndef _ASM_DUMP_H
++#define _ASM_DUMP_H
+
-+/* Default scheme template */
-+extern struct dump_scheme dump_scheme_singlestage;
++/* necessary header files */
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#include <linux/threads.h>
++#include <linux/mm.h>
+
-+/* Default dump format methods */
++/* definitions */
++#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
++#define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */
+
-+extern int dump_lcrash_configure_header(const char *panic_str,
-+ const struct pt_regs *regs);
-+extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
-+ struct task_struct *tsk);
-+extern int dump_generic_update_header(void);
-+extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
-+extern int dump_lcrash_update_end_marker(void);
++/*
++ * Structure: __dump_header_asm
++ * Function: This is the header for architecture-specific stuff. It
++ * follows right after the dump header.
++ */
++struct __dump_header_asm {
++ /* the dump magic number -- unique to verify dump is valid */
++ u64 dha_magic_number;
+
-+/* Default format (lcrash) template */
-+extern struct dump_fmt dump_fmt_lcrash;
++ /* the version number of this dump */
++ u32 dha_version;
+
-+/* Default dump selection filter table */
++ /* the size of this header (in case we can't read it) */
++ u32 dha_header_size;
+
-+/*
-+ * Entries listed in order of importance and correspond to passes
-+ * The last entry (with a level_mask of zero) typically reflects data that
-+ * won't be dumped -- this may for example be used to identify data
-+ * that will be skipped for certain so the corresponding memory areas can be
-+ * utilized as scratch space.
-+ */
-+extern struct dump_data_filter dump_filter_table[];
++ /* the esp for i386 systems */
++ u32 dha_esp;
+
-+/* Some pre-defined dumpers */
-+extern struct dumper dumper_singlestage;
-+extern struct dumper dumper_stage1;
-+extern struct dumper dumper_stage2;
++ /* the eip for i386 systems */
++ u32 dha_eip;
+
-+/* These are temporary */
-+#define DUMP_MASK_HEADER DUMP_LEVEL_HEADER
-+#define DUMP_MASK_KERN DUMP_LEVEL_KERN
-+#define DUMP_MASK_USED DUMP_LEVEL_USED
-+#define DUMP_MASK_UNUSED DUMP_LEVEL_ALL_RAM
-+#define DUMP_MASK_REST 0 /* dummy for now */
++ /* the dump registers */
++ struct pt_regs dha_regs;
+
-+/* Helpers - move these to dump.h later ? */
++ /* smp specific */
++ u32 dha_smp_num_cpus;
++ u32 dha_dumping_cpu;
++ struct pt_regs dha_smp_regs[NR_CPUS];
++ u32 dha_smp_current_task[NR_CPUS];
++ u32 dha_stack[NR_CPUS];
++ u32 dha_stack_ptr[NR_CPUS];
++} __attribute__((packed));
+
-+int dump_generic_execute(const char *panic_str, const struct pt_regs *regs);
-+extern int dump_ll_write(void *buf, unsigned long len);
-+int dump_check_and_free_page(struct dump_memdev *dev, struct page *page);
++#ifdef __KERNEL__
+
-+static inline void dumper_reset(void)
-+{
-+ dump_config.dumper->curr_buf = dump_config.dumper->dump_buf;
-+ dump_config.dumper->curr_loc = 0;
-+ dump_config.dumper->curr_offset = 0;
-+ dump_config.dumper->count = 0;
-+ dump_config.dumper->curr_pass = 0;
-+}
++extern struct __dump_header_asm dump_header_asm;
+
-+/*
-+ * May later be moulded to perform boot-time allocations so we can dump
-+ * earlier during bootup
-+ */
-+static inline void *dump_alloc_mem(unsigned long size)
-+{
-+ return (void *) __get_free_pages(GFP_KERNEL, get_order(size));
-+}
++#ifdef CONFIG_SMP
++extern cpumask_t irq_affinity[];
++extern int (*dump_ipi_function_ptr)(struct pt_regs *);
++extern void dump_send_ipi(void);
++#else
++#define dump_send_ipi() do { } while(0)
++#endif
+
-+static inline void dump_free_mem(void *buf)
++static inline void get_current_regs(struct pt_regs *regs)
+{
-+ struct page *page;
-+
-+ /* ignore reserved pages (e.g. post soft boot stage) */
-+ if (buf && (page = virt_to_page(buf))) {
-+ if (PageReserved(page))
-+ return;
-+ }
-+ /*
-+ * Allocated using __get_free_pages().
-+ */
-+ free_pages((unsigned long)buf,
-+ get_order(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE));
++ __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
++ __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
++ __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
++ __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
++ __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
++ __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
++ __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
++ __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
++ __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
++ __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
++ __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
++ __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
++ __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
++ regs->eip = (unsigned long)current_text_addr();
+}
+
++#endif /* __KERNEL__ */
+
-+#endif /* _LINUX_DUMP_METHODS_H */
-Index: linux-2.6.10/drivers/dump/Makefile
++#endif /* _ASM_DUMP_H */
+Index: linux-2.6.10/include/asm-i386/kmap_types.h
===================================================================
---- linux-2.6.10.orig/drivers/dump/Makefile 2005-04-05 19:01:49.158500672 +0800
-+++ linux-2.6.10/drivers/dump/Makefile 2005-04-05 16:47:53.947204496 +0800
-@@ -0,0 +1,22 @@
-+#
-+# Makefile for the dump device drivers.
-+#
+--- linux-2.6.10.orig/include/asm-i386/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
++++ linux-2.6.10/include/asm-i386/kmap_types.h 2005-04-07 18:13:56.729779608 +0800
+@@ -23,7 +23,8 @@
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_DUMP,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+Index: linux-2.6.10/include/asm-i386/smp.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-i386/smp.h 2004-12-25 05:35:50.000000000 +0800
++++ linux-2.6.10/include/asm-i386/smp.h 2005-04-07 18:13:56.730779456 +0800
+@@ -37,6 +37,7 @@
+ extern cpumask_t cpu_sibling_map[];
+
+ extern void smp_flush_tlb(void);
++extern void dump_send_ipi(void);
+ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
+ extern void smp_invalidate_rcv(void); /* Process an NMI */
+ extern void (*mtrr_hook) (void);
+Index: linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-i386/mach-default/irq_vectors.h 2004-12-25 05:34:26.000000000 +0800
++++ linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h 2005-04-07 18:13:56.738778240 +0800
+@@ -48,6 +48,7 @@
+ #define INVALIDATE_TLB_VECTOR 0xfd
+ #define RESCHEDULE_VECTOR 0xfc
+ #define CALL_FUNCTION_VECTOR 0xfb
++#define DUMP_VECTOR 0xfa
+
+ #define THERMAL_APIC_VECTOR 0xf0
+ /*
+Index: linux-2.6.10/include/asm-m68k/kerntypes.h
+===================================================================
+--- linux-2.6.10.orig/include/asm-m68k/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
++++ linux-2.6.10/include/asm-m68k/kerntypes.h 2005-04-07 18:13:56.715781736 +0800
+@@ -0,0 +1,21 @@
++/*
++ * asm-m68k/kerntypes.h
++ *
++ * Arch-dependent header file that includes headers for all arch-specific
++ * types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
+
-+dump-y := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o
-+ifeq ($(CONFIG_X86_64),)
-+ifeq ($(CONFIG_X86),y)
-+dump-$(CONFIG_X86) += dump_i386.o
-+endif
-+endif
-+dump-$(CONFIG_ARM) += dump_arm.o
-+dump-$(CONFIG_PPC64) += dump_ppc64.o
-+dump-$(CONFIG_X86_64) += dump_x8664.o
-+dump-$(CONFIG_IA64) += dump_ia64.o
-+dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o
-+dump-objs += $(dump-y)
++/* m68k-specific header files */
++#ifndef _M68K_KERNTYPES_H
++#define _M68K_KERNTYPES_H
+
-+obj-$(CONFIG_CRASH_DUMP) += dump.o
-+obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o
-+obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o
-+obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o
-+obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o
-Index: linux-2.6.10/drivers/Makefile
-===================================================================
---- linux-2.6.10.orig/drivers/Makefile 2004-12-25 05:36:00.000000000 +0800
-+++ linux-2.6.10/drivers/Makefile 2005-04-05 16:47:53.950204040 +0800
-@@ -60,3 +60,4 @@
- obj-$(CONFIG_CPU_FREQ) += cpufreq/
- obj-$(CONFIG_MMC) += mmc/
- obj-y += firmware/
-+obj-$(CONFIG_CRASH_DUMP) += dump/
++/* Use the default */
++#include <asm-generic/kerntypes.h>
++
++#endif /* _M68K_KERNTYPES_H */