1 Index: linux-2.6.10/drivers/dump/dump_ia64.c
2 ===================================================================
3 --- linux-2.6.10.orig/drivers/dump/dump_ia64.c 2005-04-07 19:34:21.197950744 +0800
4 +++ linux-2.6.10/drivers/dump/dump_ia64.c 2005-04-07 18:13:56.896754224 +0800
7 + * Architecture specific (ia64) functions for Linux crash dumps.
9 + * Created by: Matt Robinson (yakker@sgi.com)
10 + * Contributions from SGI, IBM, and others.
12 + * 2.4 kernel modifications by: Matt D. Robinson (yakker@alacritech.com)
13 + * ia64 kernel modifications by: Piet Delaney (piet@www.piet.net)
15 + * Copyright (C) 2001 - 2002 Matt D. Robinson (yakker@alacritech.com)
16 + * Copyright (C) 2002 Silicon Graphics, Inc. All rights reserved.
17 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
19 + * This code is released under version 2 of the GNU GPL.
23 + * The hooks for dumping the kernel virtual memory to disk are in this
24 + * file. Any time a modification is made to the virtual memory mechanism,
25 + * these routines must be changed to use the new mechanisms.
27 +#include <linux/init.h>
28 +#include <linux/types.h>
29 +#include <linux/kernel.h>
30 +#include <linux/smp.h>
31 +#include <linux/fs.h>
32 +#include <linux/vmalloc.h>
33 +#include <linux/dump.h>
34 +#include "dump_methods.h"
35 +#include <linux/mm.h>
36 +#include <asm/processor.h>
37 +#include <asm-ia64/dump.h>
38 +#include <asm/hardirq.h>
39 +#include <linux/irq.h>
40 +#include <linux/delay.h>
42 +static __s32 saved_irq_count; /* saved preempt_count() flags */
45 +static int alloc_dha_stack(void)
50 + if (dump_header_asm.dha_stack[0])
54 + ptr = vmalloc(THREAD_SIZE * num_online_cpus());
56 + printk("vmalloc for dha_stacks failed\n");
59 + bzero(ptr,THREAD_SIZE );
61 + for (i = 0; i < num_online_cpus(); i++) {
62 + dump_header_asm.dha_stack[i] = (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
67 +static int free_dha_stack(void)
69 + if (dump_header_asm.dha_stack[0])
71 + vfree((void*)dump_header_asm.dha_stack[0]);
72 + dump_header_asm.dha_stack[0] = 0;
77 +/* a structure to get arguments into the following callback routine */
80 + struct task_struct *tsk;
84 +do_save_sw(struct unw_frame_info *info, void *arg)
86 + struct unw_args *uwargs = (struct unw_args *)arg;
87 + int cpu = uwargs->cpu;
88 + struct task_struct *tsk = uwargs->tsk;
90 + dump_header_asm.dha_stack_ptr[cpu] = (uint64_t)info->sw;
92 + if (tsk && dump_header_asm.dha_stack[cpu]) {
93 + memcpy((void *)dump_header_asm.dha_stack[cpu],
94 + STACK_START_POSITION(tsk),
100 +__dump_save_context(int cpu, const struct pt_regs *regs,
101 + struct task_struct *tsk)
103 + struct unw_args uwargs;
105 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
108 + dump_header_asm.dha_smp_regs[cpu] = *regs;
111 + /* save a snapshot of the stack in a nice state for unwinding */
115 + unw_init_running(do_save_sw, (void *)&uwargs);
120 +extern cpumask_t irq_affinity[];
121 +#define irq_desc _irq_desc
122 +extern irq_desc_t irq_desc[];
123 +extern void dump_send_ipi(void);
124 +static cpumask_t saved_affinity[NR_IRQS];
127 + * Routine to save the old irq affinities and change affinities of all irqs to
131 +set_irq_affinity(void)
134 + cpumask_t cpu = CPU_MASK_NONE;
136 + cpu_set(smp_processor_id(), cpu);
137 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
138 + for (i = 0; i < NR_IRQS; i++) {
139 + if (irq_desc[i].handler == NULL)
141 + irq_affinity[i] = cpu;
142 + if (irq_desc[i].handler->set_affinity != NULL)
143 + irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
148 + * Restore old irq affinities.
151 +reset_irq_affinity(void)
155 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
156 + for (i = 0; i < NR_IRQS; i++) {
157 + if (irq_desc[i].handler == NULL)
159 + if (irq_desc[i].handler->set_affinity != NULL)
160 + irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
164 +#else /* !CONFIG_SMP */
165 +#define set_irq_affinity() do { } while (0)
166 +#define reset_irq_affinity() do { } while (0)
167 +#define save_other_cpu_states() do { } while (0)
168 +#endif /* !CONFIG_SMP */
171 +static int dump_expect_ipi[NR_CPUS];
172 +static atomic_t waiting_for_dump_ipi;
173 +static int wait_for_dump_ipi = 2000; /* wait 2000 ms for ipi to be handled */
174 +extern void (*dump_trace_ptr)(struct pt_regs *);
177 +extern void stop_this_cpu(void);
180 +dump_nmi_callback(struct pt_regs *regs, int cpu)
182 + if (!dump_expect_ipi[cpu])
185 + dump_expect_ipi[cpu] = 0;
187 + dump_save_this_cpu(regs);
188 + atomic_dec(&waiting_for_dump_ipi);
191 + switch (dump_silence_level) {
192 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
193 + while (dump_oncpu) {
194 + barrier(); /* paranoia */
195 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
196 + goto level_changed;
198 + cpu_relax(); /* kill time nicely */
202 + case DUMP_HALT_CPUS: /* Execute halt */
206 + case DUMP_SOFT_SPIN_CPUS:
207 + /* Mark the task so it spins in schedule */
208 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
215 +int IPI_handler(struct pt_regs *regs)
218 + cpu = task_cpu(current);
219 + return(dump_nmi_callback(regs, cpu));
222 +/* save registers on other processors */
224 +__dump_save_other_cpus(void)
226 + int i, cpu = smp_processor_id();
227 + int other_cpus = num_online_cpus()-1;
228 + int wait_time = wait_for_dump_ipi;
230 + if (other_cpus > 0) {
231 + atomic_set(&waiting_for_dump_ipi, other_cpus);
233 + for (i = 0; i < NR_CPUS; i++) {
234 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
237 + dump_ipi_function_ptr = IPI_handler;
242 + /* maybe we don't need to wait for the IPI to be processed.
243 + * just write out the header at the end of dumping, if
244 + * this IPI is not processed until then, there probably
245 + * is a problem and we just fail to capture state of
247 + while(wait_time-- && (atomic_read(&waiting_for_dump_ipi) > 0)) {
251 + if (wait_time <= 0) {
252 + printk("dump ipi timeout, proceeding...\n");
258 + * Kludge - dump from interrupt context is unreliable (Fixme)
260 + * We do this so that softirqs initiated for dump i/o
261 + * get processed and we don't hang while waiting for i/o
262 + * to complete or in any irq synchronization attempt.
264 + * This is not quite legal of course, as it has the side
265 + * effect of making all interrupts & softirqs triggered
266 + * while dump is in progress complete before currently
267 + * pending softirqs and the currently executing interrupt
273 + saved_irq_count = irq_count();
274 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
278 +irq_bh_restore(void)
280 + preempt_count() |= saved_irq_count;
284 + * Name: __dump_configure_header()
285 + * Func: Configure the dump header with all proper values.
288 +__dump_configure_header(const struct pt_regs *regs)
294 +#define dim(x) (sizeof(x)/sizeof(*(x)))
297 + * Name: __dump_irq_enable
298 + * Func: Reset system so interrupts are enabled.
299 + * This is used for dump methods that require interrupts
300 + * Eventually, all methods will have interrupts disabled
301 + * and this code can be removed.
303 + * Change irq affinities
304 + * Re-enable interrupts
307 +__dump_irq_enable(void)
309 + set_irq_affinity();
313 + * reduce the task priority level
314 + * to get disk interrupts
316 + ia64_setreg(_IA64_REG_CR_TPR, 0);
318 + local_irq_enable();
323 + * Name: __dump_irq_restore
324 + * Func: Resume the system state in an architecture-specific way.
328 +__dump_irq_restore(void)
330 + local_irq_disable();
331 + reset_irq_affinity();
336 + * Name: __dump_page_valid()
337 + * Func: Check if page is valid to dump.
340 +__dump_page_valid(unsigned long index)
342 + if (!pfn_valid(index))
350 + * Name: __dump_init()
351 + * Func: Initialize the dumping routine process. This is in case
352 + * it's necessary in the future.
355 +__dump_init(uint64_t local_memory_start)
361 + * Name: __dump_open()
362 + * Func: Open the dump device (architecture specific). This is in
363 + * case it's necessary in the future.
374 + * Name: __dump_cleanup()
375 + * Func: Free any architecture specific data structures. This is called
376 + * when the dump module is being removed.
379 +__dump_cleanup(void)
388 +int __dump_memcpy_mc_expected = 0; /* Doesn't help yet */
391 + * An ia64 version of memcpy() that tries to avoid machine checks.
394 + * By itself __dump_memcpy_mc_expected() isn't providing any
395 + * protection against Machine Checks. We are looking into the
396 + * possibility of adding code to the arch/ia64/kernel/mca.c function
397 + * ia64_mca_ucmc_handler() to restore state so that an IA64_MCA_CORRECTED
398 + * can be returned to the firmware. Currently it always returns
399 + * IA64_MCA_COLD_BOOT and reboots the machine.
402 +void * __dump_memcpy(void * dest, const void *src, size_t count)
406 + if (__dump_memcpy_mc_expected) {
407 + ia64_pal_mc_expected((u64) 1, 0);
410 + vp = memcpy(dest, src, count);
412 + if (__dump_memcpy_mc_expected) {
413 + ia64_pal_mc_expected((u64) 0, 0);
419 + * Name: manual_handle_crashdump()
420 + * Func: Interface for the lkcd dump command. Calls dump_execute()
423 +manual_handle_crashdump(void) {
425 + struct pt_regs regs;
427 + get_current_regs(&regs);
428 + dump_execute("manual", &regs);
433 + * Name: __dump_clean_irq_state()
434 + * Func: Clean up from the previous IRQ handling state. Such as oops from
435 + * interrupt handler or bottom half.
438 +__dump_clean_irq_state(void)
440 + unsigned long saved_tpr;
441 + unsigned long TPR_MASK = 0xFFFFFFFFFFFEFF0F;
444 + /* Get the processors task priority register */
445 + saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
447 + /* clear the mmi and mic bits of the TPR to unmask interrupts */
447 + saved_tpr = saved_tpr & TPR_MASK;
448 + ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
451 + /* Tell the processor we're done with the interrupt
452 + * that got us here.
457 + /* local implementation of irq_exit(); */
458 + preempt_count() -= IRQ_EXIT_OFFSET;
459 + preempt_enable_no_resched();
464 Index: linux-2.6.10/drivers/dump/dump_setup.c
465 ===================================================================
466 --- linux-2.6.10.orig/drivers/dump/dump_setup.c 2005-04-07 19:34:21.197950744 +0800
467 +++ linux-2.6.10/drivers/dump/dump_setup.c 2005-04-07 18:13:56.914751488 +0800
470 + * Standard kernel function entry points for Linux crash dumps.
472 + * Created by: Matt Robinson (yakker@sourceforge.net)
473 + * Contributions from SGI, IBM, HP, MCL, and others.
475 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
476 + * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
477 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
478 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
480 + * This code is released under version 2 of the GNU GPL.
484 + * -----------------------------------------------------------------------
488 + * This dump code goes back to SGI's first attempts at dumping system
489 + * memory on SGI systems running IRIX. A few developers at SGI needed
490 + * a way to take this system dump and analyze it, and created 'icrash',
491 + * or IRIX Crash. The mechanism (the dumps and 'icrash') were used
492 + * by support people to generate crash reports when a system failure
493 + * occurred. This was vital for large system configurations that
494 + * couldn't apply patch after patch after fix just to hope that the
495 + * problems would go away. So the system memory, along with the crash
496 + * dump analyzer, allowed support people to quickly figure out what the
497 + * problem was on the system with the crash dump.
499 + * In comes Linux. SGI started moving towards the open source community,
500 + * and upon doing so, SGI wanted to take its support utilities into Linux
501 + * with the hopes that they would end up in the kernel and user space to
502 + * be used by SGI's customers buying SGI Linux systems. One of the first
503 + * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
504 + * Dumps. LKCD consists of a patch to the kernel to enable system
505 + * dumping, along with 'lcrash', or Linux Crash, to analyze the system
506 + * memory dump. A few additional system scripts and kernel modifications
507 + * are also included to make the dump mechanism and dump data easier to
510 + * As soon as LKCD was released into the open source community, a number
511 + * of larger companies started to take advantage of it. Today, there are
512 + * many community members that contribute to LKCD, and it continues to
513 + * flourish and grow as an open source project.
517 + * DUMP TUNABLES (read/write with ioctl, readonly with /proc)
519 + * This is the list of system tunables (via /proc) that are available
520 + * for Linux systems. All the read, write, etc., functions are listed
521 + * here. Currently, there are a few different tunables for dumps:
523 + * dump_device (used to be dumpdev):
524 + * The device for dumping the memory pages out to. This
525 + * may be set to the primary swap partition for disruptive dumps,
526 + * and must be an unused partition for non-disruptive dumps.
527 + * Todo: In the case of network dumps, this may be interpreted
528 + * as the IP address of the netdump server to connect to.
530 + * dump_compress (used to be dump_compress_pages):
531 + * This is the flag which indicates which compression mechanism
532 + * to use. This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
533 + * This is the current set of values:
535 + * 0: DUMP_COMPRESS_NONE -- Don't compress any pages.
536 + * 1: DUMP_COMPRESS_RLE -- This uses RLE compression.
537 + * 2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
540 + * The amount of effort the dump module should make to save
541 + * information for post crash analysis. This value is now
542 + * a BITMASK value, not an index:
544 + * 0: Do nothing, no dumping. (DUMP_LEVEL_NONE)
546 + * 1: Print out the dump information to the dump header, and
547 + * write it out to the dump_device. (DUMP_LEVEL_HEADER)
549 + * 2: Write out the dump header and all kernel memory pages.
550 + * (DUMP_LEVEL_KERN)
552 + * 4: Write out the dump header and all kernel and user
553 + * memory pages. (DUMP_LEVEL_USED)
555 + * 8: Write out the dump header and all conventional/cached
556 + * memory (RAM) pages in the system (kernel, user, free).
557 + * (DUMP_LEVEL_ALL_RAM)
559 + * 16: Write out everything, including non-conventional memory
560 + * like firmware, proms, I/O registers, uncached memory.
563 + * The dump_level will default to 1.
566 + * These are the flags to use when talking about dumps. There
567 + * are lots of possibilities. This is a BITMASK value, not an index.
569 + * -----------------------------------------------------------------------
572 +#include <linux/kernel.h>
573 +#include <linux/delay.h>
574 +#include <linux/reboot.h>
575 +#include <linux/fs.h>
576 +#include <linux/dump.h>
577 +#include <linux/ioctl32.h>
578 +#include <linux/syscalls.h>
579 +#include "dump_methods.h"
580 +#include <linux/proc_fs.h>
581 +#include <linux/module.h>
582 +#include <linux/utsname.h>
583 +#include <linux/highmem.h>
584 +#include <linux/miscdevice.h>
585 +#include <linux/sysrq.h>
586 +#include <linux/sysctl.h>
587 +#include <linux/nmi.h>
588 +#include <linux/init.h>
589 +#include <asm/hardirq.h>
590 +#include <asm/uaccess.h>
594 + * -----------------------------------------------------------------------
595 + * V A R I A B L E S
596 + * -----------------------------------------------------------------------
600 +struct dump_config dump_config = {
608 +static _dump_regs_t all_regs;
611 +/* Global variables used in dump.h */
612 +/* degree of system freeze when dumping */
613 +enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;
615 +/* Other global fields */
616 +extern struct __dump_header dump_header;
617 +struct dump_dev *dump_dev = NULL; /* Active dump device */
618 +static int dump_compress = 0;
620 +static u32 dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
621 + unsigned long loc);
622 +struct __dump_compress dump_none_compression = {
623 + .compress_type = DUMP_COMPRESS_NONE,
624 + .compress_func = dump_compress_none,
625 + .compress_name = "none",
628 +/* our device operations and functions */
629 +static int dump_ioctl(struct inode *i, struct file *f,
630 + unsigned int cmd, unsigned long arg);
632 +#ifdef CONFIG_COMPAT
633 +static int dw_long(unsigned int, unsigned int, unsigned long, struct file*);
636 +static struct file_operations dump_fops = {
637 + .owner = THIS_MODULE,
638 + .ioctl = dump_ioctl,
641 +static struct miscdevice dump_miscdev = {
642 + .minor = CRASH_DUMP_MINOR,
644 + .fops = &dump_fops,
646 +MODULE_ALIAS_MISCDEV(CRASH_DUMP_MINOR);
648 +/* static variables */
649 +static int dump_okay = 0; /* can we dump out to disk? */
650 +static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
652 +/* used for dump compressors */
653 +static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
655 +/* list of registered dump targets */
656 +static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
658 +/* lkcd info structure -- this is used by lcrash for basic system data */
659 +struct __lkcdinfo lkcdinfo = {
660 + .ptrsz = (sizeof(void *) * 8),
661 +#if defined(__LITTLE_ENDIAN)
662 + .byte_order = __LITTLE_ENDIAN,
664 + .byte_order = __BIG_ENDIAN,
666 + .page_shift = PAGE_SHIFT,
667 + .page_size = PAGE_SIZE,
668 + .page_mask = PAGE_MASK,
669 + .page_offset = PAGE_OFFSET,
673 + * -----------------------------------------------------------------------
674 + * / P R O C T U N A B L E F U N C T I O N S
675 + * -----------------------------------------------------------------------
678 +static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
679 + void __user *buffer, size_t *lenp, loff_t *ppos);
681 +static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
682 + void __user *buffer, size_t *lenp, loff_t *ppos);
684 + * sysctl-tuning infrastructure.
686 +static ctl_table dump_table[] = {
687 + { .ctl_name = CTL_DUMP_LEVEL,
688 + .procname = DUMP_LEVEL_NAME,
689 + .data = &dump_config.level,
690 + .maxlen = sizeof(int),
692 + .proc_handler = proc_doulonghex, },
694 + { .ctl_name = CTL_DUMP_FLAGS,
695 + .procname = DUMP_FLAGS_NAME,
696 + .data = &dump_config.flags,
697 + .maxlen = sizeof(int),
699 + .proc_handler = proc_doulonghex, },
701 + { .ctl_name = CTL_DUMP_COMPRESS,
702 + .procname = DUMP_COMPRESS_NAME,
703 + .data = &dump_compress, /* FIXME */
704 + .maxlen = sizeof(int),
706 + .proc_handler = proc_dointvec, },
708 + { .ctl_name = CTL_DUMP_DEVICE,
709 + .procname = DUMP_DEVICE_NAME,
711 + .data = &dump_config.dump_device, /* FIXME */
712 + .maxlen = sizeof(int),
713 + .proc_handler = proc_dump_device },
715 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
716 + { .ctl_name = CTL_DUMP_ADDR,
717 + .procname = DUMP_ADDR_NAME,
719 + .data = &dump_config.dump_addr,
720 + .maxlen = sizeof(unsigned long),
721 + .proc_handler = proc_doulonghex },
727 +static ctl_table dump_root[] = {
728 + { .ctl_name = KERN_DUMP,
729 + .procname = "dump",
731 + .child = dump_table },
735 +static ctl_table kernel_root[] = {
736 + { .ctl_name = CTL_KERN,
737 + .procname = "kernel",
739 + .child = dump_root, },
743 +static struct ctl_table_header *sysctl_header;
746 + * -----------------------------------------------------------------------
747 + * C O M P R E S S I O N F U N C T I O N S
748 + * -----------------------------------------------------------------------
752 + * Name: dump_compress_none()
753 + * Func: Don't do any compression, period.
756 +dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
759 + /* just return the old size */
765 + * Name: dump_execute()
766 + * Func: Execute the dumping process. This makes sure all the appropriate
767 + * fields are updated correctly, and calls dump_execute_memdump(),
768 + * which does the real work.
771 +dump_execute(const char *panic_str, const struct pt_regs *regs)
774 + unsigned long flags;
776 + /* make sure we can dump */
778 + pr_info("LKCD not yet configured, can't take dump now\n");
782 + /* Exclude multiple dumps at the same time,
783 + * and disable interrupts, some drivers may re-enable
784 + * interrupts in with silence()
786 + * Try and acquire spin lock. If successful, leave preempt
787 + * and interrupts disabled. See spin_lock_irqsave in spinlock.h
789 + local_irq_save(flags);
790 + if (!spin_trylock(&dump_lock)) {
791 + local_irq_restore(flags);
792 + pr_info("LKCD dump already in progress\n");
796 + /* What state are interrupts really in? */
797 + if (in_interrupt()){
799 + printk(KERN_ALERT "Dumping from interrupt handler!\n");
801 + printk(KERN_ALERT "Dumping from bottom half!\n");
803 + __dump_clean_irq_state();
807 + /* Bring system into the strictest level of quiescing for min drift
808 + * dump drivers can soften this as required in dev->ops->silence()
810 + dump_oncpu = smp_processor_id() + 1;
811 + dump_silence_level = DUMP_HARD_SPIN_CPUS;
813 + state = dump_generic_execute(panic_str, regs);
816 + spin_unlock_irqrestore(&dump_lock, flags);
819 + printk("Dump Incomplete or failed!\n");
821 + printk("Dump Complete; %d dump pages saved.\n",
822 + dump_header.dh_num_dump_pages);
827 + * Name: dump_register_compression()
828 + * Func: Register a dump compression mechanism.
831 +dump_register_compression(struct __dump_compress *item)
834 + list_add(&(item->list), &dump_compress_list);
838 + * Name: dump_unregister_compression()
839 + * Func: Remove a dump compression mechanism, and re-assign the dump
840 + * compression pointer if necessary.
843 +dump_unregister_compression(int compression_type)
845 + struct list_head *tmp;
846 + struct __dump_compress *dc;
848 + /* let's make sure our list is valid */
849 + if (compression_type != DUMP_COMPRESS_NONE) {
850 + list_for_each(tmp, &dump_compress_list) {
851 + dc = list_entry(tmp, struct __dump_compress, list);
852 + if (dc->compress_type == compression_type) {
853 + list_del(&(dc->list));
861 + * Name: dump_compress_init()
862 + * Func: Initialize (or re-initialize) compression scheme.
865 +dump_compress_init(int compression_type)
867 + struct list_head *tmp;
868 + struct __dump_compress *dc;
870 + /* try to remove the compression item */
871 + list_for_each(tmp, &dump_compress_list) {
872 + dc = list_entry(tmp, struct __dump_compress, list);
873 + if (dc->compress_type == compression_type) {
874 + dump_config.dumper->compress = dc;
875 + dump_compress = compression_type;
876 + pr_debug("Dump Compress %s\n", dc->compress_name);
882 + * nothing on the list -- return ENODATA to indicate an error
885 + * EAGAIN: reports "Resource temporarily unavailable" which
886 + * isn't very enlightening.
888 + printk("compression_type:%d not found\n", compression_type);
894 +dumper_setup(unsigned long flags, unsigned long devid)
898 + /* unconfigure old dumper if it exists */
900 + if (dump_config.dumper) {
901 + pr_debug("Unconfiguring current dumper\n");
902 + dump_unconfigure();
904 + /* set up new dumper */
905 + if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
906 + printk("Configuring softboot based dump \n");
907 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
908 + dump_config.dumper = &dumper_stage1;
910 + printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
914 + dump_config.dumper = &dumper_singlestage;
916 + dump_config.dumper->dev = dump_dev;
918 + ret = dump_configure(devid);
921 + pr_debug("%s dumper set up for dev 0x%lx\n",
922 + dump_config.dumper->name, devid);
923 + dump_config.dump_device = devid;
925 + printk("%s dumper set up failed for dev 0x%lx\n",
926 + dump_config.dumper->name, devid);
927 + dump_config.dumper = NULL;
933 +dump_target_init(int target)
936 + struct list_head *tmp;
937 + struct dump_dev *dev;
940 + case DUMP_FLAGS_DISKDUMP:
941 + strcpy(type, "blockdev"); break;
942 + case DUMP_FLAGS_NETDUMP:
943 + strcpy(type, "networkdev"); break;
949 + * This is a bit stupid, generating strings from flag
950 + * and doing strcmp. This is done because 'struct dump_dev'
951 + * has string 'type_name' and not integer 'type'.
953 + list_for_each(tmp, &dump_target_list) {
954 + dev = list_entry(tmp, struct dump_dev, list);
955 + if (strcmp(type, dev->type_name) == 0) {
964 + * Name: dump_ioctl()
965 + * Func: Allow all dump tunables through a standard ioctl() mechanism.
966 + * This is far better than before, where we'd go through /proc,
967 + * because now this will work for multiple OS and architectures.
970 +dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
972 + /* check capabilities */
973 + if (!capable(CAP_SYS_ADMIN))
976 + if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
977 + /* dump device must be configured first */
981 + * This is the main mechanism for controlling get/set data
982 + * for various dump device parameters. The real trick here
983 + * is setting the dump device (DIOSDUMPDEV). That's what
984 + * triggers everything else.
987 + case DIOSDUMPDEV: /* set dump_device */
988 + pr_debug("Configuring dump device\n");
989 + if (!(f->f_flags & O_RDWR))
993 + return dumper_setup(dump_config.flags, arg);
996 + case DIOGDUMPDEV: /* get dump_device */
997 + return put_user((long)dump_config.dump_device, (long *)arg);
999 + case DIOSDUMPLEVEL: /* set dump_level */
1000 + if (!(f->f_flags & O_RDWR))
1003 + /* make sure we have a positive value */
1007 + /* Fixme: clean this up */
1008 + dump_config.level = 0;
1009 + switch ((int)arg) {
1010 + case DUMP_LEVEL_ALL:
1011 + case DUMP_LEVEL_ALL_RAM:
1012 + dump_config.level |= DUMP_MASK_UNUSED;
1013 + case DUMP_LEVEL_USED:
1014 + dump_config.level |= DUMP_MASK_USED;
1015 + case DUMP_LEVEL_KERN:
1016 + dump_config.level |= DUMP_MASK_KERN;
1017 + case DUMP_LEVEL_HEADER:
1018 + dump_config.level |= DUMP_MASK_HEADER;
1019 + case DUMP_LEVEL_NONE:
1024 + pr_debug("Dump Level 0x%lx\n", dump_config.level);
1027 + case DIOGDUMPLEVEL: /* get dump_level */
1028 + /* fixme: handle conversion */
1029 + return put_user((long)dump_config.level, (long *)arg);
1032 + case DIOSDUMPFLAGS: /* set dump_flags */
1034 + if (!(f->f_flags & O_RDWR))
1037 + /* make sure we have a positive value */
1041 + if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
1042 + return -EINVAL; /* return proper error */
1044 + dump_config.flags = arg;
1046 + pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
1049 + case DIOGDUMPFLAGS: /* get dump_flags */
1050 + return put_user((long)dump_config.flags, (long *)arg);
1052 + case DIOSDUMPCOMPRESS: /* set the dump_compress status */
1053 + if (!(f->f_flags & O_RDWR))
1056 + return dump_compress_init((int)arg);
1058 + case DIOGDUMPCOMPRESS: /* get the dump_compress status */
1059 + return put_user((long)(dump_config.dumper ?
1060 + dump_config.dumper->compress->compress_type : 0),
1062 + case DIOGDUMPOKAY: /* check if dump is configured */
1063 + return put_user((long)dump_okay, (long *)arg);
1065 + case DIOSDUMPTAKE: /* Trigger a manual dump */
1066 + /* Do not proceed if lkcd not yet configured */
1068 + printk("LKCD not yet configured. Cannot take manual dump\n");
1072 + /* Take the dump */
1073 + return manual_handle_crashdump();
1077 + * these are network dump specific ioctls, let the
1078 + * module handle them.
1080 + return dump_dev_ioctl(cmd, arg);
1086 + * Handle special cases for dump_device
1087 + * changing dump device requires doing an opening the device
1090 +proc_dump_device(ctl_table *ctl, int write, struct file *f,
1091 + void __user *buffer, size_t *lenp, loff_t *ppos)
1093 + int *valp = ctl->data;
1097 + /* same permission checks as ioctl */
1098 + if (capable(CAP_SYS_ADMIN)) {
1099 + ret = proc_doulonghex(ctl, write, f, buffer, lenp, ppos);
1100 + if (ret == 0 && write && *valp != oval) {
1101 + /* need to restore old value to close properly */
1102 + dump_config.dump_device = (dev_t) oval;
1104 + ret = dumper_setup(dump_config.flags, (dev_t) *valp);
1111 +/* All for the want of a proc_do_xxx routine which prints values in hex */
1112 +/* Write is not implemented correctly, so mode is set to 0444 above. */
1114 +proc_doulonghex(ctl_table *ctl, int write, struct file *f,
1115 + void __user *buffer, size_t *lenp, loff_t *ppos)
1117 +#define TMPBUFLEN 21
1120 + char buf[TMPBUFLEN];
1122 + if (!ctl->data || !ctl->maxlen || !*lenp || (*ppos && !write)) {
1127 + i = (unsigned long *) ctl->data;
1130 + sprintf(buf, "0x%lx\n", (*i));
1131 + len = strlen(buf);
1134 + if(copy_to_user(buffer, buf, len))
1144 + * -----------------------------------------------------------------------
1145 + * I N I T F U N C T I O N S
1146 + * -----------------------------------------------------------------------
1149 +#ifdef CONFIG_COMPAT
1150 +static int dw_long(unsigned int fd, unsigned int cmd, unsigned long arg,
1153 + mm_segment_t old_fs = get_fs();
1155 + unsigned long val;
1157 + set_fs (KERNEL_DS);
1158 + err = sys_ioctl(fd, cmd, (u64)&val);
1160 + if (!err && put_user((unsigned int) val, (u32 *)arg))
1167 + * These register and unregister routines are exported for modules
1168 + * to register their dump drivers (like block, net etc)
1171 +dump_register_device(struct dump_dev *ddev)
1173 + struct list_head *tmp;
1174 + struct dump_dev *dev;
1176 + list_for_each(tmp, &dump_target_list) {
1177 + dev = list_entry(tmp, struct dump_dev, list);
1178 + if (strcmp(ddev->type_name, dev->type_name) == 0) {
1179 + printk("Target type %s already registered\n",
1181 + return -1; /* return proper error */
1184 + list_add(&(ddev->list), &dump_target_list);
1190 +dump_unregister_device(struct dump_dev *ddev)
1192 + list_del(&(ddev->list));
1193 + if (ddev != dump_dev)
1198 + if (dump_config.dumper)
1199 + dump_unconfigure();
1201 + dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
1204 + dump_config.dumper = NULL;
1207 +static int panic_event(struct notifier_block *this, unsigned long event,
1211 + get_current_general_regs(&all_regs);
1212 + get_current_cp14_regs(&all_regs);
1213 + get_current_cp15_regs(&all_regs);
1214 + dump_execute((const char *)ptr, &all_regs);
1216 + struct pt_regs regs;
1218 + get_current_regs(&regs);
1219 + dump_execute((const char *)ptr, &regs);
1224 +extern struct notifier_block *panic_notifier_list;
1225 +static int panic_event(struct notifier_block *, unsigned long, void *);
1226 +static struct notifier_block panic_block = {
1227 + .notifier_call = panic_event,
1230 +#ifdef CONFIG_MAGIC_SYSRQ
1231 +/* Sysrq handler */
1232 +static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
1233 + struct tty_struct *tty) {
1235 + struct pt_regs regs;
1236 + get_current_regs(&regs);
1237 + dump_execute("sysrq", &regs);
1240 + dump_execute("sysrq", pt_regs);
1244 +static struct sysrq_key_op sysrq_crashdump_op = {
1245 + .handler = sysrq_handle_crashdump,
1246 + .help_msg = "Dump",
1247 + .action_msg = "Starting crash dump",
1252 +dump_sysrq_register(void)
1254 +#ifdef CONFIG_MAGIC_SYSRQ
1255 + register_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
1260 +dump_sysrq_unregister(void)
1262 +#ifdef CONFIG_MAGIC_SYSRQ
1263 + unregister_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
1268 + * Name: dump_init()
1269 + * Func: Initialize the dump process. This will set up any architecture
1270 + * dependent code. The big key is we need the memory offsets before
1271 + * the page table is initialized, because the base memory offset
1272 + * is changed after paging_init() is called.
1277 + struct sysinfo info;
1280 + /* try to create our dump device */
1281 + err = misc_register(&dump_miscdev);
1283 + printk("cannot register dump character device!\n");
1287 + __dump_init((u64)PAGE_OFFSET);
1289 +#ifdef CONFIG_COMPAT
1290 + err = register_ioctl32_conversion(DIOSDUMPDEV, NULL);
1291 + err |= register_ioctl32_conversion(DIOGDUMPDEV, NULL);
1292 + err |= register_ioctl32_conversion(DIOSDUMPLEVEL, NULL);
1293 + err |= register_ioctl32_conversion(DIOGDUMPLEVEL, dw_long);
1294 + err |= register_ioctl32_conversion(DIOSDUMPFLAGS, NULL);
1295 + err |= register_ioctl32_conversion(DIOGDUMPFLAGS, dw_long);
1296 + err |= register_ioctl32_conversion(DIOSDUMPCOMPRESS, NULL);
1297 + err |= register_ioctl32_conversion(DIOGDUMPCOMPRESS, dw_long);
1298 + err |= register_ioctl32_conversion(DIOSTARGETIP, NULL);
1299 + err |= register_ioctl32_conversion(DIOGTARGETIP, NULL);
1300 + err |= register_ioctl32_conversion(DIOSTARGETPORT, NULL);
1301 + err |= register_ioctl32_conversion(DIOGTARGETPORT, NULL);
1302 + err |= register_ioctl32_conversion(DIOSSOURCEPORT, NULL);
1303 + err |= register_ioctl32_conversion(DIOGSOURCEPORT, NULL);
1304 + err |= register_ioctl32_conversion(DIOSETHADDR, NULL);
1305 + err |= register_ioctl32_conversion(DIOGETHADDR, NULL);
1306 + err |= register_ioctl32_conversion(DIOGDUMPOKAY, dw_long);
1307 + err |= register_ioctl32_conversion(DIOSDUMPTAKE, NULL);
1309 + printk(KERN_ERR "LKCD: registering ioctl32 translations failed\
1313 + /* set the dump_compression_list structure up */
1314 + dump_register_compression(&dump_none_compression);
1316 + /* grab the total memory size now (not if/when we crash) */
1317 + si_meminfo(&info);
1319 + /* set the memory size */
1320 + dump_header.dh_memory_size = (u64)info.totalram;
1322 + sysctl_header = register_sysctl_table(kernel_root, 0);
1323 + dump_sysrq_register();
1325 + notifier_chain_register(&panic_notifier_list, &panic_block);
1326 + dump_function_ptr = dump_execute;
1328 + pr_info("Crash dump driver initialized.\n");
1338 + if (dump_config.dumper)
1339 + dump_unconfigure();
1341 + /* arch-specific cleanup routine */
1344 +#ifdef CONFIG_COMPAT
1345 + err = unregister_ioctl32_conversion(DIOSDUMPDEV);
1346 + err |= unregister_ioctl32_conversion(DIOGDUMPDEV);
1347 + err |= unregister_ioctl32_conversion(DIOSDUMPLEVEL);
1348 + err |= unregister_ioctl32_conversion(DIOGDUMPLEVEL);
1349 + err |= unregister_ioctl32_conversion(DIOSDUMPFLAGS);
1350 + err |= unregister_ioctl32_conversion(DIOGDUMPFLAGS);
1351 + err |= unregister_ioctl32_conversion(DIOSDUMPCOMPRESS);
1352 + err |= unregister_ioctl32_conversion(DIOGDUMPCOMPRESS);
1353 + err |= unregister_ioctl32_conversion(DIOSTARGETIP);
1354 + err |= unregister_ioctl32_conversion(DIOGTARGETIP);
1355 + err |= unregister_ioctl32_conversion(DIOSTARGETPORT);
1356 + err |= unregister_ioctl32_conversion(DIOGTARGETPORT);
1357 + err |= unregister_ioctl32_conversion(DIOSSOURCEPORT);
1358 + err |= unregister_ioctl32_conversion(DIOGSOURCEPORT);
1359 + err |= unregister_ioctl32_conversion(DIOSETHADDR);
1360 + err |= unregister_ioctl32_conversion(DIOGETHADDR);
1361 + err |= unregister_ioctl32_conversion(DIOGDUMPOKAY);
1362 + err |= unregister_ioctl32_conversion(DIOSDUMPTAKE);
1364 + printk(KERN_ERR "LKCD: Unregistering ioctl32 translations failed\n");
1368 + /* ignore errors while unregistering -- since can't do anything */
1369 + unregister_sysctl_table(sysctl_header);
1370 + misc_deregister(&dump_miscdev);
1371 + dump_sysrq_unregister();
1372 + notifier_chain_unregister(&panic_notifier_list, &panic_block);
1373 + dump_function_ptr = NULL;
1376 +EXPORT_SYMBOL(dump_register_compression);
1377 +EXPORT_SYMBOL(dump_unregister_compression);
1378 +EXPORT_SYMBOL(dump_register_device);
1379 +EXPORT_SYMBOL(dump_unregister_device);
1380 +EXPORT_SYMBOL(dump_config);
1381 +EXPORT_SYMBOL(dump_silence_level);
1383 +EXPORT_SYMBOL(__dump_irq_enable);
1384 +EXPORT_SYMBOL(__dump_irq_restore);
1386 +MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
1387 +MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
1388 +MODULE_LICENSE("GPL");
1390 +module_init(dump_init);
1391 +module_exit(dump_cleanup);
1392 Index: linux-2.6.10/drivers/dump/dump_execute.c
1393 ===================================================================
1394 --- linux-2.6.10.orig/drivers/dump/dump_execute.c 2005-04-07 19:34:21.197950744 +0800
1395 +++ linux-2.6.10/drivers/dump/dump_execute.c 2005-04-07 18:13:56.898753920 +0800
1398 + * The file has the common/generic dump execution code
1400 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
1401 + * Split and rewrote high level dump execute code to make use
1402 + * of dump method interfaces.
1404 + * Derived from original code in dump_base.c created by
1405 + * Matt Robinson <yakker@sourceforge.net>)
1407 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
1408 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
1409 + * Copyright (C) 2002 International Business Machines Corp.
1411 + * Assumes dumper and dump config settings are in place
1412 + * (invokes corresponding dumper specific routines as applicable)
1414 + * This code is released under version 2 of the GNU GPL.
1416 +#include <linux/kernel.h>
1417 +#include <linux/notifier.h>
1418 +#include <linux/dump.h>
1419 +#include <linux/delay.h>
1420 +#include <linux/reboot.h>
1421 +#include "dump_methods.h"
1423 +struct notifier_block *dump_notifier_list; /* dump started/ended callback */
1425 +extern int panic_timeout;
1427 +/* Dump progress indicator */
1431 + static const char twiddle[4] = { '|', '\\', '-', '/' };
1432 + printk("%c\b", twiddle[i&3]);
1435 +/* Make the device ready and write out the header */
1436 +int dump_begin(void)
1440 + /* dump_dev = dump_config.dumper->dev; */
1442 + if ((err = dump_dev_silence())) {
1443 + /* quiesce failed, can't risk continuing */
1444 + /* Todo/Future: switch to alternate dump scheme if possible */
1445 + printk("dump silence dev failed ! error %d\n", err);
1449 + pr_debug("Writing dump header\n");
1450 + if ((err = dump_update_header())) {
1451 + printk("dump update header failed ! error %d\n", err);
1452 + dump_dev_resume();
1456 + dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
1462 + * Write the dump terminator, a final header update and let go of
1463 + * exclusive use of the device for dump.
1465 +int dump_complete(void)
1469 + if (dump_config.level != DUMP_LEVEL_HEADER) {
1470 + if ((ret = dump_update_end_marker())) {
1471 + printk("dump update end marker error %d\n", ret);
1473 + if ((ret = dump_update_header())) {
1474 + printk("dump update header error %d\n", ret);
1477 + ret = dump_dev_resume();
1479 + if ((panic_timeout > 0) && (!(dump_config.flags & (DUMP_FLAGS_SOFTBOOT | DUMP_FLAGS_NONDISRUPT)))) {
1480 + mdelay(panic_timeout * 1000);
1481 + machine_restart(NULL);
1487 +/* Saves all dump data */
1488 +int dump_execute_savedump(void)
1490 + int ret = 0, err = 0;
1492 + if ((ret = dump_begin())) {
1496 + if (dump_config.level != DUMP_LEVEL_HEADER) {
1497 + ret = dump_sequencer();
1499 + if ((err = dump_complete())) {
1500 + printk("Dump complete failed. Error %d\n", err);
1506 +extern void dump_calc_bootmap_pages(void);
1508 +/* Does all the real work: Capture and save state */
1509 +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
1513 +#ifdef CONFIG_DISCONTIGMEM
1514 + printk(KERN_INFO "Reconfiguring memory bank information....\n");
1515 + printk(KERN_INFO "This may take a while....\n");
1516 + dump_reconfigure_mbanks();
1519 + if ((ret = dump_configure_header(panic_str, regs))) {
1520 + printk("dump config header failed ! error %d\n", ret);
1524 + dump_calc_bootmap_pages();
1525 + /* tell interested parties that a dump is about to start */
1526 + notifier_call_chain(&dump_notifier_list, DUMP_BEGIN,
1527 + &dump_config.dump_device);
1529 + if (dump_config.level != DUMP_LEVEL_NONE)
1530 + ret = dump_execute_savedump();
1532 + pr_debug("dumped %ld blocks of %d bytes each\n",
1533 + dump_config.dumper->count, DUMP_BUFFER_SIZE);
1535 + /* tell interested parties that a dump has completed */
1536 + notifier_call_chain(&dump_notifier_list, DUMP_END,
1537 + &dump_config.dump_device);
1541 Index: linux-2.6.10/drivers/dump/dump_x8664.c
1542 ===================================================================
1543 --- linux-2.6.10.orig/drivers/dump/dump_x8664.c 2005-04-07 19:34:21.197950744 +0800
1544 +++ linux-2.6.10/drivers/dump/dump_x8664.c 2005-04-07 18:13:56.901753464 +0800
1547 + * Architecture specific (x86-64) functions for Linux crash dumps.
1549 + * Created by: Matt Robinson (yakker@sgi.com)
1551 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
1553 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
1554 + * Copyright 2000 TurboLinux, Inc. All rights reserved.
1556 + * x86-64 port Copyright 2002 Andi Kleen, SuSE Labs
1557 + * x86-64 port Sachin Sant ( sachinp@in.ibm.com )
1558 + * This code is released under version 2 of the GNU GPL.
1562 + * The hooks for dumping the kernel virtual memory to disk are in this
1563 + * file. Any time a modification is made to the virtual memory mechanism,
1564 + * these routines must be changed to use the new mechanisms.
1566 +#include <linux/init.h>
1567 +#include <linux/types.h>
1568 +#include <linux/kernel.h>
1569 +#include <linux/smp.h>
1570 +#include <linux/fs.h>
1571 +#include <linux/vmalloc.h>
1572 +#include <linux/dump.h>
1573 +#include "dump_methods.h"
1574 +#include <linux/mm.h>
1575 +#include <linux/rcupdate.h>
1576 +#include <asm/processor.h>
1577 +#include <asm/hardirq.h>
1578 +#include <asm/kdebug.h>
1579 +#include <asm/uaccess.h>
1580 +#include <asm/nmi.h>
1581 +#include <asm/kdebug.h>
1583 +static __s32 saved_irq_count; /* saved preempt_count() flag */
1585 +void (*dump_trace_ptr)(struct pt_regs *);
1587 +static int alloc_dha_stack(void)
1592 + if (dump_header_asm.dha_stack[0])
1595 + ptr = vmalloc(THREAD_SIZE * num_online_cpus());
1597 + printk("vmalloc for dha_stacks failed\n");
1601 + for (i = 0; i < num_online_cpus(); i++) {
1602 + dump_header_asm.dha_stack[i] =
1603 + (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
1608 +static int free_dha_stack(void)
1610 + if (dump_header_asm.dha_stack[0]) {
1611 + vfree((void *)dump_header_asm.dha_stack[0]);
1612 + dump_header_asm.dha_stack[0] = 0;
1618 +__dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs)
1621 + memcpy(dest_regs, regs, sizeof(struct pt_regs));
1625 +__dump_save_context(int cpu, const struct pt_regs *regs,
1626 + struct task_struct *tsk)
1628 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
1629 + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
1631 + /* take a snapshot of the stack */
1632 + /* doing this enables us to tolerate slight drifts on this cpu */
1634 + if (dump_header_asm.dha_stack[cpu]) {
1635 + memcpy((void *)dump_header_asm.dha_stack[cpu],
1636 + STACK_START_POSITION(tsk),
1639 + dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
1643 +extern cpumask_t irq_affinity[];
1644 +extern irq_desc_t irq_desc[];
1645 +extern void dump_send_ipi(void);
1646 +static int dump_expect_ipi[NR_CPUS];
1647 +static atomic_t waiting_for_dump_ipi;
1648 +static unsigned long saved_affinity[NR_IRQS];
1650 +extern void stop_this_cpu(void *);
1653 +dump_nmi_callback(struct pt_regs *regs, int cpu)
1655 + if (!dump_expect_ipi[cpu]) {
1659 + dump_expect_ipi[cpu] = 0;
1661 + dump_save_this_cpu(regs);
1662 + atomic_dec(&waiting_for_dump_ipi);
1666 + switch (dump_silence_level) {
1667 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
1668 + while (dump_oncpu) {
1669 + barrier(); /* paranoia */
1670 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
1671 + goto level_changed;
1673 + cpu_relax(); /* kill time nicely */
1677 + case DUMP_HALT_CPUS: /* Execute halt */
1678 + stop_this_cpu(NULL);
1681 + case DUMP_SOFT_SPIN_CPUS:
1682 + /* Mark the task so it spins in schedule */
1683 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
1690 +/* save registers on other processors */
1692 +__dump_save_other_cpus(void)
1694 + int i, cpu = smp_processor_id();
1695 + int other_cpus = num_online_cpus() - 1;
1697 + if (other_cpus > 0) {
1698 + atomic_set(&waiting_for_dump_ipi, other_cpus);
1700 + for (i = 0; i < NR_CPUS; i++)
1701 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
1703 + set_nmi_callback(dump_nmi_callback);
 1708 + /* maybe we don't need to wait for NMI to be processed.
1709 + just write out the header at the end of dumping, if
 1710 + this IPI is not processed until then, there probably
1711 + is a problem and we just fail to capture state of
1713 + while(atomic_read(&waiting_for_dump_ipi) > 0)
1716 + unset_nmi_callback();
1722 + * Routine to save the old irq affinities and change affinities of all irqs to
1723 + * the dumping cpu.
1726 +set_irq_affinity(void)
1729 + cpumask_t cpu = CPU_MASK_NONE;
1731 + cpu_set(smp_processor_id(), cpu);
1732 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
1733 + for (i = 0; i < NR_IRQS; i++) {
1734 + if (irq_desc[i].handler == NULL)
1736 + irq_affinity[i] = cpu;
1737 + if (irq_desc[i].handler->set_affinity != NULL)
1738 + irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
1743 + * Restore old irq affinities.
1746 +reset_irq_affinity(void)
1750 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
1751 + for (i = 0; i < NR_IRQS; i++) {
1752 + if (irq_desc[i].handler == NULL)
1754 + if (irq_desc[i].handler->set_affinity != NULL)
1755 + irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
1759 +#else /* !CONFIG_SMP */
1760 +#define set_irq_affinity() do { } while (0)
1761 +#define reset_irq_affinity() do { } while (0)
1762 +#define save_other_cpu_states() do { } while (0)
1763 +#endif /* !CONFIG_SMP */
1768 + saved_irq_count = irq_count();
1769 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
1773 +irq_bh_restore(void)
1775 + preempt_count() |= saved_irq_count;
1779 + * Name: __dump_irq_enable
1780 + * Func: Reset system so interrupts are enabled.
1781 + * This is used for dump methods that require interrupts
1782 + * Eventually, all methods will have interrupts disabled
1783 + * and this code can be removed.
1785 + * Change irq affinities
1786 + * Re-enable interrupts
1789 +__dump_irq_enable(void)
1791 + set_irq_affinity();
1793 + local_irq_enable();
1798 + * Name: __dump_irq_restore
 1799 + * Func: Resume the system state in an architecture-specific way.
1803 +__dump_irq_restore(void)
1805 + local_irq_disable();
1806 + reset_irq_affinity();
1811 + * Name: __dump_configure_header()
1812 + * Func: Configure the dump header with all proper values.
1815 +__dump_configure_header(const struct pt_regs *regs)
1817 + /* Dummy function - return */
1821 +static int notify(struct notifier_block *nb, unsigned long code, void *data)
1823 + if (code == DIE_NMI_IPI && dump_oncpu)
1824 + return NOTIFY_BAD;
1825 + return NOTIFY_DONE;
1828 +static struct notifier_block dump_notifier = {
1829 + .notifier_call = notify,
1833 + * Name: __dump_init()
1834 + * Func: Initialize the dumping routine process.
1837 +__dump_init(uint64_t local_memory_start)
1839 + notifier_chain_register(&die_chain, &dump_notifier);
1843 + * Name: __dump_open()
1844 + * Func: Open the dump device (architecture specific). This is in
1845 + * case it's necessary in the future.
1850 + alloc_dha_stack();
1856 + * Name: __dump_cleanup()
1857 + * Func: Free any architecture specific data structures. This is called
1858 + * when the dump module is being removed.
1861 +__dump_cleanup(void)
1864 + notifier_chain_unregister(&die_chain, &dump_notifier);
1865 + synchronize_kernel();
1869 +extern int page_is_ram(unsigned long);
1872 + * Name: __dump_page_valid()
1873 + * Func: Check if page is valid to dump.
1876 +__dump_page_valid(unsigned long index)
1878 + if (!pfn_valid(index))
1881 + return page_is_ram(index);
1885 + * Name: manual_handle_crashdump()
1886 + * Func: Interface for the lkcd dump command. Calls dump_execute()
1889 +manual_handle_crashdump(void) {
1891 + struct pt_regs regs;
1893 + get_current_regs(®s);
1894 + dump_execute("manual", ®s);
1899 + * Name: __dump_clean_irq_state()
1900 + * Func: Clean up from the previous IRQ handling state. Such as oops from
1901 + * interrupt handler or bottom half.
1904 +__dump_clean_irq_state(void)
1908 Index: linux-2.6.10/drivers/dump/dump_rle.c
1909 ===================================================================
1910 --- linux-2.6.10.orig/drivers/dump/dump_rle.c 2005-04-07 19:34:21.197950744 +0800
1911 +++ linux-2.6.10/drivers/dump/dump_rle.c 2005-04-07 18:13:56.897754072 +0800
1914 + * RLE Compression functions for kernel crash dumps.
1916 + * Created by: Matt Robinson (yakker@sourceforge.net)
1917 + * Copyright 2001 Matt D. Robinson. All rights reserved.
1919 + * This code is released under version 2 of the GNU GPL.
1923 +#include <linux/config.h>
1924 +#include <linux/module.h>
1925 +#include <linux/sched.h>
1926 +#include <linux/fs.h>
1927 +#include <linux/file.h>
1928 +#include <linux/init.h>
1929 +#include <linux/dump.h>
1932 + * Name: dump_compress_rle()
1933 + * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
1934 + * reasonable, if possible. This is the same routine we use in IRIX.
1937 +dump_compress_rle(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
1938 + unsigned long loc)
1940 + u16 ri, wi, count = 0;
1941 + u_char value = 0, cur_byte;
1944 + * If the block should happen to "compress" to larger than the
1945 + * buffer size, allocate a larger one and change cur_buf_size.
1950 + while (ri < oldsize) {
1952 + cur_byte = value = old[ri];
1955 + if (count == 255) {
1956 + if (wi + 3 > oldsize) {
1960 + new[wi++] = count;
1961 + new[wi++] = value;
1962 + value = cur_byte = old[ri];
1965 + if ((cur_byte = old[ri]) == value) {
1969 + if (wi + 3 > oldsize) {
1973 + new[wi++] = count;
1974 + new[wi++] = value;
1975 + } else if (count == 1) {
1977 + if (wi + 3 > oldsize) {
1984 + if (wi + 2 > oldsize) {
1987 + new[wi++] = value;
1988 + new[wi++] = value;
1990 + } else { /* count == 0 */
1992 + if (wi + 2 > oldsize) {
1995 + new[wi++] = value;
1996 + new[wi++] = value;
1998 + if (wi + 1 > oldsize) {
2001 + new[wi++] = value;
2003 + } /* if count > 1 */
2008 + } /* if byte == value */
2010 + } /* if count == 255 */
2012 + } /* if ri == 0 */
2017 + if (wi + 3 > oldsize) {
2021 + new[wi++] = count;
2022 + new[wi++] = value;
2023 + } else if (count == 1) {
2025 + if (wi + 3 > oldsize)
2031 + if (wi + 2 > oldsize)
2033 + new[wi++] = value;
2034 + new[wi++] = value;
2036 + } else { /* count == 0 */
2038 + if (wi + 2 > oldsize)
2040 + new[wi++] = value;
2041 + new[wi++] = value;
2043 + if (wi + 1 > oldsize)
2045 + new[wi++] = value;
2047 + } /* if count > 1 */
2054 +/* setup the rle compression functionality */
2055 +static struct __dump_compress dump_rle_compression = {
2056 + .compress_type = DUMP_COMPRESS_RLE,
2057 + .compress_func = dump_compress_rle,
2058 + .compress_name = "RLE",
2062 + * Name: dump_compress_rle_init()
2063 + * Func: Initialize rle compression for dumping.
2066 +dump_compress_rle_init(void)
2068 + dump_register_compression(&dump_rle_compression);
2073 + * Name: dump_compress_rle_cleanup()
2074 + * Func: Remove rle compression for dumping.
2077 +dump_compress_rle_cleanup(void)
2079 + dump_unregister_compression(DUMP_COMPRESS_RLE);
2082 +/* module initialization */
2083 +module_init(dump_compress_rle_init);
2084 +module_exit(dump_compress_rle_cleanup);
2086 +MODULE_LICENSE("GPL");
2087 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
2088 +MODULE_DESCRIPTION("RLE compression module for crash dump driver");
2089 Index: linux-2.6.10/drivers/dump/dump_overlay.c
2090 ===================================================================
2091 --- linux-2.6.10.orig/drivers/dump/dump_overlay.c 2005-04-07 19:34:21.197950744 +0800
2092 +++ linux-2.6.10/drivers/dump/dump_overlay.c 2005-04-07 18:13:56.905752856 +0800
2095 + * Two-stage soft-boot based dump scheme methods (memory overlay
2096 + * with post soft-boot writeout)
2098 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
2100 + * This approach of saving the dump in memory and writing it
2101 + * out after a softboot without clearing memory is derived from the
2102 + * Mission Critical Linux dump implementation. Credits and a big
2103 + * thanks for letting the lkcd project make use of the excellent
2104 + * piece of work and also for helping with clarifications and
2105 + * tips along the way are due to:
2106 + * Dave Winchell <winchell@mclx.com> (primary author of mcore)
2108 + * Jeff Moyer <moyer@mclx.com>
2109 + * Josh Huber <huber@mclx.com>
2111 + * For those familiar with the mcore implementation, the key
2112 + * differences/extensions here are in allowing entire memory to be
2113 + * saved (in compressed form) through a careful ordering scheme
2114 + * on both the way down as well on the way up after boot, the latter
2115 + * for supporting the LKCD notion of passes in which most critical
2116 + * data is the first to be saved to the dump device. Also the post
2117 + * boot writeout happens from within the kernel rather than driven
2120 + * The sequence is orchestrated through the abstraction of "dumpers",
2121 + * one for the first stage which then sets up the dumper for the next
2122 + * stage, providing for a smooth and flexible reuse of the singlestage
2123 + * dump scheme methods and a handle to pass dump device configuration
2124 + * information across the soft boot.
2126 + * Copyright (C) 2002 International Business Machines Corp.
2128 + * This code is released under version 2 of the GNU GPL.
2132 + * Disruptive dumping using the second kernel soft-boot option
2133 + * for issuing dump i/o operates in 2 stages:
2135 + * (1) - Saves the (compressed & formatted) dump in memory using a
2136 + * carefully ordered overlay scheme designed to capture the
2137 + * entire physical memory or selective portions depending on
2138 + * dump config settings,
2139 + * - Registers the stage 2 dumper and
2140 + * - Issues a soft reboot w/o clearing memory.
2142 + * The overlay scheme starts with a small bootstrap free area
2143 + * and follows a reverse ordering of passes wherein it
2144 + * compresses and saves data starting with the least critical
2145 + * areas first, thus freeing up the corresponding pages to
2146 + * serve as destination for subsequent data to be saved, and
2147 + * so on. With a good compression ratio, this makes it feasible
2148 + * to capture an entire physical memory dump without significantly
2149 + * reducing memory available during regular operation.
2151 + * (2) Post soft-reboot, runs through the saved memory dump and
2152 + * writes it out to disk, this time around, taking care to
2153 + * save the more critical data first (i.e. pages which figure
2154 + * in early passes for a regular dump). Finally issues a
2157 + * Since the data was saved in memory after selection/filtering
2158 + * and formatted as per the chosen output dump format, at this
2159 + * stage the filter and format actions are just dummy (or
2160 + * passthrough) actions, except for influence on ordering of
2164 +#include <linux/types.h>
2165 +#include <linux/kernel.h>
2166 +#include <linux/highmem.h>
2167 +#include <linux/bootmem.h>
2168 +#include <linux/dump.h>
2169 +#ifdef CONFIG_KEXEC
2170 +#include <linux/delay.h>
2171 +#include <linux/reboot.h>
2172 +#include <linux/kexec.h>
2174 +#include "dump_methods.h"
2176 +extern struct list_head dumper_list_head;
2177 +extern struct dump_memdev *dump_memdev;
2178 +extern struct dumper dumper_stage2;
2179 +struct dump_config_block *dump_saved_config = NULL;
2180 +extern struct dump_blockdev *dump_blockdev;
2181 +static struct dump_memdev *saved_dump_memdev = NULL;
2182 +static struct dumper *saved_dumper = NULL;
2184 +#ifdef CONFIG_KEXEC
2185 +extern int panic_timeout;
2189 +extern void dump_display_map(struct dump_memdev *);
2192 +struct dumper *dumper_by_name(char *name)
2195 + struct dumper *dumper;
2196 + list_for_each_entry(dumper, &dumper_list_head, dumper_list)
2197 + if (!strncmp(dumper->name, name, 32))
2203 + /* Temporary proof of concept */
2204 + if (!strncmp(dumper_stage2.name, name, 32))
2205 + return &dumper_stage2;
2210 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
2211 +extern void dump_early_reserve_map(struct dump_memdev *);
2213 +void crashdump_reserve(void)
2215 + extern unsigned long crashdump_addr;
2217 + if (crashdump_addr == 0xdeadbeef)
2220 + /* reserve dump config and saved dump pages */
2221 + dump_saved_config = (struct dump_config_block *)crashdump_addr;
2222 + /* magic verification */
2223 + if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
2224 + printk("Invalid dump magic. Ignoring dump\n");
2225 + dump_saved_config = NULL;
2229 + printk("Dump may be available from previous boot\n");
2231 +#ifdef CONFIG_X86_64
2232 + reserve_bootmem_node(NODE_DATA(0),
2233 + virt_to_phys((void *)crashdump_addr),
2234 + PAGE_ALIGN(sizeof(struct dump_config_block)));
2236 + reserve_bootmem(virt_to_phys((void *)crashdump_addr),
2237 + PAGE_ALIGN(sizeof(struct dump_config_block)));
2239 + dump_early_reserve_map(&dump_saved_config->memdev);
2245 + * Loads the dump configuration from a memory block saved across soft-boot
2246 + * The ops vectors need fixing up as the corresp. routines may have
2247 + * relocated in the new soft-booted kernel.
2249 +int dump_load_config(struct dump_config_block *config)
2251 + struct dumper *dumper;
2252 + struct dump_data_filter *filter_table, *filter;
2253 + struct dump_dev *dev;
2256 + if (config->magic != DUMP_MAGIC_LIVE)
2257 + return -ENOENT; /* not a valid config */
2259 + /* initialize generic config data */
2260 + memcpy(&dump_config, &config->config, sizeof(dump_config));
2262 + /* initialize dumper state */
2263 + if (!(dumper = dumper_by_name(config->dumper.name))) {
2264 + printk("dumper name mismatch\n");
2265 + return -ENOENT; /* dumper mismatch */
2268 + /* verify and fixup schema */
2269 + if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
2270 + printk("dumper scheme mismatch\n");
2271 + return -ENOENT; /* mismatch */
2273 + config->scheme.ops = dumper->scheme->ops;
2274 + config->dumper.scheme = &config->scheme;
2276 + /* verify and fixup filter operations */
2277 + filter_table = dumper->filter;
2278 + for (i = 0, filter = config->filter_table;
2279 + ((i < MAX_PASSES) && filter_table[i].selector);
2281 + if (strncmp(filter_table[i].name, filter->name, 32)) {
2282 + printk("dump filter mismatch\n");
2283 + return -ENOENT; /* filter name mismatch */
2285 + filter->selector = filter_table[i].selector;
2287 + config->dumper.filter = config->filter_table;
2289 + /* fixup format */
2290 + if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
2291 + printk("dump format mismatch\n");
2292 + return -ENOENT; /* mismatch */
2294 + config->fmt.ops = dumper->fmt->ops;
2295 + config->dumper.fmt = &config->fmt;
2297 + /* fixup target device */
2298 + dev = (struct dump_dev *)(&config->dev[0]);
2299 + if (dumper->dev == NULL) {
2300 + pr_debug("Vanilla dumper - assume default\n");
2301 + if (dump_dev == NULL)
2303 + dumper->dev = dump_dev;
2306 + if (strncmp(dumper->dev->type_name, dev->type_name, 32)) {
2307 + printk("dump dev type mismatch %s instead of %s\n",
2308 + dev->type_name, dumper->dev->type_name);
2309 + return -ENOENT; /* mismatch */
2311 + dev->ops = dumper->dev->ops;
2312 + config->dumper.dev = dev;
2314 + /* fixup memory device containing saved dump pages */
2315 + /* assume statically init'ed dump_memdev */
2316 + config->memdev.ddev.ops = dump_memdev->ddev.ops;
2317 + /* switch to memdev from prev boot */
2318 + saved_dump_memdev = dump_memdev; /* remember current */
2319 + dump_memdev = &config->memdev;
2321 + /* Make this the current primary dumper */
2322 + dump_config.dumper = &config->dumper;
2327 +/* Saves the dump configuration in a memory block for use across a soft-boot */
2328 +int dump_save_config(struct dump_config_block *config)
2330 + printk("saving dump config settings\n");
2332 + /* dump config settings */
2333 + memcpy(&config->config, &dump_config, sizeof(dump_config));
2335 + /* dumper state */
2336 + memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
2337 + memcpy(&config->scheme, dump_config.dumper->scheme,
2338 + sizeof(struct dump_scheme));
2339 + memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
2340 + memcpy(&config->dev[0], dump_config.dumper->dev,
2341 + sizeof(struct dump_anydev));
2342 + memcpy(&config->filter_table, dump_config.dumper->filter,
2343 + sizeof(struct dump_data_filter)*MAX_PASSES);
2345 + /* handle to saved mem pages */
2346 + memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
2348 + config->magic = DUMP_MAGIC_LIVE;
2353 +int dump_init_stage2(struct dump_config_block *saved_config)
2357 + pr_debug("dump_init_stage2\n");
2358 + /* Check if dump from previous boot exists */
2359 + if (saved_config) {
2360 + printk("loading dumper from previous boot \n");
2361 + /* load and configure dumper from previous boot */
2362 + if ((err = dump_load_config(saved_config)))
2365 + if (!dump_oncpu) {
2366 + if ((err = dump_configure(dump_config.dump_device))) {
2367 + printk("Stage 2 dump configure failed\n");
2373 + dump_dev = dump_config.dumper->dev;
2374 + /* write out the dump */
2375 + err = dump_generic_execute(NULL, NULL);
2377 + dump_saved_config = NULL;
2379 + if (!dump_oncpu) {
2380 + dump_unconfigure();
2386 + /* no dump to write out */
2387 + printk("no dumper from previous boot \n");
2392 +extern void dump_mem_markpages(struct dump_memdev *);
2394 +int dump_switchover_stage(void)
2398 + /* trigger stage 2 rightaway - in real life would be after soft-boot */
2399 + /* dump_saved_config would be a boot param */
2400 + saved_dump_memdev = dump_memdev;
2401 + saved_dumper = dump_config.dumper;
2402 + ret = dump_init_stage2(dump_saved_config);
2403 + dump_memdev = saved_dump_memdev;
2404 + dump_config.dumper = saved_dumper;
2408 +int dump_activate_softboot(void)
2411 +#ifdef CONFIG_KEXEC
2412 + int num_cpus_online = 0;
2413 + struct kimage *image;
2416 + /* temporary - switchover to writeout previously saved dump */
2417 +#ifndef CONFIG_KEXEC
2418 + err = dump_switchover_stage(); /* non-disruptive case */
2420 + dump_config.dumper = &dumper_stage1; /* set things back */
2425 + dump_silence_level = DUMP_HALT_CPUS;
2426 + /* wait till we become the only cpu */
2427 + /* maybe by checking for online cpus ? */
2429 + while((num_cpus_online = num_online_cpus()) > 1);
2431 + /* now call into kexec */
2433 + image = xchg(&kexec_image, 0);
2435 + mdelay(panic_timeout*1000);
2436 + machine_kexec(image);
2441 + * * should we call reboot notifiers ? inappropriate for panic ?
2442 + * * what about device_shutdown() ?
2443 + * * is explicit bus master disabling needed or can we do that
2444 + * * through driverfs ?
2450 +/* --- DUMP SCHEME ROUTINES --- */
2452 +static inline int dump_buf_pending(struct dumper *dumper)
2454 + return (dumper->curr_buf - dumper->dump_buf);
2457 +/* Invoked during stage 1 of soft-reboot based dumping */
2458 +int dump_overlay_sequencer(void)
2460 + struct dump_data_filter *filter = dump_config.dumper->filter;
2461 + struct dump_data_filter *filter2 = dumper_stage2.filter;
2462 + int pass = 0, err = 0, save = 0;
2463 + int (*action)(unsigned long, unsigned long);
2465 + /* Make sure gzip compression is being used */
2466 + if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
2467 + printk(" Please set GZIP compression \n");
2471 + /* start filling in dump data right after the header */
2472 + dump_config.dumper->curr_offset =
2473 + PAGE_ALIGN(dump_config.dumper->header_len);
2475 + /* Locate the last pass */
2476 + for (;filter->selector; filter++, pass++);
2479 + * Start from the end backwards: overlay involves a reverse
2480 + * ordering of passes, since less critical pages are more
2481 + * likely to be reusable as scratch space once we are through
2484 + for (--pass, --filter; pass >= 0; pass--, filter--)
2486 + /* Assumes passes are exclusive (even across dumpers) */
2487 + /* Requires care when coding the selection functions */
2488 + if ((save = filter->level_mask & dump_config.level))
2489 + action = dump_save_data;
2491 + action = dump_skip_data;
2493 + /* Remember the offset where this pass started */
2494 + /* The second stage dumper would use this */
2495 + if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
2496 + pr_debug("Starting pass %d with pending data\n", pass);
2497 + pr_debug("filling dummy data to page-align it\n");
2498 + dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
2499 + (unsigned long)dump_config.dumper->curr_buf);
2502 + filter2[pass].start[0] = dump_config.dumper->curr_offset
2503 + + dump_buf_pending(dump_config.dumper);
2505 + err = dump_iterator(pass, action, filter);
2507 + filter2[pass].end[0] = dump_config.dumper->curr_offset
2508 + + dump_buf_pending(dump_config.dumper);
2509 + filter2[pass].num_mbanks = 1;
2512 + printk("dump_overlay_seq: failure %d in pass %d\n",
2516 + printk("\n %d overlay pages %s of %d each in pass %d\n",
2517 + err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
2523 +/* from dump_memdev.c */
2524 +extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
2525 +extern struct page *dump_mem_next_page(struct dump_memdev *dev);
2527 +static inline struct page *dump_get_saved_page(loff_t loc)
2529 + return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
2532 +static inline struct page *dump_next_saved_page(void)
2534 + return (dump_mem_next_page(dump_memdev));
2538 + * Iterates over list of saved dump pages. Invoked during second stage of
2539 + * soft boot dumping
2541 + * Observation: If additional selection is desired at this stage then
2542 + * a different iterator could be written which would advance
2543 + * to the next page header every time instead of blindly picking up
2544 + * the data. In such a case loc would be interpreted differently.
2545 + * At this moment however a blind pass seems sufficient, cleaner and
2548 +int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
2549 + unsigned long), struct dump_data_filter *filter)
2552 + struct page *page;
2553 + unsigned long count = 0;
2557 + for (i = 0; i < filter->num_mbanks; i++) {
2558 + loc = filter->start[i];
2559 + end = filter->end[i];
2560 + printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
2563 + /* loc will get treated as logical offset into stage 1 */
2564 + page = dump_get_saved_page(loc);
2566 + for (; loc < end; loc += PAGE_SIZE) {
2567 + dump_config.dumper->curr_loc = loc;
2569 + printk("no more saved data for pass %d\n",
2573 + sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
2575 + if (page && filter->selector(pass, (unsigned long)page,
2577 + pr_debug("mem offset 0x%llx\n", loc);
2578 + if ((err = action((unsigned long)page, sz)))
2582 + /* clear the contents of page */
2583 + /* fixme: consider using KM_DUMP instead */
2584 + clear_highpage(page);
2587 + page = dump_next_saved_page();
2591 + return err ? err : count;
2594 +static inline int dump_overlay_pages_done(struct page *page, int nr)
2598 + for (; nr ; page++, nr--) {
2599 + if (dump_check_and_free_page(dump_memdev, page))
2605 +int dump_overlay_save_data(unsigned long loc, unsigned long len)
2608 + struct page *page = (struct page *)loc;
2609 + static unsigned long cnt = 0;
2611 + if ((err = dump_generic_save_data(loc, len)))
2614 + if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
2616 + if (!(cnt & 0x7f))
2617 + pr_debug("released page 0x%lx\n", page_to_pfn(page));
2624 +int dump_overlay_skip_data(unsigned long loc, unsigned long len)
2626 + struct page *page = (struct page *)loc;
2628 + dump_overlay_pages_done(page, len >> PAGE_SHIFT);
2632 +int dump_overlay_resume(void)
2637 + * switch to stage 2 dumper, save dump_config_block
2638 + * and then trigger a soft-boot
2640 + dumper_stage2.header_len = dump_config.dumper->header_len;
2641 + dump_config.dumper = &dumper_stage2;
2642 + if ((err = dump_save_config(dump_saved_config)))
2645 + dump_dev = dump_config.dumper->dev;
2647 +#ifdef CONFIG_KEXEC
2648 + /* If we are doing a disruptive dump, activate softboot now */
2649 + if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
2650 + err = dump_activate_softboot();
2654 + err = dump_switchover_stage(); /* plugs into soft boot mechanism */
2655 + dump_config.dumper = &dumper_stage1; /* set things back */
2659 +int dump_overlay_configure(unsigned long devid)
2661 + struct dump_dev *dev;
2662 + struct dump_config_block *saved_config = dump_saved_config;
2665 + /* If there is a previously saved dump, write it out first */
2666 + if (saved_config) {
2667 + printk("Processing old dump pending writeout\n");
2668 + err = dump_switchover_stage();
2670 + printk("failed to writeout saved dump\n");
2673 + dump_free_mem(saved_config); /* testing only: not after boot */
2676 + dev = dumper_stage2.dev = dump_config.dumper->dev;
2677 + /* From here on the intermediate dump target is memory-only */
2678 + dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
2679 + if ((err = dump_generic_configure(0))) {
2680 + printk("dump generic configure failed: err %d\n", err);
2684 + dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
2686 + /* Sanity check on the actual target dump device */
2687 + if (!dev || (err = dev->ops->open(dev, devid))) {
2690 + /* TBD: should we release the target if this is soft-boot only ? */
2692 + /* alloc a dump config block area to save across reboot */
2693 + if (!(dump_saved_config = dump_alloc_mem(sizeof(struct
2694 + dump_config_block)))) {
2695 + printk("dump config block alloc failed\n");
2696 + /* undo configure */
2697 + dump_generic_unconfigure();
2700 + dump_config.dump_addr = (unsigned long)dump_saved_config;
2701 + printk("Dump config block of size %d set up at 0x%lx\n",
2702 + sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
2706 +int dump_overlay_unconfigure(void)
2708 + struct dump_dev *dev = dumper_stage2.dev;
2711 + pr_debug("dump_overlay_unconfigure\n");
2712 + /* Close the secondary device */
2713 + dev->ops->release(dev);
2714 + pr_debug("released secondary device\n");
2716 + err = dump_generic_unconfigure();
2717 + pr_debug("Unconfigured generic portions\n");
2718 + dump_free_mem(dump_saved_config);
2719 + dump_saved_config = NULL;
2720 + pr_debug("Freed saved config block\n");
2721 + dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
2723 + printk("Unconfigured overlay dumper\n");
2727 +int dump_staged_unconfigure(void)
2730 + struct dump_config_block *saved_config = dump_saved_config;
2731 + struct dump_dev *dev;
2733 + pr_debug("dump_staged_unconfigure\n");
2734 + err = dump_generic_unconfigure();
2736 + /* now check if there is a saved dump waiting to be written out */
2737 + if (saved_config) {
2738 + printk("Processing saved dump pending writeout\n");
2739 + if ((err = dump_switchover_stage())) {
2740 + printk("Error in commiting saved dump at 0x%lx\n",
2741 + (unsigned long)saved_config);
2742 + printk("Old dump may hog memory\n");
2744 + dump_free_mem(saved_config);
2745 + pr_debug("Freed saved config block\n");
2747 + dump_saved_config = NULL;
2749 + dev = &dump_memdev->ddev;
2750 + dev->ops->release(dev);
2752 + printk("Unconfigured second stage dumper\n");
2757 +/* ----- PASSTHRU FILTER ROUTINE --------- */
2759 +/* transparent - passes everything through */
2760 +int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
2765 +/* ----- PASSTHRU FORMAT ROUTINES ---- */
2768 +int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
2770 + dump_config.dumper->header_dirty++;
2774 +/* Copies bytes of data from page(s) to the specified buffer */
2775 +int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
2777 + unsigned long len = 0, bytes;
2780 + while (len < sz) {
2781 + addr = kmap_atomic(page, KM_DUMP);
2782 + bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;
2783 + memcpy(buf, addr, bytes);
2784 + kunmap_atomic(addr, KM_DUMP);
2789 + /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
2794 +int dump_passthru_update_header(void)
2796 + long len = dump_config.dumper->header_len;
2797 + struct page *page;
2798 + void *buf = dump_config.dumper->dump_buf;
2801 + if (!dump_config.dumper->header_dirty)
2804 + pr_debug("Copying header of size %ld bytes from memory\n", len);
2805 + if (len > DUMP_BUFFER_SIZE)
2808 + page = dump_mem_lookup(dump_memdev, 0);
2809 + for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
2810 + if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
2812 + page = dump_mem_next_page(dump_memdev);
2815 + printk("Incomplete header saved in mem\n");
2819 + if ((err = dump_dev_seek(0))) {
2820 + printk("Unable to seek to dump header offset\n");
2823 + err = dump_ll_write(dump_config.dumper->dump_buf,
2824 + buf - dump_config.dumper->dump_buf);
2825 + if (err < dump_config.dumper->header_len)
2826 + return (err < 0) ? err : -ENOSPC;
2828 + dump_config.dumper->header_dirty = 0;
2832 +static loff_t next_dph_offset = 0;
2834 +static int dph_valid(struct __dump_page *dph)
2836 + if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
2837 + > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
2838 + (dph->dp_size > PAGE_SIZE)) {
2839 + printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
2840 + dph->dp_address, dph->dp_size, dph->dp_flags);
2846 +int dump_verify_lcrash_data(void *buf, unsigned long sz)
2848 + struct __dump_page *dph;
2850 + /* sanity check for page headers */
2851 + while (next_dph_offset + sizeof(*dph) < sz) {
2852 + dph = (struct __dump_page *)(buf + next_dph_offset);
2853 + if (!dph_valid(dph)) {
2854 + printk("Invalid page hdr at offset 0x%llx\n",
2858 + next_dph_offset += dph->dp_size + sizeof(*dph);
2861 + next_dph_offset -= sz;
2866 + * TBD/Later: Consider avoiding the copy by using a scatter/gather
2867 + * vector representation for the dump buffer
2869 +int dump_passthru_add_data(unsigned long loc, unsigned long sz)
2871 + struct page *page = (struct page *)loc;
2872 + void *buf = dump_config.dumper->curr_buf;
2875 + if ((err = dump_copy_pages(buf, page, sz))) {
2876 + printk("dump_copy_pages failed");
2880 + if ((err = dump_verify_lcrash_data(buf, sz))) {
2881 + printk("dump_verify_lcrash_data failed\n");
2882 + printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
2883 + printk("Page flags 0x%lx\n", page->flags);
2884 + printk("Page count 0x%x\n", page_count(page));
2888 + dump_config.dumper->curr_buf = buf + sz;
2894 +/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
2896 +/* Scheme to overlay saved data in memory for writeout after a soft-boot */
2897 +struct dump_scheme_ops dump_scheme_overlay_ops = {
2898 + .configure = dump_overlay_configure,
2899 + .unconfigure = dump_overlay_unconfigure,
2900 + .sequencer = dump_overlay_sequencer,
2901 + .iterator = dump_page_iterator,
2902 + .save_data = dump_overlay_save_data,
2903 + .skip_data = dump_overlay_skip_data,
2904 + .write_buffer = dump_generic_write_buffer
2907 +struct dump_scheme dump_scheme_overlay = {
2908 + .name = "overlay",
2909 + .ops = &dump_scheme_overlay_ops
2913 +/* Stage 1 must use a good compression scheme - default to gzip */
2914 +extern struct __dump_compress dump_gzip_compression;
2916 +struct dumper dumper_stage1 = {
2918 + .scheme = &dump_scheme_overlay,
2919 + .fmt = &dump_fmt_lcrash,
2920 + .compress = &dump_none_compression, /* needs to be gzip */
2921 + .filter = dump_filter_table,
2925 +/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
2927 +/* Formatter that transfers data as is (transparent) w/o further conversion */
2928 +struct dump_fmt_ops dump_fmt_passthru_ops = {
2929 + .configure_header = dump_passthru_configure_header,
2930 + .update_header = dump_passthru_update_header,
2931 + .save_context = NULL, /* unused */
2932 + .add_data = dump_passthru_add_data,
2933 + .update_end_marker = dump_lcrash_update_end_marker
2936 +struct dump_fmt dump_fmt_passthru = {
2937 + .name = "passthru",
2938 + .ops = &dump_fmt_passthru_ops
2941 +/* Filter that simply passes along any data within the range (transparent)*/
2942 +/* Note: The start and end ranges in the table are filled in at run-time */
2944 +extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
2946 +struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
2947 +{.name = "passkern", .selector = dump_passthru_filter,
2948 + .level_mask = DUMP_MASK_KERN },
2949 +{.name = "passuser", .selector = dump_passthru_filter,
2950 + .level_mask = DUMP_MASK_USED },
2951 +{.name = "passunused", .selector = dump_passthru_filter,
2952 + .level_mask = DUMP_MASK_UNUSED },
2953 +{.name = "none", .selector = dump_filter_none,
2954 + .level_mask = DUMP_MASK_REST }
2958 +/* Scheme to handle data staged / preserved across a soft-boot */
2959 +struct dump_scheme_ops dump_scheme_staged_ops = {
2960 + .configure = dump_generic_configure,
2961 + .unconfigure = dump_staged_unconfigure,
2962 + .sequencer = dump_generic_sequencer,
2963 + .iterator = dump_saved_data_iterator,
2964 + .save_data = dump_generic_save_data,
2965 + .skip_data = dump_generic_skip_data,
2966 + .write_buffer = dump_generic_write_buffer
2969 +struct dump_scheme dump_scheme_staged = {
2971 + .ops = &dump_scheme_staged_ops
2974 +/* The stage 2 dumper comprising all these */
2975 +struct dumper dumper_stage2 = {
2977 + .scheme = &dump_scheme_staged,
2978 + .fmt = &dump_fmt_passthru,
2979 + .compress = &dump_none_compression,
2980 + .filter = dump_passthru_filtertable,
2984 Index: linux-2.6.10/drivers/dump/dump_fmt.c
2985 ===================================================================
2986 --- linux-2.6.10.orig/drivers/dump/dump_fmt.c 2005-04-07 19:34:21.197950744 +0800
2987 +++ linux-2.6.10/drivers/dump/dump_fmt.c 2005-04-07 18:13:56.911751944 +0800
2990 + * Implements the routines which handle the format specific
2991 + * aspects of dump for the default dump format.
2993 + * Used in single stage dumping and stage 1 of soft-boot based dumping
2994 + * Saves data in LKCD (lcrash) format
2996 + * Previously a part of dump_base.c
2998 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
2999 + * Split off and reshuffled LKCD dump format code around generic
3000 + * dump method interfaces.
3002 + * Derived from original code created by
3003 + * Matt Robinson <yakker@sourceforge.net>)
3005 + * Contributions from SGI, IBM, HP, MCL, and others.
3007 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
3008 + * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
3009 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
3010 + * Copyright (C) 2002 International Business Machines Corp.
3012 + * This code is released under version 2 of the GNU GPL.
3015 +#include <linux/types.h>
3016 +#include <linux/kernel.h>
3017 +#include <linux/time.h>
3018 +#include <linux/sched.h>
3019 +#include <linux/ptrace.h>
3020 +#include <linux/utsname.h>
3021 +#include <linux/dump.h>
3022 +#include <asm/dump.h>
3023 +#include "dump_methods.h"
3026 + * SYSTEM DUMP LAYOUT
3028 + * System dumps are currently the combination of a dump header and a set
3029 + * of data pages which contain the system memory. The layout of the dump
3030 + * (for full dumps) is as follows:
3032 + * +-----------------------------+
3033 + * | generic dump header |
3034 + * +-----------------------------+
3035 + * | architecture dump header |
3036 + * +-----------------------------+
3038 + * +-----------------------------+
3040 + * +-----------------------------+
3042 + * +-----------------------------+
3044 + * +-----------------------------+
3050 + * +-----------------------------+
3051 + * | PAGE_END header |
3052 + * +-----------------------------+
3054 + * There are two dump headers, the first which is architecture
3055 + * independent, and the other which is architecture dependent. This
3056 + * allows different architectures to dump different data structures
3057 + * which are specific to their chipset, CPU, etc.
3059 + * After the dump headers come a succession of dump page headers along
3060 + * with dump pages. The page header contains information about the page
3061 + * size, any flags associated with the page (whether it's compressed or
3062 + * not), and the address of the page. After the page header is the page
3063 + * data, which is either compressed (or not). Each page of data is
3064 + * dumped in succession, until the final dump header (PAGE_END) is
3065 + * placed at the end of the dump, assuming the dump device isn't out
3068 + * This mechanism allows for multiple compression types, different
3069 + * types of data structures, different page ordering, etc., etc., etc.
3070 + * It's a very straightforward mechanism for dumping system memory.
3073 +struct __dump_header dump_header; /* the primary dump header */
3074 +struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
3076 +/* Replace a runtime sanity check on the DUMP_BUFFER_SIZE with a
3077 + * compile-time check. The compile_time_assertions routine will not
3078 + * compile if the assertion is false.
3080 + * If you fail this assert you are most likely on a large machine and
3081 + * should use a special 6.0.0 version of LKCD or a version > 7.0.0. See
3082 + * the LKCD website for more information.
3085 +#define COMPILE_TIME_ASSERT(const_expr) \
3086 + switch(0){case 0: case (const_expr):;}
3088 +static inline void compile_time_assertions(void)
3090 + COMPILE_TIME_ASSERT((sizeof(struct __dump_header) +
3091 + sizeof(struct __dump_header_asm)) <= DUMP_BUFFER_SIZE);
3095 + * Set up common header fields (mainly the arch indep section)
3096 + * Per-cpu state is handled by lcrash_save_context
3097 + * Returns the size of the header in bytes.
3099 +static int lcrash_init_dump_header(const char *panic_str)
3101 + struct timeval dh_time;
3102 + u64 temp_memsz = dump_header.dh_memory_size;
3104 + /* initialize the dump headers to zero */
3105 + /* save dha_stack pointer because it may contains pointer for stack! */
3106 + memset(&dump_header, 0, sizeof(dump_header));
3107 + memset(&dump_header_asm, 0,
3108 + offsetof(struct __dump_header_asm, dha_stack));
3109 + memset(&dump_header_asm.dha_stack+1, 0,
3110 + sizeof(dump_header_asm) -
3111 + offsetof(struct __dump_header_asm, dha_stack) -
3112 + sizeof(dump_header_asm.dha_stack));
3113 + dump_header.dh_memory_size = temp_memsz;
3115 + /* configure dump header values */
3116 + dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
3117 + dump_header.dh_version = DUMP_VERSION_NUMBER;
3118 + dump_header.dh_memory_start = PAGE_OFFSET;
3119 + dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
3120 + dump_header.dh_header_size = sizeof(struct __dump_header);
3121 + dump_header.dh_page_size = PAGE_SIZE;
3122 + dump_header.dh_dump_level = dump_config.level;
3123 + dump_header.dh_current_task = (unsigned long) current;
3124 + dump_header.dh_dump_compress = dump_config.dumper->compress->
3126 + dump_header.dh_dump_flags = dump_config.flags;
3127 + dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
3129 +#if DUMP_DEBUG >= 6
3130 + dump_header.dh_num_bytes = 0;
3132 + dump_header.dh_num_dump_pages = 0;
3133 + do_gettimeofday(&dh_time);
3134 + dump_header.dh_time.tv_sec = dh_time.tv_sec;
3135 + dump_header.dh_time.tv_usec = dh_time.tv_usec;
3137 + memcpy((void *)&(dump_header.dh_utsname_sysname),
3138 + (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
3139 + memcpy((void *)&(dump_header.dh_utsname_nodename),
3140 + (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
3141 + memcpy((void *)&(dump_header.dh_utsname_release),
3142 + (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
3143 + memcpy((void *)&(dump_header.dh_utsname_version),
3144 + (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
3145 + memcpy((void *)&(dump_header.dh_utsname_machine),
3146 + (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
3147 + memcpy((void *)&(dump_header.dh_utsname_domainname),
3148 + (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);
3151 + memcpy((void *)&(dump_header.dh_panic_string),
3152 + (const void *)panic_str, DUMP_PANIC_LEN);
3155 + dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
3156 + dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
3157 + dump_header_asm.dha_header_size = sizeof(dump_header_asm);
3159 + dump_header_asm.dha_physaddr_start = PHYS_OFFSET;
3162 + dump_header_asm.dha_smp_num_cpus = num_online_cpus();
3163 + pr_debug("smp_num_cpus in header %d\n",
3164 + dump_header_asm.dha_smp_num_cpus);
3166 + dump_header_asm.dha_dumping_cpu = smp_processor_id();
3168 + return sizeof(dump_header) + sizeof(dump_header_asm);
3172 +int dump_lcrash_configure_header(const char *panic_str,
3173 + const struct pt_regs *regs)
3177 + dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);
3179 + /* capture register states for all processors */
3180 + dump_save_this_cpu(regs);
3181 + __dump_save_other_cpus(); /* side effect:silence cpus */
3183 + /* configure architecture-specific dump header values */
3184 + if ((retval = __dump_configure_header(regs)))
3187 + dump_config.dumper->header_dirty++;
3190 +/* save register and task context */
3191 +void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
3192 + struct task_struct *tsk)
3194 + /* This level of abstraction might be redundantly redundant */
3195 + __dump_save_context(cpu, regs, tsk);
3198 +/* write out the header */
3199 +int dump_write_header(void)
3201 + int retval = 0, size;
3202 + void *buf = dump_config.dumper->dump_buf;
3204 + /* accounts for DUMP_HEADER_OFFSET if applicable */
3205 + if ((retval = dump_dev_seek(0))) {
3206 + printk("Unable to seek to dump header offset: %d\n",
3211 + memcpy(buf, (void *)&dump_header, sizeof(dump_header));
3212 + size = sizeof(dump_header);
3213 + memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
3214 + size += sizeof(dump_header_asm);
3215 + size = PAGE_ALIGN(size);
3216 + retval = dump_ll_write(buf , size);
3218 + if (retval < size)
3219 + return (retval >= 0) ? ENOSPC : retval;
3223 +int dump_generic_update_header(void)
3227 + if (dump_config.dumper->header_dirty) {
3228 + if ((err = dump_write_header())) {
3229 + printk("dump write header failed !err %d\n", err);
3231 + dump_config.dumper->header_dirty = 0;
3238 +static inline int is_curr_stack_page(struct page *page, unsigned long size)
3240 + unsigned long thread_addr = (unsigned long)current_thread_info();
3241 + unsigned long addr = (unsigned long)page_address(page);
3243 + return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
3244 + && (addr + size > thread_addr);
3247 +static inline int is_dump_page(struct page *page, unsigned long size)
3249 + unsigned long addr = (unsigned long)page_address(page);
3250 + unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
3252 + return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
3253 + && (addr + size > dump_buf);
3256 +int dump_allow_compress(struct page *page, unsigned long size)
3259 + * Don't compress the page if any part of it overlaps
3260 + * with the current stack or dump buffer (since the contents
3261 + * in these could be changing while compression is going on)
3263 + return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
3266 +void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
3269 + memset(dp, sizeof(struct __dump_page), 0);
3273 + dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
3276 + dp->dp_page_index = dump_header.dh_num_dump_pages;
3277 + dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
3278 + + DUMP_HEADER_OFFSET; /* ?? */
3279 +#endif /* DUMP_DEBUG */
3282 +int dump_lcrash_add_data(unsigned long loc, unsigned long len)
3284 + struct page *page = (struct page *)loc;
3285 + void *addr, *buf = dump_config.dumper->curr_buf;
3286 + struct __dump_page *dp = (struct __dump_page *)buf;
3289 + if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
3292 + lcrash_init_pageheader(dp, page, len);
3293 + buf += sizeof(struct __dump_page);
3296 + addr = kmap_atomic(page, KM_DUMP);
3297 + size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
3298 + /* check for compression */
3299 + if (dump_allow_compress(page, bytes)) {
3300 + size = dump_compress_data((char *)addr, bytes,
3301 + (char *)buf, loc);
3303 + /* set the compressed flag if the page did compress */
3304 + if (size && (size < bytes)) {
3305 + dp->dp_flags |= DUMP_DH_COMPRESSED;
3307 + /* compression failed -- default to raw mode */
3308 + dp->dp_flags |= DUMP_DH_RAW;
3309 + memcpy(buf, addr, bytes);
3312 + /* memset(buf, 'A', size); temporary: testing only !! */
3313 + kunmap_atomic(addr, KM_DUMP);
3314 + dp->dp_size += size;
3320 + /* now update the header */
3322 + dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
3324 + dump_header.dh_num_dump_pages++;
3325 + dump_config.dumper->header_dirty++;
3327 + dump_config.dumper->curr_buf = buf;
3332 +int dump_lcrash_update_end_marker(void)
3334 + struct __dump_page *dp =
3335 + (struct __dump_page *)dump_config.dumper->curr_buf;
3336 + unsigned long left;
3339 + lcrash_init_pageheader(dp, NULL, 0);
3340 + dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
3342 + /* now update the header */
3344 + dump_header.dh_num_bytes += sizeof(*dp);
3346 + dump_config.dumper->curr_buf += sizeof(*dp);
3347 + left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;
3352 + if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
3353 + printk("Seek failed at offset 0x%llx\n",
3354 + dump_config.dumper->curr_offset);
3358 + if (DUMP_BUFFER_SIZE > left)
3359 + memset(dump_config.dumper->curr_buf, 'm',
3360 + DUMP_BUFFER_SIZE - left);
3362 + if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
3363 + DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
3364 + return (ret < 0) ? ret : -ENOSPC;
3367 + dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;
3369 + if (left > DUMP_BUFFER_SIZE) {
3370 + left -= DUMP_BUFFER_SIZE;
3371 + memcpy(dump_config.dumper->dump_buf,
3372 + dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
3373 + dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
3382 +/* Default Formatter (lcrash) */
3383 +struct dump_fmt_ops dump_fmt_lcrash_ops = {
3384 + .configure_header = dump_lcrash_configure_header,
3385 + .update_header = dump_generic_update_header,
3386 + .save_context = dump_lcrash_save_context,
3387 + .add_data = dump_lcrash_add_data,
3388 + .update_end_marker = dump_lcrash_update_end_marker
3391 +struct dump_fmt dump_fmt_lcrash = {
3393 + .ops = &dump_fmt_lcrash_ops
3396 Index: linux-2.6.10/drivers/dump/dump_netdev.c
3397 ===================================================================
3398 --- linux-2.6.10.orig/drivers/dump/dump_netdev.c 2005-04-07 19:34:21.197950744 +0800
3399 +++ linux-2.6.10/drivers/dump/dump_netdev.c 2005-04-07 18:13:56.900753616 +0800
3402 + * Implements the dump driver interface for saving a dump via network
3405 + * Some of this code has been taken/adapted from Ingo Molnar's netconsole
3406 + * code. LKCD team expresses its thanks to Ingo.
3408 + * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
3409 + * Adapted netconsole code to implement LKCD dump over the network.
3411 + * Nov 2002 - Bharata B. Rao <bharata@in.ibm.com>
3412 + * Innumerable code cleanups, simplification and some fixes.
3413 + * Netdump configuration done by ioctl instead of using module parameters.
3414 + * Oct 2003 - Prasanna S Panchamukhi <prasanna@in.ibm.com>
3415 + * Netdump code modified to use Netpoll API's.
3417 + * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
3418 + * Copyright (C) 2002 International Business Machines Corp.
3420 + * This code is released under version 2 of the GNU GPL.
3423 +#include <net/tcp.h>
3424 +#include <net/udp.h>
3425 +#include <linux/delay.h>
3426 +#include <linux/random.h>
3427 +#include <linux/reboot.h>
3428 +#include <linux/module.h>
3429 +#include <linux/dump.h>
3430 +#include <linux/dump_netdev.h>
3432 +#include <asm/unaligned.h>
3434 +static int startup_handshake;
3435 +static int page_counter;
3436 +static unsigned long flags_global;
3437 +static int netdump_in_progress;
3440 + * security depends on the trusted path between the netconsole
3441 + * server and netconsole client, since none of the packets are
3442 + * encrypted. The random magic number protects the protocol
3443 + * against spoofing.
3445 +static u64 dump_magic;
3448 + * We maintain a small pool of fully-sized skbs,
3449 + * to make sure the message gets out even in
3450 + * extreme OOM situations.
3453 +static void rx_hook(struct netpoll *np, int port, char *msg, int size);
3457 +static void rx_hook(struct netpoll *np, int port, char *msg, int size)
3459 + req_t * __req = (req_t *) msg;
3461 + * First check if we are dumping or doing startup handshake, if
3462 + * not quickly return.
3465 + if (!netdump_in_progress)
3468 + if ((ntohl(__req->command) != COMM_GET_MAGIC) &&
3469 + (ntohl(__req->command) != COMM_HELLO) &&
3470 + (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) &&
3471 + (ntohl(__req->command) != COMM_START_NETDUMP_ACK) &&
3472 + (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0))
3475 + req.magic = ntohl(__req->magic);
3476 + req.command = ntohl(__req->command);
3477 + req.from = ntohl(__req->from);
3478 + req.to = ntohl(__req->to);
3479 + req.nr = ntohl(__req->nr);
3484 +static char netdump_membuf[1024 + HEADER_LEN + 1];
3486 + * Fill the netdump_membuf with the header information from reply_t structure
3487 + * and send it down to netpoll_send_udp() routine.
3490 +netdump_send_packet(struct netpoll *np, reply_t *reply, size_t data_len) {
3493 + b = &netdump_membuf[1];
3494 + netdump_membuf[0] = NETCONSOLE_VERSION;
3495 + put_unaligned(htonl(reply->nr), (u32 *) b);
3496 + put_unaligned(htonl(reply->code), (u32 *) (b + sizeof(reply->code)));
3497 + put_unaligned(htonl(reply->info), (u32 *) (b + sizeof(reply->code) +
3498 + sizeof(reply->info)));
3499 + netpoll_send_udp(np, netdump_membuf, data_len + HEADER_LEN);
3503 +dump_send_mem(struct netpoll *np, req_t *req, const char* buff, size_t len)
3507 + int nr_chunks = len/1024;
3510 + reply.nr = req->nr;
3511 + reply.code = REPLY_MEM;
3512 + if ( nr_chunks <= 0)
3514 + for (i = 0; i < nr_chunks; i++) {
3515 + unsigned int offset = i*1024;
3516 + reply.info = offset;
3517 + memcpy((netdump_membuf + HEADER_LEN), (buff + offset), 1024);
3518 + netdump_send_packet(np, &reply, 1024);
3523 + * This function waits for the client to acknowledge the receipt
3524 + * of the netdump startup reply, with the possibility of packets
3525 + * getting lost. We resend the startup packet if no ACK is received,
3526 + * after a 1 second delay.
3528 + * (The client can test the success of the handshake via the HELLO
3529 + * command, and send ACKs until we enter netdump mode.)
3532 +dump_handshake(struct dump_dev *net_dev)
3538 + if (startup_handshake) {
3539 + sprintf((netdump_membuf + HEADER_LEN),
3540 + "NETDUMP start, waiting for start-ACK.\n");
3541 + reply.code = REPLY_START_NETDUMP;
3545 + sprintf((netdump_membuf + HEADER_LEN),
3546 + "NETDUMP start, waiting for start-ACK.\n");
3547 + reply.code = REPLY_START_WRITE_NETDUMP;
3548 + reply.nr = net_dev->curr_offset;
3549 + reply.info = net_dev->curr_offset;
3551 + str_len = strlen(netdump_membuf + HEADER_LEN);
3553 + /* send 300 handshake packets before declaring failure */
3554 + for (i = 0; i < 300; i++) {
3555 + netdump_send_packet(&net_dev->np, &reply, str_len);
3558 + for (j = 0; j < 10000; j++) {
3560 + netpoll_poll(&net_dev->np);
3566 + * if there is no new request, try sending the handshaking
3573 + * check if the new request is of the expected type,
3574 + * if so, return, else try sending the handshaking
3577 + if (startup_handshake) {
3578 + if (req.command == COMM_HELLO || req.command ==
3579 + COMM_START_NETDUMP_ACK) {
3586 + if (req.command == COMM_SEND_MEM) {
3598 +do_netdump(struct dump_dev *net_dev, const char* buff, size_t len)
3602 + int repeatCounter, counter, total_loop;
3605 + netdump_in_progress = 1;
3607 + if (dump_handshake(net_dev) < 0) {
3608 + printk("network dump failed due to handshake failure\n");
3613 + * Ideally startup handshake should be done during dump configuration,
3614 + * i.e., in dump_net_open(). This will be done when I figure out
3615 + * the dependency between startup handshake, subsequent write and
3616 + * various commands with respect to the net-server.
3618 + if (startup_handshake)
3619 + startup_handshake = 0;
3622 + repeatCounter = 0;
3626 + netpoll_poll(&net_dev->np);
3631 + if (repeatCounter > 5) {
3633 + if (counter > 10000) {
3634 + if (total_loop >= 100000) {
3635 + printk("Time OUT LEAVE NOW\n");
3639 + printk("Try number %d out of "
3640 + "10 before Time Out\n",
3645 + repeatCounter = 0;
3649 + repeatCounter = 0;
3653 + switch (req.command) {
3657 + case COMM_SEND_MEM:
3658 + dump_send_mem(&net_dev->np, &req, buff, len);
3662 + case COMM_START_WRITE_NETDUMP_ACK:
3667 + sprintf((netdump_membuf + HEADER_LEN),
3668 + "Hello, this is netdump version " "0.%02d\n",
3669 + NETCONSOLE_VERSION);
3670 + str_len = strlen(netdump_membuf + HEADER_LEN);
3671 + reply.code = REPLY_HELLO;
3672 + reply.nr = req.nr;
3673 + reply.info = net_dev->curr_offset;
3674 + netdump_send_packet(&net_dev->np, &reply, str_len);
3677 + case COMM_GET_PAGE_SIZE:
3678 + sprintf((netdump_membuf + HEADER_LEN),
3679 + "PAGE_SIZE: %ld\n", PAGE_SIZE);
3680 + str_len = strlen(netdump_membuf + HEADER_LEN);
3681 + reply.code = REPLY_PAGE_SIZE;
3682 + reply.nr = req.nr;
3683 + reply.info = PAGE_SIZE;
3684 + netdump_send_packet(&net_dev->np, &reply, str_len);
3687 + case COMM_GET_NR_PAGES:
3688 + reply.code = REPLY_NR_PAGES;
3689 + reply.nr = req.nr;
3690 + reply.info = num_physpages;
3691 + reply.info = page_counter;
3692 + sprintf((netdump_membuf + HEADER_LEN),
3693 + "Number of pages: %ld\n", num_physpages);
3694 + str_len = strlen(netdump_membuf + HEADER_LEN);
3695 + netdump_send_packet(&net_dev->np, &reply, str_len);
3698 + case COMM_GET_MAGIC:
3699 + reply.code = REPLY_MAGIC;
3700 + reply.nr = req.nr;
3701 + reply.info = NETCONSOLE_VERSION;
3702 + sprintf((netdump_membuf + HEADER_LEN),
3703 + (char *)&dump_magic, sizeof(dump_magic));
3704 + str_len = strlen(netdump_membuf + HEADER_LEN);
3705 + netdump_send_packet(&net_dev->np, &reply, str_len);
3709 + reply.code = REPLY_ERROR;
3710 + reply.nr = req.nr;
3711 + reply.info = req.command;
3712 + sprintf((netdump_membuf + HEADER_LEN),
3713 + "Got unknown command code %d!\n", req.command);
3714 + str_len = strlen(netdump_membuf + HEADER_LEN);
3715 + netdump_send_packet(&net_dev->np, &reply, str_len);
3720 + netdump_in_progress = 0;
3725 +dump_validate_config(struct netpoll *np)
3727 + if (!np->local_ip) {
3728 + printk("network device %s has no local address, "
3729 + "aborting.\n", np->name);
3733 +#define IP(x) ((unsigned char *)&np->local_ip)[x]
3734 + printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
3737 + if (!np->local_port) {
3738 + printk("source_port parameter not specified, aborting.\n");
3742 + if (!np->remote_ip) {
3743 + printk("target_ip parameter not specified, aborting.\n");
3747 + np->remote_ip = ntohl(np->remote_ip);
3748 +#define IP(x) ((unsigned char *)&np->remote_ip)[x]
3749 + printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
3752 + if (!np->remote_port) {
3753 + printk("target_port parameter not specified, aborting.\n");
3756 + printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x",
3757 + np->remote_mac[0], np->remote_mac[1], np->remote_mac[2],
3758 + np->remote_mac[3], np->remote_mac[4], np->remote_mac[5]);
3760 + if ((np->remote_mac[0] & np->remote_mac[1] & np->remote_mac[2] &
3761 + np->remote_mac[3] & np->remote_mac[4] & np->remote_mac[5]) == 255)
3762 + printk("(Broadcast)");
3768 + * Prepares the dump device so we can take a dump later.
3769 + * Validates the netdump configuration parameters.
3771 + * TODO: Network connectivity check should be done here.
3774 +dump_net_open(struct dump_dev *net_dev, unsigned long arg)
3778 + /* get the interface name */
3779 + if (copy_from_user(net_dev->np.dev_name, (void *)arg, IFNAMSIZ))
3781 + net_dev->np.rx_hook = rx_hook;
3782 + retval = netpoll_setup(&net_dev->np);
3784 + dump_validate_config(&net_dev->np);
3785 + net_dev->curr_offset = 0;
3786 + printk("Network device %s successfully configured for dumping\n",
3787 + net_dev->np.dev_name);
3792 + * Close the dump device and release associated resources
3793 + * Invoked when unconfiguring the dump device.
3796 +dump_net_release(struct dump_dev *net_dev)
3798 + netpoll_cleanup(&net_dev->np);
3803 + * Prepare the dump device for use (silence any ongoing activity
3804 + * and quiesce state) when the system crashes.
3807 +dump_net_silence(struct dump_dev *net_dev)
3809 + netpoll_set_trap(1);
3810 + local_irq_save(flags_global);
3811 + startup_handshake = 1;
3812 + net_dev->curr_offset = 0;
3813 + printk("Dumping to network device %s on CPU %d ...\n", net_dev->np.name,
3814 + smp_processor_id());
3819 + * Invoked when dumping is done. This is the time to put things back
3820 + * (i.e. undo the effects of dump_net_silence) so the device is
3821 + * available for normal use.
3824 +dump_net_resume(struct dump_dev *net_dev)
3830 + sprintf((netdump_membuf + HEADER_LEN), "NETDUMP end.\n");
3831 + str_len = strlen(netdump_membuf + HEADER_LEN);
3832 + for( indx = 0; indx < 6; indx++) {
3833 + reply.code = REPLY_END_NETDUMP;
3836 + netdump_send_packet(&net_dev->np, &reply, str_len);
3838 + printk("NETDUMP END!\n");
3839 + local_irq_restore(flags_global);
3840 + netpoll_set_trap(0);
3841 + startup_handshake = 0;
3846 + * Seek to the specified offset in the dump device.
3847 + * Makes sure this is a valid offset, otherwise returns an error.
3850 +dump_net_seek(struct dump_dev *net_dev, loff_t off)
3852 + net_dev->curr_offset = off;
3860 +dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len)
3865 + cnt = len/ PAGE_SIZE;
3867 + for (i = 0; i < cnt; i++) {
3868 + off = i* PAGE_SIZE;
3869 + ret = do_netdump(net_dev, buf+off, PAGE_SIZE);
3872 + net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE;
3878 + * check if the last dump i/o is over and ready for next request
3881 +dump_net_ready(struct dump_dev *net_dev, void *buf)
3887 + * ioctl function used for configuring network dump
3890 +dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg)
3893 + case DIOSTARGETIP:
3894 + net_dev->np.remote_ip= arg;
3896 + case DIOSTARGETPORT:
3897 + net_dev->np.remote_port = (u16)arg;
3899 + case DIOSSOURCEPORT:
3900 + net_dev->np.local_port = (u16)arg;
3903 + return copy_from_user(net_dev->np.remote_mac, (void *)arg, 6);
3905 + case DIOGTARGETIP:
3906 + case DIOGTARGETPORT:
3907 + case DIOGSOURCEPORT:
3916 +struct dump_dev_ops dump_netdev_ops = {
3917 + .open = dump_net_open,
3918 + .release = dump_net_release,
3919 + .silence = dump_net_silence,
3920 + .resume = dump_net_resume,
3921 + .seek = dump_net_seek,
3922 + .write = dump_net_write,
3923 + /* .read not implemented */
3924 + .ready = dump_net_ready,
3925 + .ioctl = dump_net_ioctl
3928 +static struct dump_dev default_dump_netdev = {
3929 + .type_name = "networkdev",
3930 + .ops = &dump_netdev_ops,
3932 + .np.name = "netdump",
3933 + .np.dev_name = "eth0",
3934 + .np.rx_hook = rx_hook,
3935 + .np.local_port = 6688,
3936 + .np.remote_port = 6688,
3937 + .np.remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
3941 +dump_netdev_init(void)
3943 + default_dump_netdev.curr_offset = 0;
3945 + if (dump_register_device(&default_dump_netdev) < 0) {
3946 + printk("network dump device driver registration failed\n");
3949 + printk("network device driver for LKCD registered\n");
3951 + get_random_bytes(&dump_magic, sizeof(dump_magic));
3956 +dump_netdev_cleanup(void)
3958 + dump_unregister_device(&default_dump_netdev);
3961 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
3962 +MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)");
3963 +MODULE_LICENSE("GPL");
3965 +module_init(dump_netdev_init);
3966 +module_exit(dump_netdev_cleanup);
3967 Index: linux-2.6.10/drivers/dump/dump_methods.h
3968 ===================================================================
3969 --- linux-2.6.10.orig/drivers/dump/dump_methods.h 2005-04-07 19:34:21.197950744 +0800
3970 +++ linux-2.6.10/drivers/dump/dump_methods.h 2005-04-07 18:13:56.920750576 +0800
3973 + * Generic interfaces for flexible system dump
3975 + * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
3977 + * Copyright (C) 2002 International Business Machines Corp.
3979 + * This code is released under version 2 of the GNU GPL.
3982 +#ifndef _LINUX_DUMP_METHODS_H
3983 +#define _LINUX_DUMP_METHODS_H
3986 + * Inspired by Matt Robinson's suggestion of introducing dump
3987 + * methods as a way to enable different crash dump facilities to
3988 + * coexist where each employs its own scheme or dumping policy.
3990 + * The code here creates a framework for flexible dump by defining
3991 + * a set of methods and providing associated helpers that differentiate
3992 + * between the underlying mechanism (how to dump), overall scheme
3993 + * (sequencing of stages and data dumped and associated quiescing),
3994 + * output format (what the dump output looks like), target type
3995 + * (where to save the dump; see dumpdev.h), and selection policy
3996 + * (state/data to dump).
3998 + * These sets of interfaces can be mixed and matched to build a
3999 + * dumper suitable for a given situation, allowing for
4000 + * flexibility as well as an appropriate degree of code reuse.
4001 + * For example all features and options of lkcd (including
4002 + * granular selective dumping in the near future) should be
4003 + * available even when say, the 2 stage soft-boot based mechanism
4004 + * is used for taking disruptive dumps.
4006 + * Todo: Additionally modules or drivers may supply their own
4007 + * custom dumpers which extend dump with module specific
4008 + * information or hardware state, and can even tweak the
4009 + * mechanism when it comes to saving state relevant to
4013 +#include <linux/sched.h>
4014 +#include <linux/slab.h>
4015 +#include <linux/highmem.h>
4016 +#include <linux/dumpdev.h>
4017 +#include <asm/page.h> /* get_order */
4019 +#define MAX_PASSES 6
4023 +/* To customise selection of pages to be dumped in a given pass/group */
4024 +struct dump_data_filter{
4026 + int (*selector)(int, unsigned long, unsigned long);
4027 + ulong level_mask; /* dump level(s) for which this filter applies */
4028 + loff_t start[MAX_NUMNODES], end[MAX_NUMNODES]; /* location range applicable */
4029 + ulong num_mbanks; /* Number of memory banks. Greater than one for discontig memory (NUMA) */
4034 + * Determined by the kind of dump mechanism and appropriate
4037 +struct dump_scheme_ops {
4038 + /* sets aside memory, inits data structures etc */
4039 + int (*configure)(unsigned long devid);
4040 + /* releases resources */
4041 + int (*unconfigure)(void);
4043 + /* ordering of passes, invoking iterator */
4044 + int (*sequencer)(void);
4045 + /* iterates over system data, selects and acts on data to dump */
4046 + int (*iterator)(int, int (*)(unsigned long, unsigned long),
4047 + struct dump_data_filter *);
4048 + /* action when data is selected for dump */
4049 + int (*save_data)(unsigned long, unsigned long);
4050 + /* action when data is to be excluded from dump */
4051 + int (*skip_data)(unsigned long, unsigned long);
4052 + /* policies for space, multiple dump devices etc */
4053 + int (*write_buffer)(void *, unsigned long);
4056 +struct dump_scheme {
4057 + /* the name serves as an anchor to locate the scheme after reboot */
4059 + struct dump_scheme_ops *ops;
4060 + struct list_head list;
4063 +/* Quiescing/Silence levels (controls IPI callback behaviour) */
4064 +extern enum dump_silence_levels {
4065 + DUMP_SOFT_SPIN_CPUS = 1,
4066 + DUMP_HARD_SPIN_CPUS = 2,
4067 + DUMP_HALT_CPUS = 3,
4068 +} dump_silence_level;
4070 +/* determined by the dump (file) format */
4071 +struct dump_fmt_ops {
4072 + /* build header */
4073 + int (*configure_header)(const char *, const struct pt_regs *);
4074 + int (*update_header)(void); /* update header and write it out */
4075 + /* save curr context */
4076 + void (*save_context)(int, const struct pt_regs *,
4077 + struct task_struct *);
4078 + /* typically called by the save_data action */
4079 + /* add formatted data to the dump buffer */
4080 + int (*add_data)(unsigned long, unsigned long);
4081 + int (*update_end_marker)(void);
4085 + unsigned long magic;
4086 + char name[32]; /* lcrash, crash, elf-core etc */
4087 + struct dump_fmt_ops *ops;
4088 + struct list_head list;
4092 + * Modules will be able add their own data capture schemes by
4093 + * registering their own dumpers. Typically they would use the
4094 + * primary dumper as a template and tune it with their routines.
4098 +/* The combined dumper profile (mechanism, scheme, dev, fmt) */
4100 + char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */
4101 + struct dump_scheme *scheme;
4102 + struct dump_fmt *fmt;
4103 + struct __dump_compress *compress;
4104 + struct dump_data_filter *filter;
4105 + struct dump_dev *dev;
4106 + /* state valid only for active dumper(s) - per instance */
4107 + /* run time state/context */
4109 + unsigned long count;
4110 + loff_t curr_offset; /* current logical offset into dump device */
4111 + loff_t curr_loc; /* current memory location */
4112 + void *curr_buf; /* current position in the dump buffer */
4113 + void *dump_buf; /* starting addr of dump buffer */
4114 + int header_dirty; /* whether the header needs to be written out */
4116 + struct list_head dumper_list; /* links to other dumpers */
4119 +/* Starting point to get to the current configured state */
4120 +struct dump_config {
4123 + struct dumper *dumper;
4124 + unsigned long dump_device;
4125 + unsigned long dump_addr; /* relevant only for in-memory dumps */
4126 + struct list_head dump_dev_list;
4129 +extern struct dump_config dump_config;
4131 +/* Used to save the dump config across a reboot for 2-stage dumps:
4133 + * Note: The scheme, format, compression and device type should be
4134 + * registered at bootup, for this config to be sharable across soft-boot.
4135 + * The function addresses could have changed and become invalid, and
4136 + * need to be set up again.
4138 +struct dump_config_block {
4139 + u64 magic; /* for a quick sanity check after reboot */
4140 + struct dump_memdev memdev; /* handle to dump stored in memory */
4141 + struct dump_config config;
4142 + struct dumper dumper;
4143 + struct dump_scheme scheme;
4144 + struct dump_fmt fmt;
4145 + struct __dump_compress compress;
4146 + struct dump_data_filter filter_table[MAX_PASSES];
4147 + struct dump_anydev dev[MAX_DEVS]; /* target dump device */
4151 +/* Wrappers that invoke the methods for the current (active) dumper */
4153 +/* Scheme operations */
4155 +static inline int dump_sequencer(void)
4157 + return dump_config.dumper->scheme->ops->sequencer();
4160 +static inline int dump_iterator(int pass, int (*action)(unsigned long,
4161 + unsigned long), struct dump_data_filter *filter)
4163 + return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
4166 +#define dump_save_data dump_config.dumper->scheme->ops->save_data
4167 +#define dump_skip_data dump_config.dumper->scheme->ops->skip_data
4169 +static inline int dump_write_buffer(void *buf, unsigned long len)
4171 + return dump_config.dumper->scheme->ops->write_buffer(buf, len);
4174 +static inline int dump_configure(unsigned long devid)
4176 + return dump_config.dumper->scheme->ops->configure(devid);
4179 +static inline int dump_unconfigure(void)
4181 + return dump_config.dumper->scheme->ops->unconfigure();
4184 +/* Format operations */
4186 +static inline int dump_configure_header(const char *panic_str,
4187 + const struct pt_regs *regs)
4189 + return dump_config.dumper->fmt->ops->configure_header(panic_str, regs);
4192 +static inline void dump_save_context(int cpu, const struct pt_regs *regs,
4193 + struct task_struct *tsk)
4195 + dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk);
4198 +static inline int dump_save_this_cpu(const struct pt_regs *regs)
4200 + int cpu = smp_processor_id();
4202 + dump_save_context(cpu, regs, current);
4206 +static inline int dump_update_header(void)
4208 + return dump_config.dumper->fmt->ops->update_header();
4211 +static inline int dump_update_end_marker(void)
4213 + return dump_config.dumper->fmt->ops->update_end_marker();
4216 +static inline int dump_add_data(unsigned long loc, unsigned long sz)
4218 + return dump_config.dumper->fmt->ops->add_data(loc, sz);
4221 +/* Compression operation */
4222 +static inline int dump_compress_data(char *src, int slen, char *dst,
4223 + unsigned long loc)
4225 + return dump_config.dumper->compress->compress_func(src, slen,
4226 + dst, DUMP_DPC_PAGE_SIZE, loc);
4230 +/* Prototypes of some default implementations of dump methods */
4232 +extern struct __dump_compress dump_none_compression;
4234 +/* Default scheme methods (dump_scheme.c) */
4236 +extern int dump_generic_sequencer(void);
4237 +extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned
4238 + long), struct dump_data_filter *filter);
4239 +extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
4240 +extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
4241 +extern int dump_generic_write_buffer(void *buf, unsigned long len);
4242 +extern int dump_generic_configure(unsigned long);
4243 +extern int dump_generic_unconfigure(void);
4244 +#ifdef CONFIG_DISCONTIGMEM
4245 +extern void dump_reconfigure_mbanks(void);
4248 +/* Default scheme template */
4249 +extern struct dump_scheme dump_scheme_singlestage;
4251 +/* Default dump format methods */
4253 +extern int dump_lcrash_configure_header(const char *panic_str,
4254 + const struct pt_regs *regs);
4255 +extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
4256 + struct task_struct *tsk);
4257 +extern int dump_generic_update_header(void);
4258 +extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
4259 +extern int dump_lcrash_update_end_marker(void);
4261 +/* Default format (lcrash) template */
4262 +extern struct dump_fmt dump_fmt_lcrash;
4264 +/* Default dump selection filter table */
4267 + * Entries listed in order of importance and correspond to passes
4268 + * The last entry (with a level_mask of zero) typically reflects data that
4269 + * won't be dumped -- this may for example be used to identify data
4270 + * that will be skipped for certain, so the corresponding memory areas can be
4271 + * utilized as scratch space.
4273 +extern struct dump_data_filter dump_filter_table[];
4275 +/* Some pre-defined dumpers */
4276 +extern struct dumper dumper_singlestage;
4277 +extern struct dumper dumper_stage1;
4278 +extern struct dumper dumper_stage2;
4280 +/* These are temporary */
4281 +#define DUMP_MASK_HEADER DUMP_LEVEL_HEADER
4282 +#define DUMP_MASK_KERN DUMP_LEVEL_KERN
4283 +#define DUMP_MASK_USED DUMP_LEVEL_USED
4284 +#define DUMP_MASK_UNUSED DUMP_LEVEL_ALL_RAM
4285 +#define DUMP_MASK_REST 0 /* dummy for now */
4287 +/* Helpers - move these to dump.h later ? */
4289 +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs);
4290 +extern int dump_ll_write(void *buf, unsigned long len);
4291 +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page);
4293 +static inline void dumper_reset(void)
4295 + dump_config.dumper->curr_buf = dump_config.dumper->dump_buf;
4296 + dump_config.dumper->curr_loc = 0;
4297 + dump_config.dumper->curr_offset = 0;
4298 + dump_config.dumper->count = 0;
4299 + dump_config.dumper->curr_pass = 0;
4303 + * May later be moulded to perform boot-time allocations so we can dump
4304 + * earlier during bootup
4306 +static inline void *dump_alloc_mem(unsigned long size)
4308 + return (void *) __get_free_pages(GFP_KERNEL, get_order(size));
4311 +static inline void dump_free_mem(void *buf)
4313 + struct page *page;
4315 + /* ignore reserved pages (e.g. post soft boot stage) */
4316 + if (buf && (page = virt_to_page(buf))) {
4317 + if (PageReserved(page))
4321 + * Allocated using __get_free_pages().
4323 + free_pages((unsigned long)buf,
4324 + get_order(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE));
4328 +#endif /* _LINUX_DUMP_METHODS_H */
4329 Index: linux-2.6.10/drivers/dump/dump_gzip.c
4330 ===================================================================
4331 --- linux-2.6.10.orig/drivers/dump/dump_gzip.c 2005-04-07 19:34:21.197950744 +0800
4332 +++ linux-2.6.10/drivers/dump/dump_gzip.c 2005-04-07 18:13:56.917751032 +0800
4335 + * GZIP Compression functions for kernel crash dumps.
4337 + * Created by: Matt Robinson (yakker@sourceforge.net)
4338 + * Copyright 2001 Matt D. Robinson. All rights reserved.
4340 + * This code is released under version 2 of the GNU GPL.
4344 +#include <linux/config.h>
4345 +#include <linux/module.h>
4346 +#include <linux/sched.h>
4347 +#include <linux/fs.h>
4348 +#include <linux/file.h>
4349 +#include <linux/init.h>
4350 +#include <linux/slab.h>
4351 +#include <linux/dump.h>
4352 +#include <linux/zlib.h>
4353 +#include <linux/vmalloc.h>
4355 +static void *deflate_workspace;
4356 +static unsigned long workspace_paddr[2];
4358 +static u8 *safety_buffer;
4361 + * Name: dump_compress_gzip()
4362 + * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the
4363 + * deflate functions similar to what's used in PPP).
4366 +dump_compress_gzip(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
4367 + unsigned long loc)
4369 + /* error code and dump stream */
4371 + z_stream dump_stream;
4372 + struct page *pg = (struct page *)loc;
4373 + unsigned long paddr = page_to_pfn(pg) << PAGE_SHIFT;
4374 + static int warning = 0;
4376 + dump_stream.workspace = deflate_workspace;
4377 + if ((paddr == workspace_paddr[0]) || (paddr == workspace_paddr[1])) {
4379 + * This page belongs to deflate_workspace used as temporary
4380 + * buffer for compression. Hence, dump it without compression.
4384 + if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) {
4385 + /* fall back to RLE compression */
4386 + printk("dump_compress_gzip(): zlib_deflateInit() "
4387 + "failed (%d)!\n", err);
4391 + /* copy the old page to the safety buffer */
4392 + if (oldsize <= DUMP_PAGE_SIZE) {
4393 + memcpy(safety_buffer, old, oldsize);
4394 + dump_stream.next_in = (u8 *) safety_buffer;
4397 + printk("dump_compress_gzip oversize input: %d\n",
4401 + dump_stream.next_in = (u8 *) old;
4404 + /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */
4405 + dump_stream.avail_in = oldsize;
4407 + /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */
4408 + dump_stream.next_out = new;
4409 + dump_stream.avail_out = newsize;
4411 + /* deflate the page -- check for error */
4412 + err = zlib_deflate(&dump_stream, Z_FINISH);
4413 + if (err != Z_STREAM_END) {
4414 + /* zero is return code here */
4415 + (void)zlib_deflateEnd(&dump_stream);
4416 + printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n",
4421 + /* let's end the deflated compression stream */
4422 + if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) {
4423 + printk("dump_compress_gzip(): zlib_deflateEnd() "
4424 + "failed (%d)!\n", err);
4427 + /* return the compressed byte total (if it's smaller) */
4428 + if (dump_stream.total_out >= oldsize) {
4431 + return dump_stream.total_out;
4434 +/* setup the gzip compression functionality */
4435 +static struct __dump_compress dump_gzip_compression = {
4436 + .compress_type = DUMP_COMPRESS_GZIP,
4437 + .compress_func = dump_compress_gzip,
4438 + .compress_name = "GZIP",
4442 + * Name: dump_compress_gzip_init()
4443 + * Func: Initialize gzip as a compression mechanism.
4446 +dump_compress_gzip_init(void)
4450 + deflate_workspace = vmalloc(zlib_deflate_workspacesize());
4451 + if (!deflate_workspace) {
4452 + printk("dump_compress_gzip_init(): Failed to "
4453 + "alloc %d bytes for deflate workspace\n",
4454 + zlib_deflate_workspacesize());
4458 + * Need to find pages (workspace) that are used for compression.
4459 + * Even though zlib_deflate_workspacesize() is 64 pages (approximately)
4460 + * depending on the arch, we use only 2 pages. Hence, get the physical
4461 + * addresses of these 2 pages and use them to avoid compressing those
4464 + pg = vmalloc_to_page(deflate_workspace);
4465 + workspace_paddr[0] = page_to_pfn(pg) << PAGE_SHIFT;
4466 + pg = vmalloc_to_page(deflate_workspace + DUMP_PAGE_SIZE);
4467 + workspace_paddr[1] = page_to_pfn(pg) << PAGE_SHIFT;
4469 + /* Eliminate the possibility of real data getting a compression
4473 + if (!(safety_buffer = (void *)__get_free_pages(GFP_KERNEL,
4474 + get_order(DUMP_PAGE_SIZE))))
4477 + printk("dump gzip safety buffer: %p, %d\n", safety_buffer,
4478 + (int)DUMP_PAGE_SIZE);
4480 + dump_register_compression(&dump_gzip_compression);
4485 + * Name: dump_compress_gzip_cleanup()
4486 + * Func: Remove gzip as a compression mechanism.
4489 +dump_compress_gzip_cleanup(void)
4491 + vfree(deflate_workspace);
4492 + if (safety_buffer) {
4493 + free_pages((unsigned long)safety_buffer,
4494 + get_order(DUMP_PAGE_SIZE));
4495 + safety_buffer = NULL;
4498 + dump_unregister_compression(DUMP_COMPRESS_GZIP);
4501 +/* module initialization */
4502 +module_init(dump_compress_gzip_init);
4503 +module_exit(dump_compress_gzip_cleanup);
4505 +MODULE_LICENSE("GPL");
4506 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
4507 +MODULE_DESCRIPTION("Gzip compression module for crash dump driver");
4508 Index: linux-2.6.10/drivers/dump/dump_ppc64.c
4509 ===================================================================
4510 --- linux-2.6.10.orig/drivers/dump/dump_ppc64.c 2005-04-07 19:34:21.197950744 +0800
4511 +++ linux-2.6.10/drivers/dump/dump_ppc64.c 2005-04-07 18:13:56.919750728 +0800
4514 + * Architecture specific (ppc64) functions for Linux crash dumps.
4516 + * Created by: Matt Robinson (yakker@sgi.com)
4518 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
4520 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
4521 + * Copyright 2000 TurboLinux, Inc. All rights reserved.
4522 + * Copyright 2003, 2004 IBM Corporation
4524 + * This code is released under version 2 of the GNU GPL.
4528 + * The hooks for dumping the kernel virtual memory to disk are in this
4529 + * file. Any time a modification is made to the virtual memory mechanism,
4530 + * these routines must be changed to use the new mechanisms.
4532 +#include <linux/types.h>
4533 +#include <linux/fs.h>
4534 +#include <linux/dump.h>
4535 +#include <linux/mm.h>
4536 +#include <linux/vmalloc.h>
4537 +#include <linux/delay.h>
4538 +#include <linux/syscalls.h>
4539 +#include <asm/hardirq.h>
4540 +#include "dump_methods.h"
4541 +#include <linux/irq.h>
4542 +#include <asm/machdep.h>
4543 +#include <asm/uaccess.h>
4544 +#include <asm/irq.h>
4545 +#include <asm/page.h>
4546 +#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
4547 +#include <linux/kdb.h>
4550 +extern cpumask_t irq_affinity[];
4552 +static cpumask_t saved_affinity[NR_IRQS];
4554 +static __s32 saved_irq_count; /* saved preempt_count() flags */
4556 +static int alloc_dha_stack(void)
4561 + if (dump_header_asm.dha_stack[0])
4564 + ptr = (void *)vmalloc(THREAD_SIZE * num_possible_cpus());
4569 + for (i = 0; i < num_possible_cpus(); i++) {
4570 + dump_header_asm.dha_stack[i] =
4571 + (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
4576 +static int free_dha_stack(void)
4578 + if (dump_header_asm.dha_stack[0]) {
4579 + vfree((void*)dump_header_asm.dha_stack[0]);
4580 + dump_header_asm.dha_stack[0] = 0;
4585 +static int dump_expect_ipi[NR_CPUS];
4586 +static atomic_t waiting_for_dump_ipi;
4588 +extern void stop_this_cpu(void *);
4590 +dump_ipi_handler(struct pt_regs *regs)
4592 + int cpu = smp_processor_id();
4594 + if (!dump_expect_ipi[cpu])
4596 + dump_save_this_cpu(regs);
4597 + atomic_dec(&waiting_for_dump_ipi);
4600 + switch (dump_silence_level) {
4601 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
4602 + while (dump_oncpu) {
4603 + barrier(); /* paranoia */
4604 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
4605 + goto level_changed;
4606 + cpu_relax(); /* kill time nicely */
4610 + case DUMP_HALT_CPUS: /* Execute halt */
4611 + stop_this_cpu(NULL);
4614 + case DUMP_SOFT_SPIN_CPUS:
4615 + /* Mark the task so it spins in schedule */
4616 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
4623 +/* save registers on other processors
4624 + * If the other cpus don't respond we simply do not get their states.
4627 +__dump_save_other_cpus(void)
4629 + int i, cpu = smp_processor_id();
4630 + int other_cpus = num_online_cpus()-1;
4632 + if (other_cpus > 0) {
4633 + atomic_set(&waiting_for_dump_ipi, other_cpus);
4634 + for (i = 0; i < NR_CPUS; i++)
4635 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
4637 + printk(KERN_ALERT "sending IPI to other cpus...\n");
4638 + dump_send_ipi(dump_ipi_handler);
4640 + * Maybe we don't need to wait for the IPI to be processed.
4641 + * just write out the header at the end of dumping, if
4642 + * this IPI is not processed until then, there probably
4643 + * is a problem and we just fail to capture state of
4645 + * However, we will wait 10 secs for other CPUs to respond.
4646 + * If not, proceed with the dump process even though we failed
4647 + * to capture other CPU states.
4649 + i = 10000; /* wait max of 10 seconds */
4650 + while ((atomic_read(&waiting_for_dump_ipi) > 0) && (--i > 0)) {
4654 + printk(KERN_ALERT "done waiting: %d cpus not responding\n",
4655 + atomic_read(&waiting_for_dump_ipi));
4656 + dump_send_ipi(NULL); /* clear handler */
4661 + * Restore old irq affinities.
4664 +__dump_reset_irq_affinity(void)
4667 + irq_desc_t *irq_d;
4669 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
4672 + irq_d = get_irq_desc(i);
4673 + if (irq_d->handler == NULL) {
4676 + if (irq_d->handler->set_affinity != NULL) {
4677 + irq_d->handler->set_affinity(i, saved_affinity[i]);
4683 + * Routine to save the old irq affinities and change affinities of all irqs to
4684 + * the dumping cpu.
4686 + * NB: Need to be expanded to multiple nodes.
4689 +__dump_set_irq_affinity(void)
4692 + cpumask_t cpu = CPU_MASK_NONE;
4693 + irq_desc_t *irq_d;
4695 + cpu_set(smp_processor_id(), cpu);
4697 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
4700 + irq_d = get_irq_desc(i);
4701 + if (irq_d->handler == NULL) {
4704 + irq_affinity[i] = cpu;
4705 + if (irq_d->handler->set_affinity != NULL) {
4706 + irq_d->handler->set_affinity(i, irq_affinity[i]);
4710 +#else /* !CONFIG_SMP */
4711 +#define __dump_save_other_cpus() do { } while (0)
4712 +#define __dump_set_irq_affinity() do { } while (0)
4713 +#define __dump_reset_irq_affinity() do { } while (0)
4714 +#endif /* !CONFIG_SMP */
4717 +__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
4720 + memcpy(dest_regs, regs, sizeof(struct pt_regs));
4725 +__dump_save_context(int cpu, const struct pt_regs *regs,
4726 + struct task_struct *tsk)
4728 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
4729 + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
4731 + /* take a snapshot of the stack */
4732 + /* doing this enables us to tolerate slight drifts on this cpu */
4734 + if (dump_header_asm.dha_stack[cpu]) {
4735 + memcpy((void *)dump_header_asm.dha_stack[cpu],
4736 + STACK_START_POSITION(tsk),
4739 + dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
4743 + * Name: __dump_configure_header()
4744 + * Func: Configure the dump header with all proper values.
4747 +__dump_configure_header(const struct pt_regs *regs)
4752 +#if defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
4754 +kdb_sysdump(int argc, const char **argv, const char **envp, struct pt_regs *regs)
4756 + kdb_printf("Dumping to disk...\n");
4757 + dump("dump from kdb", regs);
4758 + kdb_printf("Dump Complete\n");
4764 + * Name: __dump_init()
4765 + * Func: Initialize the dumping routine process. This is in case
4766 + * it's necessary in the future.
4769 +__dump_init(uint64_t local_memory_start)
4771 +#if defined(FIXME) && defined(CONFIG_KDB) && !defined(CONFIG_DUMP_MODULE)
4772 + /* This won't currently work because interrupts are off in kdb
4773 + * and the dump process doesn't understand how to recover.
4775 + /* ToDo: add a command to query/set dump configuration */
4776 + kdb_register_repeat("sysdump", kdb_sysdump, "", "use lkcd to dump the system to disk (if configured)", 0, KDB_REPEAT_NONE);
4784 + * Name: __dump_open()
4785 + * Func: Open the dump device (architecture specific). This is in
4786 + * case it's necessary in the future.
4791 + alloc_dha_stack();
4796 + * Name: __dump_cleanup()
4797 + * Func: Free any architecture specific data structures. This is called
4798 + * when the dump module is being removed.
4801 +__dump_cleanup(void)
4807 + * Kludge - dump from interrupt context is unreliable (Fixme)
4809 + * We do this so that softirqs initiated for dump i/o
4810 + * get processed and we don't hang while waiting for i/o
4811 + * to complete or in any irq synchronization attempt.
4813 + * This is not quite legal of course, as it has the side
4814 + * effect of making all interrupts & softirqs triggered
4815 + * while dump is in progress complete before currently
4816 + * pending softirqs and the currently executing interrupt
4822 + saved_irq_count = irq_count();
4823 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
4827 +irq_bh_restore(void)
4829 + preempt_count() |= saved_irq_count;
4833 + * Name: __dump_irq_enable
4834 + * Func: Reset system so interrupts are enabled.
4835 + * This is used for dump methods that require interrupts
4836 + * Eventually, all methods will have interrupts disabled
4837 + * and this code can be removed.
4839 + * Change irq affinities
4840 + * Re-enable interrupts
4843 +__dump_irq_enable(void)
4845 + __dump_set_irq_affinity();
4847 + local_irq_enable();
4852 + * Name: __dump_irq_restore
4853 + * Func: Resume the system state in an architecture-specific way.
4856 +__dump_irq_restore(void)
4858 + local_irq_disable();
4859 + __dump_reset_irq_affinity();
4864 +/* Cheap progress hack. It estimates pages to write and
4865 + * assumes all pages will go -- so it may get way off.
4866 + * As the progress is not displayed for other architectures, not used at this
4870 +__dump_progress_add_page(void)
4872 + unsigned long total_pages = nr_free_pages() + nr_inactive_pages + nr_active_pages;
4873 + unsigned int percent = (dump_header.dh_num_dump_pages * 100) / total_pages;
4876 + if (percent > last_percent && percent <= 100) {
4877 + sprintf(buf, "Dump %3d%% ", percent);
4878 + ppc64_dump_msg(0x2, buf);
4879 + last_percent = percent;
4885 +extern int dump_page_is_ram(unsigned long);
4887 + * Name: __dump_page_valid()
4888 + * Func: Check if page is valid to dump.
4891 +__dump_page_valid(unsigned long index)
4893 + if (!pfn_valid(index))
4896 + return dump_page_is_ram(index);
4900 + * Name: manual_handle_crashdump()
4901 + * Func: Interface for the lkcd dump command. Calls dump_execute()
4904 +manual_handle_crashdump(void)
4906 + struct pt_regs regs;
4908 + get_current_regs(®s);
4909 + dump_execute("manual", ®s);
4914 + * Name: __dump_clean_irq_state()
4915 + * Func: Clean up from the previous IRQ handling state. Such as oops from
4916 + * interrupt handler or bottom half.
4919 +__dump_clean_irq_state(void)
4923 Index: linux-2.6.10/drivers/dump/dump_i386.c
4924 ===================================================================
4925 --- linux-2.6.10.orig/drivers/dump/dump_i386.c 2005-04-07 19:34:21.197950744 +0800
4926 +++ linux-2.6.10/drivers/dump/dump_i386.c 2005-04-07 18:13:56.895754376 +0800
4929 + * Architecture specific (i386) functions for Linux crash dumps.
4931 + * Created by: Matt Robinson (yakker@sgi.com)
4933 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
4935 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
4936 + * Copyright 2000 TurboLinux, Inc. All rights reserved.
4938 + * This code is released under version 2 of the GNU GPL.
4942 + * The hooks for dumping the kernel virtual memory to disk are in this
4943 + * file. Any time a modification is made to the virtual memory mechanism,
4944 + * these routines must be changed to use the new mechanisms.
4946 +#include <linux/init.h>
4947 +#include <linux/types.h>
4948 +#include <linux/kernel.h>
4949 +#include <linux/smp.h>
4950 +#include <linux/fs.h>
4951 +#include <linux/vmalloc.h>
4952 +#include <linux/mm.h>
4953 +#include <linux/dump.h>
4954 +#include "dump_methods.h"
4955 +#include <linux/irq.h>
4957 +#include <asm/processor.h>
4958 +#include <asm/e820.h>
4959 +#include <asm/hardirq.h>
4960 +#include <asm/nmi.h>
4962 +static __s32 saved_irq_count; /* saved preempt_count() flags */
4965 +alloc_dha_stack(void)
4970 + if (dump_header_asm.dha_stack[0])
4973 + ptr = vmalloc(THREAD_SIZE * num_online_cpus());
4975 + printk("vmalloc for dha_stacks failed\n");
4979 + for (i = 0; i < num_online_cpus(); i++) {
4980 + dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
4981 + (i * THREAD_SIZE));
4987 +free_dha_stack(void)
4989 + if (dump_header_asm.dha_stack[0]) {
4990 + vfree((void *)dump_header_asm.dha_stack[0]);
4991 + dump_header_asm.dha_stack[0] = 0;
4998 +__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
5000 + *dest_regs = *regs;
5002 + /* In case of panic dumps, we collect regs on entry to panic.
5003 + * so, we shouldn't 'fix' ssesp here again. But it is hard to
5004 + * tell just looking at regs whether ssesp need fixing. We make
5005 + * this decision by looking at xss in regs. If we have better
5006 + * means to determine that ssesp are valid (by some flag which
5007 + * tells that we are here due to panic dump), then we can use
5008 + * that instead of this kludge.
5010 + if (!user_mode(regs)) {
5011 + if ((0xffff & regs->xss) == __KERNEL_DS)
5012 + /* already fixed up */
5014 + dest_regs->esp = (unsigned long)&(regs->esp);
5015 + __asm__ __volatile__ ("movw %%ss, %%ax;"
5016 + :"=a"(dest_regs->xss));
5021 +__dump_save_context(int cpu, const struct pt_regs *regs,
5022 + struct task_struct *tsk)
5024 + dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
5025 + __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
5027 + /* take a snapshot of the stack */
5028 + /* doing this enables us to tolerate slight drifts on this cpu */
5030 + if (dump_header_asm.dha_stack[cpu]) {
5031 + memcpy((void *)dump_header_asm.dha_stack[cpu],
5032 + STACK_START_POSITION(tsk),
5035 + dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
5039 +extern cpumask_t irq_affinity[];
5040 +extern irq_desc_t irq_desc[];
5041 +extern void dump_send_ipi(void);
5043 +static int dump_expect_ipi[NR_CPUS];
5044 +static atomic_t waiting_for_dump_ipi;
5045 +static cpumask_t saved_affinity[NR_IRQS];
5047 +extern void stop_this_cpu(void *); /* exported by i386 kernel */
5050 +dump_nmi_callback(struct pt_regs *regs, int cpu)
5052 + if (!dump_expect_ipi[cpu])
5055 + dump_expect_ipi[cpu] = 0;
5057 + dump_save_this_cpu(regs);
5058 + atomic_dec(&waiting_for_dump_ipi);
5061 + switch (dump_silence_level) {
5062 + case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
5063 + while (dump_oncpu) {
5064 + barrier(); /* paranoia */
5065 + if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
5066 + goto level_changed;
5068 + cpu_relax(); /* kill time nicely */
5072 + case DUMP_HALT_CPUS: /* Execute halt */
5073 + stop_this_cpu(NULL);
5076 + case DUMP_SOFT_SPIN_CPUS:
5077 + /* Mark the task so it spins in schedule */
5078 + set_tsk_thread_flag(current, TIF_NEED_RESCHED);
5085 +/* save registers on other processors */
5087 +__dump_save_other_cpus(void)
5089 + int i, cpu = smp_processor_id();
5090 + int other_cpus = num_online_cpus()-1;
5092 + if (other_cpus > 0) {
5093 + atomic_set(&waiting_for_dump_ipi, other_cpus);
5095 + for (i = 0; i < NR_CPUS; i++) {
5096 + dump_expect_ipi[i] = (i != cpu && cpu_online(i));
5099 + /* short circuit normal NMI handling temporarily */
5100 + set_nmi_callback(dump_nmi_callback);
5104 + /* Maybe we don't need to wait for the NMI to be processed.
5105 + just write out the header at the end of dumping, if
5106 + this IPI is not processed until then, there probably
5107 + is a problem and we just fail to capture state of
5109 + while(atomic_read(&waiting_for_dump_ipi) > 0) {
5113 + unset_nmi_callback();
5118 + * Routine to save the old irq affinities and change affinities of all irqs to
5119 + * the dumping cpu.
5122 +set_irq_affinity(void)
5125 + cpumask_t cpu = CPU_MASK_NONE;
5127 + cpu_set(smp_processor_id(), cpu);
5128 + memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
5129 + for (i = 0; i < NR_IRQS; i++) {
5130 + if (irq_desc[i].handler == NULL)
5132 + irq_affinity[i] = cpu;
5133 + if (irq_desc[i].handler->set_affinity != NULL)
5134 + irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
5139 + * Restore old irq affinities.
5142 +reset_irq_affinity(void)
5146 + memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
5147 + for (i = 0; i < NR_IRQS; i++) {
5148 + if (irq_desc[i].handler == NULL)
5150 + if (irq_desc[i].handler->set_affinity != NULL)
5151 + irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
5155 +#else /* !CONFIG_SMP */
5156 +#define set_irq_affinity() do { } while (0)
5157 +#define reset_irq_affinity() do { } while (0)
5158 +#define save_other_cpu_states() do { } while (0)
5159 +#endif /* !CONFIG_SMP */
5162 + * Kludge - dump from interrupt context is unreliable (Fixme)
5164 + * We do this so that softirqs initiated for dump i/o
5165 + * get processed and we don't hang while waiting for i/o
5166 + * to complete or in any irq synchronization attempt.
5168 + * This is not quite legal of course, as it has the side
5169 + * effect of making all interrupts & softirqs triggered
5170 + * while dump is in progress complete before currently
5171 + * pending softirqs and the currently executing interrupt
5177 + saved_irq_count = irq_count();
5178 + preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
5182 +irq_bh_restore(void)
5184 + preempt_count() |= saved_irq_count;
5188 + * Name: __dump_irq_enable
5189 + * Func: Reset system so interrupts are enabled.
5190 + * This is used for dump methods that require interrupts
5191 + * Eventually, all methods will have interrupts disabled
5192 + * and this code can be removed.
5194 + * Change irq affinities
5195 + * Re-enable interrupts
5198 +__dump_irq_enable(void)
5200 + set_irq_affinity();
5202 + local_irq_enable();
5207 + * Name: __dump_irq_restore
5208 + * Func: Resume the system state in an architecture-specific way.
5212 +__dump_irq_restore(void)
5214 + local_irq_disable();
5215 + reset_irq_affinity();
5220 + * Name: __dump_configure_header()
5221 + * Func: Meant to fill in arch specific header fields except per-cpu state
5222 + * already captured via __dump_save_context for all CPUs.
5225 +__dump_configure_header(const struct pt_regs *regs)
5231 + * Name: __dump_init()
5232 + * Func: Initialize the dumping routine process.
5235 +__dump_init(uint64_t local_memory_start)
5241 + * Name: __dump_open()
5242 + * Func: Open the dump device (architecture specific).
5247 + alloc_dha_stack();
5251 + * Name: __dump_cleanup()
5252 + * Func: Free any architecture specific data structures. This is called
5253 + * when the dump module is being removed.
5256 +__dump_cleanup(void)
5261 +extern int pfn_is_ram(unsigned long);
5264 + * Name: __dump_page_valid()
5265 + * Func: Check if page is valid to dump.
5268 +__dump_page_valid(unsigned long index)
5270 + if (!pfn_valid(index))
5273 + return pfn_is_ram(index);
5277 + * Name: manual_handle_crashdump()
5278 + * Func: Interface for the lkcd dump command. Calls dump_execute()
5281 +manual_handle_crashdump(void) {
5283 + struct pt_regs regs;
5285 + get_current_regs(®s);
5286 + dump_execute("manual", ®s);
5291 + * Name: __dump_clean_irq_state()
5292 + * Func: Clean up from the previous IRQ handling state. Such as oops from
5293 + * interrupt handler or bottom half.
5296 +__dump_clean_irq_state(void)
5300 Index: linux-2.6.10/drivers/dump/dump_filters.c
5301 ===================================================================
5302 --- linux-2.6.10.orig/drivers/dump/dump_filters.c 2005-04-07 19:34:21.197950744 +0800
5303 +++ linux-2.6.10/drivers/dump/dump_filters.c 2005-04-07 18:13:56.917751032 +0800
5306 + * Default filters to select data to dump for various passes.
5308 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
5309 + * Split and rewrote default dump selection logic to generic dump
5310 + * method interfaces
5311 + * Derived from a portion of dump_base.c created by
5312 + * Matt Robinson <yakker@sourceforge.net>)
5314 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
5315 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
5316 + * Copyright (C) 2002 International Business Machines Corp.
5318 + * Used during single-stage dumping and during stage 1 of the 2-stage scheme
5319 + * (Stage 2 of the 2-stage scheme uses the fully transparent filters
5320 + * i.e. passthru filters in dump_overlay.c)
5322 + * Future: Custom selective dump may involve a different set of filters.
5324 + * This code is released under version 2 of the GNU GPL.
5327 +#include <linux/kernel.h>
5328 +#include <linux/bootmem.h>
5329 +#include <linux/mm.h>
5330 +#include <linux/slab.h>
5331 +#include <linux/dump.h>
5332 +#include "dump_methods.h"
5334 +#define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */
5335 +static unsigned long bootmap_pages;
5337 +/* Copied from mm/bootmem.c - FIXME */
5338 +/* return the number of _pages_ that will be allocated for the boot bitmap */
5339 +void dump_calc_bootmap_pages (void)
5341 + unsigned long mapsize;
5342 + unsigned long pages = num_physpages;
5344 + mapsize = (pages+7)/8;
5345 + mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
5346 + mapsize >>= PAGE_SHIFT;
5347 + bootmap_pages = mapsize + DUMP_PFN_SAFETY_MARGIN + 1;
5352 +extern unsigned long min_low_pfn;
5355 +int dump_low_page(struct page *p)
5357 + return ((page_to_pfn(p) >= min_low_pfn) &&
5358 + (page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
5361 +static inline int kernel_page(struct page *p)
5363 + /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
5364 + return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
5367 +static inline int user_page(struct page *p)
5369 + return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
5372 +static inline int unreferenced_page(struct page *p)
5374 + return !PageInuse(p) && !PageReserved(p);
5378 +/* loc marks the beginning of a range of pages */
5379 +int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
5381 + struct page *page = (struct page *)loc;
5382 + /* if any of the pages is a kernel page, select this set */
5384 + if (dump_low_page(page) || kernel_page(page))
5393 +/* loc marks the beginning of a range of pages */
5394 +int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
5396 + struct page *page = (struct page *)loc;
5398 + /* select if the set has any user page, and no kernel pages */
5400 + if (user_page(page) && !dump_low_page(page)) {
5402 + } else if (kernel_page(page) || dump_low_page(page)) {
5413 +/* loc marks the beginning of a range of pages */
5414 +int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
5416 + struct page *page = (struct page *)loc;
5418 + /* select if the set does not have any used pages */
5420 + if (!unreferenced_page(page) || dump_low_page(page)) {
5429 +/* dummy: last (non-existent) pass */
5430 +int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
5435 +/* TBD: resolve level bitmask ? */
5436 +struct dump_data_filter dump_filter_table[] = {
5437 + { .name = "kern", .selector = dump_filter_kernpages,
5438 + .level_mask = DUMP_MASK_KERN},
5439 + { .name = "user", .selector = dump_filter_userpages,
5440 + .level_mask = DUMP_MASK_USED},
5441 + { .name = "unused", .selector = dump_filter_unusedpages,
5442 + .level_mask = DUMP_MASK_UNUSED},
5443 + { .name = "none", .selector = dump_filter_none,
5444 + .level_mask = DUMP_MASK_REST},
5445 + { .name = "", .selector = NULL, .level_mask = 0}
5448 Index: linux-2.6.10/drivers/dump/dump_memdev.c
5449 ===================================================================
5450 --- linux-2.6.10.orig/drivers/dump/dump_memdev.c 2005-04-07 19:34:21.197950744 +0800
5451 +++ linux-2.6.10/drivers/dump/dump_memdev.c 2005-04-07 18:13:56.907752552 +0800
5454 + * Implements the dump driver interface for saving a dump in available
5455 + * memory areas. The saved pages may be written out to persistent storage
5456 + * after a soft reboot.
5458 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
5460 + * Copyright (C) 2002 International Business Machines Corp.
5462 + * This code is released under version 2 of the GNU GPL.
5464 + * The approach of tracking pages containing saved dump using map pages
5465 + * allocated as needed has been derived from the Mission Critical Linux
5466 + * mcore dump implementation.
5468 + * Credits and a big thanks for letting the lkcd project make use of
5469 + * the excellent piece of work and also helping with clarifications
5470 + * and tips along the way are due to:
5471 + * Dave Winchell <winchell@mclx.com> (primary author of mcore)
5472 + * Jeff Moyer <moyer@mclx.com>
5473 + * Josh Huber <huber@mclx.com>
5475 + * For those familiar with the mcore code, the main differences worth
5476 + * noting here (besides the dump device abstraction) result from enabling
5477 + * "high" memory pages (pages not permanently mapped in the kernel
5478 + * address space) to be used for saving dump data (because of which a
5479 + * simple virtual address based linked list cannot be used anymore for
5480 + * managing free pages), an added level of indirection for faster
5481 + * lookups during the post-boot stage, and the idea of pages being
5482 + * made available as they get freed up while dump to memory progresses
5483 + * rather than one time before starting the dump. The last point enables
5484 + * a full memory snapshot to be saved starting with an initial set of
5485 + * bootstrap pages given a good compression ratio. (See dump_overlay.c)
5490 + * -----------------MEMORY LAYOUT ------------------
5491 + * The memory space consists of a set of discontiguous pages, and
5492 + * discontiguous map pages as well, rooted in a chain of indirect
5493 + * map pages (also discontiguous). Except for the indirect maps
5494 + * (which must be preallocated in advance), the rest of the pages
5495 + * could be in high memory.
5498 + * | --------- -------- --------
5499 + * --> | . . +|--->| . +|------->| . . | indirect
5500 + * --|--|--- ---|---- --|-|--- maps
5502 + * ------ ------ ------- ------ -------
5503 + * | . | | . | | . . | | . | | . . | maps
5504 + * --|--- --|--- --|--|-- --|--- ---|-|--
5505 + * page page page page page page page data
5508 + * Writes to the dump device happen sequentially in append mode.
5509 + * The main reason for the existence of the indirect map is
5510 + * to enable a quick way to lookup a specific logical offset in
5511 + * the saved data post-soft-boot, e.g. to writeout pages
5512 + * with more critical data first, even though such pages
5513 + * would have been compressed and copied last, being the lowest
5514 + * ranked candidates for reuse due to their criticality.
5515 + * (See dump_overlay.c)
5517 +#include <linux/mm.h>
5518 +#include <linux/highmem.h>
5519 +#include <linux/bootmem.h>
5520 +#include <linux/dump.h>
5521 +#include "dump_methods.h"
5523 +#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */
5524 +#define DUMP_IND_MAP_SZ DUMP_MAP_SZ - 1 /* indirect map size */
5525 +#define DUMP_NR_BOOTSTRAP 64 /* no of bootstrap pages */
5527 +extern int dump_low_page(struct page *);
5529 +/* check if the next entry crosses a page boundary */
5530 +static inline int is_last_map_entry(unsigned long *map)
5532 + unsigned long addr = (unsigned long)(map + 1);
5534 + return (!(addr & (PAGE_SIZE - 1)));
5537 +/* Todo: should have some validation checks */
5538 +/* The last entry in the indirect map points to the next indirect map */
5539 +/* Indirect maps are referred to directly by virtual address */
5540 +static inline unsigned long *next_indirect_map(unsigned long *map)
5542 + return (unsigned long *)map[DUMP_IND_MAP_SZ];
5545 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
5546 +/* Called during early bootup - fixme: make this __init */
5547 +void dump_early_reserve_map(struct dump_memdev *dev)
5549 + unsigned long *map1, *map2;
5550 + loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
5553 + printk("Reserve bootmap space holding previous dump of %lld pages\n",
5555 + map1= (unsigned long *)dev->indirect_map_root;
5557 + while (map1 && (off < last)) {
5558 +#ifdef CONFIG_X86_64
5559 + reserve_bootmem_node(NODE_DATA(0), virt_to_phys((void *)map1),
5562 + reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE);
5564 + for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
5565 + i++, off += DUMP_MAP_SZ) {
5566 + pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
5567 + if (map1[i] >= max_low_pfn)
5569 +#ifdef CONFIG_X86_64
5570 + reserve_bootmem_node(NODE_DATA(0),
5571 + map1[i] << PAGE_SHIFT, PAGE_SIZE);
5573 + reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE);
5575 + map2 = pfn_to_kaddr(map1[i]);
5576 + for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
5577 + (off + j < last); j++) {
5578 + pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
5580 + if (map2[j] < max_low_pfn) {
5581 +#ifdef CONFIG_X86_64
5582 + reserve_bootmem_node(NODE_DATA(0),
5583 + map2[j] << PAGE_SHIFT,
5586 + reserve_bootmem(map2[j] << PAGE_SHIFT,
5592 + map1 = next_indirect_map(map1);
5594 + dev->nr_free = 0; /* these pages don't belong to this boot */
5598 +/* mark dump pages so that they aren't used by this kernel */
5599 +void dump_mark_map(struct dump_memdev *dev)
5601 + unsigned long *map1, *map2;
5602 + loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
5603 + struct page *page;
5606 + printk("Dump: marking pages in use by previous dump\n");
5607 + map1= (unsigned long *)dev->indirect_map_root;
5609 + while (map1 && (off < last)) {
5610 + page = virt_to_page(map1);
5611 + set_page_count(page, 1);
5612 + for (i=0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
5613 + i++, off += DUMP_MAP_SZ) {
5614 + pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
5615 + page = pfn_to_page(map1[i]);
5616 + set_page_count(page, 1);
5617 + map2 = kmap_atomic(page, KM_DUMP);
5618 + for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] &&
5619 + (off + j < last); j++) {
5620 + pr_debug("\t map[%d][%d] = 0x%lx\n", i, j,
5622 + page = pfn_to_page(map2[j]);
5623 + set_page_count(page, 1);
5626 + map1 = next_indirect_map(map1);
5632 + * Given a logical offset into the mem device lookup the
5633 + * corresponding page
5634 + * loc is specified in units of pages
5635 + * Note: affects curr_map (even in the case where lookup fails)
5637 +struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc)
5639 + unsigned long *map;
5640 + unsigned long i, index = loc / DUMP_MAP_SZ;
5641 + struct page *page = NULL;
5642 + unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
5644 + map = (unsigned long *)dump_mdev->indirect_map_root;
5647 + if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
5651 + * first locate the right indirect map
5652 + * in the chain of indirect maps
5654 + for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) {
5655 + if (!(map = next_indirect_map(map)))
5658 + /* then the right direct map */
5659 + /* map entries are referred to by page index */
5660 + if ((curr_map = map[index - i])) {
5661 + page = pfn_to_page(curr_map);
5662 + /* update the current traversal index */
5663 + /* dump_mdev->curr_map = &map[index - i];*/
5664 + curr_map_ptr = &map[index - i];
5668 + map = kmap_atomic(page, KM_DUMP);
5672 + /* and finally the right entry therein */
5673 + /* data pages are referred to by page index */
5674 + i = index * DUMP_MAP_SZ;
5675 + if ((curr_pfn = map[loc - i])) {
5676 + page = pfn_to_page(curr_pfn);
5677 + dump_mdev->curr_map = curr_map_ptr;
5678 + dump_mdev->curr_map_offset = loc - i;
5679 + dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT;
5683 + kunmap_atomic(map, KM_DUMP);
5689 + * Retrieves a pointer to the next page in the dump device
5690 + * Used during the lookup pass post-soft-reboot
5692 +struct page *dump_mem_next_page(struct dump_memdev *dev)
5695 + unsigned long *map;
5696 + struct page *page = NULL;
5698 + if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) {
5702 + if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) {
5703 + /* move to next map */
5704 + if (is_last_map_entry(++dev->curr_map)) {
5705 + /* move to the next indirect map page */
5706 + printk("dump_mem_next_page: go to next indirect map\n");
5707 + dev->curr_map = (unsigned long *)*dev->curr_map;
5708 + if (!dev->curr_map)
5711 + i = dev->curr_map_offset = 0;
5712 + pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n",
5713 + dev->curr_map, *dev->curr_map);
5717 + if (*dev->curr_map) {
5718 + map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP);
5720 + page = pfn_to_page(map[i]);
5721 + kunmap_atomic(map, KM_DUMP);
5722 + dev->ddev.curr_offset += PAGE_SIZE;
5728 +/* Copied from dump_filters.c */
5729 +static inline int kernel_page(struct page *p)
5731 + /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
5732 + return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
5735 +static inline int user_page(struct page *p)
5737 + return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
5740 +int dump_reused_by_boot(struct page *page)
5745 + * if < __end + bootmem_bootmap_pages for this boot + allowance
5746 + * if overwritten by initrd (how to check ?)
5747 + * Also, add more checks in early boot code
5748 + * e.g. bootmem bootmap alloc verify not overwriting dump, and if
5749 + * so then realloc or move the dump pages out accordingly.
5752 + /* Temporary proof of concept hack, avoid overwriting kern pages */
5754 + return (kernel_page(page) || dump_low_page(page) || user_page(page));
5758 +/* Uses the free page passed in to expand available space */
5759 +int dump_mem_add_space(struct dump_memdev *dev, struct page *page)
5761 + struct page *map_page;
5762 + unsigned long *map;
5765 + if (!dev->curr_map)
5766 + return -ENOMEM; /* must've exhausted indirect map */
5768 + if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) {
5769 + /* add map space */
5770 + *dev->curr_map = page_to_pfn(page);
5771 + dev->curr_map_offset = 0;
5775 + /* add data space */
5776 + i = dev->curr_map_offset;
5777 + map_page = pfn_to_page(*dev->curr_map);
5778 + map = (unsigned long *)kmap_atomic(map_page, KM_DUMP);
5779 + map[i] = page_to_pfn(page);
5780 + kunmap_atomic(map, KM_DUMP);
5781 + dev->curr_map_offset = ++i;
5782 + dev->last_offset += PAGE_SIZE;
5783 + if (i >= DUMP_MAP_SZ) {
5784 + /* move to next map */
5785 + if (is_last_map_entry(++dev->curr_map)) {
5786 + /* move to the next indirect map page */
5787 + pr_debug("dump_mem_add_space: using next"
5788 + "indirect map\n");
5789 + dev->curr_map = (unsigned long *)*dev->curr_map;
5796 +/* Caution: making a dest page invalidates existing contents of the page */
5797 +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page)
5802 + * the page can be used as a destination only if we are sure
5803 + * it won't get overwritten by the soft-boot, and is not
5804 + * critical for us right now.
5806 + if (dump_reused_by_boot(page))
5809 + if ((err = dump_mem_add_space(dev, page))) {
5810 + printk("Warning: Unable to extend memdev space. Err %d\n",
5820 +/* Set up the initial maps and bootstrap space */
5821 +/* Must be called only after any previous dump is written out */
5822 +int dump_mem_open(struct dump_dev *dev, unsigned long devid)
5824 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
5825 + unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root;
5827 + struct page *page;
5828 + unsigned long i = 0;
5831 + /* Todo: sanity check for unwritten previous dump */
5833 + /* allocate pages for indirect map (non highmem area) */
5834 + nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */
5835 + for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) {
5836 + if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) {
5837 + printk("Unable to alloc indirect map %ld\n",
5838 + i / DUMP_IND_MAP_SZ);
5842 + *prev_map = (unsigned long)map;
5843 + prev_map = &map[DUMP_IND_MAP_SZ];
5846 + dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root;
5847 + dump_mdev->curr_map_offset = 0;
5850 + * allocate a few bootstrap pages: at least 1 map and 1 data page
5851 + * plus enough to save the dump header
5855 + if (!(addr = dump_alloc_mem(PAGE_SIZE))) {
5856 + printk("Unable to alloc bootstrap page %ld\n", i);
5860 + page = virt_to_page(addr);
5861 + if (dump_low_page(page)) {
5862 + dump_free_mem(addr);
5866 + if (dump_mem_add_space(dump_mdev, page)) {
5867 + printk("Warning: Unable to extend memdev "
5868 + "space. Err %d\n", err);
5869 + dump_free_mem(addr);
5873 + } while (i < DUMP_NR_BOOTSTRAP);
5875 + printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n",
5876 + nr_maps, i, dump_mdev->last_offset >> PAGE_SHIFT);
5878 + dump_mdev->last_bs_offset = dump_mdev->last_offset;
5883 +/* Releases all pre-alloc'd pages */
5884 +int dump_mem_release(struct dump_dev *dev)
5886 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
5887 + struct page *page, *map_page;
5888 + unsigned long *map, *prev_map;
5892 + if (!dump_mdev->nr_free)
5895 + pr_debug("dump_mem_release\n");
5896 + page = dump_mem_lookup(dump_mdev, 0);
5897 + for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) {
5898 + if (PageHighMem(page))
5900 + addr = page_address(page);
5902 + printk("page_address(%p) = NULL\n", page);
5905 + pr_debug("Freeing page at 0x%lx\n", addr);
5906 + dump_free_mem(addr);
5907 + if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) {
5908 + map_page = pfn_to_page(*dump_mdev->curr_map);
5909 + if (PageHighMem(map_page))
5911 + page = dump_mem_next_page(dump_mdev);
5912 + addr = page_address(map_page);
5914 + printk("page_address(%p) = NULL\n",
5918 + pr_debug("Freeing map page at 0x%lx\n", addr);
5919 + dump_free_mem(addr);
5922 + page = dump_mem_next_page(dump_mdev);
5926 + /* now for the last used bootstrap page used as a map page */
5927 + if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) {
5928 + map_page = pfn_to_page(*dump_mdev->curr_map);
5929 + if ((map_page) && !PageHighMem(map_page)) {
5930 + addr = page_address(map_page);
5932 + printk("page_address(%p) = NULL\n", map_page);
5934 + pr_debug("Freeing map page at 0x%lx\n", addr);
5935 + dump_free_mem(addr);
5941 + printk("Freed %d bootstrap pages\n", i);
5943 + /* free the indirect maps */
5944 + map = (unsigned long *)dump_mdev->indirect_map_root;
5949 + map = next_indirect_map(map);
5950 + dump_free_mem(prev_map);
5954 + printk("Freed %d indirect map(s)\n", i);
5956 + /* Reset the indirect map */
5957 + dump_mdev->indirect_map_root = 0;
5958 + dump_mdev->curr_map = 0;
5960 + /* Reset the free list */
5961 + dump_mdev->nr_free = 0;
5963 + dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0;
5964 + dump_mdev->last_used_offset = 0;
5965 + dump_mdev->curr_map = NULL;
5966 + dump_mdev->curr_map_offset = 0;
5972 + * It is critical for this to be very strict. Cannot afford
5973 + * to have anything running and accessing memory while we overwrite
5974 + * memory (potential risk of data corruption).
5975 + * If in doubt (e.g if a cpu is hung and not responding) just give
5976 + * up and refuse to proceed with this scheme.
5978 + * Note: I/O will only happen after soft-boot/switchover, so we can
5979 + * safely disable interrupts and force stop other CPUs if this is
5980 + * going to be a disruptive dump, no matter what they
5981 + * are in the middle of.
5984 + * ATM Most of this is already taken care of in the nmi handler
5985 + * We may halt the cpus right away if we know this is going to be disruptive
5986 + * For now, since we've limited ourselves to overwriting free pages we
5987 + * aren't doing much here. Eventually, we'd have to wait to make sure other
5988 + * cpus aren't using memory we could be overwriting
5990 +int dump_mem_silence(struct dump_dev *dev)
5992 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
5994 + if (dump_mdev->last_offset > dump_mdev->last_bs_offset) {
5995 + /* prefer to run lkcd config & start with a clean slate */
6001 +extern int dump_overlay_resume(void);
6003 +/* Trigger the next stage of dumping */
6004 +int dump_mem_resume(struct dump_dev *dev)
6006 + dump_overlay_resume();
6011 + * Allocate mem dev pages as required and copy buffer contents into it.
6012 + * Fails if no free pages are available
6013 + * Keeping it simple and limited for starters (can modify this over time)
6014 + * Does not handle holes or a sparse layout
6015 + * Data must be in multiples of PAGE_SIZE
6017 +int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len)
6019 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
6020 + struct page *page;
6021 + unsigned long n = 0;
6023 + unsigned long *saved_curr_map, saved_map_offset;
6026 + pr_debug("dump_mem_write: offset 0x%llx, size %ld\n",
6027 + dev->curr_offset, len);
6029 + if (dev->curr_offset + len > dump_mdev->last_offset) {
6030 + printk("Out of space to write\n");
6034 + if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1)))
6035 + return -EINVAL; /* not aligned in units of page size */
6037 + saved_curr_map = dump_mdev->curr_map;
6038 + saved_map_offset = dump_mdev->curr_map_offset;
6039 + page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT);
6041 + for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) {
6042 + addr = kmap_atomic(page, KM_DUMP);
6043 + /* memset(addr, 'x', PAGE_SIZE); */
6044 + memcpy(addr, buf, PAGE_SIZE);
6045 + kunmap_atomic(addr, KM_DUMP);
6046 + /* dev->curr_offset += PAGE_SIZE; */
6047 + page = dump_mem_next_page(dump_mdev);
6050 + dump_mdev->curr_map = saved_curr_map;
6051 + dump_mdev->curr_map_offset = saved_map_offset;
6053 + if (dump_mdev->last_used_offset < dev->curr_offset)
6054 + dump_mdev->last_used_offset = dev->curr_offset;
6056 + return (len - n) ? (len - n) : ret ;
6059 +/* dummy - always ready */
6060 +int dump_mem_ready(struct dump_dev *dev, void *buf)
6066 + * Should check for availability of space to write upto the offset
6067 + * affects only the curr_offset; last_offset untouched
6068 + * Keep it simple: Only allow multiples of PAGE_SIZE for now
6070 +int dump_mem_seek(struct dump_dev *dev, loff_t offset)
6072 + struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
6074 + if (offset & (PAGE_SIZE - 1))
6075 + return -EINVAL; /* allow page size units only for now */
6077 + /* Are we exceeding available space ? */
6078 + if (offset > dump_mdev->last_offset) {
6079 + printk("dump_mem_seek failed for offset 0x%llx\n",
6084 + dump_mdev->ddev.curr_offset = offset;
6088 +struct dump_dev_ops dump_memdev_ops = {
6089 + .open = dump_mem_open,
6090 + .release = dump_mem_release,
6091 + .silence = dump_mem_silence,
6092 + .resume = dump_mem_resume,
6093 + .seek = dump_mem_seek,
6094 + .write = dump_mem_write,
6095 + .read = NULL, /* not implemented at the moment */
6096 + .ready = dump_mem_ready
6099 +static struct dump_memdev default_dump_memdev = {
6100 + .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops,
6101 + .device_id = 0x14}
6102 + /* assume the rest of the fields are zeroed by default */
6105 +/* may be overwritten if a previous dump exists */
6106 +struct dump_memdev *dump_memdev = &default_dump_memdev;
6108 Index: linux-2.6.10/drivers/dump/dump_blockdev.c
6109 ===================================================================
6110 --- linux-2.6.10.orig/drivers/dump/dump_blockdev.c 2005-04-07 19:34:21.197950744 +0800
6111 +++ linux-2.6.10/drivers/dump/dump_blockdev.c 2005-04-07 18:13:56.909752248 +0800
6114 + * Implements the dump driver interface for saving a dump to
6115 + * a block device through the kernel's generic low level block i/o
6118 + * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
6119 + * Moved original lkcd kiobuf dump i/o code from dump_base.c
6120 + * to use generic dump device interfaces
6122 + * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
6123 + * Convert dump i/o to directly use bio instead of kiobuf for 2.5
6125 + * Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
6126 + * Rework to new dumpdev.h structures, implement open/close/
6127 + * silence, misc fixes (blocknr removal, bio_add_page usage)
6129 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
6130 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
6131 + * Copyright (C) 2002 International Business Machines Corp.
6133 + * This code is released under version 2 of the GNU GPL.
6136 +#include <linux/types.h>
6137 +#include <linux/proc_fs.h>
6138 +#include <linux/module.h>
6139 +#include <linux/init.h>
6140 +#include <linux/blkdev.h>
6141 +#include <linux/bio.h>
6142 +#include <asm/hardirq.h>
6143 +#include <linux/dump.h>
6144 +#include "dump_methods.h"
6146 +extern void *dump_page_buf;
6148 +/* The end_io callback for dump i/o completion */
6150 +dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
6152 + struct dump_blockdev *dump_bdev;
6154 + if (bio->bi_size) {
6155 + /* some bytes still left to transfer */
6156 + return 1; /* not complete */
6159 + dump_bdev = (struct dump_blockdev *)bio->bi_private;
6161 + printk("IO error while writing the dump, aborting\n");
6164 + dump_bdev->err = error;
6166 + /* no wakeup needed, since caller polls for completion */
6170 +/* Check if the dump bio is already mapped to the specified buffer */
6172 +dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
6175 + struct bio *bio = dev->bio;
6176 + unsigned long bsize = 0;
6178 + if (!bio->bi_vcnt)
6179 + return 0; /* first time, not mapped */
6182 + if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
6183 + return 0; /* buffer not mapped */
6185 + bsize = bdev_hardsect_size(bio->bi_bdev);
6186 + if ((len & (PAGE_SIZE - 1)) || (len & bsize))
6187 + return 0; /* alignment checks needed */
6189 + /* quick check to decide if we need to redo bio_add_page */
6190 + if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
6191 + return 0; /* device may have other restrictions */
6193 + return 1; /* already mapped */
6197 + * Set up the dump bio for i/o from the specified buffer
6198 + * Return value indicates whether the full buffer could be mapped or not
6201 +dump_block_map(struct dump_blockdev *dev, void *buf, int len)
6203 + struct page *page = virt_to_page(buf);
6204 + struct bio *bio = dev->bio;
6205 + unsigned long bsize = 0;
6207 + bio->bi_bdev = dev->bdev;
6208 + bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
6209 + bio->bi_idx = 0; /* reset index to the beginning */
6211 + if (dump_block_map_valid(dev, page, len)) {
6212 + /* already mapped and usable rightaway */
6213 + bio->bi_size = len; /* reset size to the whole bio */
6214 + bio->bi_vcnt = (len + PAGE_SIZE - 1) / PAGE_SIZE; /* Set the proper vector cnt */
6216 + /* need to map the bio */
6219 + bsize = bdev_hardsect_size(bio->bi_bdev);
6221 + /* first a few sanity checks */
6222 + if (len < bsize) {
6223 + printk("map: len less than hardsect size \n");
6227 + if ((unsigned long)buf & bsize) {
6228 + printk("map: not aligned \n");
6232 + /* assume contig. page aligned low mem buffer( no vmalloc) */
6233 + if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
6234 + printk("map: invalid buffer alignment!\n");
6237 + /* finally we can go ahead and map it */
6238 + while (bio->bi_size < len)
6239 + if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
6243 + bio->bi_end_io = dump_bio_end_io;
6244 + bio->bi_private = dev;
6247 + if (bio->bi_size != len) {
6248 + printk("map: bio size = %d not enough for len = %d!\n",
6249 + bio->bi_size, len);
6256 +dump_free_bio(struct bio *bio)
6259 + kfree(bio->bi_io_vec);
6264 + * Prepares the dump device so we can take a dump later.
6265 + * The caller is expected to have filled up the dev_id field in the
6266 + * block dump dev structure.
6268 + * At dump time when dump_block_write() is invoked it will be too
6269 + * late to recover, so as far as possible make sure obvious errors
6270 + * get caught right here and reported back to the caller.
6273 +dump_block_open(struct dump_dev *dev, unsigned long arg)
6275 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
6276 + struct block_device *bdev;
6278 + struct bio_vec *bvec;
6280 + /* make sure this is a valid block device */
6286 + /* Convert it to the new dev_t format */
6287 + arg = MKDEV((arg >> OLDMINORBITS), (arg & OLDMINORMASK));
6289 + /* get a corresponding block_dev struct for this */
6290 + bdev = bdget((dev_t)arg);
6296 + /* get the block device opened */
6297 + if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0))) {
6301 + if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
6303 + printk("Cannot allocate bio\n");
6308 + bio_init(dump_bdev->bio);
6310 + if ((bvec = kmalloc(sizeof(struct bio_vec) *
6311 + (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
6316 + /* assign the new dump dev structure */
6317 + dump_bdev->dev_id = (dev_t)arg;
6318 + dump_bdev->bdev = bdev;
6320 + /* make a note of the limit */
6321 + dump_bdev->limit = bdev->bd_inode->i_size;
6323 + /* now make sure we can map the dump buffer */
6324 + dump_bdev->bio->bi_io_vec = bvec;
6325 + dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
6327 + retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
6328 + DUMP_BUFFER_SIZE);
6331 + printk("open: dump_block_map failed, ret %d\n", retval);
6335 + printk("Block device (%d,%d) successfully configured for dumping\n",
6336 + MAJOR(dump_bdev->dev_id),
6337 + MINOR(dump_bdev->dev_id));
6340 + /* after opening the block device, return */
6343 +err3: dump_free_bio(dump_bdev->bio);
6344 + dump_bdev->bio = NULL;
6345 +err2: if (bdev) blkdev_put(bdev);
6347 +err1: if (bdev) bdput(bdev);
6348 + dump_bdev->bdev = NULL;
6349 +err: return retval;
6353 + * Close the dump device and release associated resources
6354 + * Invoked when unconfiguring the dump device.
6357 +dump_block_release(struct dump_dev *dev)
6359 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
6361 + /* release earlier bdev if present */
6362 + if (dump_bdev->bdev) {
6363 + blkdev_put(dump_bdev->bdev);
6364 + dump_bdev->bdev = NULL;
6367 + dump_free_bio(dump_bdev->bio);
6368 + dump_bdev->bio = NULL;
6375 + * Prepare the dump device for use (silence any ongoing activity
6376 + * and quiesce state) when the system crashes.
6379 +dump_block_silence(struct dump_dev *dev)
6381 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
6382 + struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
6385 + /* If we can't get request queue lock, refuse to take the dump */
6386 + if (!spin_trylock(q->queue_lock))
6389 + ret = elv_queue_empty(q);
6390 + spin_unlock(q->queue_lock);
6392 + /* For now we assume we have the device to ourselves */
6393 + /* Just a quick sanity check */
6395 + /* Warn the user and move on */
6396 + printk(KERN_ALERT "Warning: Non-empty request queue\n");
6397 + printk(KERN_ALERT "I/O requests in flight at dump time\n");
6401 + * Move to a softer level of silencing where no spin_lock_irqs
6402 + * are held on other cpus
6404 + dump_silence_level = DUMP_SOFT_SPIN_CPUS;
6406 + ret = __dump_irq_enable();
6411 + printk("Dumping to block device (%d,%d) on CPU %d ...\n",
6412 + MAJOR(dump_bdev->dev_id), MINOR(dump_bdev->dev_id),
6413 + smp_processor_id());
6419 + * Invoked when dumping is done. This is the time to put things back
6420 + * (i.e. undo the effects of dump_block_silence) so the device is
6421 + * available for normal use.
6424 +dump_block_resume(struct dump_dev *dev)
6426 + __dump_irq_restore();
6432 + * Seek to the specified offset in the dump device.
6433 + * Makes sure this is a valid offset, otherwise returns an error.
6436 +dump_block_seek(struct dump_dev *dev, loff_t off)
6438 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
6439 + loff_t offset = off + dump_bdev->start_offset;
6441 + if (offset & ( PAGE_SIZE - 1)) {
6442 + printk("seek: non-page aligned\n");
6446 + if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
6447 + printk("seek: not sector aligned \n");
6451 + if (offset > dump_bdev->limit) {
6452 + printk("seek: not enough space left on device!\n");
6455 + dev->curr_offset = off;
6460 + * Write out a buffer after checking the device limitations,
6461 + * sector sizes, etc. Assumes the buffer is in directly mapped
6462 + * kernel address space (not vmalloc'ed).
6464 + * Returns: number of bytes written or -ERRNO.
6467 +dump_block_write(struct dump_dev *dev, void *buf,
6468 + unsigned long len)
6470 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
6471 + loff_t offset = dev->curr_offset + dump_bdev->start_offset;
6472 + int retval = -ENOSPC;
6474 + if (offset >= dump_bdev->limit) {
6475 + printk("write: not enough space left on device!\n");
6479 + /* don't write more blocks than our max limit */
6480 + if (offset + len > dump_bdev->limit)
6481 + len = dump_bdev->limit - offset;
6484 + retval = dump_block_map(dump_bdev, buf, len);
6486 + printk("write: dump_block_map failed! err %d\n", retval);
6491 + * Write out the data to disk.
6492 + * Assumes the entire buffer mapped to a single bio, which we can
6493 + * submit and wait for io completion. In the future, may consider
6494 + * increasing the dump buffer size and submitting multiple bios
6495 + * for better throughput.
6497 + dump_bdev->err = -EAGAIN;
6498 + submit_bio(WRITE, dump_bdev->bio);
6500 + dump_bdev->ddev.curr_offset += len;
6507 + * Name: dump_block_ready()
6508 + * Func: check if the last dump i/o is over and ready for next request
6511 +dump_block_ready(struct dump_dev *dev, void *buf)
6513 + struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
6514 + request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
6516 + /* check for io completion */
6517 + if (dump_bdev->err == -EAGAIN) {
6522 + if (dump_bdev->err) {
6523 + printk("dump i/o err\n");
6524 + return dump_bdev->err;
6531 +struct dump_dev_ops dump_blockdev_ops = {
6532 + .open = dump_block_open,
6533 + .release = dump_block_release,
6534 + .silence = dump_block_silence,
6535 + .resume = dump_block_resume,
6536 + .seek = dump_block_seek,
6537 + .write = dump_block_write,
6538 + /* .read not implemented */
6539 + .ready = dump_block_ready
6542 +static struct dump_blockdev default_dump_blockdev = {
6543 + .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
6544 + .curr_offset = 0},
6546 + * leave enough room for the longest swap header possibly
6547 + * written by mkswap (likely the largest page size supported by
6550 + .start_offset = DUMP_HEADER_OFFSET,
6552 + /* assume the rest of the fields are zeroed by default */
6555 +struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
6558 +dump_blockdev_init(void)
6560 + if (dump_register_device(&dump_blockdev->ddev) < 0) {
6561 + printk("block device driver registration failed\n");
6565 + printk("block device driver for LKCD registered\n");
6570 +dump_blockdev_cleanup(void)
6572 + dump_unregister_device(&dump_blockdev->ddev);
6573 + printk("block device driver for LKCD unregistered\n");
6576 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
6577 +MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
6578 +MODULE_LICENSE("GPL");
6580 +module_init(dump_blockdev_init);
6581 +module_exit(dump_blockdev_cleanup);
6582 Index: linux-2.6.10/drivers/dump/Makefile
6583 ===================================================================
6584 --- linux-2.6.10.orig/drivers/dump/Makefile 2005-04-07 19:34:21.197950744 +0800
6585 +++ linux-2.6.10/drivers/dump/Makefile 2005-04-07 18:13:56.921750424 +0800
6588 +# Makefile for the dump device drivers.
6591 +dump-y := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o
6592 +ifeq ($(CONFIG_X86_64),)
6593 +ifeq ($(CONFIG_X86),y)
6594 +dump-$(CONFIG_X86) += dump_i386.o
6597 +dump-$(CONFIG_ARM) += dump_arm.o
6598 +dump-$(CONFIG_PPC64) += dump_ppc64.o
6599 +dump-$(CONFIG_X86_64) += dump_x8664.o
6600 +dump-$(CONFIG_IA64) += dump_ia64.o
6601 +dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o
6602 +dump-objs += $(dump-y)
6604 +obj-$(CONFIG_CRASH_DUMP) += dump.o
6605 +obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o
6606 +obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o
6607 +obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o
6608 +obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o
6609 Index: linux-2.6.10/drivers/dump/dump_scheme.c
6610 ===================================================================
6611 --- linux-2.6.10.orig/drivers/dump/dump_scheme.c 2005-04-07 19:34:21.197950744 +0800
6612 +++ linux-2.6.10/drivers/dump/dump_scheme.c 2005-04-07 18:13:56.916751184 +0800
6615 + * Default single stage dump scheme methods
6617 + * Previously a part of dump_base.c
6619 + * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
6620 + * Split and rewrote LKCD dump scheme to generic dump method
6622 + * Derived from original code created by
6623 + * Matt Robinson <yakker@sourceforge.net>)
6625 + * Contributions from SGI, IBM, HP, MCL, and others.
6627 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
6628 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
6629 + * Copyright (C) 2002 International Business Machines Corp.
6631 + * This code is released under version 2 of the GNU GPL.
6635 + * Implements the default dump scheme, i.e. single-stage gathering and
6636 + * saving of dump data directly to the target device, which operates in
6637 + * a push mode, where the dumping system decides what data it saves
6638 + * taking into account pre-specified dump config options.
6640 + * Aside: The 2-stage dump scheme, where there is a soft-reset between
6641 + * the gathering and saving phases, also reuses some of these
6642 + * default routines (see dump_overlay.c)
6644 +#include <linux/types.h>
6645 +#include <linux/kernel.h>
6646 +#include <linux/mm.h>
6647 +#include <linux/slab.h>
6648 +#include <linux/delay.h>
6649 +#include <linux/reboot.h>
6650 +#include <linux/nmi.h>
6651 +#include <linux/dump.h>
6652 +#include "dump_methods.h"
6654 +extern int panic_timeout; /* time before reboot */
6656 +extern void dump_speedo(int);
6658 +/* Default sequencer used during single stage dumping */
6659 +/* Also invoked during stage 2 of soft-boot based dumping */
6660 +int dump_generic_sequencer(void)
6662 + struct dump_data_filter *filter = dump_config.dumper->filter;
6663 + int pass = 0, err = 0, save = 0;
6664 + int (*action)(unsigned long, unsigned long);
6667 + * We want to save the more critical data areas first in
6668 + * case we run out of space, encounter i/o failures, or get
6669 + * interrupted otherwise and have to give up midway
6670 + * So, run through the passes in increasing order
6672 + for (;filter->selector; filter++, pass++)
6674 + /* Assumes passes are exclusive (even across dumpers) */
6675 + /* Requires care when coding the selection functions */
6676 + if ((save = filter->level_mask & dump_config.level))
6677 + action = dump_save_data;
6679 + action = dump_skip_data;
6681 + if ((err = dump_iterator(pass, action, filter)) < 0)
6684 + printk("\n %d dump pages %s of %d each in pass %d\n",
6685 + err, save ? "saved" : "skipped", (int)DUMP_PAGE_SIZE, pass);
6689 + return (err < 0) ? err : 0;
6692 +static inline struct page *dump_get_page(loff_t loc)
6695 + unsigned long page_index = loc >> PAGE_SHIFT;
6697 + /* todo: complete this to account for ia64/discontig mem */
6698 + /* todo: and to check for validity, ram page, no i/o mem etc */
6699 + /* need to use pfn/physaddr equiv of kern_addr_valid */
6702 + * On ARM/XScale system, the physical address starts from
6703 + * PHYS_OFFSET, and it maybe the situation that PHYS_OFFSET != 0.
6704 + * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the
6705 + * page index starts from PHYS_PFN_OFFSET. When configuring
6706 + * filter, filter->start is assigned to 0 in dump_generic_configure.
6707 + * Here we want to adjust it by adding PHYS_PFN_OFFSET to it!
6710 + page_index += PHYS_PFN_OFFSET;
6712 + if (__dump_page_valid(page_index))
6713 + return pfn_to_page(page_index);
6719 +/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
6720 +/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
6721 +int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
6722 + struct dump_data_filter *filter)
6724 + /* Todo : fix unit, type */
6725 + loff_t loc, start, end;
6726 + int i, count = 0, err = 0;
6727 + struct page *page;
6729 + /* Todo: Add membanks code */
6730 + /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
6732 + for (i = 0; i < filter->num_mbanks; i++) {
6733 + start = filter->start[i];
6734 + end = filter->end[i];
6735 + for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
6736 + dump_config.dumper->curr_loc = loc;
6737 + page = dump_get_page(loc);
6738 + if (page && filter->selector(pass,
6739 + (unsigned long) page, DUMP_PAGE_SIZE)) {
6740 + if ((err = action((unsigned long)page,
6741 + DUMP_PAGE_SIZE))) {
6742 + printk("dump_page_iterator: err %d for "
6743 + "loc 0x%llx, in pass %d\n",
6745 + return err ? err : count;
6752 + return err ? err : count;
6756 + * Base function that saves the selected block of data in the dump
6757 + * Action taken when iterator decides that data needs to be saved
6759 +int dump_generic_save_data(unsigned long loc, unsigned long sz)
6762 + void *dump_buf = dump_config.dumper->dump_buf;
6763 + int left, bytes, ret;
6765 + if ((ret = dump_add_data(loc, sz))) {
6768 + buf = dump_config.dumper->curr_buf;
6770 + /* If we've filled up the buffer write it out */
6771 + if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
6772 + bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
6773 + if (bytes < DUMP_BUFFER_SIZE) {
6774 + printk("dump_write_buffer failed %d\n", bytes);
6775 + return bytes ? -ENOSPC : bytes;
6780 + /* -- A few chores to do from time to time -- */
6781 + dump_config.dumper->count++;
6783 + if (!(dump_config.dumper->count & 0x3f)) {
6784 + /* Update the header every one in a while */
6785 + memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
6786 + if ((ret = dump_update_header()) < 0) {
6787 + /* issue warning */
6792 + touch_nmi_watchdog();
6793 + } else if (!(dump_config.dumper->count & 0x7)) {
6794 + /* Show progress so the user knows we aren't hung */
6795 + dump_speedo(dump_config.dumper->count >> 3);
6797 + /* Todo: Touch/Refresh watchdog */
6799 + /* --- Done with periodic chores -- */
6802 + * extra bit of copying to simplify verification
6803 + * in the second kernel boot based scheme
6805 + memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
6806 + DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
6808 + /* now adjust the leftover bits back to the top of the page */
6809 + /* this case would not arise during stage 2 (passthru) */
6810 + memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
6812 + memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
6814 + buf -= DUMP_BUFFER_SIZE;
6815 + dump_config.dumper->curr_buf = buf;
6821 +int dump_generic_skip_data(unsigned long loc, unsigned long sz)
6823 + /* dummy by default */
6828 + * Common low level routine to write a buffer to current dump device
6829 + * Expects checks for space etc to have been taken care of by the caller
6830 + * Operates serially at the moment for simplicity.
6831 + * TBD/Todo: Consider batching for improved throughput
6833 +int dump_ll_write(void *buf, unsigned long len)
6835 + long transferred = 0, last_transfer = 0;
6838 + /* make sure device is ready */
6839 + while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
6841 + printk("dump_dev_ready failed !err %d\n", ret);
6846 + if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
6847 + ret = last_transfer;
6848 + printk("dump_dev_write failed !err %d\n",
6852 + /* wait till complete */
6853 + while ((ret = dump_dev_ready(buf)) == -EAGAIN)
6857 + printk("i/o failed !err %d\n", ret);
6861 + len -= last_transfer;
6862 + buf += last_transfer;
6863 + transferred += last_transfer;
6865 + return (ret < 0) ? ret : transferred;
6868 +/* default writeout routine for single dump device */
6869 +/* writes out the dump data ensuring enough space is left for the end marker */
6870 +int dump_generic_write_buffer(void *buf, unsigned long len)
6875 + /* check for space */
6876 + if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
6877 + 2*DUMP_BUFFER_SIZE)) < 0) {
6878 + printk("dump_write_buffer: insuff space after offset 0x%llx\n",
6879 + dump_config.dumper->curr_offset);
6882 + /* alignment check would happen as a side effect of this */
6883 + if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
6886 + written = dump_ll_write(buf, len);
6890 + if (written < len)
6891 + written = written ? -ENOSPC : written;
6893 + dump_config.dumper->curr_offset += len;
6898 +int dump_generic_configure(unsigned long devid)
6900 + struct dump_dev *dev = dump_config.dumper->dev;
6901 + struct dump_data_filter *filter;
6905 + /* Allocate the dump buffer and initialize dumper state */
6906 + /* Assume that we get aligned addresses */
6907 + if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
6910 + if ((unsigned long)buf & (PAGE_SIZE - 1)) {
6911 + /* sanity check for page aligned address */
6912 + dump_free_mem(buf);
6913 + return -ENOMEM; /* fixme: better error code */
6916 + /* Initialize the rest of the fields */
6917 + dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
6920 + /* Open the dump device */
6924 + if ((ret = dev->ops->open(dev, devid))) {
6928 + /* Initialise the memory ranges in the dump filter */
6929 + for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
6930 + if (!filter->start[0] && !filter->end[0]) {
6933 + for_each_pgdat(pgdat) {
6934 + filter->start[i] =
6935 + (loff_t)pgdat->node_start_pfn << PAGE_SHIFT;
6937 + (loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT;
6940 + filter->num_mbanks = i;
6947 +int dump_generic_unconfigure(void)
6949 + struct dump_dev *dev = dump_config.dumper->dev;
6950 + void *buf = dump_config.dumper->dump_buf;
6953 + pr_debug("Generic unconfigure\n");
6954 + /* Close the dump device */
6955 + if (dev && (ret = dev->ops->release(dev)))
6958 + printk("Closed dump device\n");
6961 + dump_free_mem((buf - DUMP_PAGE_SIZE));
6963 + dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
6964 + pr_debug("Released dump buffer\n");
6969 +#ifdef CONFIG_DISCONTIGMEM
6971 +void dump_reconfigure_mbanks(void)
6974 + loff_t start, end, loc, loc_end;
6976 + struct dump_data_filter *filter = dump_config.dumper->filter;
6978 + for_each_pgdat(pgdat) {
6980 + start = (loff_t)(pgdat->node_start_pfn << PAGE_SHIFT);
6981 + end = ((loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT);
6982 + for(loc = start; loc < end; loc += (DUMP_PAGE_SIZE)) {
6984 + if(!(__dump_page_valid(loc >> PAGE_SHIFT)))
6987 + /* We found a valid page. This is the start */
6988 + filter->start[i] = loc;
6990 + /* Now loop here till you find the end */
6991 + for(loc_end = loc; loc_end < end; loc_end += (DUMP_PAGE_SIZE)) {
6993 + if(__dump_page_valid(loc_end >> PAGE_SHIFT)) {
6994 + /* This page could very well be the last page */
6995 + filter->end[i] = loc_end;
7004 + filter->num_mbanks = i;
7006 + /* Propagate memory bank information to other filters */
7007 + for (filter = dump_config.dumper->filter, filter++ ;filter->selector; filter++) {
7008 + for(i = 0; i < dump_config.dumper->filter->num_mbanks; i++) {
7009 + filter->start[i] = dump_config.dumper->filter->start[i];
7010 + filter->end[i] = dump_config.dumper->filter->end[i];
7011 + filter->num_mbanks = dump_config.dumper->filter->num_mbanks;
7017 +/* Set up the default dump scheme */
7019 +struct dump_scheme_ops dump_scheme_singlestage_ops = {
7020 + .configure = dump_generic_configure,
7021 + .unconfigure = dump_generic_unconfigure,
7022 + .sequencer = dump_generic_sequencer,
7023 + .iterator = dump_page_iterator,
7024 + .save_data = dump_generic_save_data,
7025 + .skip_data = dump_generic_skip_data,
7026 + .write_buffer = dump_generic_write_buffer,
7029 +struct dump_scheme dump_scheme_singlestage = {
7030 + .name = "single-stage",
7031 + .ops = &dump_scheme_singlestage_ops
7034 +/* The single stage dumper comprising all these */
7035 +struct dumper dumper_singlestage = {
7036 + .name = "single-stage",
7037 + .scheme = &dump_scheme_singlestage,
7038 + .fmt = &dump_fmt_lcrash,
7039 + .compress = &dump_none_compression,
7040 + .filter = dump_filter_table,
7044 Index: linux-2.6.10/drivers/Makefile
7045 ===================================================================
7046 --- linux-2.6.10.orig/drivers/Makefile 2004-12-25 05:36:00.000000000 +0800
7047 +++ linux-2.6.10/drivers/Makefile 2005-04-07 18:13:56.936748144 +0800
7049 obj-$(CONFIG_CPU_FREQ) += cpufreq/
7050 obj-$(CONFIG_MMC) += mmc/
7052 +obj-$(CONFIG_CRASH_DUMP) += dump/
7053 Index: linux-2.6.10/drivers/block/ll_rw_blk.c
7054 ===================================================================
7055 --- linux-2.6.10.orig/drivers/block/ll_rw_blk.c 2005-04-07 14:55:41.000000000 +0800
7056 +++ linux-2.6.10/drivers/block/ll_rw_blk.c 2005-04-07 18:17:16.782366992 +0800
7058 #include <linux/slab.h>
7059 #include <linux/swap.h>
7060 #include <linux/writeback.h>
7061 +#include <linux/dump.h>
7064 * for max sense size
7065 @@ -2624,13 +2625,15 @@
7066 * bi_sector for remaps as it sees fit. So the values of these fields
7067 * should NOT be depended on after the call to generic_make_request.
7069 +extern unsigned long dump_oncpu;
7070 void generic_make_request(struct bio *bio)
7074 int ret, nr_sectors = bio_sectors(bio);
7077 + if (likely(!dump_oncpu))
7079 /* Test device or partition size, when known. */
7080 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
7082 Index: linux-2.6.10/mm/bootmem.c
7083 ===================================================================
7084 --- linux-2.6.10.orig/mm/bootmem.c 2004-12-25 05:34:30.000000000 +0800
7085 +++ linux-2.6.10/mm/bootmem.c 2005-04-07 18:13:56.780771856 +0800
7088 unsigned long max_low_pfn;
7089 unsigned long min_low_pfn;
7090 +EXPORT_SYMBOL(min_low_pfn);
7091 unsigned long max_pfn;
7093 EXPORT_SYMBOL(max_pfn); /* This is exported so
7095 if (j + 16 < BITS_PER_LONG)
7096 prefetchw(page + j + 16);
7097 __ClearPageReserved(page + j);
7098 + set_page_count(page + j, 1);
7100 __free_pages(page, ffs(BITS_PER_LONG)-1);
7102 Index: linux-2.6.10/mm/page_alloc.c
7103 ===================================================================
7104 --- linux-2.6.10.orig/mm/page_alloc.c 2005-04-06 23:38:35.000000000 +0800
7105 +++ linux-2.6.10/mm/page_alloc.c 2005-04-07 18:13:56.794769728 +0800
7107 EXPORT_SYMBOL(totalram_pages);
7108 EXPORT_SYMBOL(nr_swap_pages);
7110 +#ifdef CONFIG_CRASH_DUMP_MODULE
7111 +/* This symbol has to be exported to use 'for_each_pgdat' macro by modules. */
7112 +EXPORT_SYMBOL(pgdat_list);
7116 * Used by page_zone() to look up the address of the struct zone whose
7117 * id is encoded in the upper bits of page->flags
7118 @@ -281,8 +286,11 @@
7119 arch_free_page(page, order);
7121 mod_page_state(pgfree, 1 << order);
7122 - for (i = 0 ; i < (1 << order) ; ++i)
7123 + for (i = 0 ; i < (1 << order) ; ++i){
7125 + __put_page(page + i);
7126 free_pages_check(__FUNCTION__, page + i);
7128 list_add(&page->lru, &list);
7129 kernel_map_pages(page, 1<<order, 0);
7130 free_pages_bulk(page_zone(page), 1, &list, order);
7131 @@ -322,44 +330,34 @@
7135 -static inline void set_page_refs(struct page *page, int order)
7138 - set_page_count(page, 1);
7143 - * We need to reference all the pages for this order, otherwise if
7144 - * anyone accesses one of the pages with (get/put) it will be freed.
7146 - for (i = 0; i < (1 << order); i++)
7147 - set_page_count(page+i, 1);
7148 -#endif /* CONFIG_MMU */
7152 * This page is about to be returned from the page allocator
7154 -static void prep_new_page(struct page *page, int order)
7155 +static void prep_new_page(struct page *_page, int order)
7157 - if (page->mapping || page_mapped(page) ||
7165 - 1 << PG_swapcache |
7166 - 1 << PG_writeback )))
7169 + for(i = 0; i < (1 << order); i++){
7170 + struct page *page = _page + i;
7172 + if (page->mapping || page_mapped(page) ||
7180 + 1 << PG_swapcache |
7181 + 1 << PG_writeback )))
7182 bad_page(__FUNCTION__, page);
7184 - page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
7185 - 1 << PG_referenced | 1 << PG_arch_1 |
7186 - 1 << PG_checked | 1 << PG_mappedtodisk);
7187 - page->private = 0;
7188 - set_page_refs(page, order);
7189 + page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
7190 + 1 << PG_referenced | 1 << PG_arch_1 |
7191 + 1 << PG_checked | 1 << PG_mappedtodisk);
7192 + page->private = 0;
7193 + set_page_count(page, 1);
7198 Index: linux-2.6.10/arch/ia64/Kconfig.debug
7199 ===================================================================
7200 --- linux-2.6.10.orig/arch/ia64/Kconfig.debug 2004-12-25 05:34:32.000000000 +0800
7201 +++ linux-2.6.10/arch/ia64/Kconfig.debug 2005-04-07 18:13:56.459820648 +0800
7204 source "lib/Kconfig.debug"
7207 + tristate "Crash dump support (EXPERIMENTAL)"
7208 + depends on EXPERIMENTAL
7211 + Say Y here to enable saving an image of system memory when a panic
7212 + or other error occurs. Dumps can also be forced with the SysRq+d
7213 + key if MAGIC_SYSRQ is enabled.
7217 + depends on CRASH_DUMP
7220 +config CRASH_DUMP_BLOCKDEV
7221 + tristate "Crash dump block device driver"
7222 + depends on CRASH_DUMP
7224 + Say Y to allow saving crash dumps directly to a disk device.
7226 +config CRASH_DUMP_NETDEV
7227 + tristate "Crash dump network device driver"
7228 + depends on CRASH_DUMP
7230 + Say Y to allow saving crash dumps over a network device.
7232 +config CRASH_DUMP_MEMDEV
7233 + bool "Crash dump staged memory driver"
7234 + depends on CRASH_DUMP
7236 + Say Y to allow intermediate saving crash dumps in spare
7237 + memory pages which would then be written out to disk
7240 +config CRASH_DUMP_SOFTBOOT
7241 + bool "Save crash dump across a soft reboot"
7242 + depends on CRASH_DUMP_MEMDEV
7244 + Say Y to allow a crash dump to be preserved in memory
7245 + pages across a soft reboot and written out to disk
7246 + thereafter. For this to work, CRASH_DUMP must be
7247 + configured as part of the kernel (not as a module).
7249 +config CRASH_DUMP_COMPRESS_RLE
7250 + tristate "Crash dump RLE compression"
7251 + depends on CRASH_DUMP
7253 + Say Y to allow saving dumps with Run Length Encoding compression.
7255 +config CRASH_DUMP_COMPRESS_GZIP
7256 + tristate "Crash dump GZIP compression"
7257 + select ZLIB_INFLATE
7258 + select ZLIB_DEFLATE
7259 + depends on CRASH_DUMP
7261 + Say Y to allow saving dumps with Gnu Zip compression.
7266 prompt "Physical memory granularity"
7267 default IA64_GRANULE_64MB
7268 Index: linux-2.6.10/arch/ia64/kernel/irq.c
7269 ===================================================================
7270 --- linux-2.6.10.orig/arch/ia64/kernel/irq.c 2004-12-25 05:35:27.000000000 +0800
7271 +++ linux-2.6.10/arch/ia64/kernel/irq.c 2005-04-07 18:13:56.501814264 +0800
7272 @@ -933,7 +933,11 @@
7274 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
7276 +#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
7277 +cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
7279 static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
7282 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
7284 Index: linux-2.6.10/arch/ia64/kernel/smp.c
7285 ===================================================================
7286 --- linux-2.6.10.orig/arch/ia64/kernel/smp.c 2004-12-25 05:35:40.000000000 +0800
7287 +++ linux-2.6.10/arch/ia64/kernel/smp.c 2005-04-07 18:13:56.504813808 +0800
7289 #include <linux/efi.h>
7290 #include <linux/bitops.h>
7292 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
7293 +#include <linux/dump.h>
7296 #include <asm/atomic.h>
7297 #include <asm/current.h>
7298 #include <asm/delay.h>
7300 #define IPI_CALL_FUNC 0
7301 #define IPI_CPU_STOP 1
7303 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
7304 +#define IPI_DUMP_INTERRUPT 4
7305 + int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
7308 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
7309 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
7312 spin_unlock_irq(&call_lock);
7317 +/*changed static void stop_this_cpu -> void stop_this_cpu */
7319 stop_this_cpu (void)
7322 @@ -155,6 +166,15 @@
7326 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
7327 + case IPI_DUMP_INTERRUPT:
7328 + if( dump_ipi_function_ptr != NULL ) {
7329 + if (!dump_ipi_function_ptr(regs)) {
7330 + printk(KERN_ERR "(*dump_ipi_function_ptr)(): rejected IPI_DUMP_INTERRUPT\n");
7337 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
7338 @@ -369,9 +389,17 @@
7340 send_IPI_allbutself(IPI_CPU_STOP);
7342 +EXPORT_SYMBOL(smp_send_stop);
7345 setup_profiling_timer (unsigned int multiplier)
7350 +#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
7351 +void dump_send_ipi(void)
7353 + send_IPI_allbutself(IPI_DUMP_INTERRUPT);
7356 Index: linux-2.6.10/arch/ia64/kernel/traps.c
7357 ===================================================================
7358 --- linux-2.6.10.orig/arch/ia64/kernel/traps.c 2004-12-25 05:35:39.000000000 +0800
7359 +++ linux-2.6.10/arch/ia64/kernel/traps.c 2005-04-07 18:13:56.475818216 +0800
7361 #include <asm/intrinsics.h>
7362 #include <asm/processor.h>
7363 #include <asm/uaccess.h>
7364 +#include <asm/nmi.h>
7365 +#include <linux/dump.h>
7367 extern spinlock_t timerlist_lock;
7370 printk("%s[%d]: %s %ld [%d]\n",
7371 current->comm, current->pid, str, err, ++die_counter);
7373 + dump((char *)str, regs);
7375 printk(KERN_ERR "Recursive die() failure, output suppressed\n");
7377 Index: linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c
7378 ===================================================================
7379 --- linux-2.6.10.orig/arch/ia64/kernel/ia64_ksyms.c 2005-04-06 23:38:35.000000000 +0800
7380 +++ linux-2.6.10/arch/ia64/kernel/ia64_ksyms.c 2005-04-07 18:13:56.485816696 +0800
7383 #include <linux/config.h>
7384 #include <linux/module.h>
7386 #include <linux/string.h>
7387 EXPORT_SYMBOL(memset);
7388 EXPORT_SYMBOL(memchr);
7390 EXPORT_SYMBOL(strstr);
7391 EXPORT_SYMBOL(strpbrk);
7393 +#include <linux/syscalls.h>
7394 +EXPORT_SYMBOL(sys_ioctl);
7396 #include <asm/checksum.h>
7397 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
7399 @@ -125,3 +127,21 @@
7404 +#include <asm/hw_irq.h>
7406 +#ifdef CONFIG_CRASH_DUMP_MODULE
7408 +extern irq_desc_t _irq_desc[NR_IRQS];
7409 +extern cpumask_t irq_affinity[NR_IRQS];
7410 +extern void stop_this_cpu(void *);
7411 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
7412 +extern void dump_send_ipi(void);
7413 +EXPORT_SYMBOL(_irq_desc);
7414 +EXPORT_SYMBOL(irq_affinity);
7415 +EXPORT_SYMBOL(stop_this_cpu);
7416 +EXPORT_SYMBOL(dump_send_ipi);
7417 +EXPORT_SYMBOL(dump_ipi_function_ptr);
7421 Index: linux-2.6.10/arch/i386/mm/init.c
7422 ===================================================================
7423 --- linux-2.6.10.orig/arch/i386/mm/init.c 2005-04-07 18:13:54.785075248 +0800
7424 +++ linux-2.6.10/arch/i386/mm/init.c 2005-04-07 18:13:56.405828856 +0800
7425 @@ -244,6 +244,13 @@
7429 +/* To enable modules to check if a page is in RAM */
7430 +int pfn_is_ram(unsigned long pfn)
7432 + return (page_is_ram(pfn));
7436 #ifdef CONFIG_HIGHMEM
7439 Index: linux-2.6.10/arch/i386/Kconfig.debug
7440 ===================================================================
7441 --- linux-2.6.10.orig/arch/i386/Kconfig.debug 2005-04-07 00:35:34.000000000 +0800
7442 +++ linux-2.6.10/arch/i386/Kconfig.debug 2005-04-07 18:13:56.403829160 +0800
7445 source "lib/Kconfig.debug"
7448 + tristate "Crash dump support (EXPERIMENTAL)"
7449 + depends on EXPERIMENTAL
7452 + Say Y here to enable saving an image of system memory when a panic
7453 + or other error occurs. Dumps can also be forced with the SysRq+d
7454 + key if MAGIC_SYSRQ is enabled.
7458 + depends on CRASH_DUMP
7461 +config CRASH_DUMP_BLOCKDEV
7462 + tristate "Crash dump block device driver"
7463 + depends on CRASH_DUMP
7465 + Say Y to allow saving crash dumps directly to a disk device.
7467 +config CRASH_DUMP_NETDEV
7468 + tristate "Crash dump network device driver"
7469 + depends on CRASH_DUMP
7471 + Say Y to allow saving crash dumps over a network device.
7473 +config CRASH_DUMP_MEMDEV
7474 + bool "Crash dump staged memory driver"
7475 + depends on CRASH_DUMP
7477 + Say Y to allow intermediate saving crash dumps in spare
7478 + memory pages which would then be written out to disk
7481 +config CRASH_DUMP_SOFTBOOT
7482 + bool "Save crash dump across a soft reboot"
7483 + depends on CRASH_DUMP_MEMDEV
7485 + Say Y to allow a crash dump to be preserved in memory
7486 + pages across a soft reboot and written out to disk
7487 + thereafter. For this to work, CRASH_DUMP must be
7488 + configured as part of the kernel (not as a module).
7490 +config CRASH_DUMP_COMPRESS_RLE
7491 + tristate "Crash dump RLE compression"
7492 + depends on CRASH_DUMP
7494 + Say Y to allow saving dumps with Run Length Encoding compression.
7496 +config CRASH_DUMP_COMPRESS_GZIP
7497 + tristate "Crash dump GZIP compression"
7498 + select ZLIB_INFLATE
7499 + select ZLIB_DEFLATE
7500 + depends on CRASH_DUMP
7502 + Say Y to allow saving dumps with Gnu Zip compression.
7505 bool "Early printk" if EMBEDDED
7508 with klogd/syslogd or the X server. You should normally N here,
7509 unless you want to debug such a crash.
7511 -config DEBUG_STACKOVERFLOW
7512 - bool "Check for stack overflows"
7513 +config DEBUG_STACKOVERFLOW
7514 + bool "Check for stack overflows"
7515 depends on DEBUG_KERNEL
7518 Index: linux-2.6.10/arch/i386/kernel/smp.c
7519 ===================================================================
7520 --- linux-2.6.10.orig/arch/i386/kernel/smp.c 2005-04-07 18:13:54.752080264 +0800
7521 +++ linux-2.6.10/arch/i386/kernel/smp.c 2005-04-07 18:13:56.428825360 +0800
7523 #include <linux/mc146818rtc.h>
7524 #include <linux/cache.h>
7525 #include <linux/interrupt.h>
7526 +#include <linux/dump.h>
7528 #include <asm/mtrr.h>
7529 #include <asm/tlbflush.h>
7530 @@ -143,6 +144,13 @@
7532 cfg = __prepare_ICR(shortcut, vector);
7534 + if (vector == DUMP_VECTOR) {
7536 + * Setup DUMP IPI to be delivered as an NMI
7538 + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
7542 * Send the IPI. The write to APIC_ICR fires this off.
7544 @@ -220,6 +228,13 @@
7547 cfg = __prepare_ICR(0, vector);
7549 + if (vector == DUMP_VECTOR) {
7551 + * Setup DUMP IPI to be delivered as an NMI
7553 + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
7557 * Send the IPI. The write to APIC_ICR fires this off.
7558 @@ -506,6 +521,11 @@
7560 static struct call_data_struct * call_data;
7562 +void dump_send_ipi(void)
7564 + send_IPI_allbutself(DUMP_VECTOR);
7568 * this function sends a 'generic call function' IPI to all other CPUs
7574 -static void stop_this_cpu (void * dummy)
7575 +void stop_this_cpu (void * dummy)
7580 atomic_inc(&call_data->finished);
7584 Index: linux-2.6.10/arch/i386/kernel/traps.c
7585 ===================================================================
7586 --- linux-2.6.10.orig/arch/i386/kernel/traps.c 2005-04-07 18:13:54.770077528 +0800
7587 +++ linux-2.6.10/arch/i386/kernel/traps.c 2005-04-07 18:13:56.406828704 +0800
7589 #include <linux/ptrace.h>
7590 #include <linux/utsname.h>
7591 #include <linux/kprobes.h>
7592 +#include <linux/dump.h>
7595 #include <linux/ioport.h>
7598 die.lock_owner = -1;
7599 spin_unlock_irq(&die.lock);
7600 + dump((char *)str, regs);
7602 panic("Fatal exception in interrupt");
7605 printk(" on CPU%d, eip %08lx, registers:\n",
7606 smp_processor_id(), regs->eip);
7607 show_registers(regs);
7608 + dump((char *)msg, regs);
7609 printk("console shuts up ...\n");
7611 spin_unlock(&nmi_print_lock);
7612 Index: linux-2.6.10/arch/i386/kernel/setup.c
7613 ===================================================================
7614 --- linux-2.6.10.orig/arch/i386/kernel/setup.c 2004-12-25 05:34:45.000000000 +0800
7615 +++ linux-2.6.10/arch/i386/kernel/setup.c 2005-04-07 18:13:56.427825512 +0800
7616 @@ -662,6 +662,10 @@
7618 #define LOWMEMSIZE() (0x9f000)
7620 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
7621 +unsigned long crashdump_addr = 0xdeadbeef;
7624 static void __init parse_cmdline_early (char ** cmdline_p)
7626 char c = ' ', *to = command_line, *from = saved_command_line;
7627 @@ -823,6 +827,11 @@
7628 if (c == ' ' && !memcmp(from, "vmalloc=", 8))
7629 __VMALLOC_RESERVE = memparse(from+8, &from);
7631 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
7632 + if (c == ' ' && !memcmp(from, "crashdump=", 10))
7633 + crashdump_addr = memparse(from+10, &from);
7639 @@ -1288,6 +1297,10 @@
7641 static char * __init machine_specific_memory_setup(void);
7643 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
7644 +extern void crashdump_reserve(void);
7648 * Determine if we were loaded by an EFI loader. If so, then we have also been
7649 * passed the efi memmap, systab, etc., so we should use these data structures
7650 @@ -1393,6 +1406,10 @@
7654 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
7655 + crashdump_reserve(); /* Preserve crash dump state from prev boot */
7660 #ifdef CONFIG_X86_GENERICARCH
7661 Index: linux-2.6.10/arch/i386/kernel/i386_ksyms.c
7662 ===================================================================
7663 --- linux-2.6.10.orig/arch/i386/kernel/i386_ksyms.c 2004-12-25 05:35:40.000000000 +0800
7664 +++ linux-2.6.10/arch/i386/kernel/i386_ksyms.c 2005-04-07 18:13:56.429825208 +0800
7666 #include <linux/tty.h>
7667 #include <linux/highmem.h>
7668 #include <linux/time.h>
7669 +#include <linux/nmi.h>
7671 #include <asm/semaphore.h>
7672 #include <asm/processor.h>
7674 #include <asm/tlbflush.h>
7675 #include <asm/nmi.h>
7676 #include <asm/ist.h>
7677 +#include <asm/e820.h>
7678 #include <asm/kdebug.h>
7680 extern void dump_thread(struct pt_regs *, struct user *);
7681 @@ -192,3 +194,20 @@
7684 EXPORT_SYMBOL(csum_partial);
7686 +#ifdef CONFIG_CRASH_DUMP_MODULE
7688 +extern irq_desc_t irq_desc[NR_IRQS];
7689 +extern cpumask_t irq_affinity[NR_IRQS];
7690 +extern void stop_this_cpu(void *);
7691 +EXPORT_SYMBOL(irq_desc);
7692 +EXPORT_SYMBOL(irq_affinity);
7693 +EXPORT_SYMBOL(stop_this_cpu);
7694 +EXPORT_SYMBOL(dump_send_ipi);
7696 +extern int pfn_is_ram(unsigned long);
7697 +EXPORT_SYMBOL(pfn_is_ram);
7698 +#ifdef ARCH_HAS_NMI_WATCHDOG
7699 +EXPORT_SYMBOL(touch_nmi_watchdog);
7702 Index: linux-2.6.10/arch/ppc64/Kconfig.debug
7703 ===================================================================
7704 --- linux-2.6.10.orig/arch/ppc64/Kconfig.debug 2004-12-25 05:35:27.000000000 +0800
7705 +++ linux-2.6.10/arch/ppc64/Kconfig.debug 2005-04-07 18:13:56.521811224 +0800
7708 source "lib/Kconfig.debug"
7712 + depends on CRASH_DUMP
7716 + tristate "Crash dump support"
7719 + Say Y here to enable saving an image of system memory when a panic
7720 + or other error occurs. Dumps can also be forced with the SysRq+d
7721 + key if MAGIC_SYSRQ is enabled.
7723 +config CRASH_DUMP_BLOCKDEV
7724 + tristate "Crash dump block device driver"
7725 + depends on CRASH_DUMP
7727 + Say Y to allow saving crash dumps directly to a disk device.
7729 +config CRASH_DUMP_NETDEV
7730 + tristate "Crash dump network device driver"
7731 + depends on CRASH_DUMP
7733 + Say Y to allow saving crash dumps over a network device.
7735 +config CRASH_DUMP_MEMDEV
7736 + bool "Crash dump staged memory driver"
7737 + depends on CRASH_DUMP
7739 + Say Y to allow intermediate saving crash dumps in spare
7740 + memory pages which would then be written out to disk
7741 + later. Need 'kexec' support for this to work.
7742 + **** Not supported at present ****
7744 +config CRASH_DUMP_SOFTBOOT
7745 + bool "Save crash dump across a soft reboot"
7747 + Say Y to allow a crash dump to be preserved in memory
7748 + pages across a soft reboot and written out to disk
7749 + thereafter. For this to work, CRASH_DUMP must be
7750 + configured as part of the kernel (not as a module).
7751 + Need 'kexec' support to use this option.
7752 + **** Not supported at present ****
7754 +config CRASH_DUMP_COMPRESS_RLE
7755 + tristate "Crash dump RLE compression"
7756 + depends on CRASH_DUMP
7758 + Say Y to allow saving dumps with Run Length Encoding compression.
7760 +config CRASH_DUMP_COMPRESS_GZIP
7761 + tristate "Crash dump GZIP compression"
7762 + select ZLIB_INFLATE
7763 + select ZLIB_DEFLATE
7764 + depends on CRASH_DUMP
7766 + Say Y to allow saving dumps with Gnu Zip compression.
7768 config DEBUG_STACKOVERFLOW
7769 bool "Check for stack overflows"
7770 depends on DEBUG_KERNEL
7771 Index: linux-2.6.10/arch/ppc64/kernel/smp.c
7772 ===================================================================
7773 --- linux-2.6.10.orig/arch/ppc64/kernel/smp.c 2004-12-25 05:35:23.000000000 +0800
7774 +++ linux-2.6.10/arch/ppc64/kernel/smp.c 2005-04-07 18:13:56.560805296 +0800
7776 #include <linux/spinlock.h>
7777 #include <linux/cache.h>
7778 #include <linux/err.h>
7779 +#include <linux/dump.h>
7780 #include <linux/sysdev.h>
7781 #include <linux/cpu.h>
7784 struct smp_ops_t *smp_ops;
7786 static volatile unsigned int cpu_callin_map[NR_CPUS];
7787 +static int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
7789 extern unsigned char stab_array[];
7791 @@ -177,9 +179,16 @@
7795 -#ifdef CONFIG_DEBUGGER
7796 +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
7797 + || defined(CONFIG_CRASH_DUMP_MODULE)
7798 case PPC_MSG_DEBUGGER_BREAK:
7799 - debugger_ipi(regs);
7800 + if (dump_ipi_function_ptr) {
7801 + dump_ipi_function_ptr(regs);
7803 +#ifdef CONFIG_DEBUGGER
7805 + debugger_ipi(regs);
7810 @@ -201,7 +210,16 @@
7814 -static void stop_this_cpu(void *dummy)
7815 +void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *))
7817 + dump_ipi_function_ptr = dump_ipi_callback;
7818 + if (dump_ipi_callback) {
7820 + smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
7824 +void stop_this_cpu(void *dummy)
7826 local_irq_disable();
7828 Index: linux-2.6.10/arch/ppc64/kernel/traps.c
7829 ===================================================================
7830 --- linux-2.6.10.orig/arch/ppc64/kernel/traps.c 2004-12-25 05:34:47.000000000 +0800
7831 +++ linux-2.6.10/arch/ppc64/kernel/traps.c 2005-04-07 18:13:56.534809248 +0800
7833 #include <linux/interrupt.h>
7834 #include <linux/init.h>
7835 #include <linux/module.h>
7836 +#include <linux/dump.h>
7838 #include <asm/pgtable.h>
7839 #include <asm/uaccess.h>
7844 + dump((char *)str, regs);
7846 spin_unlock_irq(&die_lock);
7848 Index: linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c
7849 ===================================================================
7850 --- linux-2.6.10.orig/arch/ppc64/kernel/ppc_ksyms.c 2004-12-25 05:34:26.000000000 +0800
7851 +++ linux-2.6.10/arch/ppc64/kernel/ppc_ksyms.c 2005-04-07 18:13:56.535809096 +0800
7852 @@ -159,6 +159,17 @@
7853 EXPORT_SYMBOL(get_wchan);
7854 EXPORT_SYMBOL(console_drivers);
7856 +#ifdef CONFIG_CRASH_DUMP_MODULE
7857 +extern int dump_page_is_ram(unsigned long);
7858 +EXPORT_SYMBOL(dump_page_is_ram);
7860 +EXPORT_SYMBOL(irq_affinity);
7861 +extern void stop_this_cpu(void *);
7862 +EXPORT_SYMBOL(stop_this_cpu);
7863 +EXPORT_SYMBOL(dump_send_ipi);
7867 EXPORT_SYMBOL(tb_ticks_per_usec);
7868 EXPORT_SYMBOL(paca);
7869 EXPORT_SYMBOL(cur_cpu_spec);
7870 Index: linux-2.6.10/arch/ppc64/kernel/lmb.c
7871 ===================================================================
7872 --- linux-2.6.10.orig/arch/ppc64/kernel/lmb.c 2004-12-25 05:34:58.000000000 +0800
7873 +++ linux-2.6.10/arch/ppc64/kernel/lmb.c 2005-04-07 18:13:56.546807424 +0800
7874 @@ -344,3 +344,31 @@
7881 + * This is the copy of page_is_ram (mm/init.c). The difference is
7882 + * it identifies all memory holes.
7884 +int dump_page_is_ram(unsigned long pfn)
7887 + unsigned long paddr = (pfn << PAGE_SHIFT);
7889 + for (i=0; i < lmb.memory.cnt ;i++) {
7890 + unsigned long base;
7892 +#ifdef CONFIG_MSCHUNKS
7893 + base = lmb.memory.region[i].physbase;
7895 + base = lmb.memory.region[i].base;
7897 + if ((paddr >= base) &&
7898 + (paddr < (base + lmb.memory.region[i].size))) {
7906 Index: linux-2.6.10/arch/ppc64/kernel/xics.c
7907 ===================================================================
7908 --- linux-2.6.10.orig/arch/ppc64/kernel/xics.c 2004-12-25 05:34:58.000000000 +0800
7909 +++ linux-2.6.10/arch/ppc64/kernel/xics.c 2005-04-07 18:13:56.553806360 +0800
7911 smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
7914 -#ifdef CONFIG_DEBUGGER
7915 +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_CRASH_DUMP) \
7916 + || defined(CONFIG_CRASH_DUMP_MODULE)
7917 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
7918 &xics_ipi_message[cpu].value)) {
7920 Index: linux-2.6.10/arch/s390/boot/install.sh
7921 ===================================================================
7922 --- linux-2.6.10.orig/arch/s390/boot/install.sh 2004-12-25 05:35:01.000000000 +0800
7923 +++ linux-2.6.10/arch/s390/boot/install.sh 2005-04-07 18:13:56.443823080 +0800
7925 # $1 - kernel version
7926 # $2 - kernel image file
7927 # $3 - kernel map file
7928 -# $4 - default install path (blank if root directory)
7929 +# $4 - kernel type file
7930 +# $5 - default install path (blank if root directory)
7933 # User may have a custom install script
7936 # Default install - same as make zlilo
7938 -if [ -f $4/vmlinuz ]; then
7939 - mv $4/vmlinuz $4/vmlinuz.old
7940 +if [ -f $5/vmlinuz ]; then
7941 + mv $5/vmlinuz $5/vmlinuz.old
7944 -if [ -f $4/System.map ]; then
7945 - mv $4/System.map $4/System.old
7946 +if [ -f $5/System.map ]; then
7947 + mv $5/System.map $5/System.old
7950 -cat $2 > $4/vmlinuz
7951 -cp $3 $4/System.map
7952 +cat $2 > $5/vmlinuz
7953 +cp $3 $5/System.map
7954 Index: linux-2.6.10/arch/s390/boot/Makefile
7955 ===================================================================
7956 --- linux-2.6.10.orig/arch/s390/boot/Makefile 2004-12-25 05:35:49.000000000 +0800
7957 +++ linux-2.6.10/arch/s390/boot/Makefile 2005-04-07 18:13:56.442823232 +0800
7960 install: $(CONFIGURE) $(obj)/image
7961 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
7962 - System.map Kerntypes "$(INSTALL_PATH)"
7963 + System.map init/Kerntypes "$(INSTALL_PATH)"
7964 Index: linux-2.6.10/arch/s390/Kconfig.debug
7965 ===================================================================
7966 --- linux-2.6.10.orig/arch/s390/Kconfig.debug 2004-12-25 05:34:31.000000000 +0800
7967 +++ linux-2.6.10/arch/s390/Kconfig.debug 2005-04-07 18:13:56.436824144 +0800
7970 source "lib/Kconfig.debug"
7973 + bool "Kerntypes debugging information"
7976 + Say Y here to save additional kernel debugging information in the
7977 + file init/kerntypes.o. This information is used by crash analysis
7978 + tools such as lcrash to assign structures to kernel addresses.
7982 Index: linux-2.6.10/arch/x86_64/mm/init.c
7983 ===================================================================
7984 --- linux-2.6.10.orig/arch/x86_64/mm/init.c 2005-04-06 23:38:35.000000000 +0800
7985 +++ linux-2.6.10/arch/x86_64/mm/init.c 2005-04-07 18:13:56.582801952 +0800
7990 -static inline int page_is_ram (unsigned long pagenr)
7991 +inline int page_is_ram (unsigned long pagenr)
7995 Index: linux-2.6.10/arch/x86_64/Kconfig.debug
7996 ===================================================================
7997 --- linux-2.6.10.orig/arch/x86_64/Kconfig.debug 2004-12-25 05:34:01.000000000 +0800
7998 +++ linux-2.6.10/arch/x86_64/Kconfig.debug 2005-04-07 18:13:56.572803472 +0800
8001 source "lib/Kconfig.debug"
8004 + tristate "Crash dump support (EXPERIMENTAL)"
8005 + depends on EXPERIMENTAL
8008 + Say Y here to enable saving an image of system memory when a panic
8009 + or other error occurs. Dumps can also be forced with the SysRq+d
8010 + key if MAGIC_SYSRQ is enabled.
8014 + depends on CRASH_DUMP
8017 +config CRASH_DUMP_BLOCKDEV
8018 + tristate "Crash dump block device driver"
8019 + depends on CRASH_DUMP
8021 + Say Y to allow saving crash dumps directly to a disk device.
8023 +config CRASH_DUMP_NETDEV
8024 + tristate "Crash dump network device driver"
8025 + depends on CRASH_DUMP
8027 + Say Y to allow saving crash dumps over a network device.
8029 +config CRASH_DUMP_MEMDEV
8030 + bool "Crash dump staged memory driver"
8031 + depends on CRASH_DUMP
8033 + Say Y to allow intermediate saving crash dumps in spare
8034 + memory pages which would then be written out to disk
8037 +config CRASH_DUMP_SOFTBOOT
8038 + bool "Save crash dump across a soft reboot"
8039 + depends on CRASH_DUMP_MEMDEV
8041 + Say Y to allow a crash dump to be preserved in memory
8042 + lkcd-kernpages across a soft reboot and written out to disk
8043 + thereafter. For this to work, CRASH_DUMP must be
8044 + configured as part of the kernel (not as a module).
8046 +config CRASH_DUMP_COMPRESS_RLE
8047 + tristate "Crash dump RLE compression"
8048 + depends on CRASH_DUMP
8050 + Say Y to allow saving dumps with Run Length Encoding compression.
8053 +config CRASH_DUMP_COMPRESS_GZIP
8054 + tristate "Crash dump GZIP compression"
8055 + select ZLIB_INFLATE
8056 + select ZLIB_DEFLATE
8057 + depends on CRASH_DUMP
8059 + Say Y to allow saving dumps with Gnu Zip compression.
8063 # !SMP for now because the context switch early causes GPF in segment reloading
8064 # and the GS base checking does the wrong thing then, causing a hang.
8066 Index: linux-2.6.10/arch/x86_64/kernel/smp.c
8067 ===================================================================
8068 --- linux-2.6.10.orig/arch/x86_64/kernel/smp.c 2004-12-25 05:35:50.000000000 +0800
8069 +++ linux-2.6.10/arch/x86_64/kernel/smp.c 2005-04-07 18:13:56.609797848 +0800
8071 #include <linux/kernel_stat.h>
8072 #include <linux/mc146818rtc.h>
8073 #include <linux/interrupt.h>
8074 +#include <linux/dump.h>
8076 #include <asm/mtrr.h>
8077 #include <asm/pgalloc.h>
8078 @@ -151,6 +152,13 @@
8082 + if (vector == DUMP_VECTOR) {
8084 + * Setup DUMP IPI to be delivered as an NMI
8086 + cfg = (cfg&~APIC_VECTOR_MASK)|APIC_DM_NMI;
8090 * I'm not happy about this global shared spinlock in the
8091 * MM hot path, but we'll see how contended it is.
8092 @@ -253,6 +261,13 @@
8093 send_IPI_allbutself(KDB_VECTOR);
8097 +/* void dump_send_ipi(int (*dump_ipi_handler)(struct pt_regs *)); */
8098 +void dump_send_ipi(void)
8100 + send_IPI_allbutself(DUMP_VECTOR);
8104 * this function sends a 'reschedule' IPI to another CPU.
8105 * it goes straight through and wastes no time serializing
8106 @@ -340,6 +355,18 @@
8110 +void stop_this_cpu(void* dummy)
8113 + * Remove this CPU:
8115 + cpu_clear(smp_processor_id(), cpu_online_map);
8116 + local_irq_disable();
8117 + disable_local_APIC();
8122 void smp_stop_cpu(void)
8125 Index: linux-2.6.10/arch/x86_64/kernel/traps.c
8126 ===================================================================
8127 --- linux-2.6.10.orig/arch/x86_64/kernel/traps.c 2004-12-25 05:33:49.000000000 +0800
8128 +++ linux-2.6.10/arch/x86_64/kernel/traps.c 2005-04-07 18:13:56.595799976 +0800
8130 #include <linux/spinlock.h>
8131 #include <linux/interrupt.h>
8132 #include <linux/module.h>
8133 +#include <linux/dump.h>
8134 #include <linux/moduleparam.h>
8136 #include <asm/system.h>
8139 notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
8140 show_registers(regs);
8141 + dump((char *)str, regs);
8142 /* Executive summary in case the oops scrolled away */
8143 printk(KERN_ALERT "RIP ");
8144 printk_address(regs->rip);
8145 Index: linux-2.6.10/arch/x86_64/kernel/pci-gart.c
8146 ===================================================================
8147 --- linux-2.6.10.orig/arch/x86_64/kernel/pci-gart.c 2004-12-25 05:34:32.000000000 +0800
8148 +++ linux-2.6.10/arch/x86_64/kernel/pci-gart.c 2005-04-07 18:13:56.627795112 +0800
8150 dma_addr_t bad_dma_address;
8152 unsigned long iommu_bus_base; /* GART remapping area (physical) */
8153 -static unsigned long iommu_size; /* size of remapping area bytes */
8154 +unsigned long iommu_size; /* size of remapping area bytes */
8155 static unsigned long iommu_pages; /* .. and in pages */
8157 u32 *iommu_gatt_base; /* Remapping table */
8158 Index: linux-2.6.10/arch/x86_64/kernel/setup.c
8159 ===================================================================
8160 --- linux-2.6.10.orig/arch/x86_64/kernel/setup.c 2004-12-25 05:33:50.000000000 +0800
8161 +++ linux-2.6.10/arch/x86_64/kernel/setup.c 2005-04-07 18:13:56.608798000 +0800
8166 +unsigned long crashdump_addr = 0xdeadbeef;
8168 static __init void parse_cmdline_early (char ** cmdline_p)
8170 char c = ' ', *to = command_line, *from = COMMAND_LINE;
8173 if (!memcmp(from,"oops=panic", 10))
8176 + if (c == ' ' && !memcmp(from, "crashdump=", 10))
8177 + crashdump_addr = memparse(from+10, &from);
8181 @@ -441,6 +446,10 @@
8182 reserve_bootmem_generic(addr, PAGE_SIZE);
8185 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
8186 +extern void crashdump_reserve(void);
8189 void __init setup_arch(char **cmdline_p)
8191 unsigned long low_mem_size;
8196 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
8197 + crashdump_reserve(); /* Preserve crash dump state from prev boot */
8201 #ifdef CONFIG_ACPI_BOOT
8202 Index: linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c
8203 ===================================================================
8204 --- linux-2.6.10.orig/arch/x86_64/kernel/x8664_ksyms.c 2004-12-25 05:34:01.000000000 +0800
8205 +++ linux-2.6.10/arch/x86_64/kernel/x8664_ksyms.c 2005-04-07 18:13:56.625795416 +0800
8207 #include <asm/unistd.h>
8208 #include <asm/delay.h>
8209 #include <asm/tlbflush.h>
8210 +#include <asm/e820.h>
8211 #include <asm/kdebug.h>
8213 extern spinlock_t rtc_lock;
8214 @@ -216,6 +217,20 @@
8215 extern unsigned long __supported_pte_mask;
8216 EXPORT_SYMBOL(__supported_pte_mask);
8218 +#ifdef CONFIG_CRASH_DUMP_MODULE
8220 +extern irq_desc_t irq_desc[NR_IRQS];
8221 +extern cpumask_t irq_affinity[NR_IRQS];
8222 +extern void stop_this_cpu(void *);
8223 +EXPORT_SYMBOL(irq_desc);
8224 +EXPORT_SYMBOL(irq_affinity);
8225 +EXPORT_SYMBOL(dump_send_ipi);
8226 +EXPORT_SYMBOL(stop_this_cpu);
8228 +extern int page_is_ram(unsigned long);
8229 +EXPORT_SYMBOL(page_is_ram);
8233 EXPORT_SYMBOL(flush_tlb_page);
8234 EXPORT_SYMBOL_GPL(flush_tlb_all);
8235 Index: linux-2.6.10/scripts/mkcompile_h
8236 ===================================================================
8237 --- linux-2.6.10.orig/scripts/mkcompile_h 2004-12-25 05:35:50.000000000 +0800
8238 +++ linux-2.6.10/scripts/mkcompile_h 2005-04-07 18:13:56.778772160 +0800
8242 UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
8244 +LINUX_COMPILE_VERSION_ID="__linux_compile_version_id__`hostname | tr -c '[0-9A-Za-z\n]' '__'`_`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'`"
8245 # Generate a temporary compile.h
8247 ( echo /\* This file is auto generated, version $VERSION \*/
8251 echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
8252 + echo \#define LINUX_COMPILE_VERSION_ID $LINUX_COMPILE_VERSION_ID
8253 + echo \#define LINUX_COMPILE_VERSION_ID_TYPE typedef char* "$LINUX_COMPILE_VERSION_ID""_t"
8256 # Only replace the real compile.h if the new one is different,
8257 Index: linux-2.6.10/init/main.c
8258 ===================================================================
8259 --- linux-2.6.10.orig/init/main.c 2005-04-06 23:38:35.000000000 +0800
8260 +++ linux-2.6.10/init/main.c 2005-04-07 18:13:56.635793896 +0800
8261 @@ -109,6 +109,16 @@
8262 EXPORT_SYMBOL(system_state);
8265 + * The kernel_magic value represents the address of _end, which allows
8266 + * namelist tools to "match" each other respectively. That way a tool
8267 + * that looks at /dev/mem can verify that it is using the right System.map
8268 + * file -- if kernel_magic doesn't equal the namelist value of _end,
8269 + * something's wrong.
8271 +extern unsigned long _end;
8272 +unsigned long *kernel_magic = &_end;
8275 * Boot command-line arguments
8277 #define MAX_INIT_ARGS 32
8278 Index: linux-2.6.10/init/kerntypes.c
8279 ===================================================================
8280 --- linux-2.6.10.orig/init/kerntypes.c 2005-04-07 19:34:21.197950744 +0800
8281 +++ linux-2.6.10/init/kerntypes.c 2005-04-07 18:13:56.634794048 +0800
8286 + * Copyright (C) 2000 Tom Morano (tjm@sgi.com) and
8287 + * Matt D. Robinson (yakker@alacritech.com)
8289 + * Dummy module that includes headers for all kernel types of interest.
8290 + * The kernel type information is used by the lcrash utility when
8291 + * analyzing system crash dumps or the live system. Using the type
8292 + * information for the running system, rather than kernel header files,
8293 + * makes for a more flexible and robust analysis tool.
8295 + * This source code is released under version 2 of the GNU GPL.
8298 +#include <linux/compile.h>
8299 +#include <linux/module.h>
8300 +#include <linux/mm.h>
8301 +#include <linux/vmalloc.h>
8302 +#include <linux/config.h>
8303 +#include <linux/utsname.h>
8304 +#include <linux/kernel_stat.h>
8305 +#include <linux/dump.h>
8307 +#include <asm/kerntypes.h>
8309 +#ifdef LINUX_COMPILE_VERSION_ID_TYPE
8310 +/* Define version type for version validation of dump and kerntypes */
8311 +LINUX_COMPILE_VERSION_ID_TYPE;
8313 +#if defined(CONFIG_SMP) && defined(CONFIG_CRASH_DUMP)
8314 +extern struct runqueue runqueues;
8315 +struct runqueue rn;
8318 +struct new_utsname *p;
8320 +kerntypes_dummy(void)
8323 Index: linux-2.6.10/init/version.c
8324 ===================================================================
8325 --- linux-2.6.10.orig/init/version.c 2004-12-25 05:34:45.000000000 +0800
8326 +++ linux-2.6.10/init/version.c 2005-04-07 18:13:56.633794200 +0800
8328 #include <linux/uts.h>
8329 #include <linux/utsname.h>
8330 #include <linux/version.h>
8331 +#include <linux/stringify.h>
8333 #define version(a) Version_ ## a
8334 #define version_string(a) version(a)
8336 const char *linux_banner =
8337 "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
8338 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
8340 +const char *LINUX_COMPILE_VERSION_ID = __stringify(LINUX_COMPILE_VERSION_ID);
8341 +LINUX_COMPILE_VERSION_ID_TYPE;
8342 Index: linux-2.6.10/init/Makefile
8343 ===================================================================
8344 --- linux-2.6.10.orig/init/Makefile 2004-12-25 05:34:32.000000000 +0800
8345 +++ linux-2.6.10/init/Makefile 2005-04-07 18:13:56.636793744 +0800
8347 mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
8348 mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
8350 +extra-$(CONFIG_KERNTYPES) += kerntypes.o
8351 +#For IA64, compile kerntypes in dwarf-2 format.
8352 +ifeq ($(CONFIG_IA64),y)
8353 +CFLAGS_kerntypes.o := -gdwarf-2
8355 +CFLAGS_kerntypes.o := -gstabs
8358 # files to be removed upon make clean
8359 clean-files := ../include/linux/compile.h
8361 # dependencies on generated files need to be listed explicitly
8363 -$(obj)/version.o: include/linux/compile.h
8364 +$(obj)/version.o $(obj)/kerntypes.o: include/linux/compile.h
8366 # compile.h changes depending on hostname, generation number, etc,
8367 # so we regenerate it always.
8369 include/linux/compile.h: FORCE
8371 @$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CC) $(CFLAGS)"
8373 Index: linux-2.6.10/net/Kconfig
8374 ===================================================================
8375 --- linux-2.6.10.orig/net/Kconfig 2005-04-06 23:38:35.000000000 +0800
8376 +++ linux-2.6.10/net/Kconfig 2005-04-07 18:13:56.760774896 +0800
8381 - def_bool NETCONSOLE
8382 + def_bool NETCONSOLE || CRASH_DUMP_NETDEV
8385 bool "Netpoll support for trapping incoming packets"
8386 Index: linux-2.6.10/kernel/sched.c
8387 ===================================================================
8388 --- linux-2.6.10.orig/kernel/sched.c 2005-04-07 14:55:26.000000000 +0800
8389 +++ linux-2.6.10/kernel/sched.c 2005-04-07 18:13:56.850761216 +0800
8391 #define cpu_to_node_mask(cpu) (cpu_online_map)
8394 +/* used to soft spin in sched while dump is in progress */
8395 +unsigned long dump_oncpu;
8396 +EXPORT_SYMBOL(dump_oncpu);
8399 * Convert user-nice values [ -20 ... 0 ... 19 ]
8400 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
8401 @@ -184,109 +188,6 @@
8402 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
8403 < (long long) (sd)->cache_hot_time)
8406 - * These are the runqueue data structures:
8409 -#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
8411 -typedef struct runqueue runqueue_t;
8413 -struct prio_array {
8414 - unsigned int nr_active;
8415 - unsigned long bitmap[BITMAP_SIZE];
8416 - struct list_head queue[MAX_PRIO];
8420 - * This is the main, per-CPU runqueue data structure.
8422 - * Locking rule: those places that want to lock multiple runqueues
8423 - * (such as the load balancing or the thread migration code), lock
8424 - * acquire operations must be ordered by ascending &runqueue.
8430 - * nr_running and cpu_load should be in the same cacheline because
8431 - * remote CPUs use both these fields when doing load calculation.
8433 - unsigned long nr_running;
8435 - unsigned long cpu_load;
8437 - unsigned long long nr_switches;
8440 - * This is part of a global counter where only the total sum
8441 - * over all CPUs matters. A task can increase this counter on
8442 - * one CPU and if it got migrated afterwards it may decrease
8443 - * it on another CPU. Always updated under the runqueue lock:
8445 - unsigned long nr_uninterruptible;
8447 - unsigned long expired_timestamp;
8448 - unsigned long long timestamp_last_tick;
8449 - task_t *curr, *idle;
8450 - struct mm_struct *prev_mm;
8451 - prio_array_t *active, *expired, arrays[2];
8452 - int best_expired_prio;
8453 - atomic_t nr_iowait;
8456 - struct sched_domain *sd;
8458 - /* For active balancing */
8459 - int active_balance;
8462 - task_t *migration_thread;
8463 - struct list_head migration_queue;
8466 -#ifdef CONFIG_SCHEDSTATS
8467 - /* latency stats */
8468 - struct sched_info rq_sched_info;
8470 - /* sys_sched_yield() stats */
8471 - unsigned long yld_exp_empty;
8472 - unsigned long yld_act_empty;
8473 - unsigned long yld_both_empty;
8474 - unsigned long yld_cnt;
8476 - /* schedule() stats */
8477 - unsigned long sched_noswitch;
8478 - unsigned long sched_switch;
8479 - unsigned long sched_cnt;
8480 - unsigned long sched_goidle;
8482 - /* pull_task() stats */
8483 - unsigned long pt_gained[MAX_IDLE_TYPES];
8484 - unsigned long pt_lost[MAX_IDLE_TYPES];
8486 - /* active_load_balance() stats */
8487 - unsigned long alb_cnt;
8488 - unsigned long alb_lost;
8489 - unsigned long alb_gained;
8490 - unsigned long alb_failed;
8492 - /* try_to_wake_up() stats */
8493 - unsigned long ttwu_cnt;
8494 - unsigned long ttwu_attempts;
8495 - unsigned long ttwu_moved;
8497 - /* wake_up_new_task() stats */
8498 - unsigned long wunt_cnt;
8499 - unsigned long wunt_moved;
8501 - /* sched_migrate_task() stats */
8502 - unsigned long smt_cnt;
8504 - /* sched_balance_exec() stats */
8505 - unsigned long sbe_cnt;
8509 static DEFINE_PER_CPU(struct runqueue, runqueues);
8511 @@ -2535,6 +2436,15 @@
8512 unsigned long run_time;
8516 + * If crash dump is in progress, the other cpus
8517 + * need to wait until it completes.
8518 + * NB: this code is optimized away for kernels without
8519 + * dumping enabled.
8521 + if (unlikely(dump_oncpu))
8522 + goto dump_scheduling_disabled;
8525 * Test if we are atomic. Since do_exit() needs to call into
8526 * schedule() atomically, we ignore that path for now.
8527 @@ -2698,6 +2608,16 @@
8528 preempt_enable_no_resched();
8529 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
8534 + dump_scheduling_disabled:
8535 + /* allow scheduling only if this is the dumping cpu */
8536 + if (dump_oncpu != smp_processor_id()+1) {
8537 + while (dump_oncpu)
8543 EXPORT_SYMBOL(schedule);
8544 Index: linux-2.6.10/kernel/panic.c
8545 ===================================================================
8546 --- linux-2.6.10.orig/kernel/panic.c 2004-12-25 05:35:29.000000000 +0800
8547 +++ linux-2.6.10/kernel/panic.c 2005-04-07 18:13:56.860759696 +0800
8549 #include <linux/sysrq.h>
8550 #include <linux/interrupt.h>
8551 #include <linux/nmi.h>
8552 +#ifdef CONFIG_KEXEC
8553 +#include <linux/kexec.h>
8559 +void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
8561 EXPORT_SYMBOL(panic_timeout);
8562 +EXPORT_SYMBOL(dump_function_ptr);
8564 struct notifier_block *panic_notifier_list;
8567 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
8570 + notifier_call_chain(&panic_notifier_list, 0, buf);
8576 - notifier_call_chain(&panic_notifier_list, 0, buf);
8579 panic_blink = no_blink;
8581 * We can't use the "normal" timers since we just panicked..
8583 printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
8584 +#ifdef CONFIG_KEXEC
8586 + struct kimage *image;
8587 + image = xchg(&kexec_image, 0);
8589 + printk(KERN_EMERG "by starting a new kernel ..\n");
8590 + mdelay(panic_timeout*1000);
8591 + machine_kexec(image);
8596 for (i = 0; i < panic_timeout*1000; ) {
8597 touch_nmi_watchdog();
8598 i += panic_blink(i);
8599 Index: linux-2.6.10/include/linux/sysctl.h
8600 ===================================================================
8601 --- linux-2.6.10.orig/include/linux/sysctl.h 2005-04-06 23:38:35.000000000 +0800
8602 +++ linux-2.6.10/include/linux/sysctl.h 2005-04-07 18:13:56.651791464 +0800
8604 KERN_HZ_TIMER=65, /* int: hz timer on or off */
8605 KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */
8606 KERN_SETUID_DUMPABLE=67, /* int: behaviour of dumps for setuid core */
8607 + KERN_DUMP=68, /* directory: dump parameters */
8611 Index: linux-2.6.10/include/linux/dump_netdev.h
8612 ===================================================================
8613 --- linux-2.6.10.orig/include/linux/dump_netdev.h 2005-04-07 19:34:21.197950744 +0800
8614 +++ linux-2.6.10/include/linux/dump_netdev.h 2005-04-07 18:13:56.663789640 +0800
8617 + * linux/drivers/net/netconsole.h
8619 + * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
8621 + * This file contains the implementation of an IRQ-safe, crash-safe
8622 + * kernel console implementation that outputs kernel messages to the
8625 + * Modification history:
8627 + * 2001-09-17 started by Ingo Molnar.
8630 +/****************************************************************
8631 + * This program is free software; you can redistribute it and/or modify
8632 + * it under the terms of the GNU General Public License as published by
8633 + * the Free Software Foundation; either version 2, or (at your option)
8634 + * any later version.
8636 + * This program is distributed in the hope that it will be useful,
8637 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
8638 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8639 + * GNU General Public License for more details.
8641 + * You should have received a copy of the GNU General Public License
8642 + * along with this program; if not, write to the Free Software
8643 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
8645 + ****************************************************************/
8647 +#define NETCONSOLE_VERSION 0x03
8649 +enum netdump_commands {
8651 + COMM_SEND_MEM = 1,
8655 + COMM_GET_NR_PAGES = 5,
8656 + COMM_GET_PAGE_SIZE = 6,
8657 + COMM_START_NETDUMP_ACK = 7,
8658 + COMM_GET_REGS = 8,
8659 + COMM_GET_MAGIC = 9,
8660 + COMM_START_WRITE_NETDUMP_ACK = 10,
8663 +typedef struct netdump_req_s {
8671 +enum netdump_replies {
8676 + REPLY_RESERVED = 4,
8678 + REPLY_NR_PAGES = 6,
8679 + REPLY_PAGE_SIZE = 7,
8680 + REPLY_START_NETDUMP = 8,
8681 + REPLY_END_NETDUMP = 9,
8684 + REPLY_START_WRITE_NETDUMP = 12,
8687 +typedef struct netdump_reply_s {
8693 +#define HEADER_LEN (1 + sizeof(reply_t))
8696 Index: linux-2.6.10/include/linux/sched.h
8697 ===================================================================
8698 --- linux-2.6.10.orig/include/linux/sched.h 2005-04-07 18:13:55.080030408 +0800
8699 +++ linux-2.6.10/include/linux/sched.h 2005-04-07 18:13:56.653791160 +0800
8701 extern int nr_threads;
8702 extern int last_pid;
8703 DECLARE_PER_CPU(unsigned long, process_counts);
8704 +DECLARE_PER_CPU(struct runqueue, runqueues);
8705 extern int nr_processes(void);
8706 extern unsigned long nr_running(void);
8707 extern unsigned long nr_uninterruptible(void);
8708 @@ -760,6 +761,110 @@
8712 + * These are the runqueue data structures:
8715 +#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
8717 +typedef struct runqueue runqueue_t;
8719 +struct prio_array {
8720 + unsigned int nr_active;
8721 + unsigned long bitmap[BITMAP_SIZE];
8722 + struct list_head queue[MAX_PRIO];
8726 + * This is the main, per-CPU runqueue data structure.
8728 + * Locking rule: those places that want to lock multiple runqueues
8729 + * (such as the load balancing or the thread migration code), lock
8730 + * acquire operations must be ordered by ascending &runqueue.
8736 + * nr_running and cpu_load should be in the same cacheline because
8737 + * remote CPUs use both these fields when doing load calculation.
8739 + unsigned long nr_running;
8741 + unsigned long cpu_load;
8743 + unsigned long long nr_switches;
8746 + * This is part of a global counter where only the total sum
8747 + * over all CPUs matters. A task can increase this counter on
8748 + * one CPU and if it got migrated afterwards it may decrease
8749 + * it on another CPU. Always updated under the runqueue lock:
8751 + unsigned long nr_uninterruptible;
8753 + unsigned long expired_timestamp;
8754 + unsigned long long timestamp_last_tick;
8755 + task_t *curr, *idle;
8756 + struct mm_struct *prev_mm;
8757 + prio_array_t *active, *expired, arrays[2];
8758 + int best_expired_prio;
8759 + atomic_t nr_iowait;
8762 + struct sched_domain *sd;
8764 + /* For active balancing */
8765 + int active_balance;
8768 + task_t *migration_thread;
8769 + struct list_head migration_queue;
8772 +#ifdef CONFIG_SCHEDSTATS
8773 + /* latency stats */
8774 + struct sched_info rq_sched_info;
8776 + /* sys_sched_yield() stats */
8777 + unsigned long yld_exp_empty;
8778 + unsigned long yld_act_empty;
8779 + unsigned long yld_both_empty;
8780 + unsigned long yld_cnt;
8782 + /* schedule() stats */
8783 + unsigned long sched_noswitch;
8784 + unsigned long sched_switch;
8785 + unsigned long sched_cnt;
8786 + unsigned long sched_goidle;
8788 + /* pull_task() stats */
8789 + unsigned long pt_gained[MAX_IDLE_TYPES];
8790 + unsigned long pt_lost[MAX_IDLE_TYPES];
8792 + /* active_load_balance() stats */
8793 + unsigned long alb_cnt;
8794 + unsigned long alb_lost;
8795 + unsigned long alb_gained;
8796 + unsigned long alb_failed;
8798 + /* try_to_wake_up() stats */
8799 + unsigned long ttwu_cnt;
8800 + unsigned long ttwu_attempts;
8801 + unsigned long ttwu_moved;
8803 + /* wake_up_new_task() stats */
8804 + unsigned long wunt_cnt;
8805 + unsigned long wunt_moved;
8807 + /* sched_migrate_task() stats */
8808 + unsigned long smt_cnt;
8810 + /* sched_balance_exec() stats */
8811 + unsigned long sbe_cnt;
8816 * The default (Linux) execution domain.
8818 extern struct exec_domain default_exec_domain;
8819 Index: linux-2.6.10/include/linux/dumpdev.h
8820 ===================================================================
8821 --- linux-2.6.10.orig/include/linux/dumpdev.h 2005-04-07 19:34:21.197950744 +0800
8822 +++ linux-2.6.10/include/linux/dumpdev.h 2005-04-07 18:13:56.663789640 +0800
8825 + * Generic dump device interfaces for flexible system dump
8826 + * (Enables variation of dump target types e.g disk, network, memory)
8828 + * These interfaces have evolved based on discussions on lkcd-devel.
8829 + * Eventually the intent is to support primary and secondary or
8830 + * alternate targets registered at the same time, with scope for
8831 + * situation based failover or multiple dump devices used for parallel
8834 + * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
8836 + * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
8837 + * Copyright (C) 2002 International Business Machines Corp.
8839 + * This code is released under version 2 of the GNU GPL.
8842 +#ifndef _LINUX_DUMPDEV_H
8843 +#define _LINUX_DUMPDEV_H
8845 +#include <linux/kernel.h>
8846 +#include <linux/wait.h>
8847 +#include <linux/netpoll.h>
8848 +#include <linux/bio.h>
8850 +/* Determined by the dump target (device) type */
8854 +struct dump_dev_ops {
8855 + int (*open)(struct dump_dev *, unsigned long); /* configure */
8856 + int (*release)(struct dump_dev *); /* unconfigure */
8857 + int (*silence)(struct dump_dev *); /* when dump starts */
8858 + int (*resume)(struct dump_dev *); /* when dump is over */
8859 + int (*seek)(struct dump_dev *, loff_t);
8860 + /* trigger a write (async in nature typically) */
8861 + int (*write)(struct dump_dev *, void *, unsigned long);
8862 + /* not usually used during dump, but option available */
8863 + int (*read)(struct dump_dev *, void *, unsigned long);
8864 + /* use to poll for completion */
8865 + int (*ready)(struct dump_dev *, void *);
8866 + int (*ioctl)(struct dump_dev *, unsigned int, unsigned long);
8870 + char type_name[32]; /* block, net-poll etc */
8871 + unsigned long device_id; /* interpreted differently for various types */
8872 + struct dump_dev_ops *ops;
8873 + struct list_head list;
8874 + loff_t curr_offset;
8875 + struct netpoll np;
8879 + * dump_dev type variations:
8883 +struct dump_blockdev {
8884 + struct dump_dev ddev;
8886 + struct block_device *bdev;
8888 + loff_t start_offset;
8893 +static inline struct dump_blockdev *DUMP_BDEV(struct dump_dev *dev)
8895 + return container_of(dev, struct dump_blockdev, ddev);
8899 +/* mem - for internal use by soft-boot based dumper */
8900 +struct dump_memdev {
8901 + struct dump_dev ddev;
8902 + unsigned long indirect_map_root;
8903 + unsigned long nr_free;
8904 + struct page *curr_page;
8905 + unsigned long *curr_map;
8906 + unsigned long curr_map_offset;
8907 + unsigned long last_offset;
8908 + unsigned long last_used_offset;
8909 + unsigned long last_bs_offset;
8912 +static inline struct dump_memdev *DUMP_MDEV(struct dump_dev *dev)
8914 + return container_of(dev, struct dump_memdev, ddev);
8917 +/* Todo/future - meant for raw dedicated interfaces e.g. mini-ide driver */
8919 + struct dump_dev ddev;
8921 + int (*reset)(struct dump_rdev *, unsigned int,
8923 + /* ... to do ... */
8926 +/* just to get the size right when saving config across a soft-reboot */
8927 +struct dump_anydev {
8929 + struct dump_blockdev bddev;
8930 + /* .. add other types here .. */
8936 +/* Dump device / target operation wrappers */
8937 +/* These assume that dump_dev is initiatized to dump_config.dumper->dev */
8939 +extern struct dump_dev *dump_dev;
8941 +static inline int dump_dev_open(unsigned long arg)
8943 + return dump_dev->ops->open(dump_dev, arg);
8946 +static inline int dump_dev_release(void)
8948 + return dump_dev->ops->release(dump_dev);
8951 +static inline int dump_dev_silence(void)
8953 + return dump_dev->ops->silence(dump_dev);
8956 +static inline int dump_dev_resume(void)
8958 + return dump_dev->ops->resume(dump_dev);
8961 +static inline int dump_dev_seek(loff_t offset)
8963 + return dump_dev->ops->seek(dump_dev, offset);
8966 +static inline int dump_dev_write(void *buf, unsigned long len)
8968 + return dump_dev->ops->write(dump_dev, buf, len);
8971 +static inline int dump_dev_ready(void *buf)
8973 + return dump_dev->ops->ready(dump_dev, buf);
8976 +static inline int dump_dev_ioctl(unsigned int cmd, unsigned long arg)
8978 + if (!dump_dev || !dump_dev->ops->ioctl)
8980 + return dump_dev->ops->ioctl(dump_dev, cmd, arg);
8983 +extern int dump_register_device(struct dump_dev *);
8984 +extern void dump_unregister_device(struct dump_dev *);
8986 +#endif /* _LINUX_DUMPDEV_H */
8987 Index: linux-2.6.10/include/linux/dump.h
8988 ===================================================================
8989 --- linux-2.6.10.orig/include/linux/dump.h 2005-04-07 19:34:21.197950744 +0800
8990 +++ linux-2.6.10/include/linux/dump.h 2005-04-07 18:13:56.662789792 +0800
8993 + * Kernel header file for Linux crash dumps.
8995 + * Created by: Matt Robinson (yakker@sgi.com)
8996 + * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
8998 + * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
8999 + * Copyright 2001 - 2002 Matt D. Robinson. All rights reserved.
9000 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
9002 + * Most of this is the same old stuff from vmdump.h, except now we're
9003 + * actually a stand-alone driver plugged into the block layer interface,
9004 + * with the exception that we now allow for compression modes externally
9005 + * loaded (e.g., someone can come up with their own).
9007 + * This code is released under version 2 of the GNU GPL.
9010 +/* This header file includes all structure definitions for crash dumps. */
9014 +#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
9016 +#include <linux/list.h>
9017 +#include <linux/notifier.h>
9018 +#include <linux/dumpdev.h>
9019 +#include <asm/ioctl.h>
9022 + * Predefine default DUMP_PAGE constants, asm header may override.
9024 + * On ia64 discontinuous memory systems it's possible for the memory
9025 + * banks to stop at 2**12 page alignments, the smallest possible page
9026 + * size. But the system page size, PAGE_SIZE, is in fact larger.
9028 +#define DUMP_PAGE_SHIFT PAGE_SHIFT
9029 +#define DUMP_PAGE_MASK PAGE_MASK
9030 +#define DUMP_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
9033 + * Dump offset changed from 4Kb to 64Kb to support multiple PAGE_SIZE
9034 + * (kernel page size). Assumption goes that 64K is the highest page size
9038 +#define DUMP_HEADER_OFFSET (1ULL << 16)
9040 +#define OLDMINORBITS 8
9041 +#define OLDMINORMASK ((1U << OLDMINORBITS) -1)
9043 +/* Making DUMP_PAGE_SIZE = PAGE_SIZE, to support dumping on architectures
9044 + * which support page sizes (PAGE_SIZE) greater than 4KB.
9045 + * Will it affect ia64 discontinuous memory systems ????
9047 +#define DUMP_PAGE_SIZE PAGE_SIZE
9049 +/* thread_info lies at the bottom of stack, (Except IA64). */
9050 +#define STACK_START_POSITION(tsk) (tsk->thread_info)
9052 + * Predefined default memcpy() to use when copying memory to the dump buffer.
9054 + * On ia64 there is a heads up function that can be called to let the prom
9055 + * machine check monitor know that the current activity is risky and it should
9056 + * ignore the fault (nofault). In this case the ia64 header will redefine this
9057 + * macro to __dump_memcpy() and use its arch-specific version.
9059 +#define DUMP_memcpy memcpy
9060 +#define bzero(a,b) memset(a, 0, b)
9062 +/* necessary header files */
9063 +#include <asm/dump.h> /* for architecture-specific header */
9066 + * Size of the buffer that's used to hold:
9068 + * 1. the dump header (padded to fill the complete buffer)
9069 + * 2. the possibly compressed page headers and data
9071 + * = 256k for page size >= 64k
9072 + * = 64k for page size < 64k
9074 +#if (PAGE_SHIFT >= 16)
9075 +#define DUMP_BUFFER_SIZE (256 * 1024) /* size of dump buffer */
9077 +#define DUMP_BUFFER_SIZE (64 * 1024) /* size of dump buffer */
9080 +#define DUMP_HEADER_SIZE DUMP_BUFFER_SIZE
9082 +/* standard header definitions */
9083 +#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
9084 +#define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */
9085 +#define DUMP_VERSION_NUMBER 0x8 /* dump version number */
9086 +#define DUMP_PANIC_LEN 0x100 /* dump panic string length */
9088 +/* dump levels - type specific stuff added later -- add as necessary */
9089 +#define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */
9090 +#define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */
9091 +#define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */
9092 +#define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */
9093 +#define DUMP_LEVEL_ALL_RAM 0x8 /* dump header, all RAM pages */
9094 +#define DUMP_LEVEL_ALL 0x10 /* dump all memory RAM and firmware */
9097 +/* dump compression options -- add as necessary */
9098 +#define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */
9099 +#define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */
9100 +#define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */
9102 +/* dump flags - any dump-type specific flags -- add as necessary */
9103 +#define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */
9104 +#define DUMP_FLAGS_SOFTBOOT 0x2 /* 2 stage soft-boot based dump */
9105 +#define DUMP_FLAGS_NONDISRUPT 0X1 /* non-disruptive dumping */
9107 +#define DUMP_FLAGS_TARGETMASK 0xf0000000 /* handle special case targets */
9108 +#define DUMP_FLAGS_DISKDUMP 0x80000000 /* dump to local disk */
9109 +#define DUMP_FLAGS_NETDUMP 0x40000000 /* dump over the network */
9111 +/* dump header flags -- add as necessary */
9112 +#define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */
9113 +#define DUMP_DH_RAW 0x1 /* raw page (no compression) */
9114 +#define DUMP_DH_COMPRESSED 0x2 /* page is compressed */
9115 +#define DUMP_DH_END 0x4 /* end marker on a full dump */
9116 +#define DUMP_DH_TRUNCATED 0x8 /* dump is incomplete */
9117 +#define DUMP_DH_TEST_PATTERN 0x10 /* dump page is a test pattern */
9118 +#define DUMP_DH_NOT_USED 0x20 /* 1st bit not used in flags */
9120 +/* names for various dump parameters in /proc/kernel */
9121 +#define DUMP_ROOT_NAME "sys/dump"
9122 +#define DUMP_DEVICE_NAME "device"
9123 +#define DUMP_COMPRESS_NAME "compress"
9124 +#define DUMP_LEVEL_NAME "level"
9125 +#define DUMP_FLAGS_NAME "flags"
9126 +#define DUMP_ADDR_NAME "addr"
9128 +#define DUMP_SYSRQ_KEY 'd' /* key to use for MAGIC_SYSRQ key */
9130 +/* CTL_DUMP names: */
9133 + CTL_DUMP_DEVICE=1,
9134 + CTL_DUMP_COMPRESS=3,
9142 +/* page size for gzip compression -- buffered slightly beyond hardware PAGE_SIZE used by DUMP */
9143 +#define DUMP_DPC_PAGE_SIZE (DUMP_PAGE_SIZE + 512)
9145 +/* dump ioctl() control options */
9146 +#define DIOSDUMPDEV _IOW('p', 0xA0, unsigned int) /* set the dump device */
9147 +#define DIOGDUMPDEV _IOR('p', 0xA1, unsigned int) /* get the dump device */
9148 +#define DIOSDUMPLEVEL _IOW('p', 0xA2, unsigned int) /* set the dump level */
9149 +#define DIOGDUMPLEVEL _IOR('p', 0xA3, unsigned int) /* get the dump level */
9150 +#define DIOSDUMPFLAGS _IOW('p', 0xA4, unsigned int) /* set the dump flag parameters */
9151 +#define DIOGDUMPFLAGS _IOR('p', 0xA5, unsigned int) /* get the dump flag parameters */
9152 +#define DIOSDUMPCOMPRESS _IOW('p', 0xA6, unsigned int) /* set the dump compress level */
9153 +#define DIOGDUMPCOMPRESS _IOR('p', 0xA7, unsigned int) /* get the dump compress level */
9155 +/* these ioctls are used only by netdump module */
9156 +#define DIOSTARGETIP _IOW('p', 0xA8, unsigned int) /* set the target m/c's ip */
9157 +#define DIOGTARGETIP _IOR('p', 0xA9, unsigned int) /* get the target m/c's ip */
9158 +#define DIOSTARGETPORT _IOW('p', 0xAA, unsigned int) /* set the target m/c's port */
9159 +#define DIOGTARGETPORT _IOR('p', 0xAB, unsigned int) /* get the target m/c's port */
9160 +#define DIOSSOURCEPORT _IOW('p', 0xAC, unsigned int) /* set the source m/c's port */
9161 +#define DIOGSOURCEPORT _IOR('p', 0xAD, unsigned int) /* get the source m/c's port */
9162 +#define DIOSETHADDR _IOW('p', 0xAE, unsigned int) /* set ethernet address */
9163 +#define DIOGETHADDR _IOR('p', 0xAF, unsigned int) /* get ethernet address */
9164 +#define DIOGDUMPOKAY _IOR('p', 0xB0, unsigned int) /* check if dump is configured */
9165 +#define DIOSDUMPTAKE _IOW('p', 0xB1, unsigned int) /* Take a manual dump */
9168 + * Structure: __dump_header
9169 + * Function: This is the header dumped at the top of every valid crash
9172 +struct __dump_header {
9173 + /* the dump magic number -- unique to verify dump is valid */
9174 + u64 dh_magic_number;
9176 + /* the version number of this dump */
9179 + /* the size of this header (in case we can't read it) */
9180 + u32 dh_header_size;
9182 + /* the level of this dump (just a header?) */
9183 + u32 dh_dump_level;
9186 + * We assume dump_page_size to be 4K in every case.
9187 + * Store here the configurable system page size (4K, 8K, 16K, etc.)
9191 + /* the size of all physical memory */
9192 + u64 dh_memory_size;
9194 + /* the start of physical memory */
9195 + u64 dh_memory_start;
9197 + /* the end of physical memory */
9198 + u64 dh_memory_end;
9200 + /* the number of hardware/physical pages in this dump specifically */
9201 + u32 dh_num_dump_pages;
9203 + /* the panic string, if available */
9204 + char dh_panic_string[DUMP_PANIC_LEN];
9206 + /* timeval depends on architecture, two long values */
9210 + } dh_time; /* the time of the system crash */
9212 + /* the NEW utsname (uname) information -- in character form */
9213 + /* we do this so we don't have to include utsname.h */
9214 + /* plus it helps us be more architecture independent */
9215 + /* now maybe one day soon they'll make the [65] a #define! */
9216 + char dh_utsname_sysname[65];
9217 + char dh_utsname_nodename[65];
9218 + char dh_utsname_release[65];
9219 + char dh_utsname_version[65];
9220 + char dh_utsname_machine[65];
9221 + char dh_utsname_domainname[65];
9223 + /* the address of current task (OLD = void *, NEW = u64) */
9224 + u64 dh_current_task;
9226 + /* what type of compression we're using in this dump (if any) */
9227 + u32 dh_dump_compress;
9229 + /* any additional flags */
9230 + u32 dh_dump_flags;
9232 + /* any additional flags */
9233 + u32 dh_dump_device;
9234 +} __attribute__((packed));
9237 + * Structure: __dump_page
9238 + * Function: To act as the header associated to each physical page of
9239 + * memory saved in the system crash dump. This allows for
9240 + * easy reassembly of each crash dump page. The address bits
9241 + * are split to make things easier for 64-bit/32-bit system
9244 + * dp_byte_offset and dp_page_index are landmarks that are helpful when
9245 + * looking at a hex dump of /dev/vmdump,
9247 +struct __dump_page {
9248 + /* the address of this dump page */
9251 + /* the size of this dump page */
9254 + /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
9256 +} __attribute__((packed));
9259 + * Structure: __lkcdinfo
9260 + * Function: This structure contains information needed for the lkcdutils
9261 + * package (particularly lcrash) to determine what information is
9262 + * associated to this kernel, specifically.
9264 +struct __lkcdinfo {
9268 + int linux_release;
9279 + * Structure: __dump_compress
9280 + * Function: This is what an individual compression mechanism can use
9281 + * to plug in their own compression techniques. It's always
9282 + * best to build these as individual modules so that people
9283 + * can put in whatever they want.
9285 +struct __dump_compress {
9286 + /* the list_head structure for list storage */
9287 + struct list_head list;
9289 + /* the type of compression to use (DUMP_COMPRESS_XXX) */
9290 + int compress_type;
9291 + const char *compress_name;
9293 + /* the compression function to call */
9294 + u32 (*compress_func)(const u8 *, u32, u8 *, u32, unsigned long);
9297 +/* functions for dump compression registration */
9298 +extern void dump_register_compression(struct __dump_compress *);
9299 +extern void dump_unregister_compression(int);
9302 + * Structure dump_mbank[]:
9304 + * For CONFIG_DISCONTIGMEM systems this array specifies the
9305 + * memory banks/chunks that need to be dumped after a panic.
9307 + * For classic systems it specifies a single set of pages from
9310 +struct __dump_mbank {
9318 +#define DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY 1
9319 +#define DUMP_MBANK_TYPE_OTHER 2
9321 +#define MAXCHUNKS 256
9322 +extern int dump_mbanks;
9323 +extern struct __dump_mbank dump_mbank[MAXCHUNKS];
9325 +/* notification event codes */
9326 +#define DUMP_BEGIN 0x0001 /* dump beginning */
9327 +#define DUMP_END 0x0002 /* dump ending */
9329 +/* Scheduler soft spin control.
9331 + * 0 - no dump in progress
9332 + * 1 - cpu0 is dumping, ...
9334 +extern unsigned long dump_oncpu;
9335 +extern void dump_execute(const char *, const struct pt_regs *);
9338 + * Notifier list for kernel code which wants to be called
9341 +extern struct notifier_block *dump_notifier_list;
9342 +static inline int register_dump_notifier(struct notifier_block *nb)
9344 + return notifier_chain_register(&dump_notifier_list, nb);
9346 +static inline int unregister_dump_notifier(struct notifier_block * nb)
9348 + return notifier_chain_unregister(&dump_notifier_list, nb);
9351 +extern void (*dump_function_ptr)(const char *, const struct pt_regs *);
9352 +static inline void dump(char * str, struct pt_regs * regs)
9354 + if (dump_function_ptr)
9355 + dump_function_ptr(str, regs);
9359 + * Common Arch Specific Functions should be declared here.
9360 + * This allows the C compiler to detect discrepancies.
9362 +extern void __dump_open(void);
9363 +extern void __dump_cleanup(void);
9364 +extern void __dump_clean_irq_state(void);
9365 +extern void __dump_init(u64);
9366 +extern void __dump_save_regs(struct pt_regs *, const struct pt_regs *);
9367 +extern void __dump_save_context(int cpu, const struct pt_regs *, struct task_struct *tsk);
9368 +extern int __dump_configure_header(const struct pt_regs *);
9369 +extern int __dump_irq_enable(void);
9370 +extern void __dump_irq_restore(void);
9371 +extern int __dump_page_valid(unsigned long index);
9373 +extern void __dump_save_other_cpus(void);
9375 +#define __dump_save_other_cpus()
9378 +extern int manual_handle_crashdump(void);
9380 +/* to track all used (compound + zero order) pages */
9381 +#define PageInuse(p) (PageCompound(p) || page_count(p))
9383 +#endif /* __KERNEL__ */
9385 +#else /* !CONFIG_CRASH_DUMP */
9387 +/* If not configured then make code disappear! */
9388 +#define register_dump_watchdog(x) do { } while(0)
9389 +#define unregister_dump_watchdog(x) do { } while(0)
9390 +#define register_dump_notifier(x) do { } while(0)
9391 +#define unregister_dump_notifier(x) do { } while(0)
9392 +#define dump_in_progress() 0
9393 +#define dump(x, y) do { } while(0)
9395 +#endif /* !CONFIG_CRASH_DUMP */
9397 +#endif /* _DUMP_H */
9398 Index: linux-2.6.10/include/linux/miscdevice.h
9399 ===================================================================
9400 --- linux-2.6.10.orig/include/linux/miscdevice.h 2004-12-25 05:34:58.000000000 +0800
9401 +++ linux-2.6.10/include/linux/miscdevice.h 2005-04-07 18:13:56.660790096 +0800
9403 #define MICROCODE_MINOR 184
9404 #define MWAVE_MINOR 219 /* ACP/Mwave Modem */
9405 #define MPT_MINOR 220
9406 +#define CRASH_DUMP_MINOR 230 /* LKCD */
9407 #define MISC_DYNAMIC_MINOR 255
9409 #define TUN_MINOR 200
9410 Index: linux-2.6.10/include/asm-um/kerntypes.h
9411 ===================================================================
9412 --- linux-2.6.10.orig/include/asm-um/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9413 +++ linux-2.6.10/include/asm-um/kerntypes.h 2005-04-07 18:13:56.636793744 +0800
9416 + * asm-um/kerntypes.h
9418 + * Arch-dependent header file that includes headers for all arch-specific
9419 + * types of interest.
9420 + * The kernel type information is used by the lcrash utility when
9421 + * analyzing system crash dumps or the live system. Using the type
9422 + * information for the running system, rather than kernel header files,
9423 + * makes for a more flexible and robust analysis tool.
9425 + * This source code is released under the GNU GPL.
9428 +/* Usermode-Linux-specific header files */
9429 +#ifndef _UM_KERNTYPES_H
9430 +#define _UM_KERNTYPES_H
9432 +/* Use the default */
9433 +#include <asm-generic/kerntypes.h>
9435 +#endif /* _UM_KERNTYPES_H */
9436 Index: linux-2.6.10/include/asm-generic/kerntypes.h
9437 ===================================================================
9438 --- linux-2.6.10.orig/include/asm-generic/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9439 +++ linux-2.6.10/include/asm-generic/kerntypes.h 2005-04-07 18:13:56.716781584 +0800
9442 + * asm-generic/kerntypes.h
9444 + * Arch-dependent header file that includes headers for all arch-specific
9445 + * types of interest.
9446 + * The kernel type information is used by the lcrash utility when
9447 + * analyzing system crash dumps or the live system. Using the type
9448 + * information for the running system, rather than kernel header files,
9449 + * makes for a more flexible and robust analysis tool.
9451 + * This source code is released under the GNU GPL.
9454 +/* Arch-independent header files */
9455 +#ifndef _GENERIC_KERNTYPES_H
9456 +#define _GENERIC_KERNTYPES_H
9458 +#include <linux/pci.h>
9460 +#endif /* _GENERIC_KERNTYPES_H */
9461 Index: linux-2.6.10/include/asm-sparc/kerntypes.h
9462 ===================================================================
9463 --- linux-2.6.10.orig/include/asm-sparc/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9464 +++ linux-2.6.10/include/asm-sparc/kerntypes.h 2005-04-07 18:13:56.739778088 +0800
9467 + * asm-sparc/kerntypes.h
9469 + * Arch-dependent header file that includes headers for all arch-specific
9470 + * types of interest.
9471 + * The kernel type information is used by the lcrash utility when
9472 + * analyzing system crash dumps or the live system. Using the type
9473 + * information for the running system, rather than kernel header files,
9474 + * makes for a more flexible and robust analysis tool.
9476 + * This source code is released under the GNU GPL.
9479 +/* SPARC-specific header files */
9480 +#ifndef _SPARC_KERNTYPES_H
9481 +#define _SPARC_KERNTYPES_H
9483 +/* Use the default */
9484 +#include <asm-generic/kerntypes.h>
9486 +#endif /* _SPARC_KERNTYPES_H */
9487 Index: linux-2.6.10/include/asm-arm/kerntypes.h
9488 ===================================================================
9489 --- linux-2.6.10.orig/include/asm-arm/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9490 +++ linux-2.6.10/include/asm-arm/kerntypes.h 2005-04-07 18:13:56.739778088 +0800
9493 + * asm-arm/kerntypes.h
9495 + * Arch-dependent header file that includes headers for all arch-specific
9496 + * types of interest.
9497 + * The kernel type information is used by the lcrash utility when
9498 + * analyzing system crash dumps or the live system. Using the type
9499 + * information for the running system, rather than kernel header files,
9500 + * makes for a more flexible and robust analysis tool.
9502 + * This source code is released under the GNU GPL.
9505 +/* ARM-specific header files */
9506 +#ifndef _ARM_KERNTYPES_H
9507 +#define _ARM_KERNTYPES_H
9509 +/* Use the default */
9510 +#include <asm-generic/kerntypes.h>
9512 +#endif /* _ARM_KERNTYPES_H */
9513 Index: linux-2.6.10/include/asm-sparc64/kerntypes.h
9514 ===================================================================
9515 --- linux-2.6.10.orig/include/asm-sparc64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9516 +++ linux-2.6.10/include/asm-sparc64/kerntypes.h 2005-04-07 18:13:56.714781888 +0800
9519 + * asm-sparc64/kerntypes.h
9521 + * Arch-dependent header file that includes headers for all arch-specific
9522 + * types of interest.
9523 + * The kernel type information is used by the lcrash utility when
9524 + * analyzing system crash dumps or the live system. Using the type
9525 + * information for the running system, rather than kernel header files,
9526 + * makes for a more flexible and robust analysis tool.
9528 + * This source code is released under the GNU GPL.
9531 +/* SPARC64-specific header files */
9532 +#ifndef _SPARC64_KERNTYPES_H
9533 +#define _SPARC64_KERNTYPES_H
9535 +/* Use the default */
9536 +#include <asm-generic/kerntypes.h>
9538 +#endif /* _SPARC64_KERNTYPES_H */
9539 Index: linux-2.6.10/include/asm-mips64/kerntypes.h
9540 ===================================================================
9541 --- linux-2.6.10.orig/include/asm-mips64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9542 +++ linux-2.6.10/include/asm-mips64/kerntypes.h 2005-04-07 18:13:56.740777936 +0800
9545 + * asm-mips64/kerntypes.h
9547 + * Arch-dependent header file that includes headers for all arch-specific
9548 + * types of interest.
9549 + * The kernel type information is used by the lcrash utility when
9550 + * analyzing system crash dumps or the live system. Using the type
9551 + * information for the running system, rather than kernel header files,
9552 + * makes for a more flexible and robust analysis tool.
9554 + * This source code is released under the GNU GPL.
9557 +/* MIPS64-specific header files */
9558 +#ifndef _MIPS64_KERNTYPES_H
9559 +#define _MIPS64_KERNTYPES_H
9561 +/* Use the default */
9562 +#include <asm-generic/kerntypes.h>
9564 +#endif /* _MIPS64_KERNTYPES_H */
9565 Index: linux-2.6.10/include/asm-v850/kerntypes.h
9566 ===================================================================
9567 --- linux-2.6.10.orig/include/asm-v850/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9568 +++ linux-2.6.10/include/asm-v850/kerntypes.h 2005-04-07 18:13:56.695784776 +0800
9571 + * asm-v850/kerntypes.h
9573 + * Arch-dependent header file that includes headers for all arch-specific
9574 + * types of interest.
9575 + * The kernel type information is used by the lcrash utility when
9576 + * analyzing system crash dumps or the live system. Using the type
9577 + * information for the running system, rather than kernel header files,
9578 + * makes for a more flexible and robust analysis tool.
9580 + * This source code is released under the GNU GPL.
9583 +/* V850-specific header files */
9584 +#ifndef _V850_KERNTYPES_H
9585 +#define _V850_KERNTYPES_H
9587 +/* Use the default */
9588 +#include <asm-generic/kerntypes.h>
9590 +#endif /* _V850_KERNTYPES_H */
9591 Index: linux-2.6.10/include/asm-sh/kerntypes.h
9592 ===================================================================
9593 --- linux-2.6.10.orig/include/asm-sh/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9594 +++ linux-2.6.10/include/asm-sh/kerntypes.h 2005-04-07 18:13:56.667789032 +0800
9597 + * asm-sh/kerntypes.h
9599 + * Arch-dependent header file that includes headers for all arch-specific
9600 + * types of interest.
9601 + * The kernel type information is used by the lcrash utility when
9602 + * analyzing system crash dumps or the live system. Using the type
9603 + * information for the running system, rather than kernel header files,
9604 + * makes for a more flexible and robust analysis tool.
9606 + * This source code is released under the GNU GPL.
9609 +/* Super-H-specific header files */
9610 +#ifndef _SH_KERNTYPES_H
9611 +#define _SH_KERNTYPES_H
9613 +/* Use the default */
9614 +#include <asm-generic/kerntypes.h>
9616 +#endif /* _SH_KERNTYPES_H */
9617 Index: linux-2.6.10/include/asm-alpha/kerntypes.h
9618 ===================================================================
9619 --- linux-2.6.10.orig/include/asm-alpha/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9620 +++ linux-2.6.10/include/asm-alpha/kerntypes.h 2005-04-07 18:13:56.666789184 +0800
9623 + * asm-alpha/kerntypes.h
9625 + * Arch-dependent header file that includes headers for all arch-specific
9626 + * types of interest.
9627 + * The kernel type information is used by the lcrash utility when
9628 + * analyzing system crash dumps or the live system. Using the type
9629 + * information for the running system, rather than kernel header files,
9630 + * makes for a more flexible and robust analysis tool.
9632 + * This source code is released under the GNU GPL.
9635 +/* Alpha-specific header files */
9636 +#ifndef _ALPHA_KERNTYPES_H
9637 +#define _ALPHA_KERNTYPES_H
9639 +/* Use the default */
9640 +#include <asm-generic/kerntypes.h>
9642 +#endif /* _ALPHA_KERNTYPES_H */
9643 Index: linux-2.6.10/include/asm-ppc/kerntypes.h
9644 ===================================================================
9645 --- linux-2.6.10.orig/include/asm-ppc/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9646 +++ linux-2.6.10/include/asm-ppc/kerntypes.h 2005-04-07 18:13:56.665789336 +0800
9649 + * asm-ppc/kerntypes.h
9651 + * Arch-dependent header file that includes headers for all arch-specific
9652 + * types of interest.
9653 + * The kernel type information is used by the lcrash utility when
9654 + * analyzing system crash dumps or the live system. Using the type
9655 + * information for the running system, rather than kernel header files,
9656 + * makes for a more flexible and robust analysis tool.
9658 + * This source code is released under the GNU GPL.
9661 +/* PowerPC-specific header files */
9662 +#ifndef _PPC_KERNTYPES_H
9663 +#define _PPC_KERNTYPES_H
9665 +/* Use the default */
9666 +#include <asm-generic/kerntypes.h>
9668 +#endif /* _PPC_KERNTYPES_H */
9669 Index: linux-2.6.10/include/asm-m68knommu/kerntypes.h
9670 ===================================================================
9671 --- linux-2.6.10.orig/include/asm-m68knommu/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9672 +++ linux-2.6.10/include/asm-m68knommu/kerntypes.h 2005-04-07 18:13:56.694784928 +0800
9675 + * asm-m68knommu/kerntypes.h
9677 + * Arch-dependent header file that includes headers for all arch-specific
9678 + * types of interest.
9679 + * The kernel type information is used by the lcrash utility when
9680 + * analyzing system crash dumps or the live system. Using the type
9681 + * information for the running system, rather than kernel header files,
9682 + * makes for a more flexible and robust analysis tool.
9684 + * This source code is released under the GNU GPL.
9687 +/* m68k/no-MMU-specific header files */
9688 +#ifndef _M68KNOMMU_KERNTYPES_H
9689 +#define _M68KNOMMU_KERNTYPES_H
9691 +/* Use the default */
9692 +#include <asm-generic/kerntypes.h>
9694 +#endif /* _M68KNOMMU_KERNTYPES_H */
9695 Index: linux-2.6.10/include/asm-x86_64/hw_irq.h
9696 ===================================================================
9697 --- linux-2.6.10.orig/include/asm-x86_64/hw_irq.h 2004-12-25 05:35:39.000000000 +0800
9698 +++ linux-2.6.10/include/asm-x86_64/hw_irq.h 2005-04-07 18:13:56.705783256 +0800
9701 #define IA32_SYSCALL_VECTOR 0x80
9705 * Vectors 0x20-0x2f are used for ISA interrupts.
9708 #define TASK_MIGRATION_VECTOR 0xfb
9709 #define CALL_FUNCTION_VECTOR 0xfa
9710 #define KDB_VECTOR 0xf9
9711 +#define DUMP_VECTOR 0xf8
9713 #define THERMAL_APIC_VECTOR 0xf0
9715 Index: linux-2.6.10/include/asm-x86_64/kerntypes.h
9716 ===================================================================
9717 --- linux-2.6.10.orig/include/asm-x86_64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9718 +++ linux-2.6.10/include/asm-x86_64/kerntypes.h 2005-04-07 18:13:56.696784624 +0800
9721 + * asm-x86_64/kerntypes.h
9723 + * Arch-dependent header file that includes headers for all arch-specific
9724 + * types of interest.
9725 + * The kernel type information is used by the lcrash utility when
9726 + * analyzing system crash dumps or the live system. Using the type
9727 + * information for the running system, rather than kernel header files,
9728 + * makes for a more flexible and robust analysis tool.
9730 + * This source code is released under the GNU GPL.
9733 +/* x86_64-specific header files */
9734 +#ifndef _X86_64_KERNTYPES_H
9735 +#define _X86_64_KERNTYPES_H
9737 +/* Use the default */
9738 +#include <asm-generic/kerntypes.h>
9740 +#endif /* _X86_64_KERNTYPES_H */
9741 Index: linux-2.6.10/include/asm-x86_64/dump.h
9742 ===================================================================
9743 --- linux-2.6.10.orig/include/asm-x86_64/dump.h 2005-04-07 19:34:21.197950744 +0800
9744 +++ linux-2.6.10/include/asm-x86_64/dump.h 2005-04-07 18:13:56.696784624 +0800
9747 + * Kernel header file for Linux crash dumps.
9749 + * Created by: Matt Robinson (yakker@sgi.com)
9751 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
9752 + * x86_64 lkcd port Sachin Sant ( sachinp@in.ibm.com)
9753 + * This code is released under version 2 of the GNU GPL.
9756 +/* This header file holds the architecture specific crash dump header */
9757 +#ifndef _ASM_DUMP_H
9758 +#define _ASM_DUMP_H
9760 +/* necessary header files */
9761 +#include <asm/ptrace.h> /* for pt_regs */
9762 +#include <linux/threads.h>
9765 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
9766 +#define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */
9770 + * Structure: dump_header_asm_t
9771 + * Function: This is the header for architecture-specific stuff. It
9772 + * follows right after the dump header.
9774 +struct __dump_header_asm {
9776 + /* the dump magic number -- unique to verify dump is valid */
9777 + uint64_t dha_magic_number;
9779 + /* the version number of this dump */
9780 + uint32_t dha_version;
9782 + /* the size of this header (in case we can't read it) */
9783 + uint32_t dha_header_size;
9785 + /* the dump registers */
9786 + struct pt_regs dha_regs;
9788 + /* smp specific */
9789 + uint32_t dha_smp_num_cpus;
9790 + int dha_dumping_cpu;
9791 + struct pt_regs dha_smp_regs[NR_CPUS];
9792 + uint64_t dha_smp_current_task[NR_CPUS];
9793 + uint64_t dha_stack[NR_CPUS];
9794 + uint64_t dha_stack_ptr[NR_CPUS];
9795 +} __attribute__((packed));
9798 +static inline void get_current_regs(struct pt_regs *regs)
9801 + __asm__ __volatile__("movq %%r15,%0" : "=m"(regs->r15));
9802 + __asm__ __volatile__("movq %%r14,%0" : "=m"(regs->r14));
9803 + __asm__ __volatile__("movq %%r13,%0" : "=m"(regs->r13));
9804 + __asm__ __volatile__("movq %%r12,%0" : "=m"(regs->r12));
9805 + __asm__ __volatile__("movq %%r11,%0" : "=m"(regs->r11));
9806 + __asm__ __volatile__("movq %%r10,%0" : "=m"(regs->r10));
9807 + __asm__ __volatile__("movq %%r9,%0" : "=m"(regs->r9));
9808 + __asm__ __volatile__("movq %%r8,%0" : "=m"(regs->r8));
9809 + __asm__ __volatile__("movq %%rbx,%0" : "=m"(regs->rbx));
9810 + __asm__ __volatile__("movq %%rcx,%0" : "=m"(regs->rcx));
9811 + __asm__ __volatile__("movq %%rdx,%0" : "=m"(regs->rdx));
9812 + __asm__ __volatile__("movq %%rsi,%0" : "=m"(regs->rsi));
9813 + __asm__ __volatile__("movq %%rdi,%0" : "=m"(regs->rdi));
9814 + __asm__ __volatile__("movq %%rbp,%0" : "=m"(regs->rbp));
9815 + __asm__ __volatile__("movq %%rax,%0" : "=m"(regs->rax));
9816 + __asm__ __volatile__("movq %%rsp,%0" : "=m"(regs->rsp));
9817 + __asm__ __volatile__("movl %%ss, %0" :"=r"(seg));
9818 + regs->ss = (unsigned long)seg;
9819 + __asm__ __volatile__("movl %%cs, %0" :"=r"(seg));
9820 + regs->cs = (unsigned long)seg;
9821 + __asm__ __volatile__("pushfq; popq %0" :"=m"(regs->eflags));
9822 + regs->rip = (unsigned long)current_text_addr();
9826 +extern volatile int dump_in_progress;
9827 +extern struct __dump_header_asm dump_header_asm;
9832 +extern void dump_send_ipi(void);
9834 +#define dump_send_ipi() do { } while(0)
9836 +#endif /* __KERNEL__ */
9838 +#endif /* _ASM_DUMP_H */
9839 Index: linux-2.6.10/include/asm-x86_64/kmap_types.h
9840 ===================================================================
9841 --- linux-2.6.10.orig/include/asm-x86_64/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
9842 +++ linux-2.6.10/include/asm-x86_64/kmap_types.h 2005-04-07 18:13:56.710782496 +0800
9853 Index: linux-2.6.10/include/asm-x86_64/smp.h
9854 ===================================================================
9855 --- linux-2.6.10.orig/include/asm-x86_64/smp.h 2004-12-25 05:33:48.000000000 +0800
9856 +++ linux-2.6.10/include/asm-x86_64/smp.h 2005-04-07 18:13:56.712782192 +0800
9858 extern int pic_mode;
9859 extern int smp_num_siblings;
9860 extern void smp_flush_tlb(void);
9861 +extern void dump_send_ipi(void);
9862 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
9863 extern void smp_send_reschedule(int cpu);
9864 extern void smp_invalidate_rcv(void); /* Process an NMI */
9865 Index: linux-2.6.10/include/asm-h8300/kerntypes.h
9866 ===================================================================
9867 --- linux-2.6.10.orig/include/asm-h8300/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9868 +++ linux-2.6.10/include/asm-h8300/kerntypes.h 2005-04-07 18:13:56.665789336 +0800
9871 + * asm-h8300/kerntypes.h
9873 + * Arch-dependent header file that includes headers for all arch-specific
9874 + * types of interest.
9875 + * The kernel type information is used by the lcrash utility when
9876 + * analyzing system crash dumps or the live system. Using the type
9877 + * information for the running system, rather than kernel header files,
9878 + * makes for a more flexible and robust analysis tool.
9880 + * This source code is released under the GNU GPL.
9883 +/* H8300-specific header files */
9884 +#ifndef _H8300_KERNTYPES_H
9885 +#define _H8300_KERNTYPES_H
9887 +/* Use the default */
9888 +#include <asm-generic/kerntypes.h>
9890 +#endif /* _H8300_KERNTYPES_H */
9891 Index: linux-2.6.10/include/asm-cris/kerntypes.h
9892 ===================================================================
9893 --- linux-2.6.10.orig/include/asm-cris/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9894 +++ linux-2.6.10/include/asm-cris/kerntypes.h 2005-04-07 18:13:56.694784928 +0800
9897 + * asm-cris/kerntypes.h
9899 + * Arch-dependent header file that includes headers for all arch-specific
9900 + * types of interest.
9901 + * The kernel type information is used by the lcrash utility when
9902 + * analyzing system crash dumps or the live system. Using the type
9903 + * information for the running system, rather than kernel header files,
9904 + * makes for a more flexible and robust analysis tool.
9906 + * This source code is released under the GNU GPL.
9909 +/* CRIS-specific header files */
9910 +#ifndef _CRIS_KERNTYPES_H
9911 +#define _CRIS_KERNTYPES_H
9913 +/* Use the default */
9914 +#include <asm-generic/kerntypes.h>
9916 +#endif /* _CRIS_KERNTYPES_H */
9917 Index: linux-2.6.10/include/asm-mips/kerntypes.h
9918 ===================================================================
9919 --- linux-2.6.10.orig/include/asm-mips/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9920 +++ linux-2.6.10/include/asm-mips/kerntypes.h 2005-04-07 18:13:56.714781888 +0800
9923 + * asm-mips/kerntypes.h
9925 + * Arch-dependent header file that includes headers for all arch-specific
9926 + * types of interest.
9927 + * The kernel type information is used by the lcrash utility when
9928 + * analyzing system crash dumps or the live system. Using the type
9929 + * information for the running system, rather than kernel header files,
9930 + * makes for a more flexible and robust analysis tool.
9932 + * This source code is released under the GNU GPL.
9935 +/* MIPS-specific header files */
9936 +#ifndef _MIPS_KERNTYPES_H
9937 +#define _MIPS_KERNTYPES_H
9939 +/* Use the default */
9940 +#include <asm-generic/kerntypes.h>
9942 +#endif /* _MIPS_KERNTYPES_H */
9943 Index: linux-2.6.10/include/asm-arm26/kerntypes.h
9944 ===================================================================
9945 --- linux-2.6.10.orig/include/asm-arm26/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9946 +++ linux-2.6.10/include/asm-arm26/kerntypes.h 2005-04-07 18:13:56.666789184 +0800
9949 + * asm-arm26/kerntypes.h
9951 + * Arch-dependent header file that includes headers for all arch-specific
9952 + * types of interest.
9953 + * The kernel type information is used by the lcrash utility when
9954 + * analyzing system crash dumps or the live system. Using the type
9955 + * information for the running system, rather than kernel header files,
9956 + * makes for a more flexible and robust analysis tool.
9958 + * This source code is released under the GNU GPL.
9961 +/* ARM26-specific header files */
9962 +#ifndef _ARM26_KERNTYPES_H
9963 +#define _ARM26_KERNTYPES_H
9965 +/* Use the default */
9966 +#include <asm-generic/kerntypes.h>
9968 +#endif /* _ARM26_KERNTYPES_H */
9969 Index: linux-2.6.10/include/asm-parisc/kerntypes.h
9970 ===================================================================
9971 --- linux-2.6.10.orig/include/asm-parisc/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9972 +++ linux-2.6.10/include/asm-parisc/kerntypes.h 2005-04-07 18:13:56.664789488 +0800
9975 + * asm-parisc/kerntypes.h
9977 + * Arch-dependent header file that includes headers for all arch-specific
9978 + * types of interest.
9979 + * The kernel type information is used by the lcrash utility when
9980 + * analyzing system crash dumps or the live system. Using the type
9981 + * information for the running system, rather than kernel header files,
9982 + * makes for a more flexible and robust analysis tool.
9984 + * This source code is released under the GNU GPL.
9987 +/* PA-RISC-specific header files */
9988 +#ifndef _PARISC_KERNTYPES_H
9989 +#define _PARISC_KERNTYPES_H
9991 +/* Use the default */
9992 +#include <asm-generic/kerntypes.h>
9994 +#endif /* _PARISC_KERNTYPES_H */
9995 Index: linux-2.6.10/include/asm-ia64/kerntypes.h
9996 ===================================================================
9997 --- linux-2.6.10.orig/include/asm-ia64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
9998 +++ linux-2.6.10/include/asm-ia64/kerntypes.h 2005-04-07 18:13:56.681786904 +0800
10001 + * asm-ia64/kerntypes.h
10003 + * Arch-dependent header file that includes headers for all arch-specific
10004 + * types of interest.
10005 + * The kernel type information is used by the lcrash utility when
10006 + * analyzing system crash dumps or the live system. Using the type
10007 + * information for the running system, rather than kernel header files,
10008 + * makes for a more flexible and robust analysis tool.
10010 + * This source code is released under the GNU GPL.
10013 +/* IA64-specific header files */
10014 +#ifndef _IA64_KERNTYPES_H
10015 +#define _IA64_KERNTYPES_H
10017 +/* Use the default */
10018 +#include <asm-generic/kerntypes.h>
10020 +#endif /* _IA64_KERNTYPES_H */
10021 Index: linux-2.6.10/include/asm-ia64/dump.h
10022 ===================================================================
10023 --- linux-2.6.10.orig/include/asm-ia64/dump.h 2005-04-07 19:34:21.197950744 +0800
10024 +++ linux-2.6.10/include/asm-ia64/dump.h 2005-04-07 18:13:56.680787056 +0800
10027 + * Kernel header file for Linux crash dumps.
10029 + * Created by: Matt Robinson (yakker@sgi.com)
10031 + * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
10033 + * This code is released under version 2 of the GNU GPL.
10036 +/* This header file holds the architecture specific crash dump header */
10037 +#ifndef _ASM_DUMP_H
10038 +#define _ASM_DUMP_H
10041 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
10042 +#define DUMP_ASM_VERSION_NUMBER 0x4 /* version number */
10045 +#include <linux/efi.h>
10046 +#include <asm/pal.h>
10047 +#include <asm/ptrace.h>
10050 +extern cpumask_t irq_affinity[];
10051 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
10052 +extern void dump_send_ipi(void);
10053 +#else /* !CONFIG_SMP */
10054 +#define dump_send_ipi() do { } while(0)
10057 +#else /* !__KERNEL__ */
10058 +/* necessary header files */
10059 +#include <asm/ptrace.h> /* for pt_regs */
10060 +#include <linux/threads.h>
10061 +#endif /* __KERNEL__ */
10064 + * mkswap.c calls getpagesize() to get the system page size,
10065 + * which is not necessarily the same as the hardware page size.
10067 + * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB.
9369 + * The physical memory is laid out in the hardware/minimal pages.
10070 + * This is the size we need to use for dumping physical pages.
9372 + * Note the hardware/minimal page size being used in:
10073 + * arch/ia64/kernel/efi.c`efi_memmap_walk():
10074 + * curr.end = curr.start + (md->num_pages << 12);
9376 + * Since the system page size could change between the kernel we boot
9377 + * on and the kernel that caused the core dump, we may want to have something
9378 + * more constant like the maximum system page size (see include/asm-ia64/page.h).
9380 +/* IA64 manages the stack in a different manner compared to other architectures.
10081 + * task_struct lies at the bottom of stack.
10083 +#undef STACK_START_POSITION
10084 +#define STACK_START_POSITION(tsk) (tsk)
10085 +#define DUMP_MIN_PAGE_SHIFT 12
10086 +#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT)
10087 +#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1))
10088 +#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK)
10090 +#define DUMP_MAX_PAGE_SHIFT 16
10091 +#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT)
10092 +#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1))
10093 +#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK)
10095 +#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT
10097 +extern int _end,_start;
10100 + * Structure: dump_header_asm_t
10101 + * Function: This is the header for architecture-specific stuff. It
10102 + * follows right after the dump header.
10104 +/*typedef struct _dump_header_asm {*/
10106 +typedef struct __dump_header_asm {
10108 + /* the dump magic number -- unique to verify dump is valid */
10109 + uint64_t dha_magic_number;
10111 + /* the version number of this dump */
10112 + uint32_t dha_version;
10114 + /* the size of this header (in case we can't read it) */
10115 + uint32_t dha_header_size;
10117 + /* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */
10118 + uint64_t dha_pt_regs;
10120 + /* the dump registers */
10121 + struct pt_regs dha_regs;
10123 + /* the rnat register saved after flushrs */
10124 + uint64_t dha_rnat;
10126 + /* the pfs register saved after flushrs */
10127 + uint64_t dha_pfs;
10129 + /* the bspstore register saved after flushrs */
10130 + uint64_t dha_bspstore;
10132 + /* smp specific */
10133 + uint32_t dha_smp_num_cpus;
10134 + uint32_t dha_dumping_cpu;
10135 + struct pt_regs dha_smp_regs[NR_CPUS];
10136 + uint64_t dha_smp_current_task[NR_CPUS];
10137 + uint64_t dha_stack[NR_CPUS];
10138 + uint64_t dha_stack_ptr[NR_CPUS];
10140 +} __attribute__((packed)) dump_header_asm_t;
10143 +extern struct __dump_header_asm dump_header_asm;
10146 +static inline void get_current_regs(struct pt_regs *regs)
10149 + * REMIND: Looking at functions/Macros like:
10150 + * DO_SAVE_SWITCH_STACK
10151 + * ia64_switch_to()
10152 + * ia64_save_extra()
10154 + * to implement this new feature that Matt seem to have added
10155 + * to panic.c; seems all platforms are now expected to provide
10156 + * this function to dump the current registers into the pt_regs
10159 + volatile unsigned long rsc_value;/*for storing the rsc value*/
10160 + volatile unsigned long ic_value;
10162 + __asm__ __volatile__("mov %0=b6;;":"=r"(regs->b6));
10163 + __asm__ __volatile__("mov %0=b7;;":"=r"(regs->b7));
10165 + __asm__ __volatile__("mov %0=ar.csd;;":"=r"(regs->ar_csd));
10166 + __asm__ __volatile__("mov %0=ar.ssd;;":"=r"(regs->ar_ssd));
10167 + __asm__ __volatile__("mov %0=psr;;":"=r"(ic_value));
10168 + if(ic_value & 0x1000)/*Within an interrupt*/
10170 + __asm__ __volatile__("mov %0=cr.ipsr;;":"=r"(regs->cr_ipsr));
10171 + __asm__ __volatile__("mov %0=cr.iip;;":"=r"(regs->cr_iip));
10172 + __asm__ __volatile__("mov %0=cr.ifs;;":"=r"(regs->cr_ifs));
10176 + regs->cr_ipsr=regs->cr_iip=regs->cr_ifs=(unsigned long)-1;
10178 + __asm__ __volatile__("mov %0=ar.unat;;":"=r"(regs->ar_unat));
10179 + __asm__ __volatile__("mov %0=ar.pfs;;":"=r"(regs->ar_pfs));
10180 + __asm__ __volatile__("mov %0=ar.rsc;;":"=r"(rsc_value));
10181 + regs->ar_rsc = rsc_value;
10182 + /*loadrs is from 16th bit to 29th bit of rsc*/
10183 + regs->loadrs = rsc_value >> 16 & (unsigned long)0x3fff;
10184 + /*setting the rsc.mode value to 0 (rsc.mode is the last two bits of rsc)*/
10185 + __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value & (unsigned long)(~3)));
10186 + __asm__ __volatile__("mov %0=ar.rnat;;":"=r"(regs->ar_rnat));
10187 + __asm__ __volatile__("mov %0=ar.bspstore;;":"=r"(regs->ar_bspstore));
10188 + /*copying the original value back*/
10189 + __asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value));
10190 + __asm__ __volatile__("mov %0=pr;;":"=r"(regs->pr));
10191 + __asm__ __volatile__("mov %0=ar.fpsr;;":"=r"(regs->ar_fpsr));
10192 + __asm__ __volatile__("mov %0=ar.ccv;;":"=r"(regs->ar_ccv));
10194 + __asm__ __volatile__("mov %0=r2;;":"=r"(regs->r2));
10195 + __asm__ __volatile__("mov %0=r3;;":"=r"(regs->r3));
10196 + __asm__ __volatile__("mov %0=r8;;":"=r"(regs->r8));
10197 + __asm__ __volatile__("mov %0=r9;;":"=r"(regs->r9));
10198 + __asm__ __volatile__("mov %0=r10;;":"=r"(regs->r10));
10199 + __asm__ __volatile__("mov %0=r11;;":"=r"(regs->r11));
10200 + __asm__ __volatile__("mov %0=r12;;":"=r"(regs->r12));
10201 + __asm__ __volatile__("mov %0=r13;;":"=r"(regs->r13));
10202 + __asm__ __volatile__("mov %0=r14;;":"=r"(regs->r14));
10203 + __asm__ __volatile__("mov %0=r15;;":"=r"(regs->r15));
10204 + __asm__ __volatile__("mov %0=r16;;":"=r"(regs->r16));
10205 + __asm__ __volatile__("mov %0=r17;;":"=r"(regs->r17));
10206 + __asm__ __volatile__("mov %0=r18;;":"=r"(regs->r18));
10207 + __asm__ __volatile__("mov %0=r19;;":"=r"(regs->r19));
10208 + __asm__ __volatile__("mov %0=r20;;":"=r"(regs->r20));
10209 + __asm__ __volatile__("mov %0=r21;;":"=r"(regs->r21));
10210 + __asm__ __volatile__("mov %0=r22;;":"=r"(regs->r22));
10211 + __asm__ __volatile__("mov %0=r23;;":"=r"(regs->r23));
10212 + __asm__ __volatile__("mov %0=r24;;":"=r"(regs->r24));
10213 + __asm__ __volatile__("mov %0=r25;;":"=r"(regs->r25));
10214 + __asm__ __volatile__("mov %0=r26;;":"=r"(regs->r26));
10215 + __asm__ __volatile__("mov %0=r27;;":"=r"(regs->r27));
10216 + __asm__ __volatile__("mov %0=r28;;":"=r"(regs->r28));
10217 + __asm__ __volatile__("mov %0=r29;;":"=r"(regs->r29));
10218 + __asm__ __volatile__("mov %0=r30;;":"=r"(regs->r30));
10219 + __asm__ __volatile__("mov %0=r31;;":"=r"(regs->r31));
10222 +/* Perhaps to be added to the common arch-specific functions and moved to dump.h some day */
10223 +extern void * __dump_memcpy(void *, const void *, size_t);
10224 +#endif /* __KERNEL__ */
10226 +#endif /* _ASM_DUMP_H */
10227 Index: linux-2.6.10/include/asm-ia64/nmi.h
10228 ===================================================================
10229 --- linux-2.6.10.orig/include/asm-ia64/nmi.h 2005-04-07 19:34:21.197950744 +0800
10230 +++ linux-2.6.10/include/asm-ia64/nmi.h 2005-04-07 18:13:56.679787208 +0800
10233 + * linux/include/asm-ia64/nmi.h
10238 +#include <linux/pm.h>
10242 +typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
10245 + * set_nmi_callback
10247 + * Set a handler for an NMI. Only one handler may be
10248 + * set. Return 1 if the NMI was handled.
10250 +void set_nmi_callback(nmi_callback_t callback);
10253 + * unset_nmi_callback
10255 + * Remove the handler previously set.
10257 +void unset_nmi_callback(void);
10259 +#endif /* ASM_NMI_H */
10260 Index: linux-2.6.10/include/asm-ppc64/kerntypes.h
10261 ===================================================================
10262 --- linux-2.6.10.orig/include/asm-ppc64/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
10263 +++ linux-2.6.10/include/asm-ppc64/kerntypes.h 2005-04-07 18:13:56.682786752 +0800
10266 + * asm-ppc64/kerntypes.h
10268 + * Arch-dependent header file that includes headers for all arch-specific
10269 + * types of interest.
10270 + * The kernel type information is used by the lcrash utility when
10271 + * analyzing system crash dumps or the live system. Using the type
10272 + * information for the running system, rather than kernel header files,
10273 + * makes for a more flexible and robust analysis tool.
10275 + * This source code is released under the GNU GPL.
10278 +/* PPC64-specific header files */
10279 +#ifndef _PPC64_KERNTYPES_H
10280 +#define _PPC64_KERNTYPES_H
10282 +/* Use the default */
10283 +#include <asm-generic/kerntypes.h>
10285 +#endif /* _PPC64_KERNTYPES_H */
10286 Index: linux-2.6.10/include/asm-ppc64/dump.h
10287 ===================================================================
10288 --- linux-2.6.10.orig/include/asm-ppc64/dump.h 2005-04-07 19:34:21.197950744 +0800
10289 +++ linux-2.6.10/include/asm-ppc64/dump.h 2005-04-07 18:13:56.681786904 +0800
10292 + * Kernel header file for Linux crash dumps.
10294 + * Created by: Todd Inglett <tinglett@vnet.ibm.com>
10296 + * Copyright 2002 - 2004 International Business Machines
10298 + * This code is released under version 2 of the GNU GPL.
10301 +/* This header file holds the architecture specific crash dump header */
10302 +#ifndef _ASM_DUMP_H
10303 +#define _ASM_DUMP_H
10305 +/* necessary header files */
10306 +#include <asm/ptrace.h> /* for pt_regs */
10307 +#include <asm/kmap_types.h>
10308 +#include <linux/threads.h>
10311 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
10312 +#define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */
10315 + * Structure: __dump_header_asm
10316 + * Function: This is the header for architecture-specific stuff. It
10317 + * follows right after the dump header.
10319 +struct __dump_header_asm {
10321 + /* the dump magic number -- unique to verify dump is valid */
10322 + uint64_t dha_magic_number;
10324 + /* the version number of this dump */
10325 + uint32_t dha_version;
10327 + /* the size of this header (in case we can't read it) */
10328 + uint32_t dha_header_size;
10330 + /* the dump registers */
10331 + struct pt_regs dha_regs;
10333 + /* smp specific */
10334 + uint32_t dha_smp_num_cpus;
10335 + int dha_dumping_cpu;
10336 + struct pt_regs dha_smp_regs[NR_CPUS];
10337 + uint64_t dha_smp_current_task[NR_CPUS];
10338 + uint64_t dha_stack[NR_CPUS];
10339 + uint64_t dha_stack_ptr[NR_CPUS];
10340 +} __attribute__((packed));
10343 +static inline void get_current_regs(struct pt_regs *regs)
10345 + unsigned long tmp1, tmp2;
10347 + __asm__ __volatile__ (
10358 + "std 10,80(%2)\n"
10359 + "std 11,88(%2)\n"
10360 + "std 12,96(%2)\n"
10361 + "std 13,104(%2)\n"
10362 + "std 14,112(%2)\n"
10363 + "std 15,120(%2)\n"
10364 + "std 16,128(%2)\n"
10365 + "std 17,136(%2)\n"
10366 + "std 18,144(%2)\n"
10367 + "std 19,152(%2)\n"
10368 + "std 20,160(%2)\n"
10369 + "std 21,168(%2)\n"
10370 + "std 22,176(%2)\n"
10371 + "std 23,184(%2)\n"
10372 + "std 24,192(%2)\n"
10373 + "std 25,200(%2)\n"
10374 + "std 26,208(%2)\n"
10375 + "std 27,216(%2)\n"
10376 + "std 28,224(%2)\n"
10377 + "std 29,232(%2)\n"
10378 + "std 30,240(%2)\n"
10379 + "std 31,248(%2)\n"
10381 + "std %0, 264(%2)\n"
10383 + "std %0, 280(%2)\n"
10385 + "std %0, 288(%2)\n"
10388 + "std %1, 256(%2)\n"
10391 + "std %0, 296(%2)\n"
10392 + : "=&r" (tmp1), "=&r" (tmp2)
10396 +extern struct __dump_header_asm dump_header_asm;
10399 +extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
10401 +#define dump_send_ipi() do { } while(0)
10403 +#endif /* __KERNEL__ */
10405 +#endif /* _ASM_DUMP_H */
10406 Index: linux-2.6.10/include/asm-ppc64/kmap_types.h
10407 ===================================================================
10408 --- linux-2.6.10.orig/include/asm-ppc64/kmap_types.h 2004-12-25 05:34:45.000000000 +0800
10409 +++ linux-2.6.10/include/asm-ppc64/kmap_types.h 2005-04-07 18:13:56.692785232 +0800
10420 Index: linux-2.6.10/include/asm-ppc64/smp.h
10421 ===================================================================
10422 --- linux-2.6.10.orig/include/asm-ppc64/smp.h 2004-12-25 05:33:47.000000000 +0800
10423 +++ linux-2.6.10/include/asm-ppc64/smp.h 2005-04-07 18:13:56.693785080 +0800
10425 extern void smp_send_debugger_break(int cpu);
10427 extern void smp_message_recv(int, struct pt_regs *);
10429 +extern void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *));
10431 #define smp_processor_id() (get_paca()->paca_index)
10432 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
10433 Index: linux-2.6.10/include/asm-s390/kerntypes.h
10434 ===================================================================
10435 --- linux-2.6.10.orig/include/asm-s390/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
10436 +++ linux-2.6.10/include/asm-s390/kerntypes.h 2005-04-07 18:13:56.713782040 +0800
10439 + * asm-s390/kerntypes.h
10441 + * Arch-dependent header file that includes headers for all arch-specific
10442 + * types of interest.
10443 + * The kernel type information is used by the lcrash utility when
10444 + * analyzing system crash dumps or the live system. Using the type
10445 + * information for the running system, rather than kernel header files,
10446 + * makes for a more flexible and robust analysis tool.
10448 + * This source code is released under the GNU GPL.
10451 +/* S/390 specific header files */
10452 +#ifndef _S390_KERNTYPES_H
10453 +#define _S390_KERNTYPES_H
10455 +#include <asm/lowcore.h>
10456 +#include <asm/debug.h>
10457 +#include <asm/ccwdev.h>
10458 +#include <asm/ccwgroup.h>
10459 +#include <asm/qdio.h>
10461 +/* channel subsystem driver */
10462 +#include "../../drivers/s390/cio/cio.h"
10463 +#include "../../drivers/s390/cio/chsc.h"
10464 +#include "../../drivers/s390/cio/css.h"
10465 +#include "../../drivers/s390/cio/device.h"
10466 +#include "../../drivers/s390/cio/qdio.h"
10468 +/* dasd device driver */
10469 +#include "../../drivers/s390/block/dasd_int.h"
10470 +#include "../../drivers/s390/block/dasd_diag.h"
10471 +#include "../../drivers/s390/block/dasd_eckd.h"
10472 +#include "../../drivers/s390/block/dasd_fba.h"
10474 +/* networking drivers */
10475 +#include "../../drivers/s390/net/fsm.h"
10476 +#include "../../drivers/s390/net/iucv.h"
10477 +#include "../../drivers/s390/net/lcs.h"
10479 +/* zfcp device driver */
10480 +#include "../../drivers/s390/scsi/zfcp_def.h"
10481 +#include "../../drivers/s390/scsi/zfcp_fsf.h"
10483 +#endif /* _S390_KERNTYPES_H */
10484 Index: linux-2.6.10/include/asm-s390/dump.h
10485 ===================================================================
10486 --- linux-2.6.10.orig/include/asm-s390/dump.h 2005-04-07 19:34:21.197950744 +0800
10487 +++ linux-2.6.10/include/asm-s390/dump.h 2005-04-07 18:13:56.713782040 +0800
10490 + * Kernel header file for Linux crash dumps.
10493 +/* Nothing to be done here, we have proper hardware support */
10494 +#ifndef _ASM_DUMP_H
10495 +#define _ASM_DUMP_H
10499 Index: linux-2.6.10/include/asm-i386/kerntypes.h
10500 ===================================================================
10501 --- linux-2.6.10.orig/include/asm-i386/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
10502 +++ linux-2.6.10/include/asm-i386/kerntypes.h 2005-04-07 18:13:56.717781432 +0800
10505 + * asm-i386/kerntypes.h
10507 + * Arch-dependent header file that includes headers for all arch-specific
10508 + * types of interest.
10509 + * The kernel type information is used by the lcrash utility when
10510 + * analyzing system crash dumps or the live system. Using the type
10511 + * information for the running system, rather than kernel header files,
10512 + * makes for a more flexible and robust analysis tool.
10514 + * This source code is released under the GNU GPL.
10517 +/* ix86-specific header files */
10518 +#ifndef _I386_KERNTYPES_H
10519 +#define _I386_KERNTYPES_H
10521 +/* Use the default */
10522 +#include <asm-generic/kerntypes.h>
10524 +#endif /* _I386_KERNTYPES_H */
10525 Index: linux-2.6.10/include/asm-i386/dump.h
10526 ===================================================================
10527 --- linux-2.6.10.orig/include/asm-i386/dump.h 2005-04-07 19:34:21.197950744 +0800
10528 +++ linux-2.6.10/include/asm-i386/dump.h 2005-04-07 18:13:56.716781584 +0800
10531 + * Kernel header file for Linux crash dumps.
10533 + * Created by: Matt Robinson (yakker@sgi.com)
10535 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
10537 + * This code is released under version 2 of the GNU GPL.
10540 +/* This header file holds the architecture specific crash dump header */
10541 +#ifndef _ASM_DUMP_H
10542 +#define _ASM_DUMP_H
10544 +/* necessary header files */
10545 +#include <asm/ptrace.h>
10546 +#include <asm/page.h>
10547 +#include <linux/threads.h>
10548 +#include <linux/mm.h>
10551 +#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
10552 +#define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */
10555 + * Structure: __dump_header_asm
10556 + * Function: This is the header for architecture-specific stuff. It
10557 + * follows right after the dump header.
10559 +struct __dump_header_asm {
10560 + /* the dump magic number -- unique to verify dump is valid */
10561 + u64 dha_magic_number;
10563 + /* the version number of this dump */
10566 + /* the size of this header (in case we can't read it) */
10567 + u32 dha_header_size;
10569 + /* the esp for i386 systems */
10572 + /* the eip for i386 systems */
10575 + /* the dump registers */
10576 + struct pt_regs dha_regs;
10578 + /* smp specific */
10579 + u32 dha_smp_num_cpus;
10580 + u32 dha_dumping_cpu;
10581 + struct pt_regs dha_smp_regs[NR_CPUS];
10582 + u32 dha_smp_current_task[NR_CPUS];
10583 + u32 dha_stack[NR_CPUS];
10584 + u32 dha_stack_ptr[NR_CPUS];
10585 +} __attribute__((packed));
10589 +extern struct __dump_header_asm dump_header_asm;
10592 +extern cpumask_t irq_affinity[];
10593 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
10594 +extern void dump_send_ipi(void);
10596 +#define dump_send_ipi() do { } while(0)
10599 +static inline void get_current_regs(struct pt_regs *regs)
10601 + __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
10602 + __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
10603 + __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
10604 + __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
10605 + __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
10606 + __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
10607 + __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
10608 + __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
10609 + __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
10610 + __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
10611 + __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
10612 + __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
10613 + __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
10614 + regs->eip = (unsigned long)current_text_addr();
10617 +#endif /* __KERNEL__ */
10619 +#endif /* _ASM_DUMP_H */
10620 Index: linux-2.6.10/include/asm-i386/kmap_types.h
10621 ===================================================================
10622 --- linux-2.6.10.orig/include/asm-i386/kmap_types.h 2004-12-25 05:35:23.000000000 +0800
10623 +++ linux-2.6.10/include/asm-i386/kmap_types.h 2005-04-07 18:13:56.729779608 +0800
10634 Index: linux-2.6.10/include/asm-i386/smp.h
10635 ===================================================================
10636 --- linux-2.6.10.orig/include/asm-i386/smp.h 2004-12-25 05:35:50.000000000 +0800
10637 +++ linux-2.6.10/include/asm-i386/smp.h 2005-04-07 18:13:56.730779456 +0800
10639 extern cpumask_t cpu_sibling_map[];
10641 extern void smp_flush_tlb(void);
10642 +extern void dump_send_ipi(void);
10643 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
10644 extern void smp_invalidate_rcv(void); /* Process an NMI */
10645 extern void (*mtrr_hook) (void);
10646 Index: linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h
10647 ===================================================================
10648 --- linux-2.6.10.orig/include/asm-i386/mach-default/irq_vectors.h 2004-12-25 05:34:26.000000000 +0800
10649 +++ linux-2.6.10/include/asm-i386/mach-default/irq_vectors.h 2005-04-07 18:13:56.738778240 +0800
10651 #define INVALIDATE_TLB_VECTOR 0xfd
10652 #define RESCHEDULE_VECTOR 0xfc
10653 #define CALL_FUNCTION_VECTOR 0xfb
10654 +#define DUMP_VECTOR 0xfa
10656 #define THERMAL_APIC_VECTOR 0xf0
10658 Index: linux-2.6.10/include/asm-m68k/kerntypes.h
10659 ===================================================================
10660 --- linux-2.6.10.orig/include/asm-m68k/kerntypes.h 2005-04-07 19:34:21.197950744 +0800
10661 +++ linux-2.6.10/include/asm-m68k/kerntypes.h 2005-04-07 18:13:56.715781736 +0800
10664 + * asm-m68k/kerntypes.h
10666 + * Arch-dependent header file that includes headers for all arch-specific
10667 + * types of interest.
10668 + * The kernel type information is used by the lcrash utility when
10669 + * analyzing system crash dumps or the live system. Using the type
10670 + * information for the running system, rather than kernel header files,
10671 + * makes for a more flexible and robust analysis tool.
10673 + * This source code is released under the GNU GPL.
10676 +/* m68k-specific header files */
10677 +#ifndef _M68K_KERNTYPES_H
10678 +#define _M68K_KERNTYPES_H
10680 +/* Use the default */
10681 +#include <asm-generic/kerntypes.h>
10683 +#endif /* _M68K_KERNTYPES_H */