Whamcloud - gitweb
Branch: HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / linux-2.6.10-fc3-lkcd.patch
1 Index: linux-2.6.10/drivers/dump/dump_ia64.c
2 ===================================================================
3 --- linux-2.6.10.orig/drivers/dump/dump_ia64.c  2005-04-07 19:34:21.197950744 +0800
4 +++ linux-2.6.10/drivers/dump/dump_ia64.c       2005-04-07 18:13:56.896754224 +0800
5 @@ -0,0 +1,458 @@
6 +/*
7 + * Architecture specific (ia64) functions for Linux crash dumps.
8 + *
9 + * Created by: Matt Robinson (yakker@sgi.com)
10 + * Contributions from SGI, IBM, and others.
11 + *
12 + * 2.4  kernel modifications by:  Matt D. Robinson (yakker@alacritech.com)
13 + * ia64 kernel modifications by: Piet Delaney (piet@www.piet.net)
14 + *
15 + * Copyright (C) 2001 - 2002 Matt D. Robinson (yakker@alacritech.com)
16 + * Copyright (C) 2002 Silicon Graphics, Inc. All rights reserved.
17 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
18 + *
19 + * This code is released under version 2 of the GNU GPL.
20 + */
21 +
22 +/*
23 + * The hooks for dumping the kernel virtual memory to disk are in this
24 + * file.  Any time a modification is made to the virtual memory mechanism,
25 + * these routines must be changed to use the new mechanisms.
26 + */
27 +#include <linux/init.h>
28 +#include <linux/types.h>
29 +#include <linux/kernel.h>
30 +#include <linux/smp.h>
31 +#include <linux/fs.h>
32 +#include <linux/vmalloc.h>
33 +#include <linux/dump.h>
34 +#include "dump_methods.h"
35 +#include <linux/mm.h>
36 +#include <asm/processor.h>
37 +#include <asm-ia64/dump.h>
38 +#include <asm/hardirq.h>
39 +#include <linux/irq.h>
40 +#include <linux/delay.h>
41 +
42 +static __s32         saved_irq_count;   /* saved preempt_count() flags */
43 +
44 +
45 +static int alloc_dha_stack(void)
46 +{
47 +       int i;
48 +       void *ptr;
49 +
50 +       if (dump_header_asm.dha_stack[0])
51 +       {
52 +               return 0;
53 +       }
54 +       ptr = vmalloc(THREAD_SIZE * num_online_cpus());
55 +       if (!ptr) {
56 +               printk("vmalloc for dha_stacks failed\n");
57 +               return -ENOMEM;
58 +       }
59 +       bzero(ptr, THREAD_SIZE * num_online_cpus());
60 +
61 +       for (i = 0; i < num_online_cpus(); i++) {
62 +               dump_header_asm.dha_stack[i] = (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
63 +       }
64 +       return 0;
65 +}
66 +
67 +static int free_dha_stack(void) 
68 +{
69 +       if (dump_header_asm.dha_stack[0])
70 +       {
71 +               vfree((void*)dump_header_asm.dha_stack[0]);
72 +               dump_header_asm.dha_stack[0] = 0;
73 +       }
74 +       return 0;
75 +}
76 +
77 +/* a structure to get arguments into the following callback routine */
78 +struct unw_args {
79 +       int cpu;
80 +       struct task_struct *tsk;
81 +};
82 +
83 +static void
84 +do_save_sw(struct unw_frame_info *info, void *arg)
85 +{
86 +       struct unw_args *uwargs = (struct unw_args *)arg;
87 +       int cpu = uwargs->cpu;
88 +       struct task_struct *tsk = uwargs->tsk;
89 +
90 +       dump_header_asm.dha_stack_ptr[cpu] = (uint64_t)info->sw;
91 +
92 +       if (tsk && dump_header_asm.dha_stack[cpu]) {
93 +               memcpy((void *)dump_header_asm.dha_stack[cpu],
94 +                               STACK_START_POSITION(tsk),
95 +                               THREAD_SIZE);
96 +       }
97 +}
98 +
99 +void
100 +__dump_save_context(int cpu, const struct pt_regs *regs, 
101 +       struct task_struct *tsk)
102 +{
103 +       struct unw_args uwargs;
104 +
105 +       dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
106 +
107 +       if (regs) {
108 +               dump_header_asm.dha_smp_regs[cpu] = *regs;
109 +       }
110 +
111 +       /* save a snapshot of the stack in a nice state for unwinding */
112 +       uwargs.cpu = cpu;
113 +       uwargs.tsk = tsk;
114 +
115 +       unw_init_running(do_save_sw, (void *)&uwargs);
116 +}
117 +
118 +#ifdef CONFIG_SMP
119 +
120 +extern cpumask_t irq_affinity[];
121 +#define irq_desc _irq_desc
122 +extern irq_desc_t irq_desc[];
123 +extern void dump_send_ipi(void);
124 +static cpumask_t saved_affinity[NR_IRQS];
125 +
126 +/*
127 + * Routine to save the old irq affinities and change affinities of all irqs to
128 + * the dumping cpu.
129 + */
130 +static void
131 +set_irq_affinity(void)
132 +{
133 +       int i;
134 +       cpumask_t cpu = CPU_MASK_NONE;
135 +
136 +       cpu_set(smp_processor_id(), cpu);
137 +       memcpy(saved_affinity, irq_affinity, sizeof(saved_affinity));
138 +       for (i = 0; i < NR_IRQS; i++) {
139 +               if (irq_desc[i].handler == NULL)
140 +                       continue;
141 +               irq_affinity[i] = cpu;
142 +               if (irq_desc[i].handler->set_affinity != NULL)
143 +                       irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
144 +       }
145 +}
146 +
147 +/*
148 + * Restore old irq affinities.
149 + */
150 +static void
151 +reset_irq_affinity(void)
152 +{
153 +       int i;
154 +
155 +       memcpy(irq_affinity, saved_affinity, sizeof(saved_affinity));
156 +       for (i = 0; i < NR_IRQS; i++) {
157 +               if (irq_desc[i].handler == NULL)
158 +                       continue;
159 +               if (irq_desc[i].handler->set_affinity != NULL)
160 +                       irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
161 +       }
162 +}
163 +
164 +#else /* !CONFIG_SMP */
165 +#define set_irq_affinity()      do { } while (0)
166 +#define reset_irq_affinity()    do { } while (0)
167 +#define save_other_cpu_states() do { } while (0)
168 +#endif /* !CONFIG_SMP */
169 +
170 +#ifdef CONFIG_SMP
171 +static int dump_expect_ipi[NR_CPUS];
172 +static atomic_t waiting_for_dump_ipi;
173 +static int wait_for_dump_ipi = 2000; /* wait 2000 ms for ipi to be handled */
174 +extern void (*dump_trace_ptr)(struct pt_regs *);
175 +
176 +
177 +extern void stop_this_cpu(void);
178 +
179 +static int
180 +dump_nmi_callback(struct pt_regs *regs, int cpu)
181 +{
182 +        if (!dump_expect_ipi[cpu])
183 +                return 0;
184 +
185 +        dump_expect_ipi[cpu] = 0;
186 +
187 +        dump_save_this_cpu(regs);
188 +        atomic_dec(&waiting_for_dump_ipi);
189 +
190 + level_changed:
191 +        switch (dump_silence_level) {
192 +        case DUMP_HARD_SPIN_CPUS:       /* Spin until dump is complete */
193 +                while (dump_oncpu) {
194 +                        barrier();      /* paranoia */
195 +                        if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
196 +                                goto level_changed;
197 +
198 +                        cpu_relax();    /* kill time nicely */
199 +                }
200 +                break;
201 +
202 +        case DUMP_HALT_CPUS:            /* Execute halt */
203 +                stop_this_cpu();
204 +                break;
205 +
206 +        case DUMP_SOFT_SPIN_CPUS:
207 +                /* Mark the task so it spins in schedule */
208 +                set_tsk_thread_flag(current, TIF_NEED_RESCHED);
209 +                break;
210 +        }
211 +
212 +        return 1;
213 +}
214 +
215 +int IPI_handler(struct pt_regs *regs)
216 +{
217 +       int cpu;
218 +       cpu = task_cpu(current);
219 +       return(dump_nmi_callback(regs, cpu));
220 +}
221 +
222 +/* save registers on other processors */
223 +void
224 +__dump_save_other_cpus(void)
225 +{
226 +        int i, cpu = smp_processor_id();
227 +        int other_cpus = num_online_cpus()-1;
228 +       int wait_time = wait_for_dump_ipi;
229 +
230 +        if (other_cpus > 0) {
231 +                atomic_set(&waiting_for_dump_ipi, other_cpus);
232 +
233 +                for (i = 0; i < NR_CPUS; i++) {
234 +                        dump_expect_ipi[i] = (i != cpu && cpu_online(i));
235 +                }
236 +
237 +               dump_ipi_function_ptr = IPI_handler;
238 +               
239 +                wmb();
240 +
241 +                dump_send_ipi();
242 +                /* maybe we don't need to wait for IPI to be processed.
243 +                 * just write out the header at the end of dumping, if
244 +                 * this IPI is not processed until then, there probably
245 +                 * is a problem and we just fail to capture state of
246 +                 * other cpus. */
247 +                while(wait_time-- && (atomic_read(&waiting_for_dump_ipi) > 0)) {
248 +                       barrier();
249 +                       mdelay(1);
250 +                }
251 +               if (wait_time <= 0) {
252 +                       printk("dump ipi timeout, proceeding...\n");
253 +               }
254 +        }
255 +}
256 +#endif
257 +/*
258 + * Kludge - dump from interrupt context is unreliable (Fixme)
259 + *
260 + * We do this so that softirqs initiated for dump i/o
261 + * get processed and we don't hang while waiting for i/o
262 + * to complete or in any irq synchronization attempt.
263 + *
264 + * This is not quite legal of course, as it has the side
265 + * effect of making all interrupts & softirqs triggered
266 + * while dump is in progress complete before currently
267 + * pending softirqs and the currently executing interrupt
268 + * code.
269 + */
270 +static inline void
271 +irq_bh_save(void)
272 +{
273 +        saved_irq_count = irq_count();
274 +        preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
275 +}
276 +
277 +static inline void
278 +irq_bh_restore(void)
279 +{
280 +        preempt_count() |= saved_irq_count;
281 +}
282 +
283 +/*
284 + * Name: __dump_configure_header()
285 + * Func: Configure the dump header with all proper values.
286 + */
287 +int
288 +__dump_configure_header(const struct pt_regs *regs)
289 +{
290 +       return (0);
291 +}
292 +
293 +
294 +#define dim(x) (sizeof(x)/sizeof(*(x)))
295 +
296 +/*
297 + * Name: __dump_irq_enable
298 + * Func: Reset system so interrupts are enabled.
299 + *       This is used for dump methods that require interrupts
300 + *       Eventually, all methods will have interrupts disabled
301 + *       and this code can be removed.
302 + *
303 + *     Change irq affinities
304 + *     Re-enable interrupts
305 + */
306 +int
307 +__dump_irq_enable(void)
308 +{
309 +        set_irq_affinity();
310 +        irq_bh_save();
311 +       ia64_srlz_d();
312 +       /* 
313 +        * reduce the task priority level
314 +        * to get disk interrupts
315 +        */
316 +       ia64_setreg(_IA64_REG_CR_TPR, 0);
317 +       ia64_srlz_d();
318 +        local_irq_enable();
319 +       return 0;
320 +}
321 +
322 +/*
323 + * Name: __dump_irq_restore
324 + * Func: Resume the system state in an architecture-specific way.
325 +
326 + */
327 +void
328 +__dump_irq_restore(void)
329 +{
330 +        local_irq_disable();
331 +        reset_irq_affinity();
332 +        irq_bh_restore();
333 +}
334 +
335 +/*
336 + * Name: __dump_page_valid()
337 + * Func: Check if page is valid to dump.
338 + */
339 +int
340 +__dump_page_valid(unsigned long index)
341 +{
342 +        if (!pfn_valid(index))
343 +       {
344 +                return 0;
345 +       }
346 +        return 1;
347 +}
348 +
349 +/*
350 + * Name: __dump_init()
351 + * Func: Initialize the dumping routine process.  This is in case
352 + *       it's necessary in the future.
353 + */
354 +void
355 +__dump_init(uint64_t local_memory_start)
356 +{
357 +       return;
358 +}
359 +
360 +/*
361 + * Name: __dump_open()
362 + * Func: Open the dump device (architecture specific).  This is in
363 + *       case it's necessary in the future.
364 + */
365 +void
366 +__dump_open(void)
367 +{
368 +       alloc_dha_stack();
369 +       return;
370 +}
371 +
372 +
373 +/*
374 + * Name: __dump_cleanup()
375 + * Func: Free any architecture specific data structures. This is called
376 + *       when the dump module is being removed.
377 + */
378 +void
379 +__dump_cleanup(void)
380 +{
381 +       free_dha_stack();
382 +
383 +       return;
384 +}
385 +
386 +
387 +
388 +int __dump_memcpy_mc_expected = 0;             /* Doesn't help yet */
389 +
390 +/*
391 + * An ia64 version of memcpy() that tries to avoid machine checks.
392 + *
393 + * NB: 
394 + *     By itself __dump_memcpy_mc_expected() isn't providing any
395 + *     protection against Machine Checks. We are looking into the
396 + *     possibility of adding code to the arch/ia64/kernel/mca.c function
397 + *     ia64_mca_ucmc_handler() to restore state so that a IA64_MCA_CORRECTED
398 + *     can be returned to the firmware. Currently it always returns 
399 + *     IA64_MCA_COLD_BOOT and reboots the machine.
400 + */
401 +/*
402 +void * __dump_memcpy(void * dest, const void *src, size_t count)
403 +{
404 +       void *vp;
405 +
406 +       if (__dump_memcpy_mc_expected) {
407 +               ia64_pal_mc_expected((u64) 1, 0);
408 +       }
409 +
410 +       vp = memcpy(dest, src, count);
411 +
412 +       if (__dump_memcpy_mc_expected) {
413 +               ia64_pal_mc_expected((u64) 0, 0);
414 +       }
415 +       return(vp);
416 +}
417 +*/
418 +/*
419 + * Name: manual_handle_crashdump()
420 + * Func: Interface for the lkcd dump command. Calls dump_execute()
421 + */
422 +int
423 +manual_handle_crashdump(void) {
424 +
425 +        struct pt_regs regs;
426 +
427 +        get_current_regs(&regs);
428 +        dump_execute("manual", &regs);
429 +        return 0;
430 +}
431 +
432 +/*
433 + * Name: __dump_clean_irq_state()
434 + * Func: Clean up from the previous IRQ handling state. Such as oops from 
435 + *       interrupt handler or bottom half.
436 + */
437 +void
438 +__dump_clean_irq_state(void)
439 +{
440 +       unsigned long saved_tpr;
441 +       unsigned long TPR_MASK = 0xFFFFFFFFFFFEFF0F;
442 +       
443 +       
444 +       /* Get the processors task priority register */
445 +       saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
446 +       /* clear the mmi and mic bits of the TPR to unmask interrupts */
447 +       saved_tpr = saved_tpr & TPR_MASK; 
448 +       ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
449 +       ia64_srlz_d();
450 +
451 +       /* Tell the processor we're done with the interrupt
452 +        * that got us here.
453 +        */
454 +       
455 +       ia64_eoi();
456 +
457 +       /* local implementation of irq_exit(); */
458 +       preempt_count() -= IRQ_EXIT_OFFSET;
459 +       preempt_enable_no_resched();
460 +
461 +       return;
462 +}
463 +
464 Index: linux-2.6.10/drivers/dump/dump_setup.c
465 ===================================================================
466 --- linux-2.6.10.orig/drivers/dump/dump_setup.c 2005-04-07 19:34:21.197950744 +0800
467 +++ linux-2.6.10/drivers/dump/dump_setup.c      2005-04-07 18:13:56.914751488 +0800
468 @@ -0,0 +1,923 @@
469 +/*
470 + * Standard kernel function entry points for Linux crash dumps.
471 + *
472 + * Created by: Matt Robinson (yakker@sourceforge.net)
473 + * Contributions from SGI, IBM, HP, MCL, and others.
474 + *
475 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
476 + * Copyright (C) 2000 - 2002 TurboLinux, Inc.  All rights reserved.
477 + * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
478 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
479 + *
480 + * This code is released under version 2 of the GNU GPL.
481 + */
482 +
483 +/*
484 + * -----------------------------------------------------------------------
485 + *
486 + * DUMP HISTORY
487 + *
488 + * This dump code goes back to SGI's first attempts at dumping system
489 + * memory on SGI systems running IRIX.  A few developers at SGI needed
490 + * a way to take this system dump and analyze it, and created 'icrash',
491 + * or IRIX Crash.  The mechanism (the dumps and 'icrash') were used
492 + * by support people to generate crash reports when a system failure
493 + * occurred.  This was vital for large system configurations that
494 + * couldn't apply patch after patch after fix just to hope that the
495 + * problems would go away.  So the system memory, along with the crash
496 + * dump analyzer, allowed support people to quickly figure out what the
497 + * problem was on the system with the crash dump.
498 + *
499 + * In comes Linux.  SGI started moving towards the open source community,
500 + * and upon doing so, SGI wanted to take its support utilities into Linux
501 + * with the hopes that they would end up the in kernel and user space to
502 + * be used by SGI's customers buying SGI Linux systems.  One of the first
503 + * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
504 + * Dumps.  LKCD comprises of a patch to the kernel to enable system
505 + * dumping, along with 'lcrash', or Linux Crash, to analyze the system
506 + * memory dump.  A few additional system scripts and kernel modifications
507 + * are also included to make the dump mechanism and dump data easier to
508 + * process and use.
509 + *
510 + * As soon as LKCD was released into the open source community, a number
511 + * of larger companies started to take advantage of it.  Today, there are
512 + * many community members that contribute to LKCD, and it continues to
513 + * flourish and grow as an open source project.
514 + */
515 +
516 +/*
517 + * DUMP TUNABLES (read/write with ioctl, readonly with /proc)
518 + *
519 + * This is the list of system tunables (via /proc) that are available
520 + * for Linux systems.  All the read, write, etc., functions are listed
521 + * here.  Currently, there are a few different tunables for dumps:
522 + *
523 + * dump_device (used to be dumpdev):
524 + *     The device for dumping the memory pages out to.  This 
525 + *     may be set to the primary swap partition for disruptive dumps,
526 + *     and must be an unused partition for non-disruptive dumps.
527 + *     Todo: In the case of network dumps, this may be interpreted 
528 + *     as the IP address of the netdump server to connect to.
529 + *
530 + * dump_compress (used to be dump_compress_pages):
531 + *     This is the flag which indicates which compression mechanism
532 + *     to use.  This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
533 + *     This is the current set of values:
534 + *
535 + *     0: DUMP_COMPRESS_NONE -- Don't compress any pages.
536 + *     1: DUMP_COMPRESS_RLE  -- This uses RLE compression.
537 + *     2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
538 + *
539 + * dump_level:
540 + *     The amount of effort the dump module should make to save
541 + *     information for post crash analysis.  This value is now
542 + *     a BITMASK value, not an index:
543 + *
544 + *     0:   Do nothing, no dumping. (DUMP_LEVEL_NONE)
545 + *
546 + *     1:   Print out the dump information to the dump header, and
547 + *          write it out to the dump_device. (DUMP_LEVEL_HEADER)
548 + *
549 + *     2:   Write out the dump header and all kernel memory pages.
550 + *          (DUMP_LEVEL_KERN)
551 + *
552 + *     4:   Write out the dump header and all kernel and user
553 + *          memory pages.  (DUMP_LEVEL_USED)
554 + *
555 + *     8:   Write out the dump header and all conventional/cached 
556 + *         memory (RAM) pages in the system (kernel, user, free).  
557 + *         (DUMP_LEVEL_ALL_RAM)
558 + *
559 + *    16:   Write out everything, including non-conventional memory
560 + *         like firmware, proms, I/O registers, uncached memory.
561 + *         (DUMP_LEVEL_ALL)
562 + *
563 + *     The dump_level will default to 1.
564 + *
565 + * dump_flags:
566 + *     These are the flags to use when talking about dumps.  There
567 + *     are lots of possibilities.  This is a BITMASK value, not an index.
568 + * 
569 + * -----------------------------------------------------------------------
570 + */
571 +
572 +#include <linux/kernel.h>
573 +#include <linux/delay.h>
574 +#include <linux/reboot.h>
575 +#include <linux/fs.h>
576 +#include <linux/dump.h>
577 +#include <linux/ioctl32.h>
578 +#include <linux/syscalls.h>
579 +#include "dump_methods.h"
580 +#include <linux/proc_fs.h>
581 +#include <linux/module.h>
582 +#include <linux/utsname.h>
583 +#include <linux/highmem.h>
584 +#include <linux/miscdevice.h>
585 +#include <linux/sysrq.h>
586 +#include <linux/sysctl.h>
587 +#include <linux/nmi.h>
588 +#include <linux/init.h>
589 +#include <asm/hardirq.h>
590 +#include <asm/uaccess.h>
591 +
592 +
593 +/*
594 + * -----------------------------------------------------------------------
595 + *                         V A R I A B L E S
596 + * -----------------------------------------------------------------------
597 + */
598 +
599 +/* Dump tunables */
600 +struct dump_config dump_config = {
601 +       .level          = 0,
602 +       .flags          = 0,
603 +       .dump_device    = 0,
604 +       .dump_addr      = 0,
605 +       .dumper         = NULL
606 +};
607 +#ifdef CONFIG_ARM 
608 +static _dump_regs_t all_regs;
609 +#endif
610 +
611 +/* Global variables used in dump.h */
612 +/* degree of system freeze when dumping */
613 +enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;      
614 +
615 +/* Other global fields */
616 +extern struct __dump_header dump_header; 
617 +struct dump_dev *dump_dev = NULL;  /* Active dump device                   */
618 +static int dump_compress = 0;
619 +
620 +static u32 dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
621 +                               unsigned long loc);
622 +struct __dump_compress dump_none_compression = {
623 +       .compress_type  = DUMP_COMPRESS_NONE,
624 +       .compress_func  = dump_compress_none,
625 +       .compress_name  = "none",
626 +};
627 +
628 +/* our device operations and functions */
629 +static int dump_ioctl(struct inode *i, struct file *f,
630 +       unsigned int cmd, unsigned long arg);
631 +
632 +#ifdef CONFIG_COMPAT
633 +static int dw_long(unsigned int, unsigned int, unsigned long, struct file*);
634 +#endif
635 +
636 +static struct file_operations dump_fops = {
637 +       .owner  = THIS_MODULE,
638 +       .ioctl  = dump_ioctl,
639 +};
640 +
641 +static struct miscdevice dump_miscdev = {
642 +       .minor  = CRASH_DUMP_MINOR,
643 +       .name   = "dump",
644 +       .fops   = &dump_fops,
645 +};
646 +MODULE_ALIAS_MISCDEV(CRASH_DUMP_MINOR);
647 +
648 +/* static variables                                                    */
649 +static int dump_okay = 0;              /* can we dump out to disk?     */
650 +static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
651 +
652 +/* used for dump compressors */
653 +static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
654 +
655 +/* list of registered dump targets */
656 +static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
657 +
658 +/* lkcd info structure -- this is used by lcrash for basic system data     */
659 +struct __lkcdinfo lkcdinfo = {
660 +       .ptrsz          = (sizeof(void *) * 8),
661 +#if defined(__LITTLE_ENDIAN) 
662 +       .byte_order     = __LITTLE_ENDIAN,
663 +#else
664 +       .byte_order     = __BIG_ENDIAN,
665 +#endif
666 +       .page_shift     = PAGE_SHIFT,
667 +       .page_size      = PAGE_SIZE,
668 +       .page_mask      = PAGE_MASK,
669 +       .page_offset    = PAGE_OFFSET,
670 +};
671 +
672 +/*
673 + * -----------------------------------------------------------------------
674 + *            / P R O C   T U N A B L E   F U N C T I O N S
675 + * -----------------------------------------------------------------------
676 + */
677 +
678 +static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
679 +                           void __user *buffer, size_t *lenp, loff_t *ppos);
680 +
681 +static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
682 +                           void __user *buffer, size_t *lenp, loff_t *ppos);
683 +/*
684 + * sysctl-tuning infrastructure.
685 + */
686 +static ctl_table dump_table[] = {
687 +       { .ctl_name = CTL_DUMP_LEVEL,
688 +         .procname = DUMP_LEVEL_NAME, 
689 +         .data = &dump_config.level,    
690 +         .maxlen = sizeof(int),
691 +         .mode = 0444,
692 +         .proc_handler = proc_doulonghex, },
693 +
694 +       { .ctl_name = CTL_DUMP_FLAGS,
695 +         .procname = DUMP_FLAGS_NAME,
696 +         .data = &dump_config.flags,   
697 +         .maxlen = sizeof(int),
698 +         .mode = 0444,
699 +         .proc_handler = proc_doulonghex, },
700 +
701 +       { .ctl_name = CTL_DUMP_COMPRESS,
702 +         .procname = DUMP_COMPRESS_NAME,
703 +         .data = &dump_compress, /* FIXME */
704 +         .maxlen = sizeof(int),
705 +         .mode = 0444,
706 +         .proc_handler = proc_dointvec, },
707 +         
708 +       { .ctl_name = CTL_DUMP_DEVICE,
709 +         .procname = DUMP_DEVICE_NAME,
710 +         .mode = 0444,
711 +         .data = &dump_config.dump_device, /* FIXME */
712 +         .maxlen = sizeof(int),
713 +         .proc_handler = proc_dump_device },
714 +
715 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
716 +       { .ctl_name = CTL_DUMP_ADDR,
717 +         .procname = DUMP_ADDR_NAME,
718 +         .mode = 0444,
719 +         .data = &dump_config.dump_addr,
720 +         .maxlen = sizeof(unsigned long),
721 +         .proc_handler = proc_doulonghex },
722 +#endif
723 +
724 +       { 0, }
725 +};
726 +
727 +static ctl_table dump_root[] = {
728 +       { .ctl_name = KERN_DUMP,
729 +         .procname = "dump",
730 +         .mode = 0555, 
731 +         .child = dump_table },
732 +       { 0, }
733 +};
734 +
735 +static ctl_table kernel_root[] = {
736 +       { .ctl_name = CTL_KERN,
737 +         .procname = "kernel",
738 +         .mode = 0555,
739 +         .child = dump_root, },
740 +       { 0, }
741 +};
742 +
743 +static struct ctl_table_header *sysctl_header;
744 +
745 +/*
746 + * -----------------------------------------------------------------------
747 + *              C O M P R E S S I O N   F U N C T I O N S
748 + * -----------------------------------------------------------------------
749 + */
750 +
751 +/*
752 + * Name: dump_compress_none()
753 + * Func: Don't do any compression, period.
754 + */
755 +static u32
756 +dump_compress_none(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
757 +               unsigned long loc)
758 +{
759 +       /* just return the old size */
760 +       return oldsize;
761 +}
762 +
763 +
764 +/*
765 + * Name: dump_execute()
766 + * Func: Execute the dumping process.  This makes sure all the appropriate
767 + *       fields are updated correctly, and calls dump_execute_memdump(),
768 + *       which does the real work.
769 + */
770 +void
771 +dump_execute(const char *panic_str, const struct pt_regs *regs)
772 +{
773 +       int state = -1;
774 +       unsigned long flags;
775 +
776 +       /* make sure we can dump */
777 +       if (!dump_okay) {
778 +               pr_info("LKCD not yet configured, can't take dump now\n");
779 +               return;
780 +       }
781 +
782 +       /* Exclude multiple dumps at the same time,
783 +        * and disable interrupts,  some drivers may re-enable
784 +        * interrupts in with silence()
785 +        *
786 +        * Try and acquire spin lock. If successful, leave preempt
787 +        * and interrupts disabled.  See spin_lock_irqsave in spinlock.h
788 +        */
789 +       local_irq_save(flags);
790 +       if (!spin_trylock(&dump_lock)) {
791 +               local_irq_restore(flags);
792 +               pr_info("LKCD dump already in progress\n");
793 +               return;
794 +       }
795 +
796 +       /* What state are interrupts really in? */
797 +       if (in_interrupt()){ 
798 +               if(in_irq())
799 +                   printk(KERN_ALERT "Dumping from interrupt handler!\n");
800 +               else 
801 +                   printk(KERN_ALERT "Dumping from bottom half!\n");
802 +
803 +               __dump_clean_irq_state();
804 +       }
805 +
806 +
807 +       /* Bring system into the strictest level of quiescing for min drift 
808 +        * dump drivers can soften this as required in dev->ops->silence() 
809 +        */
810 +       dump_oncpu = smp_processor_id() + 1;
811 +       dump_silence_level = DUMP_HARD_SPIN_CPUS; 
812 +
813 +       state = dump_generic_execute(panic_str, regs);
814 +       
815 +       dump_oncpu = 0;
816 +       spin_unlock_irqrestore(&dump_lock, flags);
817 +
818 +       if (state < 0) {
819 +               printk("Dump Incomplete or failed!\n");
820 +       } else {
821 +               printk("Dump Complete; %d dump pages saved.\n", 
822 +                      dump_header.dh_num_dump_pages);
823 +       }
824 +}
825 +
826 +/*
827 + * Name: dump_register_compression()
828 + * Func: Register a dump compression mechanism.
829 + */
830 +void
831 +dump_register_compression(struct __dump_compress *item)
832 +{
833 +       if (item)
834 +               list_add(&(item->list), &dump_compress_list);
835 +}
836 +
837 +/*
838 + * Name: dump_unregister_compression()
839 + * Func: Remove a dump compression mechanism, and re-assign the dump
840 + *       compression pointer if necessary.
841 + */
842 +void
843 +dump_unregister_compression(int compression_type)
844 +{
845 +       struct list_head *tmp;
846 +       struct __dump_compress *dc;
847 +
848 +       /* let's make sure our list is valid */
849 +       if (compression_type != DUMP_COMPRESS_NONE) {
850 +               list_for_each(tmp, &dump_compress_list) {
851 +                       dc = list_entry(tmp, struct __dump_compress, list);
852 +                       if (dc->compress_type == compression_type) {
853 +                               list_del(&(dc->list));
854 +                               break;
855 +                       }
856 +               }
857 +       }
858 +}
859 +
860 +/*
861 + * Name: dump_compress_init()
862 + * Func: Initialize (or re-initialize) compression scheme.
863 + */
864 +static int
865 +dump_compress_init(int compression_type)
866 +{
867 +       struct list_head *tmp;
868 +       struct __dump_compress *dc;
869 +
870 +       /* try to remove the compression item */
871 +       list_for_each(tmp, &dump_compress_list) {
872 +               dc = list_entry(tmp, struct __dump_compress, list);
873 +               if (dc->compress_type == compression_type) {
874 +                       dump_config.dumper->compress = dc;
875 +                       dump_compress = compression_type;
876 +                       pr_debug("Dump Compress %s\n", dc->compress_name);
877 +                       return 0;
878 +               }
879 +       }
880 +
881 +       /* 
882 +        * nothing on the list -- return ENODATA to indicate an error 
883 +        *
884 +        * NB: 
885 +        *      EAGAIN: reports "Resource temporarily unavailable" which
886 +        *              isn't very enlightening.
887 +        */
888 +       printk("compression_type:%d not found\n", compression_type);
889 +
890 +       return -ENODATA;
891 +}
892 +
893 +static int
894 +dumper_setup(unsigned long flags, unsigned long devid)
895 +{
896 +       int ret = 0;
897 +
898 +       /* unconfigure old dumper if it exists */
899 +       dump_okay = 0;
900 +       if (dump_config.dumper) {
901 +               pr_debug("Unconfiguring current dumper\n");
902 +               dump_unconfigure();
903 +       }
904 +       /* set up new dumper */
905 +       if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
906 +               printk("Configuring softboot based dump \n");
907 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
908 +               dump_config.dumper = &dumper_stage1; 
909 +#else
910 +               printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
911 +               return -1;
912 +#endif
913 +       } else {
914 +               dump_config.dumper = &dumper_singlestage;
915 +       }       
916 +       dump_config.dumper->dev = dump_dev;
917 +
918 +       ret = dump_configure(devid);
919 +       if (!ret) {
920 +               dump_okay = 1;
921 +               pr_debug("%s dumper set up for dev 0x%lx\n", 
922 +                       dump_config.dumper->name, devid);
923 +               dump_config.dump_device = devid;
924 +       } else {
925 +               printk("%s dumper set up failed for dev 0x%lx\n", 
926 +                      dump_config.dumper->name, devid);
927 +               dump_config.dumper = NULL;
928 +       }
929 +       return ret;
930 +}
931 +
932 +static int
933 +dump_target_init(int target)
934 +{
935 +       char type[20];
936 +       struct list_head *tmp;
937 +       struct dump_dev *dev;
938 +       
939 +       switch (target) {
940 +               case DUMP_FLAGS_DISKDUMP:
941 +                       strcpy(type, "blockdev"); break;
942 +               case DUMP_FLAGS_NETDUMP:
943 +                       strcpy(type, "networkdev"); break;
944 +               default:
945 +                       return -1;
946 +       }
947 +
948 +       /*
949 +        * This is a bit stupid, generating strings from flag
950 +        * and doing strcmp. This is done because 'struct dump_dev'
951 +        * has string 'type_name' and not integer 'type'.
952 +        */
953 +       list_for_each(tmp, &dump_target_list) {
954 +               dev = list_entry(tmp, struct dump_dev, list);
955 +               if (strcmp(type, dev->type_name) == 0) {
956 +                       dump_dev = dev;
957 +                       return 0;
958 +               }
959 +       }
960 +       return -1;
961 +}
962 +
963 +/*
964 + * Name: dump_ioctl()
965 + * Func: Allow all dump tunables through a standard ioctl() mechanism.
966 + *       This is far better than before, where we'd go through /proc,
967 + *       because now this will work for multiple OS and architectures.
968 + */
969 +static int
970 +dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
971 +{
972 +       /* check capabilities */
973 +       if (!capable(CAP_SYS_ADMIN))
974 +               return -EPERM;
975 +
976 +       if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
977 +               /* dump device must be configured first */
978 +               return -ENODEV;
979 +
980 +       /*
981 +        * This is the main mechanism for controlling get/set data
982 +        * for various dump device parameters.  The real trick here
983 +        * is setting the dump device (DIOSDUMPDEV).  That's what
984 +        * triggers everything else.
985 +        */
986 +       switch (cmd) {
987 +       case DIOSDUMPDEV:       /* set dump_device */
988 +               pr_debug("Configuring dump device\n"); 
989 +               if (!(f->f_flags & O_RDWR))
990 +                       return -EPERM;
991 +
992 +               __dump_open();
993 +               return dumper_setup(dump_config.flags, arg);
994 +
995 +               
996 +       case DIOGDUMPDEV:       /* get dump_device */
997 +               return put_user((long)dump_config.dump_device, (long *)arg);
998 +
999 +       case DIOSDUMPLEVEL:     /* set dump_level */
1000 +               if (!(f->f_flags & O_RDWR))
1001 +                       return -EPERM;
1002 +
1003 +               /* make sure we have a positive value */
1004 +               if (arg < 0)
1005 +                       return -EINVAL;
1006 +
1007 +               /* Fixme: clean this up */
1008 +               dump_config.level = 0;
1009 +               switch ((int)arg) {
1010 +                       case DUMP_LEVEL_ALL:
1011 +                       case DUMP_LEVEL_ALL_RAM:
1012 +                               dump_config.level |= DUMP_MASK_UNUSED;
1013 +                       case DUMP_LEVEL_USED:
1014 +                               dump_config.level |= DUMP_MASK_USED;
1015 +                       case DUMP_LEVEL_KERN:
1016 +                               dump_config.level |= DUMP_MASK_KERN;
1017 +                       case DUMP_LEVEL_HEADER:
1018 +                               dump_config.level |= DUMP_MASK_HEADER;
1019 +                       case DUMP_LEVEL_NONE:
1020 +                               break;
1021 +                       default:
1022 +                               return (-EINVAL);
1023 +                       }
1024 +               pr_debug("Dump Level 0x%lx\n", dump_config.level);
1025 +               break;
1026 +
1027 +       case DIOGDUMPLEVEL:     /* get dump_level */
1028 +               /* fixme: handle conversion */
1029 +               return put_user((long)dump_config.level, (long *)arg);
1030 +
1031 +               
1032 +       case DIOSDUMPFLAGS:     /* set dump_flags */
1033 +               /* check flags */
1034 +               if (!(f->f_flags & O_RDWR))
1035 +                       return -EPERM;
1036 +
1037 +               /* make sure we have a positive value */
1038 +               if (arg < 0)
1039 +                       return -EINVAL;
1040 +                       
1041 +               if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
1042 +                       return -EINVAL; /* return proper error */
1043 +
1044 +               dump_config.flags = arg;
1045 +               
1046 +               pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
1047 +               break;
1048 +               
1049 +       case DIOGDUMPFLAGS:     /* get dump_flags */
1050 +               return put_user((long)dump_config.flags, (long *)arg);
1051 +
1052 +       case DIOSDUMPCOMPRESS:  /* set the dump_compress status */
1053 +               if (!(f->f_flags & O_RDWR))
1054 +                       return -EPERM;
1055 +
1056 +               return dump_compress_init((int)arg);
1057 +
1058 +       case DIOGDUMPCOMPRESS:  /* get the dump_compress status */
1059 +               return put_user((long)(dump_config.dumper ? 
1060 +                       dump_config.dumper->compress->compress_type : 0), 
1061 +                       (long *)arg);
1062 +       case DIOGDUMPOKAY: /* check if dump is configured */
1063 +               return put_user((long)dump_okay, (long *)arg);
1064 +       
1065 +       case DIOSDUMPTAKE: /* Trigger a manual dump */
1066 +               /* Do not proceed if lkcd not yet configured */
1067 +               if(!dump_okay) {
1068 +                       printk("LKCD not yet configured. Cannot take manual dump\n");
1069 +                       return -ENODEV;
1070 +               }
1071 +
1072 +               /* Take the dump */
1073 +               return  manual_handle_crashdump();
1074 +                       
1075 +       default:
1076 +               /* 
1077 +                * these are network dump specific ioctls, let the
1078 +                * module handle them.
1079 +                */
1080 +               return dump_dev_ioctl(cmd, arg);
1081 +       }
1082 +       return 0;
1083 +}
1084 +
1085 +/*
1086 + * Handle special cases for dump_device 
1087 + * changing the dump device requires re-opening the device
1088 + */
1089 +static int 
1090 +proc_dump_device(ctl_table *ctl, int write, struct file *f,
1091 +                void __user *buffer, size_t *lenp, loff_t *ppos)
1092 +{
1093 +       int *valp = ctl->data;
1094 +       int oval = *valp;
1095 +       int ret = -EPERM;
1096 +
1097 +       /* same permission checks as ioctl */
1098 +       if (capable(CAP_SYS_ADMIN)) {
1099 +               ret = proc_doulonghex(ctl, write, f, buffer, lenp, ppos);
1100 +               if (ret == 0 && write && *valp != oval) {
1101 +                       /* need to restore old value to close properly */
1102 +                       dump_config.dump_device = (dev_t) oval;
1103 +                       __dump_open();
1104 +                       ret = dumper_setup(dump_config.flags, (dev_t) *valp);
1105 +               }
1106 +       }
1107 +
1108 +       return ret;
1109 +}
1110 +
1111 +/* All for the want of a proc_do_xxx routine which prints values in hex */
1112 +/* Write is not implemented correctly, so mode is set to 0444 above. */
1113 +static int 
1114 +proc_doulonghex(ctl_table *ctl, int write, struct file *f,
1115 +                void __user *buffer, size_t *lenp, loff_t *ppos)
1116 +{
1117 +#define TMPBUFLEN 21
1118 +       unsigned long *i;
1119 +       size_t len, left;
1120 +       char buf[TMPBUFLEN];
1121 +
1122 +       if (!ctl->data || !ctl->maxlen || !*lenp || (*ppos && !write)) {
1123 +               *lenp = 0;
1124 +               return 0;
1125 +       }
1126 +       
1127 +       i = (unsigned long *) ctl->data;
1128 +       left = *lenp;
1129 +       
1130 +       sprintf(buf, "0x%lx\n", (*i));
1131 +       len = strlen(buf);
1132 +       if (len > left)
1133 +               len = left;
1134 +       if(copy_to_user(buffer, buf, len))
1135 +               return -EFAULT;
1136 +       
1137 +       left -= len;
1138 +       *lenp -= left;
1139 +       *ppos += *lenp;
1140 +       return 0;
1141 +}
1142 +
1143 +/*
1144 + * -----------------------------------------------------------------------
1145 + *                     I N I T   F U N C T I O N S
1146 + * -----------------------------------------------------------------------
1147 + */
1148 +
1149 +#ifdef CONFIG_COMPAT
1150 +static int dw_long(unsigned int fd, unsigned int cmd, unsigned long arg,
1151 +                struct file *f)
1152 +{
1153 +        mm_segment_t old_fs = get_fs();
1154 +        int err;
1155 +        unsigned long val;
1156 +
1157 +        set_fs (KERNEL_DS);
1158 +        err = sys_ioctl(fd, cmd, (u64)&val);
1159 +        set_fs (old_fs);
1160 +        if (!err && put_user((unsigned int) val, (u32 *)arg))
1161 +               return -EFAULT;
1162 +        return err;
1163 +}
1164 +#endif
1165 +
1166 +/*
1167 + * These register and unregister routines are exported for modules
1168 + * to register their dump drivers (like block, net etc)
1169 + */
1170 +int
1171 +dump_register_device(struct dump_dev *ddev)
1172 +{
1173 +       struct list_head *tmp;
1174 +       struct dump_dev *dev;
1175 +
1176 +       list_for_each(tmp, &dump_target_list) {
1177 +               dev = list_entry(tmp, struct dump_dev, list);
1178 +               if (strcmp(ddev->type_name, dev->type_name) == 0) {
1179 +                       printk("Target type %s already registered\n",
1180 +                                       dev->type_name);
1181 +                       return -1; /* return proper error */
1182 +               }
1183 +       }
1184 +       list_add(&(ddev->list), &dump_target_list);
1185 +       
1186 +       return 0;
1187 +}
1188 +
1189 +void
1190 +dump_unregister_device(struct dump_dev *ddev)
1191 +{
1192 +       list_del(&(ddev->list));
1193 +       if (ddev != dump_dev)
1194 +               return;
1195 +
1196 +       dump_okay = 0;
1197 +
1198 +       if (dump_config.dumper)
1199 +               dump_unconfigure();
1200 +
1201 +       dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
1202 +       dump_okay = 0;
1203 +       dump_dev = NULL;
1204 +       dump_config.dumper = NULL;
1205 +}
1206 +
1207 +static int panic_event(struct notifier_block *this, unsigned long event,
1208 +                      void *ptr)
1209 +{
1210 +#ifdef CONFIG_ARM
1211 +       get_current_general_regs(&all_regs);
1212 +       get_current_cp14_regs(&all_regs);
1213 +       get_current_cp15_regs(&all_regs);
1214 +       dump_execute((const char *)ptr, &all_regs);
1215 +#else
1216 +       struct pt_regs regs;
1217 +       
1218 +       get_current_regs(&regs);
1219 +       dump_execute((const char *)ptr, &regs);
1220 +#endif
1221 +       return 0;
1222 +}
1223 +
1224 +extern struct notifier_block *panic_notifier_list;
1225 +static int panic_event(struct notifier_block *, unsigned long, void *);
1226 +static struct notifier_block panic_block = {
1227 +       .notifier_call = panic_event,
1228 +};
1229 +
1230 +#ifdef CONFIG_MAGIC_SYSRQ
1231 +/* Sysrq handler */
1232 +static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
1233 +               struct tty_struct *tty) {
1234 +       if(!pt_regs) {
1235 +               struct pt_regs regs;
1236 +               get_current_regs(&regs);
1237 +               dump_execute("sysrq", &regs);
1238 +
1239 +       } else {
1240 +               dump_execute("sysrq", pt_regs);
1241 +       }
1242 +}
1243 +
1244 +static struct sysrq_key_op sysrq_crashdump_op = {
1245 +       .handler        =       sysrq_handle_crashdump,
1246 +       .help_msg       =       "Dump",
1247 +       .action_msg     =       "Starting crash dump",
1248 +};
1249 +#endif
1250 +
1251 +static inline void
1252 +dump_sysrq_register(void) 
1253 +{
1254 +#ifdef CONFIG_MAGIC_SYSRQ
1255 +       register_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
1256 +#endif
1257 +}
1258 +
1259 +static inline void
1260 +dump_sysrq_unregister(void)
1261 +{
1262 +#ifdef CONFIG_MAGIC_SYSRQ
1263 +       unregister_sysrq_key(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
1264 +#endif
1265 +}
1266 +
1267 +/*
1268 + * Name: dump_init()
1269 + * Func: Initialize the dump process.  This will set up any architecture
1270 + *       dependent code.  The big key is we need the memory offsets before
1271 + *       the page table is initialized, because the base memory offset
1272 + *       is changed after paging_init() is called.
1273 + */
1274 +static int __init
1275 +dump_init(void)
1276 +{
1277 +       struct sysinfo info;
1278 +       int err;
1279 +
1280 +       /* try to create our dump device */
1281 +       err = misc_register(&dump_miscdev);
1282 +       if (err) {
1283 +               printk("cannot register dump character device!\n");
1284 +               return err;
1285 +       }
1286 +
1287 +       __dump_init((u64)PAGE_OFFSET);
1288 +
1289 +#ifdef CONFIG_COMPAT
1290 +       err = register_ioctl32_conversion(DIOSDUMPDEV, NULL);
1291 +       err |= register_ioctl32_conversion(DIOGDUMPDEV, NULL);
1292 +       err |= register_ioctl32_conversion(DIOSDUMPLEVEL, NULL);
1293 +       err |= register_ioctl32_conversion(DIOGDUMPLEVEL, dw_long);
1294 +       err |= register_ioctl32_conversion(DIOSDUMPFLAGS, NULL);
1295 +       err |= register_ioctl32_conversion(DIOGDUMPFLAGS, dw_long);
1296 +       err |= register_ioctl32_conversion(DIOSDUMPCOMPRESS, NULL);
1297 +       err |= register_ioctl32_conversion(DIOGDUMPCOMPRESS, dw_long);
1298 +       err |= register_ioctl32_conversion(DIOSTARGETIP, NULL);
1299 +       err |= register_ioctl32_conversion(DIOGTARGETIP, NULL);
1300 +       err |= register_ioctl32_conversion(DIOSTARGETPORT, NULL);
1301 +       err |= register_ioctl32_conversion(DIOGTARGETPORT, NULL);
1302 +       err |= register_ioctl32_conversion(DIOSSOURCEPORT, NULL);
1303 +       err |= register_ioctl32_conversion(DIOGSOURCEPORT, NULL);
1304 +       err |= register_ioctl32_conversion(DIOSETHADDR, NULL);
1305 +       err |= register_ioctl32_conversion(DIOGETHADDR, NULL);
1306 +       err |= register_ioctl32_conversion(DIOGDUMPOKAY, dw_long);
1307 +       err |= register_ioctl32_conversion(DIOSDUMPTAKE, NULL);
1308 +       if (err) {
1309 +                printk(KERN_ERR "LKCD: registering ioctl32 translations failed\
1310 +");
1311 +       }
1312 +#endif
1313 +       /* set the dump_compression_list structure up */
1314 +       dump_register_compression(&dump_none_compression);
1315 +
1316 +       /* grab the total memory size now (not if/when we crash) */
1317 +       si_meminfo(&info);
1318 +
1319 +       /* set the memory size */
1320 +       dump_header.dh_memory_size = (u64)info.totalram;
1321 +
1322 +       sysctl_header = register_sysctl_table(kernel_root, 0);
1323 +       dump_sysrq_register();
1324 +
1325 +       notifier_chain_register(&panic_notifier_list, &panic_block);
1326 +       dump_function_ptr = dump_execute;
1327 +
1328 +       pr_info("Crash dump driver initialized.\n");
1329 +       return 0;
1330 +}
1331 +
1332 +static void __exit
1333 +dump_cleanup(void)
1334 +{
1335 +       int err;
1336 +       dump_okay = 0;
1337 +
1338 +       if (dump_config.dumper)
1339 +               dump_unconfigure();
1340 +
1341 +       /* arch-specific cleanup routine */
1342 +       __dump_cleanup();
1343 +
1344 +#ifdef CONFIG_COMPAT
1345 +       err = unregister_ioctl32_conversion(DIOSDUMPDEV);
1346 +       err |= unregister_ioctl32_conversion(DIOGDUMPDEV);
1347 +       err |= unregister_ioctl32_conversion(DIOSDUMPLEVEL);
1348 +       err |= unregister_ioctl32_conversion(DIOGDUMPLEVEL);
1349 +       err |= unregister_ioctl32_conversion(DIOSDUMPFLAGS);
1350 +       err |= unregister_ioctl32_conversion(DIOGDUMPFLAGS);
1351 +       err |= unregister_ioctl32_conversion(DIOSDUMPCOMPRESS);
1352 +       err |= unregister_ioctl32_conversion(DIOGDUMPCOMPRESS);
1353 +       err |= unregister_ioctl32_conversion(DIOSTARGETIP);
1354 +       err |= unregister_ioctl32_conversion(DIOGTARGETIP);
1355 +       err |= unregister_ioctl32_conversion(DIOSTARGETPORT);
1356 +       err |= unregister_ioctl32_conversion(DIOGTARGETPORT);
1357 +       err |= unregister_ioctl32_conversion(DIOSSOURCEPORT);
1358 +       err |= unregister_ioctl32_conversion(DIOGSOURCEPORT);
1359 +       err |= unregister_ioctl32_conversion(DIOSETHADDR);
1360 +       err |= unregister_ioctl32_conversion(DIOGETHADDR);
1361 +       err |= unregister_ioctl32_conversion(DIOGDUMPOKAY);
1362 +       err |= unregister_ioctl32_conversion(DIOSDUMPTAKE);
1363 +       if (err) {
1364 +               printk(KERN_ERR "LKCD: Unregistering ioctl32 translations failed\n");
1365 +       }
1366 +#endif
1367 +
1368 +       /* ignore errors while unregistering -- since can't do anything */
1369 +       unregister_sysctl_table(sysctl_header);
1370 +       misc_deregister(&dump_miscdev);
1371 +       dump_sysrq_unregister();
1372 +       notifier_chain_unregister(&panic_notifier_list, &panic_block);
1373 +       dump_function_ptr = NULL;
1374 +}
1375 +
1376 +EXPORT_SYMBOL(dump_register_compression);
1377 +EXPORT_SYMBOL(dump_unregister_compression);
1378 +EXPORT_SYMBOL(dump_register_device);
1379 +EXPORT_SYMBOL(dump_unregister_device);
1380 +EXPORT_SYMBOL(dump_config);
1381 +EXPORT_SYMBOL(dump_silence_level);
1382 +
1383 +EXPORT_SYMBOL(__dump_irq_enable);
1384 +EXPORT_SYMBOL(__dump_irq_restore);
1385 +
1386 +MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
1387 +MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
1388 +MODULE_LICENSE("GPL");
1389 +
1390 +module_init(dump_init);
1391 +module_exit(dump_cleanup);
1392 Index: linux-2.6.10/drivers/dump/dump_execute.c
1393 ===================================================================
1394 --- linux-2.6.10.orig/drivers/dump/dump_execute.c       2005-04-07 19:34:21.197950744 +0800
1395 +++ linux-2.6.10/drivers/dump/dump_execute.c    2005-04-07 18:13:56.898753920 +0800
1396 @@ -0,0 +1,144 @@
1397 +/*
1398 + * The file has the common/generic dump execution code 
1399 + *
1400 + * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
1401 + *     Split and rewrote high level dump execute code to make use 
1402 + *     of dump method interfaces.
1403 + *
1404 + * Derived from original code in dump_base.c created by 
1405 + *     Matt Robinson <yakker@sourceforge.net>)
1406 + *     
1407 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
1408 + * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
1409 + * Copyright (C) 2002 International Business Machines Corp. 
1410 + *
1411 + * Assumes dumper and dump config settings are in place
1412 + * (invokes corresponding dumper specific routines as applicable)
1413 + *
1414 + * This code is released under version 2 of the GNU GPL.
1415 + */
1416 +#include <linux/kernel.h>
1417 +#include <linux/notifier.h>
1418 +#include <linux/dump.h>
1419 +#include <linux/delay.h>
1420 +#include <linux/reboot.h>
1421 +#include "dump_methods.h"
1422 +
1423 +struct notifier_block *dump_notifier_list; /* dump started/ended callback */
1424 +
1425 +extern int panic_timeout;
1426 +
1427 +/* Dump progress indicator */
1428 +void 
1429 +dump_speedo(int i)
1430 +{
1431 +       static const char twiddle[4] =  { '|', '\\', '-', '/' };
1432 +       printk("%c\b", twiddle[i&3]);
1433 +}
1434 +
1435 +/* Make the device ready and write out the header */
1436 +int dump_begin(void)
1437 +{
1438 +       int err = 0;
1439 +
1440 +       /* dump_dev = dump_config.dumper->dev; */
1441 +       dumper_reset();
1442 +       if ((err = dump_dev_silence())) {
1443 +               /* quiesce failed, can't risk continuing */
1444 +               /* Todo/Future: switch to alternate dump scheme if possible */
1445 +               printk("dump silence dev failed ! error %d\n", err);
1446 +               return err;
1447 +       }
1448 +
1449 +       pr_debug("Writing dump header\n");
1450 +       if ((err = dump_update_header())) {
1451 +               printk("dump update header failed ! error %d\n", err);
1452 +               dump_dev_resume();
1453 +               return err;
1454 +       }
1455 +
1456 +       dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
1457 +
1458 +       return 0;
1459 +}
1460 +
1461 +/* 
1462 + * Write the dump terminator, a final header update and let go of 
1463 + * exclusive use of the device for dump.
1464 + */
1465 +int dump_complete(void)
1466 +{
1467 +       int ret = 0;
1468 +
1469 +       if (dump_config.level != DUMP_LEVEL_HEADER) {
1470 +               if ((ret = dump_update_end_marker())) {
1471 +                       printk("dump update end marker error %d\n", ret);
1472 +               }
1473 +               if ((ret = dump_update_header())) {
1474 +                       printk("dump update header error %d\n", ret);
1475 +               }
1476 +       }
1477 +       ret = dump_dev_resume();
1478 +
1479 +       if ((panic_timeout > 0) && (!(dump_config.flags & (DUMP_FLAGS_SOFTBOOT | DUMP_FLAGS_NONDISRUPT)))) {
1480 +               mdelay(panic_timeout * 1000);
1481 +               machine_restart(NULL);
1482 +       }
1483 +
1484 +       return ret;
1485 +}
1486 +
1487 +/* Saves all dump data */
1488 +int dump_execute_savedump(void)
1489 +{
1490 +       int ret = 0, err = 0;
1491 +
1492 +       if ((ret = dump_begin()))  {
1493 +               return ret;
1494 +       }
1495 +
1496 +       if (dump_config.level != DUMP_LEVEL_HEADER) { 
1497 +               ret = dump_sequencer();
1498 +       }
1499 +       if ((err = dump_complete())) {
1500 +               printk("Dump complete failed. Error %d\n", err);
1501 +       }
1502 +
1503 +       return ret;
1504 +}
1505 +
1506 +extern void dump_calc_bootmap_pages(void);
1507 +
1508 +/* Does all the real work:  Capture and save state */
1509 +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
1510 +{
1511 +       int ret = 0;
1512 +
1513 +#ifdef CONFIG_DISCONTIGMEM
1514 +        printk(KERN_INFO "Reconfiguring memory bank information....\n");
1515 +        printk(KERN_INFO "This may take a while....\n");
1516 +        dump_reconfigure_mbanks();
1517 +#endif
1518 +
1519 +       if ((ret = dump_configure_header(panic_str, regs))) {
1520 +               printk("dump config header failed ! error %d\n", ret);
1521 +               return ret;     
1522 +       }
1523 +
1524 +       dump_calc_bootmap_pages();
1525 +       /* tell interested parties that a dump is about to start */
1526 +       notifier_call_chain(&dump_notifier_list, DUMP_BEGIN, 
1527 +               &dump_config.dump_device);
1528 +
1529 +       if (dump_config.level != DUMP_LEVEL_NONE)
1530 +               ret = dump_execute_savedump();
1531 +
1532 +       pr_debug("dumped %ld blocks of %d bytes each\n", 
1533 +               dump_config.dumper->count, DUMP_BUFFER_SIZE);
1534 +       
1535 +       /* tell interested parties that a dump has completed */
1536 +       notifier_call_chain(&dump_notifier_list, DUMP_END, 
1537 +               &dump_config.dump_device);
1538 +
1539 +       return ret;
1540 +}
1541 Index: linux-2.6.10/drivers/dump/dump_x8664.c
1542 ===================================================================
1543 --- linux-2.6.10.orig/drivers/dump/dump_x8664.c 2005-04-07 19:34:21.197950744 +0800
1544 +++ linux-2.6.10/drivers/dump/dump_x8664.c      2005-04-07 18:13:56.901753464 +0800
1545 @@ -0,0 +1,362 @@
1546 +/*
1547 + * Architecture specific (x86-64) functions for Linux crash dumps.
1548 + *
1549 + * Created by: Matt Robinson (yakker@sgi.com)
1550 + *
1551 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
1552 + *
1553 + * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
1554 + * Copyright 2000 TurboLinux, Inc.  All rights reserved.
1555 + *
1556 + * x86-64 port Copyright 2002 Andi Kleen, SuSE Labs
1557 + * x86-64 port Sachin Sant ( sachinp@in.ibm.com )
1558 + * This code is released under version 2 of the GNU GPL.
1559 + */
1560 +
1561 +/*
1562 + * The hooks for dumping the kernel virtual memory to disk are in this
1563 + * file.  Any time a modification is made to the virtual memory mechanism,
1564 + * these routines must be changed to use the new mechanisms.
1565 + */
1566 +#include <linux/init.h>
1567 +#include <linux/types.h>
1568 +#include <linux/kernel.h>
1569 +#include <linux/smp.h>
1570 +#include <linux/fs.h>
1571 +#include <linux/vmalloc.h>
1572 +#include <linux/dump.h>
1573 +#include "dump_methods.h"
1574 +#include <linux/mm.h>
1575 +#include <linux/rcupdate.h>
1576 +#include <asm/processor.h>
1577 +#include <asm/hardirq.h>
1578 +#include <asm/kdebug.h>
1579 +#include <asm/uaccess.h>
1580 +#include <asm/nmi.h>
1581 +#include <asm/kdebug.h>
1582 +
1583 +static __s32   saved_irq_count; /* saved preempt_count() flag */
1584 +
1585 +void (*dump_trace_ptr)(struct pt_regs *);
1586 +
1587 +static int alloc_dha_stack(void)
1588 +{
1589 +       int i;
1590 +       void *ptr;
1591 +       
1592 +       if (dump_header_asm.dha_stack[0])
1593 +               return 0;
1594 +
1595 +               ptr = vmalloc(THREAD_SIZE * num_online_cpus());
1596 +       if (!ptr) {
1597 +               printk("vmalloc for dha_stacks failed\n");
1598 +               return -ENOMEM;
1599 +       }
1600 +
1601 +       for (i = 0; i < num_online_cpus(); i++) {
1602 +               dump_header_asm.dha_stack[i] = 
1603 +                       (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
1604 +       }
1605 +       return 0;
1606 +}
1607 +
1608 +static int free_dha_stack(void) 
1609 +{
1610 +       if (dump_header_asm.dha_stack[0]) {
1611 +               vfree((void *)dump_header_asm.dha_stack[0]);    
1612 +               dump_header_asm.dha_stack[0] = 0;
1613 +       }       
1614 +       return 0;
1615 +}
1616 +
1617 +void
1618 +__dump_save_regs(struct pt_regs* dest_regs, const struct pt_regs* regs)
1619 +{
1620 +       if (regs)
1621 +               memcpy(dest_regs, regs, sizeof(struct pt_regs));
1622 +}
1623 +
1624 +void
1625 +__dump_save_context(int cpu, const struct pt_regs *regs, 
1626 +       struct task_struct *tsk)
1627 +{
1628 +       dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
1629 +       __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
1630 +
1631 +       /* take a snapshot of the stack */
1632 +       /* doing this enables us to tolerate slight drifts on this cpu */
1633 +
1634 +       if (dump_header_asm.dha_stack[cpu]) {
1635 +               memcpy((void *)dump_header_asm.dha_stack[cpu],
1636 +                               STACK_START_POSITION(tsk),
1637 +                               THREAD_SIZE);
1638 +       }
1639 +       dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
1640 +}
1641 +
1642 +#ifdef CONFIG_SMP
1643 +extern cpumask_t irq_affinity[];
1644 +extern irq_desc_t irq_desc[];
1645 +extern void dump_send_ipi(void);
1646 +static int dump_expect_ipi[NR_CPUS];
1647 +static atomic_t waiting_for_dump_ipi;
1648 +static unsigned long saved_affinity[NR_IRQS];
1649 +
1650 +extern void stop_this_cpu(void *);
1651 +
1652 +static int
1653 +dump_nmi_callback(struct pt_regs *regs, int cpu) 
1654 +{
1655 +       if (!dump_expect_ipi[cpu]) {
1656 +               return 0;
1657 +       }
1658 +       
1659 +       dump_expect_ipi[cpu] = 0;
1660 +
1661 +       dump_save_this_cpu(regs);
1662 +       atomic_dec(&waiting_for_dump_ipi);
1663 +
1664 +level_changed:
1665 +
1666 +       switch (dump_silence_level) {
1667 +        case DUMP_HARD_SPIN_CPUS:       /* Spin until dump is complete */
1668 +                while (dump_oncpu) {
1669 +                        barrier();      /* paranoia */
1670 +                        if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
1671 +                                goto level_changed;
1672 +
1673 +                        cpu_relax();    /* kill time nicely */
1674 +                }
1675 +                break;
1676 +
1677 +        case DUMP_HALT_CPUS:            /* Execute halt */
1678 +                stop_this_cpu(NULL);
1679 +                break;
1680 +
1681 +        case DUMP_SOFT_SPIN_CPUS:
1682 +                /* Mark the task so it spins in schedule */
1683 +                set_tsk_thread_flag(current, TIF_NEED_RESCHED);
1684 +                break;
1685 +        }
1686 +
1687 +       return 1;
1688 +}
1689 +
1690 +/* save registers on other processors */
1691 +void 
1692 +__dump_save_other_cpus(void) 
1693 +{
1694 +       int i, cpu = smp_processor_id();
1695 +       int other_cpus = num_online_cpus() - 1;
1696 +
1697 +       if (other_cpus > 0) {
1698 +               atomic_set(&waiting_for_dump_ipi, other_cpus);
1699 +
1700 +               for (i = 0; i < NR_CPUS; i++)
1701 +                       dump_expect_ipi[i] = (i != cpu && cpu_online(i));
1702 +               
1703 +               set_nmi_callback(dump_nmi_callback);
1704 +               wmb();
1705 +
1706 +               dump_send_ipi();
1707 +
1708 +               /* Maybe we don't need to wait for the NMI to be processed.
1709 +                  Just write out the header at the end of dumping; if
1710 +                  this IPI is not processed until then, there probably
1711 +                  is a problem and we simply fail to capture the state
1712 +                  of the other cpus. */
1713 +               while(atomic_read(&waiting_for_dump_ipi) > 0)
1714 +                       cpu_relax();
1715 +
1716 +               unset_nmi_callback();
1717 +       }
1718 +       return;
1719 +}
1720 +
1721 +/*
1722 + * Routine to save the old irq affinities and change affinities of all irqs to
1723 + * the dumping cpu.
1724 + */
1725 +static void
1726 +set_irq_affinity(void)
1727 +{
1728 +       int i;
1729 +       cpumask_t cpu = CPU_MASK_NONE;
1730 +
1731 +       cpu_set(smp_processor_id(), cpu); 
1732 +       memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
1733 +       for (i = 0; i < NR_IRQS; i++) {
1734 +               if (irq_desc[i].handler == NULL)
1735 +                       continue;
1736 +               irq_affinity[i] = cpu;
1737 +               if (irq_desc[i].handler->set_affinity != NULL)
1738 +                       irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
1739 +       }
1740 +}
1741 +
1742 +/*
1743 + * Restore old irq affinities.
1744 + */
1745 +static void
1746 +reset_irq_affinity(void)
1747 +{
1748 +       int i;
1749 +
1750 +       memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
1751 +       for (i = 0; i < NR_IRQS; i++) {
1752 +               if (irq_desc[i].handler == NULL)
1753 +                       continue;
1754 +               if (irq_desc[i].handler->set_affinity != NULL)
1755 +                       irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
1756 +       }
1757 +}
1758 +
1759 +#else /* !CONFIG_SMP */
1760 +#define set_irq_affinity()     do { } while (0)
1761 +#define reset_irq_affinity()   do { } while (0)
1762 +#define save_other_cpu_states() do { } while (0)
1763 +#endif /* !CONFIG_SMP */
1764 +
1765 +static inline void
1766 +irq_bh_save(void)
1767 +{
1768 +       saved_irq_count = irq_count();
1769 +       preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
1770 +}
1771 +
1772 +static inline void
1773 +irq_bh_restore(void)
1774 +{
1775 +       preempt_count() |= saved_irq_count;
1776 +}
1777 +
1778 +/*
1779 + * Name: __dump_irq_enable
1780 + * Func: Reset system so interrupts are enabled.
1781 + *       This is used for dump methods that require interrupts
1782 + *       Eventually, all methods will have interrupts disabled
1783 + *       and this code can be removed.
1784 + *
1785 + *     Change irq affinities
1786 + *     Re-enable interrupts
1787 + */
1788 +int
1789 +__dump_irq_enable(void)
1790 +{
1791 +        set_irq_affinity();
1792 +        irq_bh_save();
1793 +        local_irq_enable();
1794 +       return 0;
1795 +}
1796 +
1797 +/*
1798 + * Name: __dump_irq_restore
1799 + * Func: Resume the system state in an architecture-specific way.
1800 + *
1801 + */
1802 +void
1803 +__dump_irq_restore(void)
1804 +{
1805 +        local_irq_disable();
1806 +        reset_irq_affinity();
1807 +        irq_bh_restore();
1808 +}
1809 +
1810 +/*
1811 + * Name: __dump_configure_header()
1812 + * Func: Configure the dump header with all proper values.
1813 + */
1814 +int
1815 +__dump_configure_header(const struct pt_regs *regs)
1816 +{
1817 +       /* Dummy function - return */
1818 +       return (0);
1819 +}
1820 +
1821 +static int notify(struct notifier_block *nb, unsigned long code, void *data)
1822 +{
1823 +       if (code == DIE_NMI_IPI && dump_oncpu)
1824 +               return NOTIFY_BAD; 
1825 +       return NOTIFY_DONE; 
1826 +} 
1827 +
1828 +static struct notifier_block dump_notifier = { 
1829 +       .notifier_call = notify,        
1830 +}; 
1831 +
1832 +/*
1833 + * Name: __dump_init()
1834 + * Func: Initialize the dumping routine process.
1835 + */
1836 +void
1837 +__dump_init(uint64_t local_memory_start)
1838 +{
1839 +       notifier_chain_register(&die_chain, &dump_notifier);
1840 +}
1841 +
1842 +/*
1843 + * Name: __dump_open()
1844 + * Func: Open the dump device (architecture specific).  This is in
1845 + *       case it's necessary in the future.
1846 + */
1847 +void
1848 +__dump_open(void)
1849 +{
1850 +       alloc_dha_stack();
1851 +       /* return */
1852 +       return;
1853 +}
1854 +
1855 +/*
1856 + * Name: __dump_cleanup()
1857 + * Func: Free any architecture specific data structures. This is called
1858 + *       when the dump module is being removed.
1859 + */
1860 +void
1861 +__dump_cleanup(void)
1862 +{
1863 +       free_dha_stack();
1864 +       notifier_chain_unregister(&die_chain, &dump_notifier);
1865 +       synchronize_kernel(); 
1866 +       return;
1867 +}
1868 +
1869 +extern int page_is_ram(unsigned long);
1870 +
1871 +/*
1872 + * Name: __dump_page_valid()
1873 + * Func: Check if page is valid to dump.
1874 + */
1875 +int
1876 +__dump_page_valid(unsigned long index)
1877 +{
1878 +       if (!pfn_valid(index))
1879 +               return 0;
1880 +
1881 +       return page_is_ram(index);
1882 +}
1883 +
1884 +/*
1885 + * Name: manual_handle_crashdump()
1886 + * Func: Interface for the lkcd dump command. Calls dump_execute()
1887 + */
1888 +int
1889 +manual_handle_crashdump(void) {
1890 +
1891 +        struct pt_regs regs;
1892 +
1893 +        get_current_regs(&regs);
1894 +        dump_execute("manual", &regs);
1895 +        return 0;
1896 +}
1897 +
1898 +/*
1899 + * Name: __dump_clean_irq_state()
1900 + * Func: Clean up from the previous IRQ handling state. Such as oops from 
1901 + *       interrupt handler or bottom half.
1902 + */
1903 +void
1904 +__dump_clean_irq_state(void)
1905 +{
1906 +    return;
1907 +}
1908 Index: linux-2.6.10/drivers/dump/dump_rle.c
1909 ===================================================================
1910 --- linux-2.6.10.orig/drivers/dump/dump_rle.c   2005-04-07 19:34:21.197950744 +0800
1911 +++ linux-2.6.10/drivers/dump/dump_rle.c        2005-04-07 18:13:56.897754072 +0800
1912 @@ -0,0 +1,176 @@
1913 +/*
1914 + * RLE Compression functions for kernel crash dumps.
1915 + *
1916 + * Created by: Matt Robinson (yakker@sourceforge.net)
1917 + * Copyright 2001 Matt D. Robinson.  All rights reserved.
1918 + *
1919 + * This code is released under version 2 of the GNU GPL.
1920 + */
1921 +
1922 +/* header files */
1923 +#include <linux/config.h>
1924 +#include <linux/module.h>
1925 +#include <linux/sched.h>
1926 +#include <linux/fs.h>
1927 +#include <linux/file.h>
1928 +#include <linux/init.h>
1929 +#include <linux/dump.h>
1930 +
1931 +/*
1932 + * Name: dump_compress_rle()
1933 + * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
1934 + *       reasonable, if possible.  This is the same routine we use in IRIX.
1935 + */
1936 +static u32
1937 +dump_compress_rle(const u8 *old, u32 oldsize, u8 *new, u32 newsize,
1938 +               unsigned long loc)
1939 +{
1940 +       u16 ri, wi, count = 0;
1941 +       u_char value = 0, cur_byte;
1942 +
1943 +       /*
1944 +        * If the block should happen to "compress" to larger than the
1945 +        * buffer size, allocate a larger one and change cur_buf_size.
1946 +        */
1947 +
1948 +       wi = ri = 0;
1949 +
1950 +       while (ri < oldsize) {
1951 +               if (!ri) {
1952 +                       cur_byte = value = old[ri];
1953 +                       count = 0;
1954 +               } else {
1955 +                       if (count == 255) {
1956 +                               if (wi + 3 > oldsize) {
1957 +                                       return oldsize;
1958 +                               }
1959 +                               new[wi++] = 0;
1960 +                               new[wi++] = count;
1961 +                               new[wi++] = value;
1962 +                               value = cur_byte = old[ri];
1963 +                               count = 0;
1964 +                       } else { 
1965 +                               if ((cur_byte = old[ri]) == value) {
1966 +                                       count++;
1967 +                               } else {
1968 +                                       if (count > 1) {
1969 +                                               if (wi + 3 > oldsize) {
1970 +                                                       return oldsize;
1971 +                                               }
1972 +                                               new[wi++] = 0;
1973 +                                               new[wi++] = count;
1974 +                                               new[wi++] = value;
1975 +                                       } else if (count == 1) {
1976 +                                               if (value == 0) {
1977 +                                                       if (wi + 3 > oldsize) {
1978 +                                                               return oldsize;
1979 +                                                       }
1980 +                                                       new[wi++] = 0;
1981 +                                                       new[wi++] = 1;
1982 +                                                       new[wi++] = 0;
1983 +                                               } else {
1984 +                                                       if (wi + 2 > oldsize) {
1985 +                                                               return oldsize;
1986 +                                                       }
1987 +                                                       new[wi++] = value;
1988 +                                                       new[wi++] = value;
1989 +                                               }
1990 +                                       } else { /* count == 0 */
1991 +                                               if (value == 0) {
1992 +                                                       if (wi + 2 > oldsize) {
1993 +                                                               return oldsize;
1994 +                                                       }
1995 +                                                       new[wi++] = value;
1996 +                                                       new[wi++] = value;
1997 +                                               } else {
1998 +                                                       if (wi + 1 > oldsize) {
1999 +                                                               return oldsize;
2000 +                                                       }
2001 +                                                       new[wi++] = value;
2002 +                                               }
2003 +                                       } /* if count > 1 */
2004 +
2005 +                                       value = cur_byte;
2006 +                                       count = 0;
2007 +
2008 +                               } /* if byte == value */
2009 +
2010 +                       } /* if count == 255 */
2011 +
2012 +               } /* if ri == 0 */
2013 +               ri++;
2014 +
2015 +       }
2016 +       if (count > 1) {
2017 +               if (wi + 3 > oldsize) {
2018 +                       return oldsize;
2019 +               }
2020 +               new[wi++] = 0;
2021 +               new[wi++] = count;
2022 +               new[wi++] = value;
2023 +       } else if (count == 1) {
2024 +               if (value == 0) {
2025 +                       if (wi + 3 > oldsize)
2026 +                               return oldsize;
2027 +                       new[wi++] = 0;
2028 +                       new[wi++] = 1;
2029 +                       new[wi++] = 0;
2030 +               } else {
2031 +                       if (wi + 2 > oldsize)
2032 +                               return oldsize;
2033 +                       new[wi++] = value;
2034 +                       new[wi++] = value;
2035 +               }
2036 +       } else { /* count == 0 */
2037 +               if (value == 0) {
2038 +                       if (wi + 2 > oldsize)
2039 +                               return oldsize;
2040 +                       new[wi++] = value;
2041 +                       new[wi++] = value;
2042 +               } else {
2043 +                       if (wi + 1 > oldsize)
2044 +                               return oldsize;
2045 +                       new[wi++] = value;
2046 +               }
2047 +       } /* if count > 1 */
2048 +
2049 +       value = cur_byte;
2050 +       count = 0;
2051 +       return wi;
2052 +}
2053 +
2054 +/* setup the rle compression functionality */
2055 +static struct __dump_compress dump_rle_compression = {
2056 +       .compress_type = DUMP_COMPRESS_RLE,
2057 +       .compress_func = dump_compress_rle,
2058 +       .compress_name = "RLE",
2059 +};
2060 +
2061 +/*
2062 + * Name: dump_compress_rle_init()
2063 + * Func: Initialize rle compression for dumping.
2064 + */
2065 +static int __init
2066 +dump_compress_rle_init(void)
2067 +{
2068 +       dump_register_compression(&dump_rle_compression);
2069 +       return 0;
2070 +}
2071 +
2072 +/*
2073 + * Name: dump_compress_rle_cleanup()
2074 + * Func: Remove rle compression for dumping.
2075 + */
2076 +static void __exit
2077 +dump_compress_rle_cleanup(void)
2078 +{
2079 +       dump_unregister_compression(DUMP_COMPRESS_RLE);
2080 +}
2081 +
2082 +/* module initialization */
2083 +module_init(dump_compress_rle_init);
2084 +module_exit(dump_compress_rle_cleanup);
2085 +
2086 +MODULE_LICENSE("GPL");
2087 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
2088 +MODULE_DESCRIPTION("RLE compression module for crash dump driver");
2089 Index: linux-2.6.10/drivers/dump/dump_overlay.c
2090 ===================================================================
2091 --- linux-2.6.10.orig/drivers/dump/dump_overlay.c       2005-04-07 19:34:21.197950744 +0800
2092 +++ linux-2.6.10/drivers/dump/dump_overlay.c    2005-04-07 18:13:56.905752856 +0800
2093 @@ -0,0 +1,890 @@
2094 +/*
2095 + * Two-stage soft-boot based dump scheme methods (memory overlay
2096 + * with post soft-boot writeout)
2097 + *
2098 + * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
2099 + *
2100 + * This approach of saving the dump in memory and writing it 
2101 + * out after a softboot without clearing memory is derived from the 
2102 + * Mission Critical Linux dump implementation. Credits and a big
2103 + * thanks for letting the lkcd project make use of the excellent 
2104 + * piece of work and also for helping with clarifications and 
2105 + * tips along the way are due to:
2106 + *     Dave Winchell <winchell@mclx.com> (primary author of mcore)
2107 + *     and also to
2108 + *     Jeff Moyer <moyer@mclx.com>
2109 + *     Josh Huber <huber@mclx.com>
2110 + * 
2111 + * For those familiar with the mcore implementation, the key 
2112 + * differences/extensions here are in allowing entire memory to be 
2113 + * saved (in compressed form) through a careful ordering scheme 
2114 + * on both the way down as well on the way up after boot, the latter
2115 + * for supporting the LKCD notion of passes in which most critical 
2116 + * data is the first to be saved to the dump device. Also the post 
2117 + * boot writeout happens from within the kernel rather than driven 
2118 + * from userspace.
2119 + *
2120 + * The sequence is orchestrated through the abstraction of "dumpers",
2121 + * one for the first stage which then sets up the dumper for the next 
2122 + * stage, providing for a smooth and flexible reuse of the singlestage 
2123 + * dump scheme methods and a handle to pass dump device configuration 
2124 + * information across the soft boot. 
2125 + *
2126 + * Copyright (C) 2002 International Business Machines Corp. 
2127 + *
2128 + * This code is released under version 2 of the GNU GPL.
2129 + */
2130 +
2131 +/*
2132 + * Disruptive dumping using the second kernel soft-boot option
2133 + * for issuing dump i/o operates in 2 stages:
2134 + * 
2135 + * (1) - Saves the (compressed & formatted) dump in memory using a 
2136 + *       carefully ordered overlay scheme designed to capture the 
2137 + *       entire physical memory or selective portions depending on 
2138 + *       dump config settings, 
2139 + *     - Registers the stage 2 dumper and 
2140 + *     - Issues a soft reboot w/o clearing memory. 
2141 + *
2142 + *     The overlay scheme starts with a small bootstrap free area
2143 + *     and follows a reverse ordering of passes wherein it 
2144 + *     compresses and saves data starting with the least critical 
2145 + *     areas first, thus freeing up the corresponding pages to 
2146 + *     serve as destination for subsequent data to be saved, and
2147 + *     so on. With a good compression ratio, this makes it feasible
2148 + *     to capture an entire physical memory dump without significantly
2149 + *     reducing memory available during regular operation.
2150 + *
2151 + * (2) Post soft-reboot, runs through the saved memory dump and
2152 + *     writes it out to disk, this time around, taking care to
2153 + *     save the more critical data first (i.e. pages which figure 
2154 + *     in early passes for a regular dump). Finally issues a 
2155 + *     clean reboot.
2156 + *     
2157 + *     Since the data was saved in memory after selection/filtering
2158 + *     and formatted as per the chosen output dump format, at this 
2159 + *     stage the filter and format actions are just dummy (or
2160 + *     passthrough) actions, except for influence on ordering of
2161 + *     passes.
2162 + */
2163 +
2164 +#include <linux/types.h>
2165 +#include <linux/kernel.h>
2166 +#include <linux/highmem.h>
2167 +#include <linux/bootmem.h>
2168 +#include <linux/dump.h>
2169 +#ifdef CONFIG_KEXEC
2170 +#include <linux/delay.h>
2171 +#include <linux/reboot.h>
2172 +#include <linux/kexec.h>
2173 +#endif
2174 +#include "dump_methods.h"
2175 +
2176 +extern struct list_head dumper_list_head;
2177 +extern struct dump_memdev *dump_memdev;
2178 +extern struct dumper dumper_stage2;
2179 +struct dump_config_block *dump_saved_config = NULL;
2180 +extern struct dump_blockdev *dump_blockdev;
2181 +static struct dump_memdev *saved_dump_memdev = NULL;
2182 +static struct dumper *saved_dumper = NULL;
2183 +
2184 +#ifdef CONFIG_KEXEC
2185 +extern int panic_timeout;
2186 +#endif
2187 +
2188 +/* For testing 
2189 +extern void dump_display_map(struct dump_memdev *);
2190 +*/
2191 +
2192 +struct dumper *dumper_by_name(char *name)
2193 +{
2194 +#ifdef LATER
2195 +       struct dumper *dumper;
2196 +       list_for_each_entry(dumper, &dumper_list_head, dumper_list)
2197 +               if (!strncmp(dumper->name, name, 32))
2198 +                       return dumper;
2199 +
2200 +       /* not found */
2201 +       return NULL; 
2202 +#endif
2203 +       /* Temporary proof of concept */
2204 +       if (!strncmp(dumper_stage2.name, name, 32))
2205 +               return &dumper_stage2;
2206 +       else
2207 +               return NULL;
2208 +}
2209 +
2210 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
2211 +extern void dump_early_reserve_map(struct dump_memdev *);
2212 +
2213 +void crashdump_reserve(void)
2214 +{
2215 +       extern unsigned long crashdump_addr;
2216 +
2217 +       if (crashdump_addr == 0xdeadbeef) 
2218 +               return;
2219 +
2220 +       /* reserve dump config and saved dump pages */
2221 +       dump_saved_config = (struct dump_config_block *)crashdump_addr;
2222 +       /* magic verification */
2223 +       if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
2224 +               printk("Invalid dump magic. Ignoring dump\n");
2225 +               dump_saved_config = NULL;
2226 +               return;
2227 +       }
2228 +                       
2229 +       printk("Dump may be available from previous boot\n");
2230 +
2231 +#ifdef CONFIG_X86_64
2232 +       reserve_bootmem_node(NODE_DATA(0), 
2233 +               virt_to_phys((void *)crashdump_addr), 
2234 +               PAGE_ALIGN(sizeof(struct dump_config_block)));
2235 +#else
2236 +       reserve_bootmem(virt_to_phys((void *)crashdump_addr), 
2237 +               PAGE_ALIGN(sizeof(struct dump_config_block)));
2238 +#endif
2239 +       dump_early_reserve_map(&dump_saved_config->memdev);
2240 +
2241 +}
2242 +#endif
2243 +
2244 +/* 
2245 + * Loads the dump configuration from a memory block saved across soft-boot
2246 + * The ops vectors need fixing up as the corresp. routines may have 
2247 + * relocated in the new soft-booted kernel.
2248 + */
2249 +int dump_load_config(struct dump_config_block *config)
2250 +{
2251 +       struct dumper *dumper;
2252 +       struct dump_data_filter *filter_table, *filter;
2253 +       struct dump_dev *dev;
2254 +       int i;
2255 +
2256 +       if (config->magic != DUMP_MAGIC_LIVE)
2257 +               return -ENOENT; /* not a valid config */
2258 +
2259 +       /* initialize generic config data */
2260 +       memcpy(&dump_config, &config->config, sizeof(dump_config));
2261 +
2262 +       /* initialize dumper state */
2263 +       if (!(dumper = dumper_by_name(config->dumper.name)))  {
2264 +               printk("dumper name mismatch\n");
2265 +               return -ENOENT; /* dumper mismatch */
2266 +       }
2267 +       
2268 +       /* verify and fixup schema */
2269 +       if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
2270 +               printk("dumper scheme mismatch\n");
2271 +               return -ENOENT; /* mismatch */
2272 +       }
2273 +       config->scheme.ops = dumper->scheme->ops;
2274 +       config->dumper.scheme = &config->scheme;
2275 +       
2276 +       /* verify and fixup filter operations */
2277 +       filter_table = dumper->filter;
2278 +       for (i = 0, filter = config->filter_table; 
2279 +               ((i < MAX_PASSES) && filter_table[i].selector); 
2280 +               i++, filter++) {
2281 +               if (strncmp(filter_table[i].name, filter->name, 32)) {
2282 +                       printk("dump filter mismatch\n");
2283 +                       return -ENOENT; /* filter name mismatch */
2284 +               }
2285 +               filter->selector = filter_table[i].selector;
2286 +       }
2287 +       config->dumper.filter = config->filter_table;
2288 +
2289 +       /* fixup format */
2290 +       if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
2291 +               printk("dump format mismatch\n");
2292 +               return -ENOENT; /* mismatch */
2293 +       }
2294 +       config->fmt.ops = dumper->fmt->ops;
2295 +       config->dumper.fmt = &config->fmt;
2296 +
2297 +       /* fixup target device */
2298 +       dev = (struct dump_dev *)(&config->dev[0]);
2299 +       if (dumper->dev == NULL) {
2300 +               pr_debug("Vanilla dumper - assume default\n");
2301 +               if (dump_dev == NULL)
2302 +                       return -ENODEV;
2303 +               dumper->dev = dump_dev;
2304 +       }
2305 +
2306 +       if (strncmp(dumper->dev->type_name, dev->type_name, 32)) { 
2307 +               printk("dump dev type mismatch %s instead of %s\n",
2308 +                               dev->type_name, dumper->dev->type_name);
2309 +               return -ENOENT; /* mismatch */
2310 +       }
2311 +       dev->ops = dumper->dev->ops; 
2312 +       config->dumper.dev = dev;
2313 +       
2314 +       /* fixup memory device containing saved dump pages */
2315 +       /* assume statically init'ed dump_memdev */
2316 +       config->memdev.ddev.ops = dump_memdev->ddev.ops; 
2317 +       /* switch to memdev from prev boot */
2318 +       saved_dump_memdev = dump_memdev; /* remember current */
2319 +       dump_memdev = &config->memdev;
2320 +
2321 +       /* Make this the current primary dumper */
2322 +       dump_config.dumper = &config->dumper;
2323 +
2324 +       return 0;
2325 +}
2326 +
2327 +/* Saves the dump configuration in a memory block for use across a soft-boot */
2328 +int dump_save_config(struct dump_config_block *config)
2329 +{
2330 +       printk("saving dump config settings\n");
2331 +
2332 +       /* dump config settings */
2333 +       memcpy(&config->config, &dump_config, sizeof(dump_config));
2334 +
2335 +       /* dumper state */
2336 +       memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
2337 +       memcpy(&config->scheme, dump_config.dumper->scheme, 
2338 +               sizeof(struct dump_scheme));
2339 +       memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
2340 +       memcpy(&config->dev[0], dump_config.dumper->dev, 
2341 +               sizeof(struct dump_anydev));
2342 +       memcpy(&config->filter_table, dump_config.dumper->filter, 
2343 +               sizeof(struct dump_data_filter)*MAX_PASSES);
2344 +
2345 +       /* handle to saved mem pages */
2346 +       memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
2347 +
2348 +       config->magic = DUMP_MAGIC_LIVE;
2349 +       
2350 +       return 0;
2351 +}
2352 +
2353 +int dump_init_stage2(struct dump_config_block *saved_config)
2354 +{
2355 +       int err = 0;
2356 +
2357 +       pr_debug("dump_init_stage2\n");
2358 +       /* Check if dump from previous boot exists */
2359 +       if (saved_config) {
2360 +               printk("loading dumper from previous boot \n");
2361 +               /* load and configure dumper from previous boot */
2362 +               if ((err = dump_load_config(saved_config)))
2363 +                       return err;
2364 +
2365 +               if (!dump_oncpu) {
2366 +                       if ((err = dump_configure(dump_config.dump_device))) {
2367 +                               printk("Stage 2 dump configure failed\n");
2368 +                               return err;
2369 +                       }
2370 +               }
2371 +
2372 +               dumper_reset();
2373 +               dump_dev = dump_config.dumper->dev;
2374 +               /* write out the dump */
2375 +               err = dump_generic_execute(NULL, NULL);
2376 +               
2377 +               dump_saved_config = NULL;
2378 +
2379 +               if (!dump_oncpu) {
2380 +                       dump_unconfigure(); 
2381 +               }
2382 +               
2383 +               return err;
2384 +
2385 +       } else {
2386 +               /* no dump to write out */
2387 +               printk("no dumper from previous boot \n");
2388 +               return 0;
2389 +       }
2390 +}
2391 +
2392 +extern void dump_mem_markpages(struct dump_memdev *);
2393 +
2394 +int dump_switchover_stage(void)
2395 +{
2396 +       int ret = 0;
2397 +
2398 +       /* trigger stage 2 right away - in real life would be after soft-boot */
2399 +       /* dump_saved_config would be a boot param */
2400 +       saved_dump_memdev = dump_memdev;
2401 +       saved_dumper = dump_config.dumper;
2402 +       ret = dump_init_stage2(dump_saved_config);
2403 +       dump_memdev = saved_dump_memdev;
2404 +       dump_config.dumper = saved_dumper;
2405 +       return ret;
2406 +}
2407 +
2408 +int dump_activate_softboot(void) 
2409 +{
2410 +        int err = 0;
2411 +#ifdef CONFIG_KEXEC
2412 +        int num_cpus_online = 0;
2413 +        struct kimage *image;
2414 +#endif
2415 +
2416 +        /* temporary - switchover to writeout previously saved dump */
2417 +#ifndef CONFIG_KEXEC
2418 +        err = dump_switchover_stage(); /* non-disruptive case */
2419 +        if (dump_oncpu)
2420 +                       dump_config.dumper = &dumper_stage1; /* set things back */
2421 +
2422 +        return err;
2423 +#else
2424 +
2425 +        dump_silence_level = DUMP_HALT_CPUS;
2426 +        /* wait till we become the only cpu */
2427 +        /* maybe by checking for online cpus ? */
2428 +
2429 +        while((num_cpus_online = num_online_cpus()) > 1);
2430 +
2431 +        /* now call into kexec */
2432 +
2433 +        image = xchg(&kexec_image, 0);
2434 +        if (image) {
2435 +                       mdelay(panic_timeout*1000);
2436 +                               machine_kexec(image);
2437 +                               }
2438 +
2439 +
2440 +        /* TBD/Fixme:
2441 +         * should we call reboot notifiers ? inappropriate for panic ?
2442 +         * what about device_shutdown() ?
2443 +         * is explicit bus master disabling needed or can we do that
2444 +         * through driverfs ?
2445 +         */
2446 +        return 0;
2447 +#endif
2448 +}
2449 +
2450 +/* --- DUMP SCHEME ROUTINES  --- */
2451 +
2452 +static inline int dump_buf_pending(struct dumper *dumper)
2453 +{
2454 +       return (dumper->curr_buf - dumper->dump_buf);
2455 +}
2456 +
2457 +/* Invoked during stage 1 of soft-reboot based dumping */
2458 +int dump_overlay_sequencer(void)
2459 +{
2460 +       struct dump_data_filter *filter = dump_config.dumper->filter;
2461 +       struct dump_data_filter *filter2 = dumper_stage2.filter;
2462 +       int pass = 0, err = 0, save = 0;
2463 +       int (*action)(unsigned long, unsigned long);
2464 +
2465 +       /* Make sure gzip compression is being used */
2466 +       if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
2467 +               printk(" Please set GZIP compression \n");
2468 +               return -EINVAL;
2469 +       }
2470 +
2471 +       /* start filling in dump data right after the header */
2472 +       dump_config.dumper->curr_offset = 
2473 +               PAGE_ALIGN(dump_config.dumper->header_len);
2474 +
2475 +       /* Locate the last pass */
2476 +       for (;filter->selector; filter++, pass++);
2477 +       
2478 +       /* 
2479 +        * Start from the end backwards: overlay involves a reverse 
2480 +        * ordering of passes, since less critical pages are more
2481 +        * likely to be reusable as scratch space once we are through
2482 +        * with them. 
2483 +        */
2484 +       for (--pass, --filter; pass >= 0; pass--, filter--)
2485 +       {
2486 +               /* Assumes passes are exclusive (even across dumpers) */
2487 +               /* Requires care when coding the selection functions */
2488 +               if ((save = filter->level_mask & dump_config.level))
2489 +                       action = dump_save_data;
2490 +               else
2491 +                       action = dump_skip_data;
2492 +
2493 +               /* Remember the offset where this pass started */
2494 +               /* The second stage dumper would use this */
2495 +               if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
2496 +                       pr_debug("Starting pass %d with pending data\n", pass);
2497 +                       pr_debug("filling dummy data to page-align it\n");
2498 +                       dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
2499 +                               (unsigned long)dump_config.dumper->curr_buf);
2500 +               }
2501 +               
2502 +               filter2[pass].start[0] = dump_config.dumper->curr_offset
2503 +                       + dump_buf_pending(dump_config.dumper);
2504 +
2505 +               err = dump_iterator(pass, action, filter);
2506 +
2507 +               filter2[pass].end[0] = dump_config.dumper->curr_offset
2508 +                       + dump_buf_pending(dump_config.dumper);
2509 +               filter2[pass].num_mbanks = 1;
2510 +
2511 +               if (err < 0) {
2512 +                       printk("dump_overlay_seq: failure %d in pass %d\n", 
2513 +                               err, pass);
2514 +                       break;
2515 +               }       
2516 +               printk("\n %d overlay pages %s of %d each in pass %d\n", 
2517 +               err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
2518 +       }
2519 +
2520 +       return err;
2521 +}
2522 +
2523 +/* from dump_memdev.c */
2524 +extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
2525 +extern struct page *dump_mem_next_page(struct dump_memdev *dev);
2526 +
2527 +static inline struct page *dump_get_saved_page(loff_t loc)
2528 +{
2529 +       return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
2530 +}
2531 +
2532 +static inline struct page *dump_next_saved_page(void)
2533 +{
2534 +       return (dump_mem_next_page(dump_memdev));
2535 +}
2536 +
2537 +/* 
2538 + * Iterates over list of saved dump pages. Invoked during second stage of 
2539 + * soft boot dumping
2540 + *
2541 + * Observation: If additional selection is desired at this stage then
2542 + * a different iterator could be written which would advance 
2543 + * to the next page header every time instead of blindly picking up
2544 + * the data. In such a case loc would be interpreted differently. 
2545 + * At this moment however a blind pass seems sufficient, cleaner and
2546 + * faster.
2547 + */
2548 +int dump_saved_data_iterator(int pass, int (*action)(unsigned long, 
2549 +       unsigned long), struct dump_data_filter *filter)
2550 +{
2551 +       loff_t loc, end;
2552 +       struct page *page;
2553 +       unsigned long count = 0;
2554 +       int i, err = 0;
2555 +       unsigned long sz;
2556 +
2557 +       for (i = 0; i < filter->num_mbanks; i++) {
2558 +               loc  = filter->start[i];
2559 +               end = filter->end[i];
2560 +               printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
2561 +                       loc, end);
2562 +
2563 +               /* loc will get treated as logical offset into stage 1 */
2564 +               page = dump_get_saved_page(loc);
2565 +                       
2566 +               for (; loc < end; loc += PAGE_SIZE) {
2567 +                       dump_config.dumper->curr_loc = loc;
2568 +                       if (!page) {
2569 +                               printk("no more saved data for pass %d\n", 
2570 +                                       pass);
2571 +                               break;
2572 +                       }
2573 +                       sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
2574 +
2575 +                       if (page && filter->selector(pass, (unsigned long)page, 
2576 +                               PAGE_SIZE))  {
2577 +                               pr_debug("mem offset 0x%llx\n", loc);
2578 +                               if ((err = action((unsigned long)page, sz))) 
2579 +                                       break;
2580 +                               else
2581 +                                       count++;
2582 +                               /* clear the contents of page */
2583 +                               /* fixme: consider using KM_DUMP instead */
2584 +                               clear_highpage(page);
2585 +                       
2586 +                       }
2587 +                       page = dump_next_saved_page();
2588 +               }
2589 +       }
2590 +
2591 +       return err ? err : count;
2592 +}
2593 +
2594 +static inline int dump_overlay_pages_done(struct page *page, int nr)
2595 +{
2596 +       int ret=0;
2597 +
2598 +       for (; nr ; page++, nr--) {
2599 +               if (dump_check_and_free_page(dump_memdev, page))
2600 +                       ret++;
2601 +       }
2602 +       return ret;
2603 +}
2604 +
2605 +int dump_overlay_save_data(unsigned long loc, unsigned long len)
2606 +{
2607 +       int err = 0;
2608 +       struct page *page = (struct page *)loc;
2609 +       static unsigned long cnt = 0;
2610 +
2611 +       if ((err = dump_generic_save_data(loc, len)))
2612 +               return err;
2613 +
2614 +       if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
2615 +               cnt++;
2616 +               if (!(cnt & 0x7f))
2617 +                       pr_debug("released page 0x%lx\n", page_to_pfn(page));
2618 +       }
2619 +       
2620 +       return err;
2621 +}
2622 +
2623 +
2624 +int dump_overlay_skip_data(unsigned long loc, unsigned long len)
2625 +{
2626 +       struct page *page = (struct page *)loc;
2627 +
2628 +       dump_overlay_pages_done(page, len >> PAGE_SHIFT);
2629 +       return 0;
2630 +}
2631 +
2632 +int dump_overlay_resume(void)
2633 +{
2634 +       int err = 0;
2635 +
2636 +       /* 
2637 +        * switch to stage 2 dumper, save dump_config_block
2638 +        * and then trigger a soft-boot
2639 +        */
2640 +       dumper_stage2.header_len = dump_config.dumper->header_len;
2641 +       dump_config.dumper = &dumper_stage2;
2642 +       if ((err = dump_save_config(dump_saved_config)))
2643 +               return err;
2644 +
2645 +       dump_dev = dump_config.dumper->dev;
2646 +
2647 +#ifdef CONFIG_KEXEC
2648 +        /* If we are doing a disruptive dump, activate softboot now */
2649 +        if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
2650 +        err = dump_activate_softboot();
2651 +#endif
2652 +               
2653 +       return err;
2654 +       err = dump_switchover_stage();  /* plugs into soft boot mechanism */
2655 +       dump_config.dumper = &dumper_stage1; /* set things back */
2656 +       return err;
2657 +}
2658 +
2659 +int dump_overlay_configure(unsigned long devid)
2660 +{
2661 +       struct dump_dev *dev;
2662 +       struct dump_config_block *saved_config = dump_saved_config;
2663 +       int err = 0;
2664 +
2665 +       /* If there is a previously saved dump, write it out first */
2666 +       if (saved_config) {
2667 +               printk("Processing old dump pending writeout\n");
2668 +               err = dump_switchover_stage();
2669 +               if (err) {
2670 +                       printk("failed to writeout saved dump\n");
2671 +                       return err;
2672 +               }
2673 +               dump_free_mem(saved_config); /* testing only: not after boot */
2674 +       }
2675 +
2676 +       dev = dumper_stage2.dev = dump_config.dumper->dev;
2677 +       /* From here on the intermediate dump target is memory-only */
2678 +       dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
2679 +       if ((err = dump_generic_configure(0))) {
2680 +               printk("dump generic configure failed: err %d\n", err);
2681 +               return err;
2682 +       }
2683 +       /* temporary */
2684 +       dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
2685 +
2686 +       /* Sanity check on the actual target dump device */
2687 +       if (!dev || (err = dev->ops->open(dev, devid))) {
2688 +               return err;
2689 +       }
2690 +       /* TBD: should we release the target if this is soft-boot only ? */
2691 +
2692 +       /* alloc a dump config block area to save across reboot */
2693 +       if (!(dump_saved_config = dump_alloc_mem(sizeof(struct 
2694 +               dump_config_block)))) {
2695 +               printk("dump config block alloc failed\n");
2696 +               /* undo configure */
2697 +               dump_generic_unconfigure();
2698 +               return -ENOMEM;
2699 +       }
2700 +       dump_config.dump_addr = (unsigned long)dump_saved_config;
2701 +       printk("Dump config block of size %d set up at 0x%lx\n", 
2702 +               sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
2703 +       return 0;
2704 +}
2705 +
2706 +int dump_overlay_unconfigure(void)
2707 +{
2708 +       struct dump_dev *dev = dumper_stage2.dev;
2709 +       int err = 0;
2710 +
2711 +       pr_debug("dump_overlay_unconfigure\n");
2712 +       /* Close the secondary device */
2713 +       dev->ops->release(dev); 
2714 +       pr_debug("released secondary device\n");
2715 +
2716 +       err = dump_generic_unconfigure();
2717 +       pr_debug("Unconfigured generic portions\n");
2718 +       dump_free_mem(dump_saved_config);
2719 +       dump_saved_config = NULL;
2720 +       pr_debug("Freed saved config block\n");
2721 +       dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
2722 +
2723 +       printk("Unconfigured overlay dumper\n");
2724 +       return err;
2725 +}
2726 +
2727 +int dump_staged_unconfigure(void)
2728 +{
2729 +       int err = 0;
2730 +       struct dump_config_block *saved_config = dump_saved_config;
2731 +       struct dump_dev *dev;
2732 +
2733 +       pr_debug("dump_staged_unconfigure\n");
2734 +       err = dump_generic_unconfigure();
2735 +
2736 +       /* now check if there is a saved dump waiting to be written out */
2737 +       if (saved_config) {
2738 +               printk("Processing saved dump pending writeout\n");
2739 +               if ((err = dump_switchover_stage())) {
2740 +                       printk("Error in commiting saved dump at 0x%lx\n", 
2741 +                               (unsigned long)saved_config);
2742 +                       printk("Old dump may hog memory\n");
2743 +               } else {
2744 +                       dump_free_mem(saved_config);
2745 +                       pr_debug("Freed saved config block\n");
2746 +               }
2747 +               dump_saved_config = NULL;
2748 +       } else {
2749 +               dev = &dump_memdev->ddev;
2750 +               dev->ops->release(dev);
2751 +       }
2752 +       printk("Unconfigured second stage dumper\n");
2753 +
2754 +       return 0;
2755 +}
2756 +
2757 +/* ----- PASSTHRU FILTER ROUTINE --------- */
2758 +
2759 +/* transparent - passes everything through */
2760 +int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
2761 +{
2762 +       return 1;
2763 +}
2764 +
2765 +/* ----- PASSTHRU FORMAT ROUTINES ---- */
2766 +
2767 +
2768 +int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
2769 +{
2770 +       dump_config.dumper->header_dirty++;
2771 +       return 0;
2772 +}
2773 +
2774 +/* Copies bytes of data from page(s) to the specified buffer */
2775 +int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
2776 +{
2777 +       unsigned long len = 0, bytes;
2778 +       void *addr;
2779 +
2780 +       while (len < sz) {
2781 +               addr = kmap_atomic(page, KM_DUMP);
2782 +               bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;  
2783 +               memcpy(buf, addr, bytes); 
2784 +               kunmap_atomic(addr, KM_DUMP);
2785 +               buf += bytes;
2786 +               len += bytes;
2787 +               page++;
2788 +       }
2789 +       /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
2790 +
2791 +       return sz - len;
2792 +}
2793 +
2794 +int dump_passthru_update_header(void)
2795 +{
2796 +       long len = dump_config.dumper->header_len;
2797 +       struct page *page;
2798 +       void *buf = dump_config.dumper->dump_buf;
2799 +       int err = 0;
2800 +
2801 +       if (!dump_config.dumper->header_dirty)
2802 +               return 0;
2803 +
2804 +       pr_debug("Copying header of size %ld bytes from memory\n", len);
2805 +       if (len > DUMP_BUFFER_SIZE) 
2806 +               return -E2BIG;
2807 +
2808 +       page = dump_mem_lookup(dump_memdev, 0);
2809 +       for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
2810 +               if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
2811 +                       return err;
2812 +               page = dump_mem_next_page(dump_memdev);
2813 +       }
2814 +       if (len > 0) {
2815 +               printk("Incomplete header saved in mem\n");
2816 +               return -ENOENT;
2817 +       }
2818 +
2819 +       if ((err = dump_dev_seek(0))) {
2820 +               printk("Unable to seek to dump header offset\n");
2821 +               return err;
2822 +       }
2823 +       err = dump_ll_write(dump_config.dumper->dump_buf, 
2824 +               buf - dump_config.dumper->dump_buf);
2825 +       if (err < dump_config.dumper->header_len)
2826 +               return (err < 0) ? err : -ENOSPC;
2827 +
2828 +       dump_config.dumper->header_dirty = 0;
2829 +       return 0;
2830 +}
2831 +
2832 +static loff_t next_dph_offset = 0;
2833 +
2834 +static int dph_valid(struct __dump_page *dph)
2835 +{
2836 +       if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags 
2837 +             > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
2838 +               (dph->dp_size > PAGE_SIZE)) {
2839 +       printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
2840 +               dph->dp_address, dph->dp_size, dph->dp_flags);
2841 +               return 0;
2842 +       }
2843 +       return 1;
2844 +}
2845 +
2846 +int dump_verify_lcrash_data(void *buf, unsigned long sz)
2847 +{
2848 +       struct __dump_page *dph;
2849 +
2850 +       /* sanity check for page headers */
2851 +       while (next_dph_offset + sizeof(*dph) < sz) {
2852 +               dph = (struct __dump_page *)(buf + next_dph_offset);
2853 +               if (!dph_valid(dph)) {
2854 +                       printk("Invalid page hdr at offset 0x%llx\n",
2855 +                               next_dph_offset);
2856 +                       return -EINVAL;
2857 +               }
2858 +               next_dph_offset += dph->dp_size + sizeof(*dph);
2859 +       }
2860 +
2861 +       next_dph_offset -= sz;  
2862 +       return 0;
2863 +}
2864 +
2865 +/* 
2866 + * TBD/Later: Consider avoiding the copy by using a scatter/gather 
2867 + * vector representation for the dump buffer
2868 + */
2869 +int dump_passthru_add_data(unsigned long loc, unsigned long sz)
2870 +{
2871 +       struct page *page = (struct page *)loc;
2872 +       void *buf = dump_config.dumper->curr_buf;
2873 +       int err = 0;
2874 +
2875 +       if ((err = dump_copy_pages(buf, page, sz))) {
2876 +               printk("dump_copy_pages failed");
2877 +               return err;
2878 +       }
2879 +
2880 +       if ((err = dump_verify_lcrash_data(buf, sz))) {
2881 +               printk("dump_verify_lcrash_data failed\n");
2882 +               printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
2883 +               printk("Page flags 0x%lx\n", page->flags);
2884 +               printk("Page count 0x%x\n", page_count(page));
2885 +               return err;
2886 +       }
2887 +
2888 +       dump_config.dumper->curr_buf = buf + sz;
2889 +
2890 +       return 0;
2891 +}
2892 +
2893 +
2894 +/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
2895 +
2896 +/* Scheme to overlay saved data in memory for writeout after a soft-boot */
2897 +struct dump_scheme_ops dump_scheme_overlay_ops = {
2898 +       .configure      = dump_overlay_configure,
2899 +       .unconfigure    = dump_overlay_unconfigure,
2900 +       .sequencer      = dump_overlay_sequencer,
2901 +       .iterator       = dump_page_iterator,
2902 +       .save_data      = dump_overlay_save_data,
2903 +       .skip_data      = dump_overlay_skip_data,
2904 +       .write_buffer   = dump_generic_write_buffer
2905 +};
2906 +
2907 +struct dump_scheme dump_scheme_overlay = {
2908 +       .name           = "overlay",
2909 +       .ops            = &dump_scheme_overlay_ops
2910 +};
2911 +
2912 +
2913 +/* Stage 1 must use a good compression scheme - default to gzip */
2914 +extern struct __dump_compress dump_gzip_compression;
2915 +
2916 +struct dumper dumper_stage1 = {
2917 +       .name           = "stage1",
2918 +       .scheme         = &dump_scheme_overlay,
2919 +       .fmt            = &dump_fmt_lcrash,
2920 +       .compress       = &dump_none_compression, /* needs to be gzip */
2921 +       .filter         = dump_filter_table,
2922 +       .dev            = NULL,
2923 +};             
2924 +
2925 +/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
2926 +
2927 +/* Formatter that transfers data as is (transparent) w/o further conversion */
2928 +struct dump_fmt_ops dump_fmt_passthru_ops = {
2929 +       .configure_header       = dump_passthru_configure_header,
2930 +       .update_header          = dump_passthru_update_header,
2931 +       .save_context           = NULL, /* unused */
2932 +       .add_data               = dump_passthru_add_data,
2933 +       .update_end_marker      = dump_lcrash_update_end_marker
2934 +};
2935 +
2936 +struct dump_fmt dump_fmt_passthru = {
2937 +       .name   = "passthru",
2938 +       .ops    = &dump_fmt_passthru_ops
2939 +};
2940 +
2941 +/* Filter that simply passes along any data within the range (transparent) */
2942 +/* Note: The start and end ranges in the table are filled in at run-time */
2943 +
2944 +extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
2945 +
2946 +struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
2947 +{.name = "passkern", .selector = dump_passthru_filter, 
2948 +       .level_mask = DUMP_MASK_KERN },
2949 +{.name = "passuser", .selector = dump_passthru_filter, 
2950 +       .level_mask = DUMP_MASK_USED },
2951 +{.name = "passunused", .selector = dump_passthru_filter, 
2952 +       .level_mask = DUMP_MASK_UNUSED },
2953 +{.name = "none", .selector = dump_filter_none, 
2954 +       .level_mask = DUMP_MASK_REST }
2955 +};
2956 +
2957 +
2958 +/* Scheme to handle data staged / preserved across a soft-boot */
2959 +struct dump_scheme_ops dump_scheme_staged_ops = {
2960 +       .configure      = dump_generic_configure,
2961 +       .unconfigure    = dump_staged_unconfigure,
2962 +       .sequencer      = dump_generic_sequencer,
2963 +       .iterator       = dump_saved_data_iterator,
2964 +       .save_data      = dump_generic_save_data,
2965 +       .skip_data      = dump_generic_skip_data,
2966 +       .write_buffer   = dump_generic_write_buffer
2967 +};
2968 +
2969 +struct dump_scheme dump_scheme_staged = {
2970 +       .name           = "staged",
2971 +       .ops            = &dump_scheme_staged_ops
2972 +};
2973 +
2974 +/* The stage 2 dumper comprising all these */
2975 +struct dumper dumper_stage2 = {
2976 +       .name           = "stage2",
2977 +       .scheme         = &dump_scheme_staged,
2978 +       .fmt            = &dump_fmt_passthru,
2979 +       .compress       = &dump_none_compression,
2980 +       .filter         = dump_passthru_filtertable,
2981 +       .dev            = NULL,
2982 +};             
2983 +
2984 Index: linux-2.6.10/drivers/dump/dump_fmt.c
2985 ===================================================================
2986 --- linux-2.6.10.orig/drivers/dump/dump_fmt.c   2005-04-07 19:34:21.197950744 +0800
2987 +++ linux-2.6.10/drivers/dump/dump_fmt.c        2005-04-07 18:13:56.911751944 +0800
2988 @@ -0,0 +1,407 @@
2989 +/*
2990 + * Implements the routines which handle the format specific
2991 + * aspects of dump for the default dump format.
2992 + *
2993 + * Used in single stage dumping and stage 1 of soft-boot based dumping 
2994 + * Saves data in LKCD (lcrash) format 
2995 + *
2996 + * Previously a part of dump_base.c
2997 + *
2998 + * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
2999 + *     Split off and reshuffled LKCD dump format code around generic
3000 + *     dump method interfaces.
3001 + *
3002 + * Derived from original code created by 
3003 + *     Matt Robinson <yakker@sourceforge.net>)
3004 + *
3005 + * Contributions from SGI, IBM, HP, MCL, and others.
3006 + *
3007 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
3008 + * Copyright (C) 2000 - 2002 TurboLinux, Inc.  All rights reserved.
3009 + * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
3010 + * Copyright (C) 2002 International Business Machines Corp. 
3011 + *
3012 + * This code is released under version 2 of the GNU GPL.
3013 + */
3014 +
3015 +#include <linux/types.h>
3016 +#include <linux/kernel.h>
3017 +#include <linux/time.h>
3018 +#include <linux/sched.h>
3019 +#include <linux/ptrace.h>
3020 +#include <linux/utsname.h>
3021 +#include <linux/dump.h>
3022 +#include <asm/dump.h>
3023 +#include "dump_methods.h"
3024 +
3025 +/*
3026 + * SYSTEM DUMP LAYOUT
3027 + * 
3028 + * System dumps are currently the combination of a dump header and a set
3029 + * of data pages which contain the system memory.  The layout of the dump
3030 + * (for full dumps) is as follows:
3031 + *
3032 + *             +-----------------------------+
3033 + *             |     generic dump header     |
3034 + *             +-----------------------------+
3035 + *             |   architecture dump header  |
3036 + *             +-----------------------------+
3037 + *             |         page header         |
3038 + *             +-----------------------------+
3039 + *             |          page data          |
3040 + *             +-----------------------------+
3041 + *             |         page header         |
3042 + *             +-----------------------------+
3043 + *             |          page data          |
3044 + *             +-----------------------------+
3045 + *             |              |              |
3046 + *             |              |              |
3047 + *             |              |              |
3048 + *             |              |              |
3049 + *             |              V              |
3050 + *             +-----------------------------+
3051 + *             |        PAGE_END header      |
3052 + *             +-----------------------------+
3053 + *
3054 + * There are two dump headers, the first which is architecture
3055 + * independent, and the other which is architecture dependent.  This
3056 + * allows different architectures to dump different data structures
3057 + * which are specific to their chipset, CPU, etc.
3058 + *
3059 + * After the dump headers come a succession of dump page headers along
3060 + * with dump pages.  The page header contains information about the page
3061 + * size, any flags associated with the page (whether it's compressed or
3062 + * not), and the address of the page.  After the page header is the page
3063 + * data, which is either compressed (or not).  Each page of data is
3064 + * dumped in succession, until the final dump header (PAGE_END) is
3065 + * placed at the end of the dump, assuming the dump device isn't out
3066 + * of space.
3067 + *
3068 + * This mechanism allows for multiple compression types, different
3069 + * types of data structures, different page ordering, etc., etc., etc.
3070 + * It's a very straightforward mechanism for dumping system memory.
3071 + */
3072 +
3073 +struct __dump_header dump_header;  /* the primary dump header              */
3074 +struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
3075 +
3076 +/* Replace a runtime sanity check on the DUMP_BUFFER_SIZE with a 
3077 + * compile-time check.  The compile_time_assertions routine will not
3078 + * compile if the assertion is false. 
3079 + *
3080 + * If you fail this assert you are most likely on a large machine and 
3081 + * should use a special 6.0.0 version of LKCD or a version > 7.0.0. See
3082 + * the LKCD website for more information.
3083 + */
3084 +
3085 +#define COMPILE_TIME_ASSERT(const_expr) \
3086 +       switch(0){case 0: case (const_expr):;}
3087 +
3088 +static inline void compile_time_assertions(void)
3089 +{
3090 +       COMPILE_TIME_ASSERT((sizeof(struct __dump_header) +
3091 +               sizeof(struct __dump_header_asm)) <= DUMP_BUFFER_SIZE);
3092 +}
3093 +
3094 +/*
3095 + *  Set up common header fields (mainly the arch indep section) 
3096 + *  Per-cpu state is handled by lcrash_save_context
3097 + *  Returns the size of the header in bytes.
3098 + */
3099 +static int lcrash_init_dump_header(const char *panic_str)
3100 +{
3101 +       struct timeval dh_time;
3102 +       u64 temp_memsz = dump_header.dh_memory_size;
3103 +
3104 +       /* initialize the dump headers to zero */
3105 +       /* save dha_stack pointer because it may contain a pointer to the stack! */
3106 +       memset(&dump_header, 0, sizeof(dump_header));
3107 +       memset(&dump_header_asm, 0,
3108 +               offsetof(struct __dump_header_asm, dha_stack));
3109 +       memset(&dump_header_asm.dha_stack+1, 0,
3110 +               sizeof(dump_header_asm) -
3111 +               offsetof(struct __dump_header_asm, dha_stack) -
3112 +               sizeof(dump_header_asm.dha_stack));
3113 +       dump_header.dh_memory_size = temp_memsz;
3114 +
3115 +       /* configure dump header values */
3116 +       dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
3117 +       dump_header.dh_version = DUMP_VERSION_NUMBER;
3118 +       dump_header.dh_memory_start = PAGE_OFFSET;
3119 +       dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
3120 +       dump_header.dh_header_size = sizeof(struct __dump_header);
3121 +       dump_header.dh_page_size = PAGE_SIZE;
3122 +       dump_header.dh_dump_level = dump_config.level;
3123 +       dump_header.dh_current_task = (unsigned long) current;
3124 +       dump_header.dh_dump_compress = dump_config.dumper->compress->