lustre/kernel_patches/patches/lkcd-cvs-2.5.69.patch
--- linux-2.5.69/drivers/dump/Makefile.lkcdbase Mon Jun  2 17:29:39 2003
+++ linux-2.5.69/drivers/dump/Makefile  Fri Apr 25 00:24:15 2003
@@ -0,0 +1,14 @@
+#
+# Makefile for the dump device drivers.
+#
+
+dump-y                                 := dump_setup.o dump_fmt.o dump_filters.o dump_scheme.o dump_execute.o
+dump-$(CONFIG_X86)                     += dump_i386.o
+dump-$(CONFIG_CRASH_DUMP_MEMDEV)       += dump_memdev.o dump_overlay.o
+dump-objs                              += $(dump-y)
+
+obj-$(CONFIG_CRASH_DUMP)               += dump.o
+obj-$(CONFIG_CRASH_DUMP_BLOCKDEV)      += dump_blockdev.o
+obj-$(CONFIG_CRASH_DUMP_NETDEV)        += dump_netdev.o
+obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE)  += dump_rle.o
+obj-$(CONFIG_CRASH_DUMP_COMPRESS_GZIP) += dump_gzip.o
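
A note on wiring this in: the Makefile keys everything off CONFIG_CRASH_DUMP* symbols. As a sketch (symbol names taken from the Makefile above; which drivers to enable depends on the setup), a .config fragment for block-device dumping with gzip compression might look like:

    CONFIG_CRASH_DUMP=m
    CONFIG_CRASH_DUMP_BLOCKDEV=m
    CONFIG_CRASH_DUMP_COMPRESS_GZIP=m
    # CONFIG_CRASH_DUMP_MEMDEV is not set
    # CONFIG_CRASH_DUMP_NETDEV is not set
    # CONFIG_CRASH_DUMP_COMPRESS_RLE is not set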
--- linux-2.5.69/drivers/dump/dump_blockdev.c.lkcdbase  Mon Jun  2 17:29:49 2003
+++ linux-2.5.69/drivers/dump/dump_blockdev.c   Sun May 18 22:30:52 2003
@@ -0,0 +1,465 @@
+/*
+ * Implements the dump driver interface for saving a dump to
+ * a block device through the kernel's generic low level block i/o
+ * routines.
+ *
+ * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
+ *     Moved original lkcd kiobuf dump i/o code from dump_base.c
+ *     to use generic dump device interfaces
+ *
+ * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
+ *     Convert dump i/o to directly use bio instead of kiobuf for 2.5
+ *
+ * Oct 2002  - Suparna Bhattacharya <suparna@in.ibm.com>
+ *     Rework to new dumpdev.h structures, implement open/close/
+ *     silence, misc fixes (blocknr removal, bio_add_page usage)
+ *
+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
+ * Copyright (C) 2002 International Business Machines Corp.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <asm/hardirq.h>
+#include <linux/dump.h>
+#include "dump_methods.h"
+
+extern void *dump_page_buf;
+
+/* The end_io callback for dump i/o completion */
+static int
+dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
+{
+       struct dump_blockdev *dump_bdev;
+
+       if (bio->bi_size) {
+               /* some bytes still left to transfer */
+               return 1; /* not complete */
+       }
+
+       dump_bdev = (struct dump_blockdev *)bio->bi_private;
+       if (error) {
+               printk("IO error while writing the dump, aborting\n");
+       }
+
+       dump_bdev->err = error;
+
+       /* no wakeup needed, since caller polls for completion */
+       return 0;
+}
+
+/* Check if the dump bio is already mapped to the specified buffer */
+static int
+dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
+       int len)
+{
+       struct bio *bio = dev->bio;
+       unsigned long bsize = 0;
+
+       if (!bio->bi_vcnt)
+               return 0; /* first time, not mapped */
+
+
+       if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
+               return 0; /* buffer not mapped */
+
+       bsize = bdev_hardsect_size(bio->bi_bdev);
+       if ((len & (PAGE_SIZE - 1)) || (len & (bsize - 1)))
+               return 0; /* alignment checks needed */
+
+       /* quick check to decide if we need to redo bio_add_page */
+       if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
+               return 0; /* device may have other restrictions */
+
+       return 1; /* already mapped */
+}
+
+/*
+ * Set up the dump bio for i/o from the specified buffer
+ * Return value indicates whether the full buffer could be mapped or not
+ */
+static int
+dump_block_map(struct dump_blockdev *dev, void *buf, int len)
+{
+       struct page *page = virt_to_page(buf);
+       struct bio *bio = dev->bio;
+       unsigned long bsize = 0;
+
+       bio->bi_bdev = dev->bdev;
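+       /* convert the byte offset into a 512-byte sector number */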
+       bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
+       bio->bi_idx = 0; /* reset index to the beginning */
+
+       if (dump_block_map_valid(dev, page, len)) {
+               /* already mapped and usable right away */
+               bio->bi_size = len; /* reset size to the whole bio */
+       } else {
+               /* need to map the bio */
+               bio->bi_size = 0;
+               bio->bi_vcnt = 0;
+               bsize = bdev_hardsect_size(bio->bi_bdev);
+
+               /* first a few sanity checks */
+               if (len < bsize) {
+                       printk("map: len less than hardsect size\n");
+                       return -EINVAL;
+               }
+
+               if ((unsigned long)buf & (bsize - 1)) {
+                       printk("map: not aligned\n");
+                       return -EINVAL;
+               }
+
+               /* assume contig. page aligned low mem buffer (no vmalloc) */
+               if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
+                       printk("map: invalid buffer alignment!\n");
+                       return -EINVAL;
+               }
+               /* finally we can go ahead and map it */
+               while (bio->bi_size < len)
+                       if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
+                               break;
+                       }
+
+               bio->bi_end_io = dump_bio_end_io;
+               bio->bi_private = dev;
+       }
+
+       if (bio->bi_size != len) {
+               printk("map: bio size = %d not enough for len = %d!\n",
+                       bio->bi_size, len);
+               return -E2BIG;
+       }
+       return 0;
+}
+
+static void
+dump_free_bio(struct bio *bio)
+{
+       if (bio)
+               kfree(bio->bi_io_vec);
+       kfree(bio);
+}
+
+/*
+ * Prepares the dump device so we can take a dump later.
+ * The caller is expected to have filled up the kdev_id field in the
+ * block dump dev structure.
+ *
+ * At dump time when dump_block_write() is invoked it will be too
+ * late to recover, so as far as possible make sure obvious errors
+ * get caught right here and reported back to the caller.
+ */
+static int
+dump_block_open(struct dump_dev *dev, unsigned long arg)
+{
+       struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+       struct block_device *bdev;
+       int retval = 0;
+       struct bio_vec *bvec;
+
+       /* make sure this is a valid block device */
+       if (!arg) {
+               retval = -EINVAL;
+               goto err;
+       }
+
+       /* get a corresponding block_dev struct for this */
+       bdev = bdget((dev_t)arg);
+       if (!bdev) {
+               retval = -ENODEV;
+               goto err;
+       }
+
+       /* get the block device opened */
+       if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0, BDEV_RAW))) {
+               goto err1;
+       }
+
+       if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
+               == NULL) {
+               printk("Cannot allocate bio\n");
+               retval = -ENOMEM;
+               goto err2;
+       }
+
+       bio_init(dump_bdev->bio);
+
+       if ((bvec = kmalloc(sizeof(struct bio_vec) *
+               (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
+               retval = -ENOMEM;
+               goto err3;
+       }
+
+       /* assign the new dump dev structure */
+       dump_bdev->kdev_id = to_kdev_t((dev_t)arg);
+       dump_bdev->bdev = bdev;
+
+       /* make a note of the limit */
+       dump_bdev->limit = bdev->bd_inode->i_size;
+
+       /* now make sure we can map the dump buffer */
+       dump_bdev->bio->bi_io_vec = bvec;
+       dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
+
+       retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
+               DUMP_BUFFER_SIZE);
+
+       if (retval) {
+               printk("open: dump_block_map failed, ret %d\n", retval);
+               goto err3;
+       }
+
+       printk("Block device (%d,%d) successfully configured for dumping\n",
+              major(dump_bdev->kdev_id),
+              minor(dump_bdev->kdev_id));
+
+
+       /* after opening the block device, return */
+       return retval;
+
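+       /* error unwind: err3 frees the bio and falls through to err2 to
+        * release the opened bdev; err1 only drops the bdget() reference */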
+err3:  dump_free_bio(dump_bdev->bio);
+       dump_bdev->bio = NULL;
+err2:  if (bdev) blkdev_put(bdev, BDEV_RAW);
+       goto err;
+err1:  if (bdev) bdput(bdev);
+       dump_bdev->bdev = NULL;
+err:   return retval;
+}
+
+/*
+ * Close the dump device and release associated resources
+ * Invoked when unconfiguring the dump device.
+ */
+static int
+dump_block_release(struct dump_dev *dev)
+{
+       struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+
+       /* release earlier bdev if present */
+       if (dump_bdev->bdev) {
+               blkdev_put(dump_bdev->bdev, BDEV_RAW);
+               dump_bdev->bdev = NULL;
+       }
+
+       dump_free_bio(dump_bdev->bio);
+       dump_bdev->bio = NULL;
+
+       return 0;
+}
+
+
+/*
+ * Prepare the dump device for use (silence any ongoing activity
+ * and quiesce state) when the system crashes.
+ */
+static int
+dump_block_silence(struct dump_dev *dev)
+{
+       struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+       struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
+       int ret;
+
+       /* If we can't get request queue lock, refuse to take the dump */
+       if (!spin_trylock(q->queue_lock))
+               return -EBUSY;
+
+       ret = elv_queue_empty(q);
+       spin_unlock(q->queue_lock);
+
+       /* For now we assume we have the device to ourselves */
+       /* Just a quick sanity check */
+       if (!ret) {
+               /* i/o in flight - safer to quit */
+               return -EBUSY;
+       }
+
+       /*
+        * Move to a softer level of silencing where no spin_lock_irqs
+        * are held on other cpus
+        */
+       dump_silence_level = DUMP_SOFT_SPIN_CPUS;
+
+       __dump_irq_enable();
+
+       printk("Dumping to block device (%d,%d) on CPU %d ...\n",
+              major(dump_bdev->kdev_id), minor(dump_bdev->kdev_id),
+              smp_processor_id());
+
+       return 0;
+}
+
+/*
+ * Invoked when dumping is done. This is the time to put things back
+ * (i.e. undo the effects of dump_block_silence) so the device is
+ * available for normal use.
+ */
+static int
+dump_block_resume(struct dump_dev *dev)
+{
+       __dump_irq_restore();
+       return 0;
+}
+
+
+/*
+ * Seek to the specified offset in the dump device.
+ * Makes sure this is a valid offset, otherwise returns an error.
+ */
+static int
+dump_block_seek(struct dump_dev *dev, loff_t off)
+{
+       struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+       loff_t offset = off + dump_bdev->start_offset;
+
+       if (offset & (PAGE_SIZE - 1)) {
+               printk("seek: non-page aligned\n");
+               return -EINVAL;
+       }
+
+       if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
+               printk("seek: not sector aligned\n");
+               return -EINVAL;
+       }
+
+       if (offset > dump_bdev->limit) {
+               printk("seek: not enough space left on device!\n");
+               return -ENOSPC;
+       }
+       dev->curr_offset = off;
+       return 0;
+}
+
+/*
+ * Write out a buffer after checking the device limitations,
+ * sector sizes, etc. Assumes the buffer is in directly mapped
+ * kernel address space (not vmalloc'ed).
+ *
+ * Returns: number of bytes written or -ERRNO.
+ */
+static int
+dump_block_write(struct dump_dev *dev, void *buf,
+       unsigned long len)
+{
+       struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+       loff_t offset = dev->curr_offset + dump_bdev->start_offset;
+       int retval = -ENOSPC;
+
+       if (offset >= dump_bdev->limit) {
+               printk("write: not enough space left on device!\n");
+               goto out;
+       }
+
+       /* don't write more blocks than our max limit */
+       if (offset + len > dump_bdev->limit)
+               len = dump_bdev->limit - offset;
+
+
+       retval = dump_block_map(dump_bdev, buf, len);
+       if (retval) {
+               printk("write: dump_block_map failed! err %d\n", retval);
+               goto out;
+       }
+
+       /*
+        * Write out the data to disk.
+        * Assumes the entire buffer is mapped to a single bio, which we
+        * can submit and wait for io completion. In the future, may
+        * consider increasing the dump buffer size and submitting
+        * multiple bios for better throughput.
+        */
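+       /* -EAGAIN marks the write as in flight until dump_bio_end_io() runs */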
+       dump_bdev->err = -EAGAIN;
+       submit_bio(WRITE, dump_bdev->bio);
+
+       dump_bdev->ddev.curr_offset += len;
+       retval = len;
+ out:
+       return retval;
+}
+
+/*
+ * Name: dump_block_ready()
+ * Func: check if the last dump i/o is over and ready for next request
+ */
+static int
+dump_block_ready(struct dump_dev *dev, void *buf)
+{
+       struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
+       request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
+
+       /* check for io completion */
+       if (dump_bdev->err == -EAGAIN) {
+               q->unplug_fn(q);
+               return -EAGAIN;
+       }
+
+       if (dump_bdev->err) {
+               printk("dump i/o err\n");
+               return dump_bdev->err;
+       }
+
+       return 0;
+}
+
+
+struct dump_dev_ops dump_blockdev_ops = {
+       .open           = dump_block_open,
+       .release        = dump_block_release,
+       .silence        = dump_block_silence,
+       .resume         = dump_block_resume,
+       .seek           = dump_block_seek,
+       .write          = dump_block_write,
+       /* .read not implemented */
+       .ready          = dump_block_ready
+};
+
+static struct dump_blockdev default_dump_blockdev = {
+       .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
+                       .curr_offset = 0},
+       /*
+        * leave enough room for the longest swap header possibly
+        * written by mkswap (likely the largest page size supported
+        * by the arch)
+        */
+       .start_offset   = DUMP_HEADER_OFFSET,
+       .err            = 0
+       /* assume the rest of the fields are zeroed by default */
+};
+
+struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
+
+static int __init
+dump_blockdev_init(void)
+{
+       if (dump_register_device(&dump_blockdev->ddev) < 0) {
+               printk("block device driver registration failed\n");
+               return -1;
+       }
+
+       printk("block device driver for LKCD registered\n");
+       return 0;
+}
+
+static void __exit
+dump_blockdev_cleanup(void)
+{
+       dump_unregister_device(&dump_blockdev->ddev);
+       printk("block device driver for LKCD unregistered\n");
+}
+
+MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
+MODULE_LICENSE("GPL");
+
+module_init(dump_blockdev_init);
+module_exit(dump_blockdev_cleanup);
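
Note that the write path above is fully polled: dump_block_write() submits the bio and returns at once, and the caller is expected to poll the ->ready op until the -EAGAIN sentinel clears. A minimal caller-side sketch (illustrative only, not code from this patch):

    /* drive one buffer through a dump_dev, assuming ops as defined above */
    static int write_and_wait(struct dump_dev *dev, void *buf, unsigned long len)
    {
            int ret = dev->ops->write(dev, buf, len);
            if (ret < 0)
                    return ret;
            /* spin until dump_bio_end_io() records a final status */
            while ((ret = dev->ops->ready(dev, buf)) == -EAGAIN)
                    cpu_relax();
            return ret;
    }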
--- linux-2.5.69/drivers/dump/dump_execute.c.lkcdbase   Mon Jun  2 17:29:49 2003
+++ linux-2.5.69/drivers/dump/dump_execute.c    Fri Feb  7 06:47:58 2003
@@ -0,0 +1,128 @@
+/*
+ * The file has the common/generic dump execution code
+ *
+ * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
+ *     Split and rewrote high level dump execute code to make use
+ *     of dump method interfaces.
+ *
+ * Derived from original code in dump_base.c created by
+ *     Matt Robinson <yakker@sourceforge.net>
+ *
+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
+ * Copyright (C) 2002 International Business Machines Corp.
+ *
+ * Assumes dumper and dump config settings are in place
+ * (invokes corresponding dumper specific routines as applicable)
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/dump.h>
+#include "dump_methods.h"
+
+struct notifier_block *dump_notifier_list; /* dump started/ended callback */
+
+/* Dump progress indicator */
+void
+dump_speedo(int i)
+{
+       static const char twiddle[4] = { '|', '\\', '-', '/' };
+       printk("%c\b", twiddle[i&3]);
+}
+
+/* Make the device ready and write out the header */
+int dump_begin(void)
+{
+       int err = 0;
+
+       /* dump_dev = dump_config.dumper->dev; */
+       dumper_reset();
+       if ((err = dump_dev_silence())) {
+               /* quiesce failed, can't risk continuing */
+               /* Todo/Future: switch to alternate dump scheme if possible */
+               printk("dump dev silence failed! error %d\n", err);
+               return err;
+       }
+
+       pr_debug("Writing dump header\n");
+       if ((err = dump_update_header())) {
+               printk("dump update header failed! error %d\n", err);
+               dump_dev_resume();
+               return err;
+       }
+
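+       /* data pages begin one full buffer past offset 0, leaving room
+        * for the header block written out by dump_update_header() */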
+       dump_config.dumper->curr_offset = DUMP_BUFFER_SIZE;
+
+       return 0;
+}
+
+/*
+ * Write the dump terminator, a final header update and let go of
+ * exclusive use of the device for dump.
+ */
+int dump_complete(void)
+{
+       int ret = 0;
+
+       if (dump_config.level != DUMP_LEVEL_HEADER) {
+               if ((ret = dump_update_end_marker())) {
+                       printk("dump update end marker error %d\n", ret);
+               }
+               if ((ret = dump_update_header())) {
+                       printk("dump update header error %d\n", ret);
+               }
+       }
+       ret = dump_dev_resume();
+
+       return ret;
+}
+
+/* Saves all dump data */
+int dump_execute_savedump(void)
+{
+       int ret = 0, err = 0;
+
+       if ((ret = dump_begin())) {
+               return ret;
+       }
+
+       if (dump_config.level != DUMP_LEVEL_HEADER) {
+               ret = dump_sequencer();
+       }
+       if ((err = dump_complete())) {
+               printk("Dump complete failed. Error %d\n", err);
+       }
+
+       return ret;
+}
+
+/* Does all the real work:  Capture and save state */
+int dump_generic_execute(const char *panic_str, const struct pt_regs *regs)
+{
+       int ret = 0;
+
+       if ((ret = dump_configure_header(panic_str, regs))) {
+               printk("dump config header failed! error %d\n", ret);
+               return ret;
+       }
+
+       /* tell interested parties that a dump is about to start */
+       notifier_call_chain(&dump_notifier_list, DUMP_BEGIN,
+               &dump_config.dump_device);
+
+       if (dump_config.level != DUMP_LEVEL_NONE)
+               ret = dump_execute_savedump();
+
+       pr_debug("dumped %ld blocks of %d bytes each\n",
+               dump_config.dumper->count, DUMP_BUFFER_SIZE);
+
+       /* tell interested parties that a dump has completed */
+       notifier_call_chain(&dump_notifier_list, DUMP_END,
+               &dump_config.dump_device);
+
+       return ret;
+}
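
Since dump_notifier_list is an ordinary notifier chain, other code can ask to be told when a dump starts and ends. A minimal sketch, assuming the 2.5-era notifier_chain_register() API and the DUMP_BEGIN/DUMP_END events used above:

    static int my_dump_event(struct notifier_block *self, unsigned long event,
            void *data)
    {
            if (event == DUMP_BEGIN)
                    printk("crash dump starting\n");
            else if (event == DUMP_END)
                    printk("crash dump finished\n");
            return NOTIFY_DONE;
    }

    static struct notifier_block my_dump_nb = { .notifier_call = my_dump_event };

    /* at init time: notifier_chain_register(&dump_notifier_list, &my_dump_nb); */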
--- linux-2.5.69/drivers/dump/dump_filters.c.lkcdbase   Mon Jun  2 17:29:49 2003
+++ linux-2.5.69/drivers/dump/dump_filters.c    Mon Feb  3 05:06:28 2003
@@ -0,0 +1,143 @@
+/*
+ * Default filters to select data to dump for various passes.
+ *
+ * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
+ *     Split and rewrote default dump selection logic to generic dump
+ *     method interfaces
+ * Derived from a portion of dump_base.c created by
+ *     Matt Robinson <yakker@sourceforge.net>
+ *
+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
+ * Copyright (C) 2002 International Business Machines Corp.
+ *
+ * Used during single-stage dumping and during stage 1 of the 2-stage scheme
+ * (Stage 2 of the 2-stage scheme uses the fully transparent filters
+ * i.e. passthru filters in dump_overlay.c)
+ *
+ * Future: Custom selective dump may involve a different set of filters.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/dump.h>
+#include "dump_methods.h"
+
+
+/* Copied from mm/bootmem.c - FIXME */
+/* return the number of _pages_ that will be allocated for the boot bitmap */
+unsigned long dump_calc_bootmap_pages(void)
+{
+       unsigned long mapsize;
+       unsigned long pages = num_physpages;
+
+       mapsize = (pages+7)/8;
+       mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
+       mapsize >>= PAGE_SHIFT;
+
+       return mapsize;
+}
+
+
+#define DUMP_PFN_SAFETY_MARGIN 1024  /* 4 MB */
+/* temporary */
+extern unsigned long min_low_pfn;
+
+
+int dump_low_page(struct page *p)
+{
+       return page_to_pfn(p) < min_low_pfn + dump_calc_bootmap_pages()
+                       + 1 + DUMP_PFN_SAFETY_MARGIN;
+}
+
+static inline int kernel_page(struct page *p)
+{
+       /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
+       return PageReserved(p) || (!PageLRU(p) && PageInuse(p));
+}
+
+static inline int user_page(struct page *p)
+{
+       return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
+}
+
+static inline int unreferenced_page(struct page *p)
+{
+       return !PageInuse(p) && !PageReserved(p);
+}
+
+
+/* loc marks the beginning of a range of pages */
+int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
+{
+       struct page *page = (struct page *)loc;
+       /* if any of the pages is a kernel page, select this set */
+       while (sz) {
+               if (dump_low_page(page) || kernel_page(page))
+                       return 1;
+               sz -= PAGE_SIZE;
+               page++;
+       }
+       return 0;
+}
+
+
+/* loc marks the beginning of a range of pages */
+int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
+{
+       struct page *page = (struct page *)loc;
+       int ret = 0;
+       /* select if the set has any user page, and no kernel pages */
+       while (sz) {
+               if (user_page(page) && !dump_low_page(page)) {
+                       ret = 1;
+               } else if (kernel_page(page) || dump_low_page(page)) {
+                       return 0;
+               }
+               page++;
+               sz -= PAGE_SIZE;
+       }
+       return ret;
+}
+
+
+
+/* loc marks the beginning of a range of pages */
+int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
+{
+       struct page *page = (struct page *)loc;
+
+       /* select if the set does not have any used pages */
+       while (sz) {
+               if (!unreferenced_page(page) || dump_low_page(page)) {
+                       return 0;
+               }
+               page++;
+               sz -= PAGE_SIZE;
+       }
+       return 1;
+}
+
+/* dummy: last (non-existent) pass */
+int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
+{
+       return 0;
+}
+
+/* TBD: resolve level bitmask ? */
+struct dump_data_filter dump_filter_table[] = {
+       { .name = "kern", .selector = dump_filter_kernpages,
+               .level_mask = DUMP_MASK_KERN},
+       { .name = "user", .selector = dump_filter_userpages,
+               .level_mask = DUMP_MASK_USED},
+       { .name = "unused", .selector = dump_filter_unusedpages,
+               .level_mask = DUMP_MASK_UNUSED},
+       { .name = "none", .selector = dump_filter_none,
+               .level_mask = DUMP_MASK_REST},
+       { .name = "", .selector = NULL, .level_mask = 0}
+};
+
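Each entry above describes one dump pass. The sequencer (dump_scheme.c, not part of this hunk) presumably walks the table, skips passes whose level_mask is not enabled for the configured dump level, and runs the selector over each candidate range of struct page's, roughly like this (illustrative sketch only; loc/sz stand for the current range):

    struct dump_data_filter *filter;
    int pass;

    for (filter = dump_filter_table; filter->selector; filter++) {
            pass = filter - dump_filter_table;
            if (!(filter->level_mask & dump_config.level))
                    continue;       /* pass disabled at this dump level */
            if (filter->selector(pass, loc, sz))
                    ;               /* save this range during this pass */
    }
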
--- linux-2.5.69/drivers/dump/dump_fmt.c.lkcdbase       Mon Jun  2 17:29:49 2003
+++ linux-2.5.69/drivers/dump/dump_fmt.c        Fri Feb  7 06:47:58 2003
@@ -0,0 +1,397 @@
+/*
+ * Implements the routines which handle the format specific
+ * aspects of dump for the default dump format.
+ *
+ * Used in single stage dumping and stage 1 of soft-boot based dumping
+ * Saves data in LKCD (lcrash) format
+ *
+ * Previously a part of dump_base.c
+ *
+ * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
+ *     Split off and reshuffled LKCD dump format code around generic
+ *     dump method interfaces.
+ *
+ * Derived from original code created by
+ *     Matt Robinson <yakker@sourceforge.net>
+ *
+ * Contributions from SGI, IBM, HP, MCL, and others.
+ *
+ * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000 - 2002 TurboLinux, Inc.  All rights reserved.
+ * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
+ * Copyright (C) 2002 International Business Machines Corp.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/utsname.h>
+#include <asm/dump.h>
+#include <linux/dump.h>
+#include "dump_methods.h"
+
+/*
+ * SYSTEM DUMP LAYOUT
+ *
+ * System dumps are currently the combination of a dump header and a set
+ * of data pages which contain the system memory.  The layout of the dump
+ * (for full dumps) is as follows:
+ *
+ *             +-----------------------------+
+ *             |     generic dump header     |
+ *             +-----------------------------+
+ *             |   architecture dump header  |
+ *             +-----------------------------+
+ *             |         page header         |
+ *             +-----------------------------+
+ *             |          page data          |
+ *             +-----------------------------+
+ *             |         page header         |
+ *             +-----------------------------+
+ *             |          page data          |
+ *             +-----------------------------+
+ *             |              |              |
+ *             |              |              |
+ *             |              |              |
+ *             |              |              |
+ *             |              V              |
+ *             +-----------------------------+
+ *             |        PAGE_END header      |
+ *             +-----------------------------+
+ *
+ * There are two dump headers, the first which is architecture
+ * independent, and the other which is architecture dependent.  This
+ * allows different architectures to dump different data structures
+ * which are specific to their chipset, CPU, etc.
+ *
+ * After the dump headers come a succession of dump page headers along
+ * with dump pages.  The page header contains information about the page
+ * size, any flags associated with the page (whether it's compressed or
+ * not), and the address of the page.  After the page header is the page
+ * data, which is either compressed (or not).  Each page of data is
+ * dumped in succession, until the final dump header (PAGE_END) is
+ * placed at the end of the dump, assuming the dump device isn't out
+ * of space.
+ *
+ * This mechanism allows for multiple compression types, different
+ * types of data structures, different page ordering, etc., etc., etc.
+ * It's a very straightforward mechanism for dumping system memory.
+ */
+
+struct __dump_header dump_header;  /* the primary dump header              */
+struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
+
+/*
+ *  Set up common header fields (mainly the arch indep section)
+ *  Per-cpu state is handled by lcrash_save_context
+ *  Returns the size of the header in bytes.
+ */
+static int lcrash_init_dump_header(const char *panic_str)
+{
+       struct timeval dh_time;
+       /* make sure the dump header isn't TOO big */
+       if ((sizeof(struct __dump_header) +
+               sizeof(struct __dump_header_asm)) > DUMP_BUFFER_SIZE) {
+                       printk("lcrash_init_dump_header(): combined "
+                               "headers larger than DUMP_BUFFER_SIZE!\n");
+                       return -E2BIG;
+       }
+
+       /* initialize the dump headers to zero */
+       memset(&dump_header, 0, sizeof(dump_header));
+       memset(&dump_header_asm, 0, sizeof(dump_header_asm));
+
+       /* configure dump header values */
+       dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
+       dump_header.dh_version = DUMP_VERSION_NUMBER;
+       dump_header.dh_memory_start = PAGE_OFFSET;
+       dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
+       dump_header.dh_header_size = sizeof(struct __dump_header);
+       dump_header.dh_page_size = PAGE_SIZE;
+       dump_header.dh_dump_level = dump_config.level;
+       dump_header.dh_current_task = (unsigned long) current;
+       dump_header.dh_dump_compress = dump_config.dumper->compress->
+               compress_type;
+       dump_header.dh_dump_flags = dump_config.flags;
+       dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
+
+#if DUMP_DEBUG >= 6
+       dump_header.dh_num_bytes = 0;
+#endif
+       dump_header.dh_num_dump_pages = 0;
+       do_gettimeofday(&dh_time);
+       dump_header.dh_time.tv_sec = dh_time.tv_sec;
+       dump_header.dh_time.tv_usec = dh_time.tv_usec;
+
+       memcpy((void *)&(dump_header.dh_utsname_sysname),
+               (const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
+       memcpy((void *)&(dump_header.dh_utsname_nodename),
+               (const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
+       memcpy((void *)&(dump_header.dh_utsname_release),
+               (const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
+       memcpy((void *)&(dump_header.dh_utsname_version),
+               (const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
+       memcpy((void *)&(dump_header.dh_utsname_machine),
+               (const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
+       memcpy((void *)&(dump_header.dh_utsname_domainname),
+               (const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);
+
+       if (panic_str) {
+               memcpy((void *)&(dump_header.dh_panic_string),
+                       (const void *)panic_str, DUMP_PANIC_LEN);
+       }
+
+       dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
+       dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
+       dump_header_asm.dha_header_size = sizeof(dump_header_asm);
+
+       dump_header_asm.dha_smp_num_cpus = num_online_cpus();
+       pr_debug("smp_num_cpus in header %d\n",
+               dump_header_asm.dha_smp_num_cpus);
+
+       dump_header_asm.dha_dumping_cpu = smp_processor_id();
+
+       return sizeof(dump_header) + sizeof(dump_header_asm);
+}
+
+
+int dump_lcrash_configure_header(const char *panic_str,
+       const struct pt_regs *regs)
+{
+       int retval = 0;
+
+       dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);
+
+       /* capture register states for all processors */
+       dump_save_this_cpu(regs);
+       __dump_save_other_cpus(); /* side effect: silence cpus */
+
+       /* configure architecture-specific dump header values */
+       if ((retval = __dump_configure_header(regs)))
+               return retval;
+
+       dump_config.dumper->header_dirty++;
+       return 0;
+}
+
+/* save register and task context */
+void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
+       struct task_struct *tsk)
+{
+       dump_header_asm.dha_smp_current_task[cpu] = (uint32_t) tsk;
+
+       __dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
+
+       /* take a snapshot of the stack */
+       /* doing this enables us to tolerate slight drifts on this cpu */
+       if (dump_header_asm.dha_stack[cpu]) {
+               memcpy((void *)dump_header_asm.dha_stack[cpu],
+                               tsk->thread_info, THREAD_SIZE);
+       }
+       dump_header_asm.dha_stack_ptr[cpu] = (uint32_t)(tsk->thread_info);
+}
+
+/* write out the header */
+int dump_write_header(void)
+{
+       int retval = 0, size;
+       void *buf = dump_config.dumper->dump_buf;
+
+       /* accounts for DUMP_HEADER_OFFSET if applicable */
+       if ((retval = dump_dev_seek(0))) {
+               printk("Unable to seek to dump header offset: %d\n",
+                       retval);
+               return retval;
+       }
+
+       memcpy(buf, (void *)&dump_header, sizeof(dump_header));
+       size = sizeof(dump_header);
+       memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
+       size += sizeof(dump_header_asm);
+       size = PAGE_ALIGN(size);
+       retval = dump_ll_write(buf, size);
+
+       if (retval < size)
+               return (retval >= 0) ? -ENOSPC : retval;
+
+       return 0;
+}
+
+int dump_generic_update_header(void)
+{
+       int err = 0;
+
+       if (dump_config.dumper->header_dirty) {
+               if ((err = dump_write_header())) {
+                       printk("dump write header failed! err %d\n", err);
+               } else {
+                       dump_config.dumper->header_dirty = 0;
+               }
+       }
+
+       return err;
+}
+
+static inline int is_curr_stack_page(struct page *page, unsigned long size)
+{
+       unsigned long thread_addr = (unsigned long)current_thread_info();
+       unsigned long addr = (unsigned long)page_address(page);
+
+       return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
+               && (addr + size > thread_addr);
+}
+
+static inline int is_dump_page(struct page *page, unsigned long size)
+{
+       unsigned long addr = (unsigned long)page_address(page);
+       unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
+
+       return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
+               && (addr + size > dump_buf);
+}
+
+int dump_allow_compress(struct page *page, unsigned long size)
+{
+       /*
+        * Don't compress the page if any part of it overlaps
+        * with the current stack or dump buffer (since the contents
+        * in these could be changing while compression is going on)
+        */
+       return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
+}
+
+void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
+       unsigned long sz)
+{
+       memset(dp, 0, sizeof(struct __dump_page));
+       dp->dp_flags = 0;
+       dp->dp_size = 0;
+       if (sz > 0)
+               dp->dp_address = page_to_pfn(page) << PAGE_SHIFT;
+
+#if DUMP_DEBUG > 6
+       dp->dp_page_index = dump_header.dh_num_dump_pages;
+       dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
+               + DUMP_HEADER_OFFSET; /* ?? */
+#endif /* DUMP_DEBUG */
+}
+
+int dump_lcrash_add_data(unsigned long loc, unsigned long len)
+{
+       struct page *page = (struct page *)loc;
+       void *addr, *buf = dump_config.dumper->curr_buf;
+       struct __dump_page *dp = (struct __dump_page *)buf;
+       int bytes, size;
+
+       if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
+               return -ENOMEM;
+
+       lcrash_init_pageheader(dp, page, len);
+       buf += sizeof(struct __dump_page);
+
+       while (len) {
+               addr = kmap_atomic(page, KM_DUMP);
+               size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
+               /* check for compression */
+               if (dump_allow_compress(page, bytes)) {
+                       size = dump_compress_data((char *)addr, bytes, (char *)buf);
+               }
+               /* set the compressed flag if the page did compress */
+               if (size && (size < bytes)) {
+                       dp->dp_flags |= DUMP_DH_COMPRESSED;
+               } else {
+                       /* compression failed -- default to raw mode */
+                       dp->dp_flags |= DUMP_DH_RAW;
+                       memcpy(buf, addr, bytes);
+                       size = bytes;
+               }
+               /* memset(buf, 'A', size); temporary: testing only !! */
+               kunmap_atomic(addr, KM_DUMP);
+               dp->dp_size += size;
+               buf += size;
+               len -= bytes;
+               page++;
+       }
+
+       /* now update the header */
+#if DUMP_DEBUG > 6
+       dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
+#endif
+       dump_header.dh_num_dump_pages++;
+       dump_config.dumper->header_dirty++;
+
+       dump_config.dumper->curr_buf = buf;
+
+       return len;
+}
+
+int dump_lcrash_update_end_marker(void)
+{
+       struct __dump_page *dp =
+               (struct __dump_page *)dump_config.dumper->curr_buf;
+       unsigned long left;
+       int ret = 0;
+
+       lcrash_init_pageheader(dp, NULL, 0);
+       dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
+
+       /* now update the header */
+#if DUMP_DEBUG > 6
+       dump_header.dh_num_bytes += sizeof(*dp);
+#endif
+       dump_config.dumper->curr_buf += sizeof(*dp);
+       left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;
+
+       printk("\n");
+
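+       /* flush loop: write out full buffer-sized chunks; residue is
+        * copied back to the start of the dump buffer for the next pass */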
+       while (left) {
+               if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
+                       printk("Seek failed at offset 0x%llx\n",
+                               dump_config.dumper->curr_offset);
+                       return ret;
+               }
+
+               if (DUMP_BUFFER_SIZE > left)
+                       memset(dump_config.dumper->curr_buf, 'm',
+                               DUMP_BUFFER_SIZE - left);
+
+               if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
+                       DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
+                       return (ret < 0) ? ret : -ENOSPC;
+               }
+
+               dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;
+
+               if (left > DUMP_BUFFER_SIZE) {
+                       left -= DUMP_BUFFER_SIZE;
+                       memcpy(dump_config.dumper->dump_buf,
+                               dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
+                       dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
+               } else {
+                       left = 0;
+               }
+       }
+       return 0;
+}
+
+
+/* Default Formatter (lcrash) */
+struct dump_fmt_ops dump_fmt_lcrash_ops = {
+       .configure_header       = dump_lcrash_configure_header,
+       .update_header          = dump_generic_update_header,
+       .save_context           = dump_lcrash_save_context,
+       .add_data               = dump_lcrash_add_data,
+       .update_end_marker      = dump_lcrash_update_end_marker
+};
+
+struct dump_fmt dump_fmt_lcrash = {
+       .name   = "lcrash",
+       .ops    = &dump_fmt_lcrash_ops
+};
+
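On the reading side (lcrash), the page stream that follows the two headers can be walked by alternating page headers and (possibly compressed) page data, stopping at the end marker. A sketch of that walk, using the struct __dump_page fields from above (read_bytes()/decompress_bytes() are hypothetical helpers):

    struct __dump_page dp;

    for (;;) {
            read_bytes(&dp, sizeof(dp));
            if (dp.dp_flags & DUMP_DH_END)
                    break;                          /* PAGE_END header */
            if (dp.dp_flags & DUMP_DH_COMPRESSED)
                    decompress_bytes(dp.dp_size);   /* gzip/rle payload */
            else
                    read_bytes(page_buf, dp.dp_size); /* raw page data */
    }
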
--- linux-2.5.69/drivers/dump/dump_gzip.c.lkcdbase      Mon Jun  2 17:29:49 2003
+++ linux-2.5.69/drivers/dump/dump_gzip.c       Fri Dec 13 00:51:31 2002
@@ -0,0 +1,118 @@
+/*
+ * GZIP Compression functions for kernel crash dumps.
+ *
+ * Created by: Matt Robinson (yakker@sourceforge.net)
+ * Copyright 2001 Matt D. Robinson.  All rights reserved.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+/* header files */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/dump.h>
+#include <linux/zlib.h>
+#include <linux/vmalloc.h>
+
+static void *deflate_workspace;
+
+/*
+ * Name: dump_compress_gzip()
+ * Func: Compress a DUMP_PAGE_SIZE page using gzip-style algorithms (the
+ *       deflate functions similar to what's used in PPP).
+ */
+static u16
+dump_compress_gzip(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
+{
+       /* error code and dump stream */
+       int err;
+       z_stream dump_stream;
+
+       dump_stream.workspace = deflate_workspace;
+
+       if ((err = zlib_deflateInit(&dump_stream, Z_BEST_COMPRESSION)) != Z_OK) {
+               /* fall back to RLE compression */
+               printk("dump_compress_gzip(): zlib_deflateInit() "
+                       "failed (%d)!\n", err);
+               return 0;
+       }
+
+       /* use old (page of memory) and size (DUMP_PAGE_SIZE) as in-streams */
+       dump_stream.next_in = (u8 *) old;
+       dump_stream.avail_in = oldsize;
+
+       /* out streams are new (dpcpage) and new size (DUMP_DPC_PAGE_SIZE) */
+       dump_stream.next_out = new;
+       dump_stream.avail_out = newsize;
+
+       /* deflate the page -- check for error */
+       err = zlib_deflate(&dump_stream, Z_FINISH);
+       if (err != Z_STREAM_END) {
+               /* zero is return code here */
+               (void)zlib_deflateEnd(&dump_stream);
+               printk("dump_compress_gzip(): zlib_deflate() failed (%d)!\n",
+                       err);
+               return 0;
+       }
+
+       /* let's end the deflated compression stream */
+       if ((err = zlib_deflateEnd(&dump_stream)) != Z_OK) {
+               printk("dump_compress_gzip(): zlib_deflateEnd() "
+                       "failed (%d)!\n", err);
+       }
+
+       /* return the compressed byte total (if it's smaller) */
+       if (dump_stream.total_out >= oldsize) {
+               return oldsize;
+       }
+       return dump_stream.total_out;
+}
+
+/* setup the gzip compression functionality */
+static struct __dump_compress dump_gzip_compression = {
+       .compress_type = DUMP_COMPRESS_GZIP,
+       .compress_func = dump_compress_gzip,
+       .compress_name = "GZIP",
+};
+
+/*
+ * Name: dump_compress_gzip_init()
+ * Func: Initialize gzip as a compression mechanism.
+ */
+static int __init
+dump_compress_gzip_init(void)
+{
+       deflate_workspace = vmalloc(zlib_deflate_workspacesize());
+       if (!deflate_workspace) {
+               printk("dump_compress_gzip_init(): Failed to "
+                       "alloc %d bytes for deflate workspace\n",
+                       zlib_deflate_workspacesize());
+               return -ENOMEM;
+       }
+       dump_register_compression(&dump_gzip_compression);
+       return 0;
+}
+
+/*
+ * Name: dump_compress_gzip_cleanup()
+ * Func: Remove gzip as a compression mechanism.
+ */
+static void __exit
+dump_compress_gzip_cleanup(void)
+{
+       vfree(deflate_workspace);
+       dump_unregister_compression(DUMP_COMPRESS_GZIP);
+}
+
+/* module initialization */
+module_init(dump_compress_gzip_init);
+module_exit(dump_compress_gzip_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Gzip compression module for crash dump driver");
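
The compress_func contract, as dump_lcrash_add_data() uses it: return the compressed size on success, or 0 (or anything >= oldsize) to make the caller fall back to a raw copy. A do-nothing compressor is therefore a one-liner (sketch; DUMP_COMPRESS_NONE is assumed to be defined in dump.h):

    /* illustrative no-op compressor obeying the same contract */
    static u16
    dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
    {
            /* returning oldsize signals "no savings"; the caller stores raw */
            return oldsize;
    }

    static struct __dump_compress dump_none_compression = {
            .compress_type  = DUMP_COMPRESS_NONE,
            .compress_func  = dump_compress_none,
            .compress_name  = "NONE",
    };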
--- linux-2.5.69/drivers/dump/dump_i386.c.lkcdbase      Mon Jun  2 17:29:49 2003
+++ linux-2.5.69/drivers/dump/dump_i386.c       Wed Mar  5 02:49:22 2003
@@ -0,0 +1,329 @@
+/*
+ * Architecture specific (i386) functions for Linux crash dumps.
+ *
+ * Created by: Matt Robinson (yakker@sgi.com)
+ *
+ * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
+ *
+ * 2.3 kernel modifications by: Matt D. Robinson (yakker@turbolinux.com)
+ * Copyright 2000 TurboLinux, Inc.  All rights reserved.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+/*
+ * The hooks for dumping the kernel virtual memory to disk are in this
+ * file.  Any time a modification is made to the virtual memory mechanism,
+ * these routines must be changed to use the new mechanisms.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/dump.h>
+#include "dump_methods.h"
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+#include <asm/e820.h>
+#include <asm/hardirq.h>
+#include <asm/nmi.h>
+
+static __s32        saved_irq_count;   /* saved preempt_count() flags */
+
+static int
+alloc_dha_stack(void)
+{
+       int i;
+       void *ptr;
+
+       if (dump_header_asm.dha_stack[0])
+               return 0;
+
+       ptr = vmalloc(THREAD_SIZE * num_online_cpus());
+       if (!ptr) {
+               printk("vmalloc for dha_stacks failed\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_online_cpus(); i++) {
+               dump_header_asm.dha_stack[i] = (u32)((unsigned long)ptr +
+                               (i * THREAD_SIZE));
+       }
+       return 0;
+}
+
+static int
+free_dha_stack(void)
+{
+       if (dump_header_asm.dha_stack[0]) {
+               vfree((void *)dump_header_asm.dha_stack[0]);
+               dump_header_asm.dha_stack[0] = 0;
+       }
+       return 0;
+}
+
+
+void
+__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
+{
+       *dest_regs = *regs;
+
+       /* In the case of panic dumps, we collect regs on entry to panic,
+        * so we shouldn't 'fix' ss/esp here again. But it is hard to
+        * tell just by looking at regs whether ss/esp need fixing. We
+        * make this decision by looking at xss in regs. If we had a
+        * better means to determine that ss/esp are valid (a flag which
+        * tells us that we are here due to a panic dump), we could use
+        * that instead of this kludge.
+        */
+       if (!user_mode(regs)) {
+               if ((0xffff & regs->xss) == __KERNEL_DS)
+                       /* already fixed up */
+                       return;
+               dest_regs->esp = (unsigned long)&(regs->esp);
+               __asm__ __volatile__ ("movw %%ss, %%ax;"
+                       :"=a"(dest_regs->xss));
+       }
+}
+
+
+#ifdef CONFIG_SMP
+extern unsigned long irq_affinity[];
+extern irq_desc_t irq_desc[];
+extern void dump_send_ipi(void);
+
+static int dump_expect_ipi[NR_CPUS];
+static atomic_t waiting_for_dump_ipi;
+static unsigned long saved_affinity[NR_IRQS];
+
+extern void stop_this_cpu(void *); /* exported by i386 kernel */
+
+static int
+dump_nmi_callback(struct pt_regs *regs, int cpu)
+{
+       if (!dump_expect_ipi[cpu])
+               return 0;
+
+       dump_expect_ipi[cpu] = 0;
+
+       dump_save_this_cpu(regs);
+       atomic_dec(&waiting_for_dump_ipi);
+
+ level_changed:
+       switch (dump_silence_level) {
+       case DUMP_HARD_SPIN_CPUS:       /* Spin until dump is complete */
+               while (dump_oncpu) {
+                       barrier();      /* paranoia */
+                       if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
+                               goto level_changed;
+
+                       cpu_relax();    /* kill time nicely */
+               }
+               break;
+
+       case DUMP_HALT_CPUS:            /* Execute halt */
+               stop_this_cpu(NULL);
+               break;
+
+       case DUMP_SOFT_SPIN_CPUS:
+               /* Mark the task so it spins in schedule */
+               set_tsk_thread_flag(current, TIF_NEED_RESCHED);
+               break;
+       }
+
+       return 1;
+}
+
+/* save registers on other processors */
+void
+__dump_save_other_cpus(void)
+{
+       int i, cpu = smp_processor_id();
+       int other_cpus = num_online_cpus()-1;
+
+       if (other_cpus > 0) {
+               atomic_set(&waiting_for_dump_ipi, other_cpus);
+
+               for (i = 0; i < NR_CPUS; i++) {
+                       dump_expect_ipi[i] = (i != cpu && cpu_online(i));
+               }
+
+               /* short circuit normal NMI handling temporarily */
+               set_nmi_callback(dump_nmi_callback);
+               wmb();
+
+               dump_send_ipi();
+               /* Maybe we don't need to wait for the NMI to be
+                  processed: just write out the header at the end of
+                  dumping; if this IPI is not processed by then, there
+                  probably is a problem and we just fail to capture
+                  the state of the other cpus. */
1443 +               while(atomic_read(&waiting_for_dump_ipi) > 0) {
1444 +                       cpu_relax();
1445 +               }
1446 +
1447 +               unset_nmi_callback();
1448 +       }
1449 +}
1450 +
1451 +/*
1452 + * Routine to save the old irq affinities and change affinities of all irqs to
1453 + * the dumping cpu.
1454 + */
1455 +static void 
1456 +set_irq_affinity(void)
1457 +{
1458 +       int i;
1459 +       int cpu = smp_processor_id();
1460 +
1461 +       memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
1462 +       for (i = 0; i < NR_IRQS; i++) {
1463 +               if (irq_desc[i].handler == NULL)
1464 +                       continue;
1465 +               irq_affinity[i] = 1UL << cpu;
1466 +               if (irq_desc[i].handler->set_affinity != NULL)
1467 +                       irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
1468 +       }
1469 +}
1470 +
1471 +/*
1472 + * Restore old irq affinities.
1473 + */
1474 +static void 
1475 +reset_irq_affinity(void)
1476 +{
1477 +       int i;
1478 +
1479 +       memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
1480 +       for (i = 0; i < NR_IRQS; i++) {
1481 +               if (irq_desc[i].handler == NULL)
1482 +                       continue;
1483 +               if (irq_desc[i].handler->set_affinity != NULL)
1484 +                       irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
1485 +       }
1486 +}
1487 +
1488 +#else /* !CONFIG_SMP */
1489 +#define set_irq_affinity()     do { } while (0)
1490 +#define reset_irq_affinity()   do { } while (0)
1491 +#define save_other_cpu_states() do { } while (0)
1492 +#endif /* !CONFIG_SMP */
1493 +
1494 +/* 
1495 + * Kludge - dump from interrupt context is unreliable (Fixme)
1496 + *
1497 + * We do this so that softirqs initiated for dump i/o 
1498 + * get processed and we don't hang while waiting for i/o
1499 + * to complete or in any irq synchronization attempt.
1500 + *
1501 + * This is not quite legal of course, as it has the side 
1502 + * effect of making all interrupts & softirqs triggered 
1503 + * while dump is in progress complete before currently 
1504 + * pending softirqs and the currently executing interrupt 
1505 + * code. 
1506 + */
1507 +static inline void
1508 +irq_bh_save(void)
1509 +{
1510 +       saved_irq_count = irq_count();
1511 +       preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
1512 +}
1513 +
1514 +static inline void
1515 +irq_bh_restore(void)
1516 +{
1517 +       preempt_count() |= saved_irq_count;
1518 +}
1519 +
1520 +/*
1521 + * Name: __dump_irq_enable
1522 + * Func: Reset system so interrupts are enabled.
1523 + *      This is used for dump methods that require interrupts
1524 + *      Eventually, all methods will have interrupts disabled
1525 + *      and this code can be removed.
1526 + *
1527 + *     Change irq affinities
1528 + *     Re-enable interrupts
1529 + */
1530 +void 
1531 +__dump_irq_enable(void)
1532 +{
1533 +       set_irq_affinity();
1534 +       irq_bh_save();
1535 +       local_irq_enable();
1536 +}
1537 +
1538 +/*
1539 + * Name: __dump_irq_restore
1540 + * Func: Resume the system state in an architecture-specific way.
1541 + *
1542 + */
1543 +void 
1544 +__dump_irq_restore(void)
1545 +{
1546 +       local_irq_disable();
1547 +       reset_irq_affinity();
1548 +       irq_bh_restore();
1549 +}
1550 +
1551 +/*
1552 + * Name: __dump_configure_header()
1553 + * Func: Fill in arch-specific header fields, except the per-cpu state
1554 + * that is already captured via __dump_save_context for all CPUs.
1555 + */
1556 +int
1557 +__dump_configure_header(const struct pt_regs *regs)
1558 +{
1559 +       return (0);
1560 +}
1561 +
1562 +/*
1563 + * Name: __dump_init()
1564 + * Func: Initialize the dumping routine process.
1565 + */
1566 +void
1567 +__dump_init(uint64_t local_memory_start)
1568 +{
1569 +       return;
1570 +}
1571 +
1572 +/*
1573 + * Name: __dump_open()
1574 + * Func: Open the dump device (architecture specific).
1575 + */
1576 +void
1577 +__dump_open(void)
1578 +{
1579 +       alloc_dha_stack();
1580 +}
1581 +
1582 +/*
1583 + * Name: __dump_cleanup()
1584 + * Func: Free any architecture specific data structures. This is called
1585 + *       when the dump module is being removed.
1586 + */
1587 +void
1588 +__dump_cleanup(void)
1589 +{
1590 +       free_dha_stack();
1591 +}
1592 +
1593 +extern int pfn_is_ram(unsigned long);
1594 +
1595 +/*
1596 + * Name: __dump_page_valid()
1597 + * Func: Check if page is valid to dump.
1598 + */ 
1599 +int 
1600 +__dump_page_valid(unsigned long index)
1601 +{
1602 +       if (!pfn_valid(index))
1603 +               return 0;
1604 +
1605 +       return pfn_is_ram(index);
1606 +}
1607 +
1608 --- linux-2.5.69/drivers/dump/dump_memdev.c.lkcdbase    Mon Jun  2 17:29:49 2003
1609 +++ linux-2.5.69/drivers/dump/dump_memdev.c     Tue Mar 25 21:34:35 2003
1610 @@ -0,0 +1,640 @@
1611 +/*
1612 + * Implements the dump driver interface for saving a dump in available
1613 + * memory areas. The saved pages may be written out to persistent storage  
1614 + * after a soft reboot.
1615 + *
1616 + * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
1617 + *
1618 + * Copyright (C) 2002 International Business Machines Corp. 
1619 + *
1620 + * This code is released under version 2 of the GNU GPL.
1621 + *
1622 + * The approach of tracking pages containing saved dump using map pages 
1623 + * allocated as needed has been derived from the Mission Critical Linux 
1624 + * mcore dump implementation. 
1625 + *
1626 + * Credits and a big thanks, for letting the lkcd project make use of
1627 + * this excellent piece of work and for helping with clarifications
1628 + * and tips along the way, are due to:
1629 + *     Dave Winchell <winchell@mclx.com> (primary author of mcore)
1630 + *     Jeff Moyer <moyer@mclx.com>
1631 + *     Josh Huber <huber@mclx.com>
1632 + *
1633 + * For those familiar with the mcore code, the main differences worth
1634 + * noting here (besides the dump device abstraction) result from enabling 
1635 + * "high" memory pages (pages not permanently mapped in the kernel 
1636 + * address space) to be used for saving dump data (because of which a 
1637 + * simple virtual address based linked list cannot be used anymore for 
1638 + * managing free pages), an added level of indirection for faster 
1639 + * lookups during the post-boot stage, and the idea of pages being 
1640 + * made available as they get freed up while dump to memory progresses 
1641 + * rather than one time before starting the dump. The last point enables 
1642 + * a full memory snapshot to be saved starting with an initial set of 
1643 + * bootstrap pages given a good compression ratio. (See dump_overlay.c)
1644 + *
1645 + */
1646 +
1647 +/*
1648 + * -----------------MEMORY LAYOUT ------------------
1649 + * The memory space consists of a set of discontiguous pages, and
1650 + * discontiguous map pages as well, rooted in a chain of indirect
1651 + * map pages (also discontiguous). Except for the indirect maps 
1652 + * (which must be preallocated in advance), the rest of the pages 
1653 + * could be in high memory.
1654 + *
1655 + * root
1656 + *  |    ---------    --------        --------
1657 + *  -->  | .  . +|--->|  .  +|------->| . .  |       indirect 
1658 + *       --|--|---    ---|----        --|-|---      maps
1659 + *         |  |          |                     | |     
1660 + *    ------  ------   -------     ------ -------
1661 + *    | .  |  | .  |   | .  . |    | .  | |  . . |   maps 
1662 + *    --|---  --|---   --|--|--    --|--- ---|-|--
1663 + *     page    page    page page   page   page page  data
1664 + *                                                   pages
1665 + *
1666 + * Writes to the dump device happen sequentially in append mode.
1667 + * The main reason for the existence of the indirect map is
1668 + * to enable a quick way to look up a specific logical offset in
1669 + * the saved data post-soft-boot, e.g. to write out pages
1670 + * with more critical data first, even though such pages
1671 + * would have been compressed and copied last, being the lowest
1672 + * ranked candidates for reuse due to their criticality.
1673 + * (See dump_overlay.c)
1674 + */
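+
+/*
+ * To make the arithmetic concrete (an illustrative note, assuming 4KB
+ * pages and 32-bit longs, so DUMP_MAP_SZ = 1024 and DUMP_IND_MAP_SZ =
+ * 1023): each direct map page addresses 1024 data pages (4MB), and
+ * each indirect map page chains up to 1023 direct maps, i.e. roughly
+ * 4GB of saved data per indirect map page. A logical page number loc
+ * decomposes as loc / DUMP_MAP_SZ to select the direct map slot and
+ * loc % DUMP_MAP_SZ to select the entry within it.
+ */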
1675 +#include <linux/mm.h>
1676 +#include <linux/highmem.h>
1677 +#include <linux/bootmem.h>
1678 +#include <linux/dump.h>
1679 +#include "dump_methods.h"
1680 +
1681 +#define DUMP_MAP_SZ (PAGE_SIZE / sizeof(unsigned long)) /* direct map size */
1682 +#define DUMP_IND_MAP_SZ        (DUMP_MAP_SZ - 1)  /* indirect map size */
1683 +#define DUMP_NR_BOOTSTRAP      64  /* number of bootstrap pages */
1684 +
1685 +extern int dump_low_page(struct page *);
1686 +
1687 +/* check if the next entry crosses a page boundary */
1688 +static inline int is_last_map_entry(unsigned long *map)
1689 +{
1690 +       unsigned long addr = (unsigned long)(map + 1);
1691 +
1692 +       return (!(addr & (PAGE_SIZE - 1)));
1693 +}
1694 +
1695 +/* Todo: should have some validation checks */
1696 +/* The last entry in the indirect map points to the next indirect map */
1697 +/* Indirect maps are referred to directly by virtual address */
1698 +static inline unsigned long *next_indirect_map(unsigned long *map)
1699 +{
1700 +       return (unsigned long *)map[DUMP_IND_MAP_SZ];
1701 +}
1702 +
1703 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
1704 +/* Called during early bootup - fixme: make this __init */
1705 +void dump_early_reserve_map(struct dump_memdev *dev)
1706 +{
1707 +       unsigned long *map1, *map2;
1708 +       loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
1709 +       int i, j;
1710 +       
1711 +       printk("Reserve bootmap space holding previous dump of %lld pages\n",
1712 +                       last);
1713 +       map1 = (unsigned long *)dev->indirect_map_root;
1714 +
1715 +       while (map1 && (off < last)) {
1716 +               reserve_bootmem(virt_to_phys((void *)map1), PAGE_SIZE);
1717 +       for (i = 0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
1718 +                       i++, off += DUMP_MAP_SZ) {
1719 +                       pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
1720 +                       if (map1[i] >= max_low_pfn)
1721 +                               continue;
1722 +                       reserve_bootmem(map1[i] << PAGE_SHIFT, PAGE_SIZE);
1723 +                       map2 = pfn_to_kaddr(map1[i]);
1724 +                       for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] && 
1725 +                               (off + j < last); j++) {
1726 +                               pr_debug("\t map[%d][%d] = 0x%lx\n", i, j, 
1727 +                                       map2[j]);
1728 +                               if (map2[j] < max_low_pfn) {
1729 +                                       reserve_bootmem(map2[j] << PAGE_SHIFT,
1730 +                                               PAGE_SIZE);
1731 +                               }
1732 +                       }
1733 +               }
1734 +               map1 = next_indirect_map(map1);
1735 +       }
1736 +       dev->nr_free = 0; /* these pages don't belong to this boot */
1737 +}
1738 +#endif
1739 +
1740 +/* mark dump pages so that they aren't used by this kernel */
1741 +void dump_mark_map(struct dump_memdev *dev)
1742 +{
1743 +       unsigned long *map1, *map2;
1744 +       loff_t off = 0, last = dev->last_used_offset >> PAGE_SHIFT;
1745 +       struct page *page;
1746 +       int i, j;
1747 +       
1748 +       printk("Dump: marking pages in use by previous dump\n");
1749 +       map1 = (unsigned long *)dev->indirect_map_root;
1750 +
1751 +       while (map1 && (off < last)) {
1752 +               page = virt_to_page(map1);      
1753 +               set_page_count(page, 1);
1754 +               for (i = 0; (i < DUMP_MAP_SZ - 1) && map1[i] && (off < last);
1755 +                       i++, off += DUMP_MAP_SZ) {
1756 +                       pr_debug("indirect map[%d] = 0x%lx\n", i, map1[i]);
1757 +                       page = pfn_to_page(map1[i]);
1758 +                       set_page_count(page, 1);
1759 +                       map2 = kmap_atomic(page, KM_DUMP);
1760 +                       for (j = 0 ; (j < DUMP_MAP_SZ) && map2[j] && 
1761 +                               (off + j < last); j++) {
1762 +                               pr_debug("\t map[%d][%d] = 0x%lx\n", i, j, 
1763 +                                       map2[j]);
1764 +                               page = pfn_to_page(map2[j]);
1765 +                               set_page_count(page, 1);
1766 +                       }
1767 +               }
1768 +               map1 = next_indirect_map(map1);
1769 +       }
1770 +}
1771 +       
1772 +
1773 +/* 
1774 + * Given a logical offset into the mem device, look up the
1775 + * corresponding page.
1776 + *     loc is specified in units of pages 
1777 + * Note: affects curr_map (even in the case where lookup fails)
1778 + */
1779 +struct page *dump_mem_lookup(struct dump_memdev *dump_mdev, unsigned long loc)
1780 +{
1781 +       unsigned long *map;
1782 +       unsigned long i, index = loc / DUMP_MAP_SZ;
1783 +       struct page *page = NULL;
1784 +       unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
1785 +
1786 +       map = (unsigned long *)dump_mdev->indirect_map_root;
1787 +       if (!map)
1788 +               return NULL;
1789 +
1790 +       if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
1791 +               return NULL;
1792 +
1793 +       /* 
1794 +        * first locate the right indirect map 
1795 +        * in the chain of indirect maps 
1796 +        */
1797 +       for (i = 0; i + DUMP_IND_MAP_SZ < index; i += DUMP_IND_MAP_SZ) {
1798 +               if (!(map = next_indirect_map(map)))
1799 +                       return NULL;
1800 +       }
1801 +       /* then the right direct map */
1802 +       /* map entries are referred to by page index */
1803 +       if ((curr_map = map[index - i])) {
1804 +               page = pfn_to_page(curr_map);
1805 +               /* update the current traversal index */
1806 +               /* dump_mdev->curr_map = &map[index - i];*/
1807 +               curr_map_ptr = &map[index - i];
1808 +       }
1809 +
1810 +       if (page)
1811 +               map = kmap_atomic(page, KM_DUMP);
1812 +       else 
1813 +               return NULL;
1814 +
1815 +       /* and finally the right entry therein */
1816 +       /* data pages are referred to by page index */
1817 +       i = index * DUMP_MAP_SZ;
1818 +       if ((curr_pfn = map[loc - i])) {
1819 +               page = pfn_to_page(curr_pfn);
1820 +               dump_mdev->curr_map = curr_map_ptr;
1821 +               dump_mdev->curr_map_offset = loc - i;
1822 +               dump_mdev->ddev.curr_offset = loc << PAGE_SHIFT;
1823 +       } else {
1824 +               page = NULL;
1825 +       }
1826 +       kunmap_atomic(map, KM_DUMP);
1827 +
1828 +       return page;
1829 +}
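+
+/*
+ * Illustrative trace, assuming DUMP_MAP_SZ = 1024: looking up
+ * loc = 2050 gives index = 2, stays on the first indirect map
+ * (0 + 1023 < 2 is false), takes the direct map pfn from map[2],
+ * and reads the data page pfn from entry 2050 - 2048 = 2 of that
+ * direct map.
+ */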
1830 +                       
1831 +/* 
1832 + * Retrieves a pointer to the next page in the dump device 
1833 + * Used during the lookup pass post-soft-reboot 
1834 + */
1835 +struct page *dump_mem_next_page(struct dump_memdev *dev)
1836 +{
1837 +       unsigned long i; 
1838 +       unsigned long *map;     
1839 +       struct page *page = NULL;
1840 +
1841 +       if (dev->ddev.curr_offset + PAGE_SIZE >= dev->last_offset) {
1842 +               return NULL;
1843 +       }
1844 +
1845 +       if ((i = (unsigned long)(++dev->curr_map_offset)) >= DUMP_MAP_SZ) {
1846 +               /* move to next map */  
1847 +               if (is_last_map_entry(++dev->curr_map)) {
1848 +                       /* move to the next indirect map page */
1849 +                       printk("dump_mem_next_page: go to next indirect map\n");
1850 +                       dev->curr_map = (unsigned long *)*dev->curr_map;
1851 +                       if (!dev->curr_map)
1852 +                               return NULL;
1853 +               }
1854 +               i = dev->curr_map_offset = 0;
1855 +               pr_debug("dump_mem_next_page: next map 0x%lx, entry 0x%lx\n",
1856 +                               dev->curr_map, *dev->curr_map);
1857 +
1858 +       }
1859 +       
1860 +       if (*dev->curr_map) {
1861 +               map = kmap_atomic(pfn_to_page(*dev->curr_map), KM_DUMP);
1862 +               if (map[i])
1863 +                       page = pfn_to_page(map[i]);
1864 +               kunmap_atomic(map, KM_DUMP);
1865 +               dev->ddev.curr_offset += PAGE_SIZE;
1866 +       }
1867 +
1868 +       return page;
1869 +}
1870 +
1871 +/* Copied from dump_filters.c */
1872 +static inline int kernel_page(struct page *p)
1873 +{
1874 +       /* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
1875 +       return PageReserved(p) || (!PageLRU(p) && PageInuse(p));
1876 +}
1877 +
1878 +static inline int user_page(struct page *p)
1879 +{
1880 +       return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
1881 +}
1882 +
1883 +int dump_reused_by_boot(struct page *page)
1884 +{
1885 +       /* Todo
1886 +        * Checks:
1887 +        * if PageReserved 
1888 +        * if < __end + bootmem_bootmap_pages for this boot + allowance 
1889 +        * if overwritten by initrd (how to check ?)
1890 +        * Also, add more checks in early boot code
1891 +        * e.g. bootmem bootmap alloc verify not overwriting dump, and if
1892 +        * so then realloc or move the dump pages out accordingly.
1893 +        */
1894 +
1895 +       /* Temporary proof of concept hack, avoid overwriting kern pages */
1896 +
1897 +       return (kernel_page(page) || dump_low_page(page) || user_page(page));
1898 +}
1899 +
1900 +
1901 +/* Uses the free page passed in to expand available space */
1902 +int dump_mem_add_space(struct dump_memdev *dev, struct page *page)
1903 +{
1904 +       struct page *map_page;
1905 +       unsigned long *map;     
1906 +       unsigned long i; 
1907 +
1908 +       if (!dev->curr_map)
1909 +               return -ENOMEM; /* must've exhausted indirect map */
1910 +
1911 +       if (!*dev->curr_map || dev->curr_map_offset >= DUMP_MAP_SZ) {
1912 +               /* add map space */
1913 +               *dev->curr_map = page_to_pfn(page);
1914 +               dev->curr_map_offset = 0;
1915 +               return 0;
1916 +       }
1917 +
1918 +       /* add data space */
1919 +       i = dev->curr_map_offset;
1920 +       map_page = pfn_to_page(*dev->curr_map);
1921 +       map = (unsigned long *)kmap_atomic(map_page, KM_DUMP);
1922 +       map[i] = page_to_pfn(page);
1923 +       kunmap_atomic(map, KM_DUMP);
1924 +       dev->curr_map_offset = ++i;
1925 +       dev->last_offset += PAGE_SIZE;
1926 +       if (i >= DUMP_MAP_SZ) {
1927 +               /* move to next map */
1928 +               if (is_last_map_entry(++dev->curr_map)) {
1929 +                       /* move to the next indirect map page */
1930 +                       pr_debug("dump_mem_add_space: using next "
1931 +                               "indirect map\n");
1932 +                       dev->curr_map = (unsigned long *)*dev->curr_map;
1933 +               }
1934 +       }               
1935 +       return 0;
1936 +}
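+
+/*
+ * Note on dump_mem_add_space: a free page handed in becomes a map page
+ * whenever the current map slot is empty or full, and a data page
+ * otherwise, so map and data space grow interleaved as pages trickle
+ * in; last_offset only advances for data pages.
+ */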
1937 +
1938 +
1939 +/* Caution: making a dest page invalidates existing contents of the page */
1940 +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page)
1941 +{
1942 +       int err = 0;
1943 +
1944 +       /* 
1945 +        * the page can be used as a destination only if we are sure
1946 +        * it won't get overwritten by the soft-boot, and is not
1947 +        * critical for us right now.
1948 +        */
1949 +       if (dump_reused_by_boot(page))
1950 +               return 0;
1951 +
1952 +       if ((err = dump_mem_add_space(dev, page))) {
1953 +               printk("Warning: Unable to extend memdev space. Err %d\n",
1954 +                       err);
1955 +               return 0;
1956 +       }
1957 +
1958 +       dev->nr_free++;
1959 +       return 1;
1960 +}
1961 +
1962 +
1963 +/* Set up the initial maps and bootstrap space  */
1964 +/* Must be called only after any previous dump is written out */
1965 +int dump_mem_open(struct dump_dev *dev, unsigned long devid)
1966 +{
1967 +       struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
1968 +       unsigned long nr_maps, *map, *prev_map = &dump_mdev->indirect_map_root;
1969 +       void *addr;
1970 +       struct page *page;
1971 +       unsigned long i = 0;
1972 +       int err = 0;
1973 +
1974 +       /* Todo: sanity check for unwritten previous dump */
1975 +
1976 +       /* allocate pages for indirect map (non highmem area) */
1977 +       nr_maps = num_physpages / DUMP_MAP_SZ; /* maps to cover entire mem */
1978 +       for (i = 0; i < nr_maps; i += DUMP_IND_MAP_SZ) {
1979 +               if (!(map = (unsigned long *)dump_alloc_mem(PAGE_SIZE))) {
1980 +                       printk("Unable to alloc indirect map %ld\n", 
1981 +                               i / DUMP_IND_MAP_SZ);
1982 +                       return -ENOMEM;
1983 +               }
1984 +               clear_page(map);
1985 +               *prev_map = (unsigned long)map;
1986 +               prev_map = &map[DUMP_IND_MAP_SZ];
1987 +       }
1988 +               
1989 +       dump_mdev->curr_map = (unsigned long *)dump_mdev->indirect_map_root;
1990 +       dump_mdev->curr_map_offset = 0; 
1991 +
1992 +       /* 
1993 +        * allocate a few bootstrap pages: at least 1 map and 1 data page
1994 +        * plus enough to save the dump header
1995 +        */
1996 +       i = 0;
1997 +       do {
1998 +               if (!(addr = dump_alloc_mem(PAGE_SIZE))) {
1999 +                       printk("Unable to alloc bootstrap page %ld\n", i);
2000 +                       return -ENOMEM;
2001 +               }
2002 +
2003 +               page = virt_to_page(addr);
2004 +               if (dump_low_page(page)) {
2005 +                       dump_free_mem(addr);
2006 +                       continue;
2007 +               }
2008 +
2009 +               if ((err = dump_mem_add_space(dump_mdev, page))) {
2010 +                       printk("Warning: Unable to extend memdev "
2011 +                                       "space. Err %d\n", err);
2012 +                       dump_free_mem(addr);
2013 +                       continue;
2014 +               }
2015 +               i++;
2016 +       } while (i < DUMP_NR_BOOTSTRAP);
2017 +
2018 +       printk("dump memdev init: %ld maps, %ld bootstrap pgs, %ld free pgs\n",
2019 +               nr_maps, i, dump_mdev->last_offset >> PAGE_SHIFT);
2020 +       
2021 +       dump_mdev->last_bs_offset = dump_mdev->last_offset;
2022 +
2023 +       return 0;
2024 +}
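+
+/*
+ * For a sense of scale (an illustrative example assuming 4KB pages):
+ * with 512MB of RAM, num_physpages = 131072, so the nr_maps = 128
+ * direct maps fit under a single preallocated indirect map page, and
+ * the DUMP_NR_BOOTSTRAP = 64 bootstrap pages amount to 256KB.
+ */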
2025 +
2026 +/* Releases all pre-alloc'd pages */
2027 +int dump_mem_release(struct dump_dev *dev)
2028 +{
2029 +       struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
2030 +       struct page *page, *map_page;
2031 +       unsigned long *map, *prev_map;
2032 +       void *addr;
2033 +       int i;
2034 +
2035 +       if (!dump_mdev->nr_free)
2036 +               return 0;
2037 +
2038 +       pr_debug("dump_mem_release\n");
2039 +       page = dump_mem_lookup(dump_mdev, 0);
2040 +       for (i = 0; page && (i < DUMP_NR_BOOTSTRAP - 1); i++) {
2041 +               if (PageHighMem(page))
2042 +                       break;
2043 +               addr = page_address(page);
2044 +               if (!addr) {
2045 +                       printk("page_address(%p) = NULL\n", page);
2046 +                       break;
2047 +               }
2048 +               pr_debug("Freeing page at %p\n", addr);
2049 +               dump_free_mem(addr);
2050 +               if (dump_mdev->curr_map_offset >= DUMP_MAP_SZ - 1) {
2051 +                       map_page = pfn_to_page(*dump_mdev->curr_map);
2052 +                       if (PageHighMem(map_page))
2053 +                               break;
2054 +                       page = dump_mem_next_page(dump_mdev);
2055 +                       addr = page_address(map_page);
2056 +                       if (!addr) {
2057 +                               printk("page_address(%p) = NULL\n", 
2058 +                                       map_page);
2059 +                               break;
2060 +                       }
2061 +                       pr_debug("Freeing map page at %p\n", addr);
2062 +                       dump_free_mem(addr);
2063 +                       i++;
2064 +               } else {
2065 +                       page = dump_mem_next_page(dump_mdev);
2066 +               }
2067 +       }
2068 +
2069 +       /* now for the last used bootstrap page used as a map page */
2070 +       if ((i < DUMP_NR_BOOTSTRAP) && (*dump_mdev->curr_map)) {
2071 +               map_page = pfn_to_page(*dump_mdev->curr_map);
2072 +               if ((map_page) && !PageHighMem(map_page)) {
2073 +                       addr = page_address(map_page);
2074 +                       if (!addr) {
2075 +                               printk("page_address(%p) = NULL\n", map_page);
2076 +                       } else {
2077 +                               pr_debug("Freeing map page at %p\n", addr);
2078 +                               dump_free_mem(addr);
2079 +                               i++;
2080 +                       }
2081 +               }
2082 +       }
2083 +
2084 +       printk("Freed %d bootstrap pages\n", i);
2085 +
2086 +       /* free the indirect maps */
2087 +       map = (unsigned long *)dump_mdev->indirect_map_root;
2088 +
2089 +       i = 0;
2090 +       while (map) {
2091 +               prev_map = map;
2092 +               map = next_indirect_map(map);
2093 +               dump_free_mem(prev_map);
2094 +               i++;
2095 +       }
2096 +
2097 +       printk("Freed %d indirect map(s)\n", i);
2098 +
2099 +       /* Reset the indirect map */
2100 +       dump_mdev->indirect_map_root = 0;
2101 +       dump_mdev->curr_map = 0;
2102 +
2103 +       /* Reset the free list */
2104 +       dump_mdev->nr_free = 0;
2105 +
2106 +       dump_mdev->last_offset = dump_mdev->ddev.curr_offset = 0;
2107 +       dump_mdev->last_used_offset = 0;
2108 +       dump_mdev->curr_map = NULL;
2109 +       dump_mdev->curr_map_offset = 0;
2110 +       return 0;
2111 +}
2112 +
2113 +/*
2114 + * Long term:
2115 + * It is critical for this to be very strict. Cannot afford
2116 + * to have anything running and accessing memory while we overwrite 
2117 + * memory (potential risk of data corruption).
2118 + * If in doubt (e.g if a cpu is hung and not responding) just give
2119 + * up and refuse to proceed with this scheme.
2120 + *
2121 + * Note: I/O will only happen after soft-boot/switchover, so we can 
2122 + * safely disable interrupts and force stop other CPUs if this is
2123 + * going to be a disruptive dump, no matter what they
2124 + * are in the middle of.
2125 + */
2126 +/* 
2127 + * At the moment most of this is already taken care of in the nmi handler.
2128 + * We may halt the cpus right away if we know this is going to be disruptive.
2129 + * For now, since we've limited ourselves to overwriting free pages, we
2130 + * aren't doing much here. Eventually, we'd have to wait to make sure other
2131 + * cpus aren't using memory we could be overwriting.
2132 + */
2133 +int dump_mem_silence(struct dump_dev *dev)
2134 +{
2135 +       struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
2136 +
2137 +       if (dump_mdev->last_offset > dump_mdev->last_bs_offset) {
2138 +               /* prefer to run lkcd config & start with a clean slate */
2139 +               return -EEXIST;
2140 +       }
2141 +       return 0;
2142 +}
2143 +
2144 +extern int dump_overlay_resume(void);
2145 +
2146 +/* Trigger the next stage of dumping */
2147 +int dump_mem_resume(struct dump_dev *dev)
2148 +{
2149 +       dump_overlay_resume(); 
2150 +       return 0;
2151 +}
2152 +
2153 +/* 
2154 + * Allocate mem dev pages as required and copy buffer contents into it.
2155 + * Fails if no free pages are available.
2156 + * Keeping it simple and limited for starters (can modify this over time)
2157 + *  Does not handle holes or a sparse layout
2158 + *  Data must be in multiples of PAGE_SIZE
2159 + */
2160 +int dump_mem_write(struct dump_dev *dev, void *buf, unsigned long len)
2161 +{
2162 +       struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
2163 +       struct page *page;
2164 +       unsigned long n = 0;
2165 +       void *addr;
2166 +       unsigned long *saved_curr_map, saved_map_offset;
2167 +       int ret = 0;
2168 +
2169 +       pr_debug("dump_mem_write: offset 0x%llx, size %ld\n", 
2170 +               dev->curr_offset, len);
2171 +
2172 +       if (dev->curr_offset + len > dump_mdev->last_offset)  {
2173 +               printk("Out of space to write\n");
2174 +               return -ENOSPC;
2175 +       }
2176 +       
2177 +       if ((len & (PAGE_SIZE - 1)) || (dev->curr_offset & (PAGE_SIZE - 1)))
2178 +               return -EINVAL; /* not aligned in units of page size */
2179 +
2180 +       saved_curr_map = dump_mdev->curr_map;
2181 +       saved_map_offset = dump_mdev->curr_map_offset;
2182 +       page = dump_mem_lookup(dump_mdev, dev->curr_offset >> PAGE_SHIFT);
2183 +
2184 +       for (n = len; (n > 0) && page; n -= PAGE_SIZE, buf += PAGE_SIZE ) {
2185 +               addr = kmap_atomic(page, KM_DUMP);
2186 +               /* memset(addr, 'x', PAGE_SIZE); */
2187 +               memcpy(addr, buf, PAGE_SIZE);
2188 +               kunmap_atomic(addr, KM_DUMP);
2189 +               /* dev->curr_offset += PAGE_SIZE; */
2190 +               page = dump_mem_next_page(dump_mdev);
2191 +       }
2192 +
2193 +       dump_mdev->curr_map = saved_curr_map;
2194 +       dump_mdev->curr_map_offset = saved_map_offset;
2195 +
2196 +       if (dump_mdev->last_used_offset < dev->curr_offset)
2197 +               dump_mdev->last_used_offset = dev->curr_offset;
2198 +
2199 +       return (len - n) ? (len - n) : ret;
2200 +}
2201 +
2202 +/* dummy - always ready */
2203 +int dump_mem_ready(struct dump_dev *dev, void *buf)
2204 +{
2205 +       return 0;
2206 +}
2207 +
2208 +/* 
2209 + * Should check for availability of space to write up to the offset;
2210 + * affects only the curr_offset, last_offset is untouched.
2211 + * Keep it simple: Only allow multiples of PAGE_SIZE for now 
2212 + */
2213 +int dump_mem_seek(struct dump_dev *dev, loff_t offset)
2214 +{
2215 +       struct dump_memdev *dump_mdev = DUMP_MDEV(dev);
2216 +
2217 +       if (offset & (PAGE_SIZE - 1))
2218 +               return -EINVAL; /* allow page size units only for now */
2219 +       
2220 +       /* Are we exceeding available space ? */
2221 +       if (offset > dump_mdev->last_offset) {
2222 +               printk("dump_mem_seek failed for offset 0x%llx\n",
2223 +                       offset);
2224 +               return -ENOSPC; 
2225 +       }
2226 +
2227 +       dump_mdev->ddev.curr_offset = offset;
2228 +       return 0;
2229 +}
2230 +
2231 +struct dump_dev_ops dump_memdev_ops = {
2232 +       .open           = dump_mem_open,
2233 +       .release        = dump_mem_release,
2234 +       .silence        = dump_mem_silence,
2235 +       .resume         = dump_mem_resume,
2236 +       .seek           = dump_mem_seek,
2237 +       .write          = dump_mem_write,
2238 +       .read           = NULL, /* not implemented at the moment */
2239 +       .ready          = dump_mem_ready
2240 +};
2241 +
2242 +static struct dump_memdev default_dump_memdev = {
2243 +       .ddev = {.type_name = "memdev", .ops = &dump_memdev_ops,
2244 +                .device_id = 0x14}
2245 +       /* assume the rest of the fields are zeroed by default */
2246 +};     
2247 +       
2248 +/* may be overwritten if a previous dump exists */
2249 +struct dump_memdev *dump_memdev = &default_dump_memdev;
2250 +
2251 --- linux-2.5.69/drivers/dump/dump_netdev.c.lkcdbase    Mon Jun  2 17:29:49 2003
2252 +++ linux-2.5.69/drivers/dump/dump_netdev.c     Tue May 20 03:04:07 2003
2253 @@ -0,0 +1,863 @@
2254 +/*
2255 + * Implements the dump driver interface for saving a dump via network
2256 + * interface. 
2257 + *
2258 + * Some of this code has been taken/adapted from Ingo Molnar's netconsole
2259 + * code. The LKCD team expresses its thanks to Ingo.
2260 + *
2261 + * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
2262 + *     Adapted netconsole code to implement LKCD dump over the network.
2263 + *
2264 + * Nov 2002 - Bharata B. Rao <bharata@in.ibm.com>
2265 + *     Innumerable code cleanups, simplification and some fixes.
2266 + *     Netdump configuration done by ioctl instead of using module parameters.
2267 + *
2268 + * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
2269 + * Copyright (C) 2002 International Business Machines Corp. 
2270 + *
2271 + *  This code is released under version 2 of the GNU GPL.
2272 + */
2273 +
2274 +#include <net/tcp.h>
2275 +#include <net/udp.h>
2276 +#include <linux/delay.h>
2277 +#include <linux/random.h>
2278 +#include <linux/reboot.h>
2279 +#include <linux/module.h>
2280 +#include <linux/dump.h>
2281 +#include <linux/dump_netdev.h>
2282 +
2283 +#include <asm/unaligned.h>
2284 +
2285 +static int startup_handshake;
2286 +static int page_counter;
2287 +static struct net_device *dump_ndev;
2288 +static struct in_device *dump_in_dev;
2289 +static u16 source_port, target_port;
2290 +static u32 source_ip, target_ip;
2291 +static unsigned char daddr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} ;
2292 +static spinlock_t dump_skb_lock = SPIN_LOCK_UNLOCKED;
2293 +static int dump_nr_skbs;
2294 +static struct sk_buff *dump_skb;
2295 +static unsigned long flags_global;
2296 +static int netdump_in_progress;
2297 +static char device_name[IFNAMSIZ];
2298 +
2299 +/*
2300 + * Security depends on the trusted path between the netconsole
2301 + * server and netconsole client, since none of the packets are
2302 + * encrypted. The random magic number protects the protocol
2303 + * against spoofing.
2304 + */
2305 +static u64 dump_magic;
2306 +
2307 +#define MAX_UDP_CHUNK 1460
2308 +#define MAX_PRINT_CHUNK (MAX_UDP_CHUNK-HEADER_LEN)
2309 +
2310 +/*
2311 + * We maintain a small pool of fully-sized skbs,
2312 + * to make sure the message gets out even in
2313 + * extreme OOM situations.
2314 + */
2315 +#define DUMP_MAX_SKBS 32
2316 +
2317 +#define MAX_SKB_SIZE \
2318 +               (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
2319 +                               sizeof(struct iphdr) + sizeof(struct ethhdr))
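+
+/*
+ * For reference (assuming the usual 14-byte ethernet, 20-byte IP and
+ * 8-byte UDP headers), MAX_SKB_SIZE works out to
+ * 1460 + 8 + 20 + 14 = 1502 bytes per pool skb.
+ */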
2320 +
2321 +static void
2322 +dump_refill_skbs(void)
2323 +{
2324 +       struct sk_buff *skb;
2325 +       unsigned long flags;
2326 +
2327 +       spin_lock_irqsave(&dump_skb_lock, flags);
2328 +       while (dump_nr_skbs < DUMP_MAX_SKBS) {
2329 +               skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
2330 +               if (!skb)
2331 +                       break;
2332 +               if (dump_skb)
2333 +                       skb->next = dump_skb;
2334 +               else
2335 +                       skb->next = NULL;
2336 +               dump_skb = skb;
2337 +               dump_nr_skbs++;
2338 +       }
2339 +       spin_unlock_irqrestore(&dump_skb_lock, flags);
2340 +}
2341 +
2342 +static struct sk_buff *
2343 +dump_get_skb(void)
2344 +{
2345 +       struct sk_buff *skb;
2346 +       unsigned long flags;
2347 +
2348 +       spin_lock_irqsave(&dump_skb_lock, flags);
2349 +       skb = dump_skb;
2350 +       if (skb) {
2351 +               dump_skb = skb->next;
2352 +               skb->next = NULL;
2353 +               dump_nr_skbs--;
2354 +       }
2355 +       spin_unlock_irqrestore(&dump_skb_lock, flags);
2356 +        
2357 +       return skb;
2358 +}
2359 +
2360 +/*
2361 + * Zap completed output skbs.
2362 + */
2363 +static void
2364 +zap_completion_queue(void)
2365 +{
2366 +       int count;
2367 +       unsigned long flags;
2368 +       int cpu = smp_processor_id();
2369 +
2370 +       count = 0;
2371 +       if (softnet_data[cpu].completion_queue) {
2372 +               struct sk_buff *clist;
2373 +       
2374 +               local_irq_save(flags);
2375 +               clist = softnet_data[cpu].completion_queue;
2376 +               softnet_data[cpu].completion_queue = NULL;
2377 +               local_irq_restore(flags);
2378 +
2379 +               while (clist != NULL) {
2380 +                       struct sk_buff *skb = clist;
2381 +                       clist = clist->next;
2382 +                       __kfree_skb(skb);
2383 +                       count++;
2384 +                       if (count > 10000)
2385 +                               printk("Error in sk list\n");
2386 +               }
2387 +       }
2388 +}
2389 +
2390 +static void
2391 +dump_send_skb(struct net_device *dev, const char *msg, unsigned int msg_len,
2392 +               reply_t *reply)
2393 +{
2394 +       int once = 1;
2395 +       int total_len, eth_len, ip_len, udp_len, count = 0;
2396 +       struct sk_buff *skb;
2397 +       struct udphdr *udph;
2398 +       struct iphdr *iph;
2399 +       struct ethhdr *eth; 
2400 +
2401 +       udp_len = msg_len + HEADER_LEN + sizeof(*udph);
2402 +       ip_len = eth_len = udp_len + sizeof(*iph);
2403 +       total_len = eth_len + ETH_HLEN;
2404 +
2405 +repeat_loop:
2406 +       zap_completion_queue();
2407 +       if (dump_nr_skbs < DUMP_MAX_SKBS)
2408 +               dump_refill_skbs();
2409 +
2410 +       skb = alloc_skb(total_len, GFP_ATOMIC);
2411 +       if (!skb) {
2412 +               skb = dump_get_skb();
2413 +               if (!skb) {
2414 +                       count++;
2415 +                       if (once && (count == 1000000)) {
2416 +                               printk("possibly FATAL: out of netconsole "
2417 +                                       "skbs! Will keep retrying.\n");
2418 +                               once = 0;
2419 +                       }
2420 +                       dev->poll_controller(dev);
2421 +                       goto repeat_loop;
2422 +               }
2423 +       }
2424 +
2425 +       atomic_set(&skb->users, 1);
2426 +       skb_reserve(skb, total_len - msg_len - HEADER_LEN);
2427 +       skb->data[0] = NETCONSOLE_VERSION;
2428 +
2429 +       put_unaligned(htonl(reply->nr), (u32 *) (skb->data + 1));
2430 +       put_unaligned(htonl(reply->code), (u32 *) (skb->data + 5));
2431 +       put_unaligned(htonl(reply->info), (u32 *) (skb->data + 9));
2432 +
2433 +       memcpy(skb->data + HEADER_LEN, msg, msg_len);
2434 +       skb->len += msg_len + HEADER_LEN;
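+       /* on-wire layout so far: one version byte, then the three
+        * 32-bit reply fields (nr, code, info), then the message */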
2435 +
2436 +       udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
2437 +       udph->source = source_port;
2438 +       udph->dest = target_port;
2439 +       udph->len = htons(udp_len);
2440 +       udph->check = 0;
2441 +
2442 +       iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
2443 +
2444 +       iph->version  = 4;
2445 +       iph->ihl      = 5;
2446 +       iph->tos      = 0;
2447 +       iph->tot_len  = htons(ip_len);
2448 +       iph->id       = 0;
2449 +       iph->frag_off = 0;
2450 +       iph->ttl      = 64;
2451 +       iph->protocol = IPPROTO_UDP;
2452 +       iph->check    = 0;
2453 +       iph->saddr    = source_ip;
2454 +       iph->daddr    = target_ip;
2455 +       iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);
2456 +
2457 +       eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
2458 +
2459 +       eth->h_proto = htons(ETH_P_IP);
2460 +       memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
2461 +       memcpy(eth->h_dest, daddr, dev->addr_len);
2462 +
2463 +       count = 0;
2464 +repeat_poll:
2465 +       spin_lock(&dev->xmit_lock);
2466 +       dev->xmit_lock_owner = smp_processor_id();
2467 +
2468 +       count++;
2469 +
2470 +
2471 +       if (netif_queue_stopped(dev)) {
2472 +               dev->xmit_lock_owner = -1;
2473 +               spin_unlock(&dev->xmit_lock);
2474 +
2475 +               dev->poll_controller(dev);
2476 +               zap_completion_queue();
2477 +
2478 +
2479 +               goto repeat_poll;
2480 +       }
2481 +
2482 +       dev->hard_start_xmit(skb, dev);
2483 +
2484 +       dev->xmit_lock_owner = -1;
2485 +       spin_unlock(&dev->xmit_lock);
2486 +}
2487 +
2488 +static unsigned short
2489 +udp_check(struct udphdr *uh, int len, unsigned long saddr, unsigned long daddr,
2490 +               unsigned long base)
2491 +{
2492 +       return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
2493 +}
2494 +
2495 +static int
2496 +udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
2497 +                            unsigned short ulen, u32 saddr, u32 daddr)
2498 +{
2499 +       if (uh->check == 0) {
2500 +               skb->ip_summed = CHECKSUM_UNNECESSARY;
2501 +       } else if (skb->ip_summed == CHECKSUM_HW) {
2502 +               skb->ip_summed = CHECKSUM_UNNECESSARY;
2503 +               if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
2504 +                       return 0;
2505 +               skb->ip_summed = CHECKSUM_NONE;
2506 +       }
2507 +       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
2508 +               skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen,
2509 +                               IPPROTO_UDP, 0);
2510 +       /* Probably, we should checksum udp header (it should be in cache
2511 +        * in any case) and data in tiny packets (< rx copybreak).
2512 +        */
2513 +       return 0;
2514 +}
2515 +
2516 +static __inline__ int
2517 +__udp_checksum_complete(struct sk_buff *skb)
2518 +{
2519 +       return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len,
2520 +                               skb->csum));
2521 +}
2522 +
2523 +static __inline__ int
2524 +udp_checksum_complete(struct sk_buff *skb)
2525 +{
2526 +       return skb->ip_summed != CHECKSUM_UNNECESSARY &&
2527 +               __udp_checksum_complete(skb);
2528 +}
2529 +
2530 +int new_req = 0;
2531 +static req_t req;
2532 +
2533 +static int
2534 +dump_rx_hook(struct sk_buff *skb)
2535 +{
2536 +       int proto;
2537 +       struct iphdr *iph;
2538 +       struct udphdr *uh;
2539 +       __u32 len, saddr, daddr, ulen;
2540 +       req_t *__req;
2541 +
2542 +       /* 
2543 +        * First check if we are dumping or doing the startup handshake;
2544 +        * if not, return quickly.
2545 +        */
2546 +       if (!netdump_in_progress)
2547 +               return NET_RX_SUCCESS;
2548 +
2549 +       if (skb->dev->type != ARPHRD_ETHER)
2550 +               goto out;
2551 +
2552 +       proto = ntohs(skb->mac.ethernet->h_proto);
2553 +       if (proto != ETH_P_IP)
2554 +               goto out;
2555 +
2556 +       if (skb->pkt_type == PACKET_OTHERHOST)
2557 +               goto out;
2558 +
2559 +       if (skb_shared(skb))
2560 +               goto out;
2561 +
2562 +        /* IP header correctness testing: */
2563 +       iph = (struct iphdr *)skb->data;
2564 +       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
2565 +               goto out;
2566 +
2567 +       if (iph->ihl < 5 || iph->version != 4)
2568 +               goto out;
2569 +
2570 +       if (!pskb_may_pull(skb, iph->ihl*4))
2571 +               goto out;
2572 +
2573 +       if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
2574 +               goto out;
2575 +
2576 +       len = ntohs(iph->tot_len);
2577 +       if (skb->len < len || len < iph->ihl*4)
2578 +               goto out;
2579 +
2580 +       saddr = iph->saddr;
2581 +       daddr = iph->daddr;
2582 +       if (iph->protocol != IPPROTO_UDP)
2583 +               goto out;
2584 +
2585 +       if (source_ip != daddr)
2586 +               goto out;
2587 +
2588 +       if (target_ip != saddr)
2589 +               goto out;
2590 +
2591 +       len -= iph->ihl*4;
2592 +       uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
2593 +       ulen = ntohs(uh->len);
2594 +
2595 +       if (ulen != len || ulen < (sizeof(*uh) + sizeof(*__req)))
2596 +               goto out;
2597 +
2598 +       if (udp_checksum_init(skb, uh, ulen, saddr, daddr) < 0)
2599 +               goto out;
2600 +
2601 +       if (udp_checksum_complete(skb))
2602 +               goto out;
2603 +
2604 +       if (source_port != uh->dest)
2605 +               goto out;
2606 +
2607 +       if (target_port != uh->source)
2608 +               goto out;
2609 +
2610 +       __req = (req_t *)(uh + 1);
2611 +       if ((ntohl(__req->command) != COMM_GET_MAGIC) &&
2612 +           (ntohl(__req->command) != COMM_HELLO) &&
2613 +           (ntohl(__req->command) != COMM_START_WRITE_NETDUMP_ACK) &&
2614 +           (ntohl(__req->command) != COMM_START_NETDUMP_ACK) &&
2615 +           (memcmp(&__req->magic, &dump_magic, sizeof(dump_magic)) != 0))
2616 +               goto out;
2617 +
2618 +       req.magic = ntohl(__req->magic);
2619 +       req.command = ntohl(__req->command);
2620 +       req.from = ntohl(__req->from);
2621 +       req.to = ntohl(__req->to);
2622 +       req.nr = ntohl(__req->nr);
2623 +       new_req = 1;
2624 +out:
2625 +       return NET_RX_DROP;
2626 +}
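+
+/*
+ * In short, the hook above only accepts a well-formed UDP/IPv4 frame
+ * between the configured endpoints that carries at least a req_t, and
+ * that either bears one of the pre-authentication commands (HELLO,
+ * GET_MAGIC and the two start ACKs) or matches the random dump_magic;
+ * the request fields are then copied out of network byte order into
+ * the global req.
+ */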
2627 +
2628 +static void
2629 +dump_send_mem(struct net_device *dev, req_t *req, const char* buff, size_t len)
2630 +{
2631 +       int i;
2632 +
2633 +       int nr_chunks = len / 1024;
2634 +       reply_t reply;
2635 +       
2636 +       reply.nr = req->nr;
2637 +       reply.info = 0;
2638 +
2639 +       if (nr_chunks <= 0)
2640 +               nr_chunks = 1;
2641 +       for (i = 0; i < nr_chunks; i++) {
2642 +               unsigned int offset = i * 1024;
2643 +               reply.code = REPLY_MEM;
2644 +               reply.info = offset;
2645 +               dump_send_skb(dev, buff + offset, 1024, &reply);
2646 +       }
2647 +}
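+
+/*
+ * Note: each buffer is streamed as 1KB chunks, with reply.info
+ * carrying the byte offset of every chunk so the client can
+ * reassemble them in order.
+ */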
2648 +
2649 +/*
2650 + * This function waits for the client to acknowledge the receipt
2651 + * of the netdump startup reply, with the possibility of packets
2652 + * getting lost. We resend the startup packet if no ACK is received,
2653 + * after a 1 second delay.
2654 + *
2655 + * (The client can test the success of the handshake via the HELLO
2656 + * command, and send ACKs until we enter netdump mode.)
2657 + */
2658 +static int
2659 +dump_handshake(struct dump_dev *net_dev)
2660 +{
2661 +       char tmp[200];
2662 +       reply_t reply;
2663 +       int i, j;
2664 +
2665 +       if (startup_handshake) {
2666 +               sprintf(tmp, "NETDUMP start, waiting for start-ACK.\n");
2667 +               reply.code = REPLY_START_NETDUMP;
2668 +               reply.nr = 0;
2669 +               reply.info = 0;
2670 +       } else {
2671 +               sprintf(tmp, "NETDUMP start, waiting for start-ACK.\n");
2672 +               reply.code = REPLY_START_WRITE_NETDUMP;
2673 +               reply.nr = net_dev->curr_offset;
2674 +               reply.info = net_dev->curr_offset;
2675 +       }
2676 +       
2677 +       /* send 300 handshake packets before declaring failure */
2678 +       for (i = 0; i < 300; i++) {
2679 +               dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
2680 +
2681 +               /* wait 1 sec */
2682 +               for (j = 0; j < 10000; j++) {
2683 +                       udelay(100);
2684 +                       dump_ndev->poll_controller(dump_ndev);
2685 +                       zap_completion_queue();
2686 +                       if (new_req)
2687 +                               break;
2688 +               }
2689 +
2690 +               /* 
2691 +                * if there is no new request, try sending the handshaking
2692 +                * packet again
2693 +                */
2694 +               if (!new_req)
2695 +                       continue;
2696 +
2697 +               /* 
2698 +                * check if the new request is of the expected type,
2699 +                * if so, return, else try sending the handshaking
2700 +                * packet again
2701 +                */
2702 +               if (startup_handshake) {
2703 +                       if (req.command == COMM_HELLO || req.command ==
2704 +                               COMM_START_NETDUMP_ACK) {
2705 +                               return 0;
2706 +                       } else {
2707 +                               new_req = 0;
2708 +                               continue;
2709 +                       }
2710 +               } else {
2711 +                       if (req.command == COMM_SEND_MEM) {
2712 +                               return 0;
2713 +                       } else {
2714 +                               new_req = 0;
2715 +                               continue;
2716 +                       }
2717 +               }
2718 +       }
2719 +       return -1;
2720 +}
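+
+/*
+ * Worst case, for reference: 300 attempts at roughly one second each
+ * (10000 iterations of udelay(100)), i.e. about five minutes before
+ * dump_handshake() gives up.
+ */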
2721 +
2722 +static ssize_t
2723 +do_netdump(struct dump_dev *net_dev, const char* buff, size_t len)
2724 +{
2725 +       reply_t reply;
2726 +       char tmp[200];
2727 +       ssize_t  ret = 0;
2728 +       int repeatCounter, counter, total_loop;
2729 +       
2730 +       netdump_in_progress = 1;
2731 +
2732 +       if (dump_handshake(net_dev) < 0) {
2733 +               printk("network dump failed due to handshake failure\n");
2734 +               goto out;
2735 +       }
2736 +
2737 +       /*
2738 +        * Ideally startup handshake should be done during dump configuration,
2739 +        * i.e., in dump_net_open(). This will be done when I figure out
2740 +        * the dependency between startup handshake, subsequent write and
2741 +        * various commands with respect to the net-server.
2742 +        */
2743 +       if (startup_handshake)
2744 +               startup_handshake = 0;
2745 +
2746 +       counter = 0;
2747 +       repeatCounter = 0;
2748 +       total_loop = 0;
2749 +       while (1) {
2750 +               if (!new_req) {
2751 +                       dump_ndev->poll_controller(dump_ndev);
2752 +                       zap_completion_queue();
2753 +               }
2754 +               if (!new_req) {
2755 +                       repeatCounter++;
2756 +
2757 +                       if (repeatCounter > 5) {
2758 +                               counter++;
2759 +                               if (counter > 10000) {
2760 +                                       if (total_loop >= 100000) {
2761 +                                               printk("Timed out, giving up\n");
2762 +                                               goto out;
2763 +                                       } else {
2764 +                                               total_loop++;
2765 +                                               printk("Try number %d out of "
2766 +                                                       "100000 before timing out\n",
2767 +                                                       total_loop);
2768 +                                       }
2769 +                               }
2770 +                               mdelay(1);
2771 +                               repeatCounter = 0;
2772 +                       }       
2773 +                       continue;
2774 +               }
2775 +               repeatCounter = 0;
2776 +               counter = 0;
2777 +               total_loop = 0;
2778 +               new_req = 0;
2779 +               switch (req.command) {
2780 +               case COMM_NONE:
2781 +                       break;
2782 +
2783 +               case COMM_SEND_MEM:
2784 +                       dump_send_mem(dump_ndev, &req, buff, len);
2785 +                       break;
2786 +
2787 +               case COMM_EXIT:
2788 +               case COMM_START_WRITE_NETDUMP_ACK:
2789 +                       ret = len;
2790 +                       goto out;
2791 +
2792 +               case COMM_HELLO:
2793 +                       sprintf(tmp, "Hello, this is netdump version "
2794 +                                       "0.%02d\n", NETCONSOLE_VERSION);
2795 +                       reply.code = REPLY_HELLO;
2796 +                       reply.nr = req.nr;
2797 +                       reply.info = net_dev->curr_offset;
2798 +                       dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
2799 +                       break;
2800 +
2801 +               case COMM_GET_PAGE_SIZE:
2802 +                       sprintf(tmp, "PAGE_SIZE: %ld\n", PAGE_SIZE);
2803 +                       reply.code = REPLY_PAGE_SIZE;
2804 +                       reply.nr = req.nr;
2805 +                       reply.info = PAGE_SIZE;
2806 +                       dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
2807 +                       break;
2808 +
2809 +               case COMM_GET_NR_PAGES:
2810 +                       reply.code = REPLY_NR_PAGES;
2811 +                       reply.nr = req.nr;
2812 +                       reply.info = page_counter;
2814 +                       sprintf(tmp, "Number of pages: %ld\n", num_physpages);
2815 +                       dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
2816 +                       break;
2817 +
2818 +               case COMM_GET_MAGIC:
2819 +                       reply.code = REPLY_MAGIC;
2820 +                       reply.nr = req.nr;
2821 +                       reply.info = NETCONSOLE_VERSION;
2822 +                       dump_send_skb(dump_ndev, (char *)&dump_magic,
2823 +                                       sizeof(dump_magic), &reply);
2824 +                       break;
2825 +
2826 +               default:
2827 +                       reply.code = REPLY_ERROR;
2828 +                       reply.nr = req.nr;
2829 +                       reply.info = req.command;
2830 +                       sprintf(tmp, "Got unknown command code %d!\n",
2831 +                                       req.command);
2832 +                       dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
2833 +                       break;
2834 +               }
2835 +       }
2836 +out:
2837 +       netdump_in_progress = 0;
2838 +       return ret;
2839 +}
2840 +
2841 +static int
2842 +dump_validate_config(void)
2843 +{
2844 +       source_ip = dump_in_dev->ifa_list ?
+                       dump_in_dev->ifa_list->ifa_local : 0;
2845 +       if (!source_ip) {
2846 +               printk("network device %s has no local address, "
2847 +                               "aborting.\n", device_name);
2848 +               return -1;
2849 +       }
2850 +
2851 +#define IP(x) ((unsigned char *)&source_ip)[x]
2852 +       printk("Source %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
2853 +#undef IP
2854 +
2855 +       if (!source_port) {
2856 +               printk("source_port parameter not specified, aborting.\n");
2857 +               return -1;
2858 +       }
2859 +       printk(":%i\n", source_port);
2860 +       source_port = htons(source_port);
2861 +
2862 +       if (!target_ip) {
2863 +               printk("target_ip parameter not specified, aborting.\n");
2864 +               return -1;
2865 +       }
2866 +
2867 +#define IP(x) ((unsigned char *)&target_ip)[x]
2868 +       printk("Target %d.%d.%d.%d", IP(0), IP(1), IP(2), IP(3));
2869 +#undef IP
2870 +
2871 +       if (!target_port) {
2872 +               printk("target_port parameter not specified, aborting.\n");
2873 +               return -1;
2874 +       }
2875 +       printk(":%i\n", target_port);
2876 +       target_port = htons(target_port);
2877 +
2878 +       printk("Target Ethernet Address %02x:%02x:%02x:%02x:%02x:%02x",
2879 +               daddr[0], daddr[1], daddr[2], daddr[3], daddr[4], daddr[5]);
2880 +
2881 +       if ((daddr[0] & daddr[1] & daddr[2] & daddr[3] & daddr[4] & 
2882 +                               daddr[5]) == 255)
2883 +               printk("(Broadcast)");
2884 +       printk("\n");
2885 +       return 0;
2886 +}
2887 +
2888 +/*
2889 + * Prepares the dump device so we can take a dump later. 
2890 + * Validates the netdump configuration parameters.
2891 + *
2892 + * TODO: Network connectivity check should be done here.
2893 + */
2894 +static int
2895 +dump_net_open(struct dump_dev *net_dev, unsigned long arg)
2896 +{
2897 +       int retval = 0;
2898 +
2899 +       /* get the interface name */
2900 +       if (copy_from_user(device_name, (void *)arg, IFNAMSIZ))
2901 +               return -EFAULT;
2902 +
2903 +       if (!(dump_ndev = dev_get_by_name(device_name))) {
2904 +               printk("network device %s does not exist, aborting.\n",
2905 +                               device_name);
2906 +               return -ENODEV;
2907 +       }
2908 +
2909 +       if (!dump_ndev->poll_controller) {
2910 +               printk("network device %s does not implement polling yet, "
2911 +                               "aborting.\n", device_name);
2912 +               retval = -1; /* return proper error */
2913 +               goto err1;
2914 +       }
2915 +
2916 +       if (!(dump_in_dev = in_dev_get(dump_ndev))) {
2917 +               printk("network device %s is not an IP protocol device, "
2918 +                               "aborting.\n", device_name);
2919 +               retval = -EINVAL;
2920 +               goto err1;
2921 +       }
2922 +
2923 +       if ((retval = dump_validate_config()) < 0)
2924 +               goto err2;
2925 +
2926 +       net_dev->curr_offset = 0;
2927 +       printk("Network device %s successfully configured for dumping\n",
2928 +                       device_name);
2929 +       return retval;
2930 +err2:
2931 +       in_dev_put(dump_in_dev);
2932 +err1:
2933 +       dev_put(dump_ndev);     
2934 +       return retval;
2935 +}
2936 +
2937 +/*
2938 + * Close the dump device and release associated resources
2939 + * Invoked when unconfiguring the dump device.
2940 + */
2941 +static int
2942 +dump_net_release(struct dump_dev *net_dev)
2943 +{
2944 +       if (dump_in_dev)
2945 +               in_dev_put(dump_in_dev);
2946 +       if (dump_ndev)
2947 +               dev_put(dump_ndev);
2948 +       return 0;
2949 +}
2950 +
2951 +/*
2952 + * Prepare the dump device for use (silence any ongoing activity
2953 + * and quiesce state) when the system crashes.
2954 + */
2955 +static int
2956 +dump_net_silence(struct dump_dev *net_dev)
2957 +{
2958 +       local_irq_save(flags_global);
2959 +       dump_ndev->rx_hook = dump_rx_hook;
2960 +       startup_handshake = 1;
2961 +       net_dev->curr_offset = 0;
2962 +       printk("Dumping to network device %s on CPU %d ...\n", device_name,
2963 +                       smp_processor_id());
2964 +       return 0;
2965 +}
2966 +
2967 +/*
2968 + * Invoked when dumping is done. This is the time to put things back 
2969 + * (i.e. undo the effects of dump_net_silence) so the device is
2970 + * available for normal use.
2971 + */
2972 +static int
2973 +dump_net_resume(struct dump_dev *net_dev)
2974 +{
2975 +       int indx;
2976 +       reply_t reply;
2977 +       char tmp[200];
2978 +
2979 +       if (!dump_ndev)
2980 +               return (0);
2981 +
2982 +       sprintf(tmp, "NETDUMP end.\n");
2983 +       for (indx = 0; indx < 6; indx++) {
2984 +               reply.code = REPLY_END_NETDUMP;
2985 +               reply.nr = 0;
2986 +               reply.info = 0;
2987 +               dump_send_skb(dump_ndev, tmp, strlen(tmp), &reply);
2988 +       }
2989 +       printk("NETDUMP END!\n");
2990 +       local_irq_restore(flags_global);
2991 +       dump_ndev->rx_hook = NULL;
2992 +       startup_handshake = 0;
2993 +       return 0;
2994 +}
2995 +
2996 +/*
2997 + * Seek to the specified offset in the dump device.
2998 + * Makes sure this is a valid offset, otherwise returns an error.
2999 + */
3000 +static  int
3001 +dump_net_seek(struct dump_dev *net_dev, loff_t off)
3002 +{
3003 +       /*
3004 +        * For now, DUMP_HEADER_OFFSET is used as a hard-coded value;
3005 +        * see dump_block_seek in dump_blockdev.c to know how to
3006 +        * do this properly.
3007 +        */
3008 +       net_dev->curr_offset = off + DUMP_HEADER_OFFSET;
3009 +       return 0;
3010 +}
3011 +
3012 +/*
3013 + * Write out a buffer to the network dump device, one page at a time.
3014 + */
3015 +static int
3016 +dump_net_write(struct dump_dev *net_dev, void *buf, unsigned long len)
3017 +{
3018 +       int cnt, i, off;
3019 +       ssize_t ret;
3020 +
3021 +       cnt = len / PAGE_SIZE;
3022 +
3023 +       for (i = 0; i < cnt; i++) {
3024 +               off = i * PAGE_SIZE;
3025 +               ret = do_netdump(net_dev, buf+off, PAGE_SIZE);
3026 +               if (ret <= 0)
3027 +                       return -EIO;
3028 +               net_dev->curr_offset = net_dev->curr_offset + PAGE_SIZE;
3029 +       }
3030 +       return len;
3031 +}
3032 +
3033 +/*
3034 + * check if the last dump i/o is over and ready for next request
3035 + */
3036 +static int
3037 +dump_net_ready(struct dump_dev *net_dev, void *buf)
3038 +{
3039 +       return 0;
3040 +}
3041 +
3042 +/*
3043 + * ioctl function used for configuring network dump
3044 + */
3045 +static int
3046 +dump_net_ioctl(struct dump_dev *net_dev, unsigned int cmd, unsigned long arg)
3047 +{
3048 +       switch (cmd) {
3049 +       case DIOSTARGETIP:
3050 +               target_ip = arg;
3051 +               break;
3052 +       case DIOSTARGETPORT:
3053 +               target_port = (u16)arg;
3054 +               break;
3055 +       case DIOSSOURCEPORT:
3056 +               source_port = (u16)arg;
3057 +               break;
3058 +       case DIOSETHADDR:
3059 +               return copy_from_user(daddr, (void *)arg, 6) ?
3060 +                       -EFAULT : 0;
3061 +       case DIOGTARGETIP:
3062 +       case DIOGTARGETPORT:
3063 +       case DIOGSOURCEPORT:
3064 +       case DIOGETHADDR:
3065 +               break;
3066 +       default:
3067 +               return -EINVAL;
3068 +       }
3069 +       return 0;
3070 +}
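
For context, a userspace configuration pass over these ioctls might look like the sketch below. This is an illustration only: the "/dev/dump" node name, the header providing the DIOS* commands, the byte order expected for the address argument, and the port values are all assumptions; the patch itself only defines the kernel side.

    /* Hypothetical usage sketch for the DIOS* commands handled above */
    #include <arpa/inet.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int configure_netdump(void)
    {
            unsigned char mac[6] = { 0x00, 0x0c, 0x29, 0x12, 0x34, 0x56 };
            int fd = open("/dev/dump", O_RDWR);     /* node name assumed */

            if (fd < 0)
                    return -1;
            /* scalar settings travel as the raw ioctl argument */
            ioctl(fd, DIOSTARGETIP, inet_addr("192.168.1.10"));
            ioctl(fd, DIOSTARGETPORT, 6688);
            ioctl(fd, DIOSSOURCEPORT, 6688);
            /* DIOSETHADDR copies 6 bytes from this pointer */
            ioctl(fd, DIOSETHADDR, mac);
            return close(fd);
    }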
3071 +
3072 +struct dump_dev_ops dump_netdev_ops = {
3073 +       .open           = dump_net_open,
3074 +       .release        = dump_net_release,
3075 +       .silence        = dump_net_silence,
3076 +       .resume         = dump_net_resume,
3077 +       .seek           = dump_net_seek,
3078 +       .write          = dump_net_write,
3079 +       /* .read not implemented */
3080 +       .ready          = dump_net_ready,
3081 +       .ioctl          = dump_net_ioctl
3082 +};
3083 +
3084 +static struct dump_dev default_dump_netdev = {
3085 +       .type_name = "networkdev", 
3086 +       .ops = &dump_netdev_ops, 
3087 +       .curr_offset = 0
3088 +};
3089 +
3090 +static int __init
3091 +dump_netdev_init(void)
3092 +{
3093 +        default_dump_netdev.curr_offset = 0;
3094 +
3095 +       if (dump_register_device(&default_dump_netdev) < 0) {
3096 +               printk("network dump device driver registration failed\n");
3097 +               return -1;
3098 +       }
3099 +       printk("network device driver for LKCD registered\n");
3100 +
3101 +       get_random_bytes(&dump_magic, sizeof(dump_magic));
3102 +       return 0;
3103 +}
3104 +
3105 +static void __exit
3106 +dump_netdev_cleanup(void)
3107 +{
3108 +       dump_unregister_device(&default_dump_netdev);
3109 +}
3110 +
3111 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
3112 +MODULE_DESCRIPTION("Network Dump Driver for Linux Kernel Crash Dump (LKCD)");
3113 +MODULE_LICENSE("GPL");
3114 +
3115 +module_init(dump_netdev_init);
3116 +module_exit(dump_netdev_cleanup);
3117 --- linux-2.5.69/drivers/dump/dump_overlay.c.lkcdbase   Mon Jun  2 17:29:49 2003
3118 +++ linux-2.5.69/drivers/dump/dump_overlay.c    Fri Feb  7 06:47:58 2003
3119 @@ -0,0 +1,848 @@
3120 +/*
3121 + * Two-stage soft-boot based dump scheme methods (memory overlay
3122 + * with post soft-boot writeout)
3123 + *
3124 + * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
3125 + *
3126 + * This approach of saving the dump in memory and writing it 
3127 + * out after a softboot without clearing memory is derived from the 
3128 + * Mission Critical Linux dump implementation. Credits and a big
3129 + * thanks for letting the lkcd project make use of the excellent 
3130 + * piece of work and also for helping with clarifications and 
3131 + * tips along the way are due to:
3132 + *     Dave Winchell <winchell@mclx.com> (primary author of mcore)
3133 + *     and also to
3134 + *     Jeff Moyer <moyer@mclx.com>
3135 + *     Josh Huber <huber@mclx.com>
3136 + * 
3137 + * For those familiar with the mcore implementation, the key 
3138 + * differences/extensions here are in allowing entire memory to be 
3139 + * saved (in compressed form) through a careful ordering scheme 
3140 + * on both the way down as well as on the way up after boot, the latter
3141 + * for supporting the LKCD notion of passes in which most critical 
3142 + * data is the first to be saved to the dump device. Also the post 
3143 + * boot writeout happens from within the kernel rather than driven 
3144 + * from userspace.
3145 + *
3146 + * The sequence is orchestrated through the abstraction of "dumpers",
3147 + * one for the first stage which then sets up the dumper for the next 
3148 + * stage, providing for a smooth and flexible reuse of the singlestage 
3149 + * dump scheme methods and a handle to pass dump device configuration 
3150 + * information across the soft boot. 
3151 + *
3152 + * Copyright (C) 2002 International Business Machines Corp. 
3153 + *
3154 + * This code is released under version 2 of the GNU GPL.
3155 + */
3156 +
3157 +/*
3158 + * Disruptive dumping using the second kernel soft-boot option
3159 + * for issuing dump i/o operates in 2 stages:
3160 + * 
3161 + * (1) - Saves the (compressed & formatted) dump in memory using a 
3162 + *       carefully ordered overlay scheme designed to capture the 
3163 + *       entire physical memory or selective portions depending on 
3164 + *       dump config settings, 
3165 + *     - Registers the stage 2 dumper and 
3166 + *     - Issues a soft reboot w/o clearing memory. 
3167 + *
3168 + *     The overlay scheme starts with a small bootstrap free area
3169 + *     and follows a reverse ordering of passes wherein it 
3170 + *     compresses and saves data starting with the least critical 
3171 + *     areas first, thus freeing up the corresponding pages to 
3172 + *     serve as destination for subsequent data to be saved, and
3173 + *     so on. With a good compression ratio, this makes it feasible
3174 + *     to capture an entire physical memory dump without significantly
3175 + *     reducing memory available during regular operation.
3176 + *
3177 + * (2) Post soft-reboot, runs through the saved memory dump and
3178 + *     writes it out to disk, this time around, taking care to
3179 + *     save the more critical data first (i.e. pages which figure 
3180 + *     in early passes for a regular dump). Finally issues a 
3181 + *     clean reboot.
3182 + *     
3183 + *     Since the data was saved in memory after selection/filtering
3184 + *     and formatted as per the chosen output dump format, at this 
3185 + *     stage the filter and format actions are just dummy (or
3186 + *     passthrough) actions, except for influence on ordering of
3187 + *     passes.
3188 + */
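
Condensed, the control flow described above maps onto the routines defined in this file roughly as follows. This is an illustrative sketch only, not part of the patch; the soft boot itself and the boot-parameter handoff of the saved config address are elided.

    /* Illustrative sketch of the two-stage sequence */
    static int two_stage_dump_sketch(void)
    {
            int err;

            /* stage 1, at crash time: gather into the memory device */
            if ((err = dump_overlay_sequencer()) < 0)
                    return err;             /* reverse-ordered passes */
            if ((err = dump_overlay_resume()))
                    return err;             /* saves config for stage 2 */

            /* ... soft boot without clearing memory happens here ... */

            /* stage 2, after reboot: crashdump_reserve() has already
             * protected the saved pages; now write them out for real */
            return dump_init_stage2(dump_saved_config);
    }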
3189 +
3190 +#include <linux/types.h>
3191 +#include <linux/kernel.h>
3192 +#include <linux/highmem.h>
3193 +#include <linux/bootmem.h>
3194 +#include <linux/dump.h>
3195 +#include "dump_methods.h"
3196 +
3197 +extern struct list_head dumper_list_head;
3198 +extern struct dump_memdev *dump_memdev;
3199 +extern struct dumper dumper_stage2;
3200 +struct dump_config_block *dump_saved_config = NULL;
3201 +extern struct dump_blockdev *dump_blockdev;
3202 +static struct dump_memdev *saved_dump_memdev = NULL;
3203 +static struct dumper *saved_dumper = NULL;
3204 +
3205 +/* For testing 
3206 +extern void dump_display_map(struct dump_memdev *);
3207 +*/
3208 +
3209 +struct dumper *dumper_by_name(char *name)
3210 +{
3211 +#ifdef LATER
3212 +       struct dumper *dumper;
3213 +       list_for_each_entry(dumper, &dumper_list_head, dumper_list)
3214 +               if (!strncmp(dumper->name, name, 32))
3215 +                       return dumper;
3216 +
3217 +       /* not found */
3218 +       return NULL; 
3219 +#endif
3220 +       /* Temporary proof of concept */
3221 +       if (!strncmp(dumper_stage2.name, name, 32))
3222 +               return &dumper_stage2;
3223 +       else
3224 +               return NULL;
3225 +}
3226 +
3227 +#ifdef CONFIG_CRASH_DUMP_SOFTBOOT
3228 +extern void dump_early_reserve_map(struct dump_memdev *);
3229 +
3230 +void crashdump_reserve(void)
3231 +{
3232 +       extern unsigned long crashdump_addr;
3233 +
3234 +       if (crashdump_addr == 0xdeadbeef) 
3235 +               return;
3236 +
3237 +       /* reserve dump config and saved dump pages */
3238 +       dump_saved_config = (struct dump_config_block *)crashdump_addr;
3239 +       /* magic verification */
3240 +       if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
3241 +               printk("Invalid dump magic. Ignoring dump\n");
3242 +               dump_saved_config = NULL;
3243 +               return;
3244 +       }
3245 +                       
3246 +       printk("Dump may be available from previous boot\n");
3247 +
3248 +       reserve_bootmem(virt_to_phys((void *)crashdump_addr), 
3249 +               PAGE_ALIGN(sizeof(struct dump_config_block)));
3250 +       dump_early_reserve_map(&dump_saved_config->memdev);
3251 +
3252 +}
3253 +#endif
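
Note: crashdump_addr is presumably filled in from a boot parameter by arch setup code elsewhere in this patch; the 0xdeadbeef value acts as a "not passed" sentinel, so a normal boot skips the reservation entirely.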
3254 +
3255 +/* 
3256 + * Loads the dump configuration from a memory block saved across soft-boot.
3257 + * The ops vectors need fixing up as the corresponding routines may have
3258 + * been relocated in the new soft-booted kernel.
3259 + */
3260 +int dump_load_config(struct dump_config_block *config)
3261 +{
3262 +       struct dumper *dumper;
3263 +       struct dump_data_filter *filter_table, *filter;
3264 +       struct dump_dev *dev;
3265 +       int i;
3266 +
3267 +       if (config->magic != DUMP_MAGIC_LIVE)
3268 +               return -ENOENT; /* not a valid config */
3269 +
3270 +       /* initialize generic config data */
3271 +       memcpy(&dump_config, &config->config, sizeof(dump_config));
3272 +
3273 +       /* initialize dumper state */
3274 +       if (!(dumper = dumper_by_name(config->dumper.name)))  {
3275 +               printk("dumper name mismatch\n");
3276 +               return -ENOENT; /* dumper mismatch */
3277 +       }
3278 +       
3279 +       /* verify and fix up scheme */
3280 +       if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
3281 +               printk("dumper scheme mismatch\n");
3282 +               return -ENOENT; /* mismatch */
3283 +       }
3284 +       config->scheme.ops = dumper->scheme->ops;
3285 +       config->dumper.scheme = &config->scheme;
3286 +       
3287 +       /* verify and fixup filter operations */
3288 +       filter_table = dumper->filter;
3289 +       for (i = 0, filter = config->filter_table; 
3290 +               ((i < MAX_PASSES) && filter_table[i].selector); 
3291 +               i++, filter++) {
3292 +               if (strncmp(filter_table[i].name, filter->name, 32)) {
3293 +                       printk("dump filter mismatch\n");
3294 +                       return -ENOENT; /* filter name mismatch */
3295 +               }
3296 +               filter->selector = filter_table[i].selector;
3297 +       }
3298 +       config->dumper.filter = config->filter_table;
3299 +
3300 +       /* fixup format */
3301 +       if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
3302 +               printk("dump format mismatch\n");
3303 +               return -ENOENT; /* mismatch */
3304 +       }
3305 +       config->fmt.ops = dumper->fmt->ops;
3306 +       config->dumper.fmt = &config->fmt;
3307 +
3308 +       /* fixup target device */
3309 +       dev = (struct dump_dev *)(&config->dev[0]);
3310 +       if (dumper->dev == NULL) {
3311 +               pr_debug("Vanilla dumper - assume default\n");
3312 +               if (dump_dev == NULL)
3313 +                       return -ENODEV;
3314 +               dumper->dev = dump_dev;
3315 +       }
3316 +
3317 +       if (strncmp(dumper->dev->type_name, dev->type_name, 32)) { 
3318 +               printk("dump dev type mismatch %s instead of %s\n",
3319 +                               dev->type_name, dumper->dev->type_name);
3320 +               return -ENOENT; /* mismatch */
3321 +       }
3322 +       dev->ops = dumper->dev->ops; 
3323 +       config->dumper.dev = dev;
3324 +       
3325 +       /* fixup memory device containing saved dump pages */
3326 +       /* assume statically init'ed dump_memdev */
3327 +       config->memdev.ddev.ops = dump_memdev->ddev.ops; 
3328 +       /* switch to memdev from prev boot */
3329 +       saved_dump_memdev = dump_memdev; /* remember current */
3330 +       dump_memdev = &config->memdev;
3331 +
3332 +       /* Make this the current primary dumper */
3333 +       dump_config.dumper = &config->dumper;
3334 +
3335 +       return 0;
3336 +}
3337 +
3338 +/* Saves the dump configuration in a memory block for use across a soft-boot */
3339 +int dump_save_config(struct dump_config_block *config)
3340 +{
3341 +       printk("saving dump config settings\n");
3342 +
3343 +       /* dump config settings */
3344 +       memcpy(&config->config, &dump_config, sizeof(dump_config));
3345 +
3346 +       /* dumper state */
3347 +       memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
3348 +       memcpy(&config->scheme, dump_config.dumper->scheme, 
3349 +               sizeof(struct dump_scheme));
3350 +       memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
3351 +       memcpy(&config->dev[0], dump_config.dumper->dev, 
3352 +               sizeof(struct dump_anydev));
3353 +       memcpy(&config->filter_table, dump_config.dumper->filter, 
3354 +               sizeof(struct dump_data_filter)*MAX_PASSES);
3355 +
3356 +       /* handle to saved mem pages */
3357 +       memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));
3358 +
3359 +       config->magic = DUMP_MAGIC_LIVE;
3360 +       
3361 +       return 0;
3362 +}
3363 +
3364 +int dump_init_stage2(struct dump_config_block *saved_config)
3365 +{
3366 +       int err = 0;
3367 +
3368 +       pr_debug("dump_init_stage2\n");
3369 +       /* Check if dump from previous boot exists */
3370 +       if (saved_config) {
3371 +               printk("loading dumper from previous boot\n");
3372 +               /* load and configure dumper from previous boot */
3373 +               if ((err = dump_load_config(saved_config)))
3374 +                       return err;
3375 +
3376 +               if (!dump_oncpu) {
3377 +                       if ((err = dump_configure(dump_config.dump_device))) {
3378 +                               printk("Stage 2 dump configure failed\n");
3379 +                               return err;
3380 +                       }
3381 +               }
3382 +
3383 +               dumper_reset();
3384 +               dump_dev = dump_config.dumper->dev;
3385 +               /* write out the dump */
3386 +               err = dump_generic_execute(NULL, NULL);
3387 +               
3388 +               dump_saved_config = NULL;
3389 +
3390 +               if (!dump_oncpu) {
3391 +                       dump_unconfigure(); 
3392 +               }
3393 +               
3394 +               return err;
3395 +
3396 +       } else {
3397 +               /* no dump to write out */
3398 +               printk("no dumper from previous boot\n");
3399 +               return 0;
3400 +       }
3401 +}
3402 +
3403 +extern void dump_mem_markpages(struct dump_memdev *);
3404 +
3405 +int dump_switchover_stage(void)
3406 +{
3407 +       int ret = 0;
3408 +
3409 +       /* trigger stage 2 right away - in real life this would be after soft-boot */
3410 +       /* dump_saved_config would be a boot param */
3411 +       saved_dump_memdev = dump_memdev;
3412 +       saved_dumper = dump_config.dumper;
3413 +       ret = dump_init_stage2(dump_saved_config);
3414 +       dump_memdev = saved_dump_memdev;
3415 +       dump_config.dumper = saved_dumper;
3416 +       return ret;
3417 +}
3418 +
3419 +int dump_activate_softboot(void) 
3420 +{
3421 +       int err = 0;
3422 +
3423 +       /* temporary - switchover to writeout previously saved dump */
3424 +       err = dump_switchover_stage(); /* non-disruptive case */
3425 +       if (dump_oncpu) 
3426 +               dump_config.dumper = &dumper_stage1; /* set things back */
3427 +
3428 +       return err;     /* TBD: early return for now; the soft-boot path below is not yet wired up */
3429 +
3430 +       dump_silence_level = DUMP_HALT_CPUS;
3431 +       /* wait till we become the only cpu */
3432 +       /* maybe by checking for online cpus ? */
3433 +
3434 +       /* now call into kexec */
3435 +
3436 +       /* TBD/Fixme: 
3437 +        * should we call reboot notifiers ? inappropriate for panic ?  
3438 +        * what about device_shutdown() ? 
3439 +        * is explicit bus master disabling needed or can we do that
3440 +        * through driverfs ? 
3441 +        */
3442 +       return 0;
3443 +}
3444 +
3445 +/* --- DUMP SCHEME ROUTINES  --- */
3446 +
3447 +static inline int dump_buf_pending(struct dumper *dumper)
3448 +{
3449 +       return (dumper->curr_buf - dumper->dump_buf);
3450 +}
3451 +
3452 +/* Invoked during stage 1 of soft-reboot based dumping */
3453 +int dump_overlay_sequencer(void)
3454 +{
3455 +       struct dump_data_filter *filter = dump_config.dumper->filter;
3456 +       struct dump_data_filter *filter2 = dumper_stage2.filter;
3457 +       int pass = 0, err = 0, save = 0;
3458 +       int (*action)(unsigned long, unsigned long);
3459 +
3460 +       /* Make sure gzip compression is being used */
3461 +       if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
3462 +               printk("Please set GZIP compression\n");
3463 +               return -EINVAL;
3464 +       }
3465 +
3466 +       /* start filling in dump data right after the header */
3467 +       dump_config.dumper->curr_offset = 
3468 +               PAGE_ALIGN(dump_config.dumper->header_len);
3469 +
3470 +       /* Locate the last pass */
3471 +       for (;filter->selector; filter++, pass++);
3472 +       
3473 +       /* 
3474 +        * Start from the end backwards: overlay involves a reverse 
3475 +        * ordering of passes, since less critical pages are more
3476 +        * likely to be reusable as scratch space once we are through
3477 +        * with them. 
3478 +        */
3479 +       for (--pass, --filter; pass >= 0; pass--, filter--)
3480 +       {
3481 +               /* Assumes passes are exclusive (even across dumpers) */
3482 +               /* Requires care when coding the selection functions */
3483 +               if ((save = filter->level_mask & dump_config.level))
3484 +                       action = dump_save_data;
3485 +               else
3486 +                       action = dump_skip_data;
3487 +
3488 +               /* Remember the offset where this pass started */
3489 +               /* The second stage dumper would use this */
3490 +               if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
3491 +                       pr_debug("Starting pass %d with pending data\n", pass);
3492 +                       pr_debug("filling dummy data to page-align it\n");
3493 +                       dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
3494 +                               (unsigned long)dump_config.dumper->curr_buf);
3495 +               }
3496 +               
3497 +               filter2[pass].start = dump_config.dumper->curr_offset
3498 +                       + dump_buf_pending(dump_config.dumper);
3499 +
3500 +               err = dump_iterator(pass, action, filter);
3501 +
3502 +               filter2[pass].end = dump_config.dumper->curr_offset
3503 +                       + dump_buf_pending(dump_config.dumper);
3504 +
3505 +               if (err < 0) {
3506 +                       printk("dump_overlay_seq: failure %d in pass %d\n", 
3507 +                               err, pass);
3508 +                       break;
3509 +               }       
3510 +               printk("\n %d overlay pages %s of %d each in pass %d\n", 
3511 +               err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
3512 +       }
3513 +
3514 +       return err;
3515 +}
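
To make the reverse ordering concrete, here is a hypothetical layout the sequencer might record for stage 2; all numbers are invented for illustration.

    /*
     * With a one-page header and three passes, stage 1 could record:
     *
     *   pass 2 (unused pages):  filter2[2].start = 0x1000,   .end = 0x80000
     *   pass 1 (user pages):    filter2[1].start = 0x80000,  .end = 0x200000
     *   pass 0 (kernel pages):  filter2[0].start = 0x200000, .end = 0x340000
     *
     * Stage 2 walks the passes in increasing order, so the kernel pages
     * gathered last are the first to reach the dump device.
     */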
3516 +
3517 +/* from dump_memdev.c */
3518 +extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
3519 +extern struct page *dump_mem_next_page(struct dump_memdev *dev);
3520 +
3521 +static inline struct page *dump_get_saved_page(loff_t loc)
3522 +{
3523 +       return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
3524 +}
3525 +
3526 +static inline struct page *dump_next_saved_page(void)
3527 +{
3528 +       return (dump_mem_next_page(dump_memdev));
3529 +}
3530 +
3531 +/* 
3532 + * Iterates over list of saved dump pages. Invoked during second stage of 
3533 + * soft boot dumping
3534 + *
3535 + * Observation: If additional selection is desired at this stage then
3536 + * a different iterator could be written which would advance 
3537 + * to the next page header every time instead of blindly picking up
3538 + * the data. In such a case loc would be interpreted differently. 
3539 + * At this moment however a blind pass seems sufficient, cleaner and
3540 + * faster.
3541 + */
3542 +int dump_saved_data_iterator(int pass, int (*action)(unsigned long, 
3543 +       unsigned long), struct dump_data_filter *filter)
3544 +{
3545 +       loff_t loc = filter->start;
3546 +       struct page *page;
3547 +       unsigned long count = 0;
3548 +       int err = 0;
3549 +       unsigned long sz;
3550 +
3551 +       printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
3552 +                       filter->start, filter->end);
3553 +
3554 +       /* loc will get treated as logical offset into stage 1 */
3555 +       page = dump_get_saved_page(loc);
3556 +                       
3557 +       for (; loc < filter->end; loc += PAGE_SIZE) {
3558 +               dump_config.dumper->curr_loc = loc;
3559 +               if (!page) {
3560 +                       printk("no more saved data for pass %d\n", pass);
3561 +                       break;
3562 +               }
3563 +               sz = (loc + PAGE_SIZE > filter->end) ? filter->end - loc :
3564 +                       PAGE_SIZE;
3565 +
3566 +               if (page && filter->selector(pass, (unsigned long)page, 
3567 +                       PAGE_SIZE))  {
3568 +                       pr_debug("mem offset 0x%llx\n", loc);
3569 +                       if ((err = action((unsigned long)page, sz))) 
3570 +                               break;
3571 +                       else
3572 +                               count++;
3573 +                       /* clear the contents of page */
3574 +                       /* fixme: consider using KM_DUMP instead */
3575 +                       clear_highpage(page);
3576 +                       
3577 +               }
3578 +               page = dump_next_saved_page();
3579 +       }
3580 +
3581 +       return err ? err : count;
3582 +}
3583 +
3584 +static inline int dump_overlay_pages_done(struct page *page, int nr)
3585 +{
3586 +       int ret=0;
3587 +
3588 +       for (; nr ; page++, nr--) {
3589 +               if (dump_check_and_free_page(dump_memdev, page))
3590 +                       ret++;
3591 +       }
3592 +       return ret;
3593 +}
3594 +
3595 +int dump_overlay_save_data(unsigned long loc, unsigned long len)
3596 +{
3597 +       int err = 0;
3598 +       struct page *page = (struct page *)loc;
3599 +       static unsigned long cnt = 0;
3600 +
3601 +       if ((err = dump_generic_save_data(loc, len)))
3602 +               return err;
3603 +
3604 +       if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
3605 +               cnt++;
3606 +               if (!(cnt & 0x7f))
3607 +                       pr_debug("released page 0x%lx\n", page_to_pfn(page));
3608 +       }
3609 +       
3610 +       return err;
3611 +}
3612 +
3613 +
3614 +int dump_overlay_skip_data(unsigned long loc, unsigned long len)
3615 +{
3616 +       struct page *page = (struct page *)loc;
3617 +
3618 +       dump_overlay_pages_done(page, len >> PAGE_SHIFT);
3619 +       return 0;
3620 +}
3621 +
3622 +int dump_overlay_resume(void)
3623 +{
3624 +       int err = 0;
3625 +
3626 +       /* 
3627 +        * switch to stage 2 dumper, save dump_config_block
3628 +        * and then trigger a soft-boot
3629 +        */
3630 +       dumper_stage2.header_len = dump_config.dumper->header_len;
3631 +       dump_config.dumper = &dumper_stage2;
3632 +       if ((err = dump_save_config(dump_saved_config)))
3633 +               return err;
3634 +
3635 +       dump_dev = dump_config.dumper->dev;
3636 +
3637 +       return err;     /* TBD: early return; the switchover below is bypassed for now */
3638 +       err = dump_switchover_stage();  /* plugs into soft boot mechanism */
3639 +       dump_config.dumper = &dumper_stage1; /* set things back */
3640 +       return err;
3641 +}
3642 +
3643 +int dump_overlay_configure(unsigned long devid)
3644 +{
3645 +       struct dump_dev *dev;
3646 +       struct dump_config_block *saved_config = dump_saved_config;
3647 +       int err = 0;
3648 +
3649 +       /* If there is a previously saved dump, write it out first */
3650 +       if (saved_config) {
3651 +               printk("Processing old dump pending writeout\n");
3652 +               err = dump_switchover_stage();
3653 +               if (err) {
3654 +                       printk("failed to writeout saved dump\n");
3655 +                       return err;
3656 +               }
3657 +               dump_free_mem(saved_config); /* testing only: not after boot */
3658 +       }
3659 +
3660 +       dev = dumper_stage2.dev = dump_config.dumper->dev;
3661 +       /* From here on the intermediate dump target is memory-only */
3662 +       dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
3663 +       if ((err = dump_generic_configure(0))) {
3664 +               printk("dump generic configure failed: err %d\n", err);
3665 +               return err;
3666 +       }
3667 +       /* temporary */
3668 +       dumper_stage2.dump_buf = dump_config.dumper->dump_buf;
3669 +
3670 +       /* Sanity check on the actual target dump device */
3671 +       if (!dev || (err = dev->ops->open(dev, devid))) {
3672 +               return dev ? err : -ENODEV;
3673 +       }
3674 +       /* TBD: should we release the target if this is soft-boot only ? */
3675 +
3676 +       /* alloc a dump config block area to save across reboot */
3677 +       if (!(dump_saved_config = dump_alloc_mem(sizeof(struct 
3678 +               dump_config_block)))) {
3679 +               printk("dump config block alloc failed\n");
3680 +               /* undo configure */
3681 +               dump_generic_unconfigure();
3682 +               return -ENOMEM;
3683 +       }
3684 +       dump_config.dump_addr = (unsigned long)dump_saved_config;
3685 +       printk("Dump config block of size %lu set up at 0x%lx\n",
3686 +               (unsigned long)sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
3687 +       return 0;
3688 +}
3689 +
3690 +int dump_overlay_unconfigure(void)
3691 +{
3692 +       struct dump_dev *dev = dumper_stage2.dev;
3693 +       int err = 0;
3694 +
3695 +       pr_debug("dump_overlay_unconfigure\n");
3696 +       /* Close the secondary device */
3697 +       dev->ops->release(dev); 
3698 +       pr_debug("released secondary device\n");
3699 +
3700 +       err = dump_generic_unconfigure();
3701 +       pr_debug("Unconfigured generic portions\n");
3702 +       dump_free_mem(dump_saved_config);
3703 +       dump_saved_config = NULL;
3704 +       pr_debug("Freed saved config block\n");
3705 +       dump_dev = dump_config.dumper->dev = dumper_stage2.dev;
3706 +
3707 +       printk("Unconfigured overlay dumper\n");
3708 +       return err;
3709 +}
3710 +
3711 +int dump_staged_unconfigure(void)
3712 +{
3713 +       int err = 0;
3714 +       struct dump_config_block *saved_config = dump_saved_config;
3715 +       struct dump_dev *dev;
3716 +
3717 +       pr_debug("dump_staged_unconfigure\n");
3718 +       err = dump_generic_unconfigure();
3719 +
3720 +       /* now check if there is a saved dump waiting to be written out */
3721 +       if (saved_config) {
3722 +               printk("Processing saved dump pending writeout\n");
3723 +               if ((err = dump_switchover_stage())) {
3724 +                       printk("Error in committing saved dump at 0x%lx\n", 
3725 +                               (unsigned long)saved_config);
3726 +                       printk("Old dump may hog memory\n");
3727 +               } else {
3728 +                       dump_free_mem(saved_config);
3729 +                       pr_debug("Freed saved config block\n");
3730 +               }
3731 +               dump_saved_config = NULL;
3732 +       } else {
3733 +               dev = &dump_memdev->ddev;
3734 +               dev->ops->release(dev);
3735 +       }
3736 +       printk("Unconfigured second stage dumper\n");
3737 +
3738 +       return 0;
3739 +}
3740 +
3741 +/* ----- PASSTHRU FILTER ROUTINE --------- */
3742 +
3743 +/* transparent - passes everything through */
3744 +int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
3745 +{
3746 +       return 1;
3747 +}
3748 +
3749 +/* ----- PASSTHRU FORMAT ROUTINES ---- */
3750 +
3751 +
3752 +int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
3753 +{
3754 +       dump_config.dumper->header_dirty++;
3755 +       return 0;
3756 +}
3757 +
3758 +/* Copies bytes of data from page(s) to the specified buffer */
3759 +int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
3760 +{
3761 +       unsigned long len = 0, bytes;
3762 +       void *addr;
3763 +
3764 +       while (len < sz) {
3765 +               addr = kmap_atomic(page, KM_DUMP);
3766 +               bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;  
3767 +               memcpy(buf, addr, bytes); 
3768 +               kunmap_atomic(addr, KM_DUMP);
3769 +               buf += bytes;
3770 +               len += bytes;
3771 +               page++;
3772 +       }
3773 +       /* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
3774 +
3775 +       return sz - len;
3776 +}
3777 +
3778 +int dump_passthru_update_header(void)
3779 +{
3780 +       long len = dump_config.dumper->header_len;
3781 +       struct page *page;
3782 +       void *buf = dump_config.dumper->dump_buf;
3783 +       int err = 0;
3784 +
3785 +       if (!dump_config.dumper->header_dirty)
3786 +               return 0;
3787 +
3788 +       pr_debug("Copying header of size %ld bytes from memory\n", len);
3789 +       if (len > DUMP_BUFFER_SIZE) 
3790 +               return -E2BIG;
3791 +
3792 +       page = dump_mem_lookup(dump_memdev, 0);
3793 +       for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
3794 +               if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
3795 +                       return err;
3796 +               page = dump_mem_next_page(dump_memdev);
3797 +       }
3798 +       if (len > 0) {
3799 +               printk("Incomplete header saved in mem\n");
3800 +               return -ENOENT;
3801 +       }
3802 +
3803 +       if ((err = dump_dev_seek(0))) {
3804 +               printk("Unable to seek to dump header offset\n");
3805 +               return err;
3806 +       }
3807 +       err = dump_ll_write(dump_config.dumper->dump_buf, 
3808 +               buf - dump_config.dumper->dump_buf);
3809 +       if (err < dump_config.dumper->header_len)
3810 +               return (err < 0) ? err : -ENOSPC;
3811 +
3812 +       dump_config.dumper->header_dirty = 0;
3813 +       return 0;
3814 +}
3815 +
3816 +static loff_t next_dph_offset = 0;
3817 +
3818 +static int dph_valid(struct __dump_page *dph)
3819 +{
3820 +       if ((dph->dp_address & (PAGE_SIZE - 1)) ||
3821 +           (dph->dp_flags > DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
3822 +           (dph->dp_size > PAGE_SIZE)) {
3823 +               printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
3824 +                       dph->dp_address, dph->dp_size, dph->dp_flags);
3825 +               return 0;
3826 +       }
3827 +       return 1;
3828 +}
3829 +
3830 +int dump_verify_lcrash_data(void *buf, unsigned long sz)
3831 +{
3832 +       struct __dump_page *dph;
3833 +
3834 +       /* sanity check for page headers */
3835 +       while (next_dph_offset + sizeof(*dph) < sz) {
3836 +               dph = (struct __dump_page *)(buf + next_dph_offset);
3837 +               if (!dph_valid(dph)) {
3838 +                       printk("Invalid page hdr at offset 0x%llx\n",
3839 +                               next_dph_offset);
3840 +                       return -EINVAL;
3841 +               }
3842 +               next_dph_offset += dph->dp_size + sizeof(*dph);
3843 +       }
3844 +
3845 +       next_dph_offset -= sz;  
3846 +       return 0;
3847 +}
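
The stream layout being validated can be pictured as follows; this is inferred from dph_valid() and the offset arithmetic above, with field names taken from struct __dump_page.

    /*
     *   ... | struct __dump_page | up to PAGE_SIZE of data | struct __dump_page | ...
     *
     * next_dph_offset is the distance from the start of the current
     * buffer to the next expected page header; decrementing it by sz at
     * the end carries that position across successive buffer loads.
     */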
3848 +
3849 +/* 
3850 + * TBD/Later: Consider avoiding the copy by using a scatter/gather 
3851 + * vector representation for the dump buffer
3852 + */
3853 +int dump_passthru_add_data(unsigned long loc, unsigned long sz)
3854 +{
3855 +       struct page *page = (struct page *)loc;
3856 +       void *buf = dump_config.dumper->curr_buf;
3857 +       int err = 0;
3858 +
3859 +       if ((err = dump_copy_pages(buf, page, sz))) {
3860 +               printk("dump_copy_pages failed\n");
3861 +               return err;
3862 +       }
3863 +
3864 +       if ((err = dump_verify_lcrash_data(buf, sz))) {
3865 +               printk("dump_verify_lcrash_data failed\n");
3866 +               printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
3867 +               printk("Page flags 0x%lx\n", page->flags);
3868 +               printk("Page count 0x%x\n", atomic_read(&page->count));
3869 +               return err;
3870 +       }
3871 +
3872 +       dump_config.dumper->curr_buf = buf + sz;
3873 +
3874 +       return 0;
3875 +}
3876 +
3877 +
3878 +/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */
3879 +
3880 +/* Scheme to overlay saved data in memory for writeout after a soft-boot */
3881 +struct dump_scheme_ops dump_scheme_overlay_ops = {
3882 +       .configure      = dump_overlay_configure,
3883 +       .unconfigure    = dump_overlay_unconfigure,
3884 +       .sequencer      = dump_overlay_sequencer,
3885 +       .iterator       = dump_page_iterator,
3886 +       .save_data      = dump_overlay_save_data,
3887 +       .skip_data      = dump_overlay_skip_data,
3888 +       .write_buffer   = dump_generic_write_buffer
3889 +};
3890 +
3891 +struct dump_scheme dump_scheme_overlay = {
3892 +       .name           = "overlay",
3893 +       .ops            = &dump_scheme_overlay_ops
3894 +};
3895 +
3896 +
3897 +/* Stage 1 must use a good compression scheme - default to gzip */
3898 +extern struct __dump_compress dump_gzip_compression;
3899 +
3900 +struct dumper dumper_stage1 = {
3901 +       .name           = "stage1",
3902 +       .scheme         = &dump_scheme_overlay,
3903 +       .fmt            = &dump_fmt_lcrash,
3904 +       .compress       = &dump_none_compression, /* needs to be gzip */
3905 +       .filter         = dump_filter_table,
3906 +       .dev            = NULL,
3907 +};             
3908 +
3909 +/* Stage 2 dumper: Activated after softboot to write out saved dump to device */
3910 +
3911 +/* Formatter that transfers data as is (transparent) w/o further conversion */
3912 +struct dump_fmt_ops dump_fmt_passthru_ops = {
3913 +       .configure_header       = dump_passthru_configure_header,
3914 +       .update_header          = dump_passthru_update_header,
3915 +       .save_context           = NULL, /* unused */
3916 +       .add_data               = dump_passthru_add_data,
3917 +       .update_end_marker      = dump_lcrash_update_end_marker
3918 +};
3919 +
3920 +struct dump_fmt dump_fmt_passthru = {
3921 +       .name   = "passthru",
3922 +       .ops    = &dump_fmt_passthru_ops
3923 +};
3924 +
3925 +/* Filter that simply passes along any data within the range (transparent)*/
3926 +/* Note: The start and end ranges in the table are filled in at run-time */
3927 +
3928 +extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
3929 +
3930 +struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
3931 +{.name = "passkern", .selector = dump_passthru_filter, 
3932 +       .level_mask = DUMP_MASK_KERN },
3933 +{.name = "passuser", .selector = dump_passthru_filter, 
3934 +       .level_mask = DUMP_MASK_USED },
3935 +{.name = "passunused", .selector = dump_passthru_filter, 
3936 +       .level_mask = DUMP_MASK_UNUSED },
3937 +{.name = "none", .selector = dump_filter_none, 
3938 +       .level_mask = DUMP_MASK_REST }
3939 +};
3940 +
3941 +
3942 +/* Scheme to handle data staged / preserved across a soft-boot */
3943 +struct dump_scheme_ops dump_scheme_staged_ops = {
3944 +       .configure      = dump_generic_configure,
3945 +       .unconfigure    = dump_staged_unconfigure,
3946 +       .sequencer      = dump_generic_sequencer,
3947 +       .iterator       = dump_saved_data_iterator,
3948 +       .save_data      = dump_generic_save_data,
3949 +       .skip_data      = dump_generic_skip_data,
3950 +       .write_buffer   = dump_generic_write_buffer
3951 +};
3952 +
3953 +struct dump_scheme dump_scheme_staged = {
3954 +       .name           = "staged",
3955 +       .ops            = &dump_scheme_staged_ops
3956 +};
3957 +
3958 +/* The stage 2 dumper comprising all these */
3959 +struct dumper dumper_stage2 = {
3960 +       .name           = "stage2",
3961 +       .scheme         = &dump_scheme_staged,
3962 +       .fmt            = &dump_fmt_passthru,
3963 +       .compress       = &dump_none_compression,
3964 +       .filter         = dump_passthru_filtertable,
3965 +       .dev            = NULL,
3966 +};             
3967 +
3968 --- linux-2.5.69/drivers/dump/dump_rle.c.lkcdbase       Mon Jun  2 17:29:49 2003
3969 +++ linux-2.5.69/drivers/dump/dump_rle.c        Fri Dec 13 00:51:31 2002
3970 @@ -0,0 +1,175 @@
3971 +/*
3972 + * RLE Compression functions for kernel crash dumps.
3973 + *
3974 + * Created by: Matt Robinson (yakker@sourceforge.net)
3975 + * Copyright 2001 Matt D. Robinson.  All rights reserved.
3976 + *
3977 + * This code is released under version 2 of the GNU GPL.
3978 + */
3979 +
3980 +/* header files */
3981 +#include <linux/config.h>
3982 +#include <linux/module.h>
3983 +#include <linux/sched.h>
3984 +#include <linux/fs.h>
3985 +#include <linux/file.h>
3986 +#include <linux/init.h>
3987 +#include <linux/dump.h>
3988 +
3989 +/*
3990 + * Name: dump_compress_rle()
3991 + * Func: Compress a DUMP_PAGE_SIZE (hardware) page down to something more
3992 + *       reasonable, if possible.  This is the same routine we use in IRIX.
3993 + */
3994 +static u16
3995 +dump_compress_rle(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
3996 +{
3997 +       u16 ri, wi, count = 0;
3998 +       u_char value = 0, cur_byte;
3999 +
4000 +       /*
4001 +        * If the block would "compress" to larger than the original,
4002 +        * bail out by returning oldsize to mark the page incompressible.
4003 +        */
4004 +
4005 +       wi = ri = 0;
4006 +
4007 +       while (ri < oldsize) {
4008 +               if (!ri) {
4009 +                       cur_byte = value = old[ri];
4010 +                       count = 0;
4011 +               } else {
4012 +                       if (count == 255) {
4013 +                               if (wi + 3 > oldsize) {
4014 +                                       return oldsize;
4015 +                               }
4016 +                               new[wi++] = 0;
4017 +                               new[wi++] = count;
4018 +                               new[wi++] = value;
4019 +                               value = cur_byte = old[ri];
4020 +                               count = 0;
4021 +                       } else { 
4022 +                               if ((cur_byte = old[ri]) == value) {
4023 +                                       count++;
4024 +                               } else {
4025 +                                       if (count > 1) {
4026 +                                               if (wi + 3 > oldsize) {
4027 +                                                       return oldsize;
4028 +                                               }
4029 +                                               new[wi++] = 0;
4030 +                                               new[wi++] = count;
4031 +                                               new[wi++] = value;
4032 +                                       } else if (count == 1) {
4033 +                                               if (value == 0) {
4034 +                                                       if (wi + 3 > oldsize) {
4035 +                                                               return oldsize;
4036 +                                                       }
4037 +                                                       new[wi++] = 0;
4038 +                                                       new[wi++] = 1;
4039 +                                                       new[wi++] = 0;
4040 +                                               } else {
4041 +                                                       if (wi + 2 > oldsize) {
4042 +                                                               return oldsize;
4043 +                                                       }
4044 +                                                       new[wi++] = value;
4045 +                                                       new[wi++] = value;
4046 +                                               }
4047 +                                       } else { /* count == 0 */
4048 +                                               if (value == 0) {
4049 +                                                       if (wi + 2 > oldsize) {
4050 +                                                               return oldsize;
4051 +                                                       }
4052 +                                                       new[wi++] = value;
4053 +                                                       new[wi++] = value;
4054 +                                               } else {
4055 +                                                       if (wi + 1 > oldsize) {
4056 +                                                               return oldsize;
4057 +                                                       }
4058 +                                                       new[wi++] = value;
4059 +                                               }
4060 +                                       } /* if count > 1 */
4061 +
4062 +                                       value = cur_byte;
4063 +                                       count = 0;
4064 +
4065 +                               } /* if byte == value */
4066 +
4067 +                       } /* if count == 255 */
4068 +
4069 +               } /* if ri == 0 */
4070 +               ri++;
4071 +
4072 +       }
4073 +       if (count > 1) {
4074 +               if (wi + 3 > oldsize) {
4075 +                       return oldsize;
4076 +               }
4077 +               new[wi++] = 0;
4078 +               new[wi++] = count;
4079 +               new[wi++] = value;
4080 +       } else if (count == 1) {
4081 +               if (value == 0) {
4082 +                       if (wi + 3 > oldsize)
4083 +                               return oldsize;
4084 +                       new[wi++] = 0;
4085 +                       new[wi++] = 1;
4086 +                       new[wi++] = 0;
4087 +               } else {
4088 +                       if (wi + 2 > oldsize)
4089 +                               return oldsize;
4090 +                       new[wi++] = value;
4091 +                       new[wi++] = value;
4092 +               }
4093 +       } else { /* count == 0 */
4094 +               if (value == 0) {
4095 +                       if (wi + 2 > oldsize)
4096 +                               return oldsize;
4097 +                       new[wi++] = value;
4098 +                       new[wi++] = value;
4099 +               } else {
4100 +                       if (wi + 1 > oldsize)
4101 +                               return oldsize;
4102 +                       new[wi++] = value;
4103 +               }
4104 +       } /* if count > 1 */
4105 +
4108 +       return wi;
4109 +}
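
For reference, the inverse transform implied by the encoder above is sketched below. This is an illustration, not part of the patch; the real decoding presumably happens in userspace (e.g. in the lcrash tool).

    /*
     * Decoding rules implied by dump_compress_rle():
     *   0x00 0x00        -> one literal zero byte
     *   0x00 <n> <v>     -> n+1 copies of byte v   (n >= 1)
     *   anything else    -> one literal byte
     */
    static u16
    dump_decompress_rle_sketch(const u8 *in, u16 insize, u8 *out, u16 outsize)
    {
            u16 ri = 0, wi = 0, n;
            u8 c;

            while (ri < insize && wi < outsize) {
                    c = in[ri++];
                    if (c != 0) {
                            out[wi++] = c;          /* literal byte */
                            continue;
                    }
                    if (ri >= insize)
                            break;                  /* truncated input */
                    c = in[ri++];                   /* run count */
                    if (c == 0) {
                            out[wi++] = 0;          /* single zero */
                            continue;
                    }
                    if (ri >= insize)
                            break;
                    for (n = 0; n <= c && wi < outsize; n++)
                            out[wi++] = in[ri];     /* n+1 copies */
                    ri++;
            }
            return wi;
    }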
4110 +
4111 +/* setup the rle compression functionality */
4112 +static struct __dump_compress dump_rle_compression = {
4113 +       .compress_type = DUMP_COMPRESS_RLE,
4114 +       .compress_func = dump_compress_rle,
4115 +       .compress_name = "RLE",
4116 +};
4117 +
4118 +/*
4119 + * Name: dump_compress_rle_init()
4120 + * Func: Initialize rle compression for dumping.
4121 + */
4122 +static int __init
4123 +dump_compress_rle_init(void)
4124 +{
4125 +       dump_register_compression(&dump_rle_compression);
4126 +       return 0;
4127 +}
4128 +
4129 +/*
4130 + * Name: dump_compress_rle_cleanup()
4131 + * Func: Remove rle compression for dumping.
4132 + */
4133 +static void __exit
4134 +dump_compress_rle_cleanup(void)
4135 +{
4136 +       dump_unregister_compression(DUMP_COMPRESS_RLE);
4137 +}
4138 +
4139 +/* module initialization */
4140 +module_init(dump_compress_rle_init);
4141 +module_exit(dump_compress_rle_cleanup);
4142 +
4143 +MODULE_LICENSE("GPL");
4144 +MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
4145 +MODULE_DESCRIPTION("RLE compression module for crash dump driver");
4146 --- linux-2.5.69/drivers/dump/dump_scheme.c.lkcdbase    Mon Jun  2 17:29:49 2003
4147 +++ linux-2.5.69/drivers/dump/dump_scheme.c     Fri Apr 25 00:24:15 2003
4148 @@ -0,0 +1,357 @@
4149 +/* 
4150 + * Default single stage dump scheme methods
4151 + *
4152 + * Previously a part of dump_base.c
4153 + *
4154 + * Started: Oct 2002 -  Suparna Bhattacharya <suparna@in.ibm.com>
4155 + *     Split and rewrote LKCD dump scheme to generic dump method 
4156 + *     interfaces 
4157 + * Derived from original code created by
4158 + *     Matt Robinson <yakker@sourceforge.net>
4159 + *
4160 + * Contributions from SGI, IBM, HP, MCL, and others.
4161 + *
4162 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
4163 + * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
4164 + * Copyright (C) 2002 International Business Machines Corp. 
4165 + *
4166 + * This code is released under version 2 of the GNU GPL.
4167 + */
4168 +
4169 +/*
4170 + * Implements the default dump scheme, i.e. single-stage gathering and 
4171 + * saving of dump data directly to the target device, which operates in
4172 + * a push mode, where the dumping system decides what data it saves
4173 + * taking into account pre-specified dump config options.
4174 + *
4175 + * Aside: The 2-stage dump scheme, where there is a soft-reset between
4176 + * the gathering and saving phases, also reuses some of these
4177 + * default routines (see dump_overlay.c) 
4178 + */ 
4179 +#include <linux/types.h>
4180 +#include <linux/kernel.h>
4181 +#include <linux/mm.h>
4182 +#include <linux/slab.h>
4183 +#include <linux/delay.h>
4184 +#include <linux/reboot.h>
4185 +#include <linux/nmi.h>
4186 +#include <linux/dump.h>
4187 +#include "dump_methods.h"
4188 +
4189 +extern int panic_timeout;  /* time before reboot */
4190 +
4191 +extern void dump_speedo(int);
4192 +
4193 +/* Default sequencer used during single stage dumping */
4194 +/* Also invoked during stage 2 of soft-boot based dumping */
4195 +int dump_generic_sequencer(void)
4196 +{
4197 +       struct dump_data_filter *filter = dump_config.dumper->filter;
4198 +       int pass = 0, err = 0, save = 0;
4199 +       int (*action)(unsigned long, unsigned long);
4200 +
4201 +       /* 
4202 +        * We want to save the more critical data areas first in 
4203 +        * case we run out of space, encounter i/o failures, or get
4204 +        * interrupted otherwise and have to give up midway.
4205 +        * So, run through the passes in increasing order.
4206 +        */
4207 +       for (;filter->selector; filter++, pass++)
4208 +       {
4209 +               /* Assumes passes are exclusive (even across dumpers) */
4210 +               /* Requires care when coding the selection functions */
4211 +               if ((save = filter->level_mask & dump_config.level))
4212 +                       action = dump_save_data;
4213 +               else
4214 +                       action = dump_skip_data;
4215 +
4216 +               if ((err = dump_iterator(pass, action, filter)) < 0)
4217 +                       break;
4218 +
4219 +               printk("\n %d dump pages %s of %d each in pass %d\n", 
4220 +               err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
4221 +
4222 +       }
4223 +
4224 +       return (err < 0) ? err : 0;
4225 +}
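
As a worked illustration of the mask test above, using the mask names that appear in the filter tables elsewhere in this patch:

    /*
     * With dump_config.level = DUMP_MASK_KERN | DUMP_MASK_USED, the
     * passes whose level_mask is DUMP_MASK_KERN or DUMP_MASK_USED bind
     * action = dump_save_data, while a DUMP_MASK_UNUSED pass binds
     * dump_skip_data: it still runs, but contributes no data.
     */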
4226 +
4227 +static inline struct page *dump_get_page(loff_t loc)
4228 +{
4229 +       unsigned long page_index = loc >> PAGE_SHIFT;
4230 +
4231 +       /* todo: complete this  to account for ia64/discontig mem */
4232 +       /* todo: and to check for validity, ram page, no i/o mem etc */
4233 +       /* need to use pfn/physaddr equiv of kern_addr_valid */
4234 +       if (__dump_page_valid(page_index))
4235 +               return pfn_to_page(page_index);
4236 +       else
4237 +               return NULL;
4238 +
4239 +}
4240 +
4241 +/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
4242 +/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
4243 +int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long), 
4244 +       struct dump_data_filter *filter)
4245 +{
4246 +       /* Todo : fix unit, type */
4247 +       loff_t loc;
4248 +       int count = 0, err = 0;
4249 +       struct page *page;
4250 +
4251 +       /* Todo: Add membanks code */
4252 +       /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */       
4253 +       
4254 +       for (loc = filter->start; loc < filter->end; loc += DUMP_PAGE_SIZE) {
4255 +               dump_config.dumper->curr_loc = loc;
4256 +               page = dump_get_page(loc);
4257 +               if (page && filter->selector(pass, (unsigned long) page, 
4258 +               DUMP_PAGE_SIZE)) {
4259 +                       if ((err = action((unsigned long)page, DUMP_PAGE_SIZE)))
4260 +                       {
4261 +                               printk("dump_page_iterator: err %d for loc "
4262 +                               "0x%llx, in pass %d\n", err, loc, pass);
4263 +                               break;
4264 +                       } else
4265 +                               count++;
4266 +               }
4267 +       }
4268 +
4269 +       return err ? err : count;
4270 +}
4271 +
4272 +/* 
4273 + * Base function that saves the selected block of data in the dump 
4274 + * Action taken when iterator decides that data needs to be saved 
4275 + */
4276 +int dump_generic_save_data(unsigned long loc, unsigned long sz)
4277 +{
4278 +       void *buf;
4279 +       void *dump_buf = dump_config.dumper->dump_buf;
4280 +       int left, bytes, ret;
4281 +
4282 +       if ((ret = dump_add_data(loc, sz))) {
4283 +               return ret;
4284 +       }
4285 +       buf = dump_config.dumper->curr_buf;
4286 +
4287 +       /* If we've filled up the buffer write it out */
4288 +       if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
4289 +               bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
4290 +               if (bytes < DUMP_BUFFER_SIZE) {
4291 +                       printk("dump_write_buffer failed %d\n", bytes);
4292 +                       return (bytes < 0) ? bytes : -ENOSPC;
4293 +               }
4294 +
4295 +               left -= bytes;
4296 +               
4297 +               /* -- A few chores to do from time to time -- */
4298 +               dump_config.dumper->count++;
4299 +
4300 +               if (!(dump_config.dumper->count & 0x3f)) {
4301 +                       /* Update the header every once in a while */
4302 +                       memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
4303 +                       if ((ret = dump_update_header()) < 0) {
4304 +                               /* issue warning */
4305 +                               return ret;
4306 +                       }
4307 +                       printk(".");
4308 +
4309 +                       touch_nmi_watchdog();
4310 +               } else if (!(dump_config.dumper->count & 0x7)) {
4311 +                       /* Show progress so the user knows we aren't hung */
4312 +                       dump_speedo(dump_config.dumper->count >> 3); 
4313 +               }
4314 +               /* Todo: Touch/Refresh watchdog */
4315 +
4316 +               /* --- Done with periodic chores -- */
4317 +
4318 +               /* 
4319 +                * extra bit of copying to simplify verification  
4320 +                * in the second kernel boot based scheme
4321 +                */
4322 +               memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf + 
4323 +                       DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
4324 +
4325 +               /* now adjust the leftover bits back to the top of the page */
4326 +               /* this case would not arise during stage 2 (passthru) */
4327 +               memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
4328 +               if (left) {
4329 +                       memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
4330 +               }
4331 +               buf -= DUMP_BUFFER_SIZE;
4332 +               dump_config.dumper->curr_buf = buf;
4333 +       }
4334 +                               
4335 +       return 0;
4336 +}
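
The buffer arithmetic above relies on the allocation layout set up in dump_generic_configure() further down. Pictured below; this is an inference from the code, not something the patch documents.

    /*
     *   |<- 1 page ->|<------ DUMP_BUFFER_SIZE ------>|<- 2 pages slack ->|
     *   ^            ^
     *   raw alloc    dump_buf
     *
     * The page in front of dump_buf receives a copy of the last flushed
     * page (the "extra bit of copying" above), and the slack pages absorb
     * data that overshoots DUMP_BUFFER_SIZE before the leftover is copied
     * back to the top of the buffer.
     */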
4337 +
4338 +int dump_generic_skip_data(unsigned long loc, unsigned long sz)
4339 +{
4340 +       /* dummy by default */
4341 +       return 0;
4342 +}
4343 +
4344 +/* 
4345 + * Common low level routine to write a buffer to current dump device 
4346 + * Expects checks for space etc to have been taken care of by the caller 
4347 + * Operates serially at the moment for simplicity. 
4348 + * TBD/Todo: Consider batching for improved throughput
4349 + */
4350 +int dump_ll_write(void *buf, unsigned long len)
4351 +{
4352 +       long transferred = 0, last_transfer = 0;
4353 +       int ret = 0;
4354 +
4355 +       /* make sure device is ready */
4356 +       while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
4357 +       if  (ret < 0) {
4358 +               printk("dump_dev_ready failed! err %d\n", ret);
4359 +               return ret;
4360 +       }
4361 +
4362 +       while (len) {
4363 +               if ((last_transfer = dump_dev_write(buf, len)) <= 0)  {
4364 +                       ret = last_transfer;
4365 +                       printk("dump_dev_write failed! err %d\n",
4366 +                               ret);
4367 +                       break;
4368 +               }
4369 +               /* wait till complete */
4370 +               while ((ret = dump_dev_ready(buf)) == -EAGAIN)
4371 +                       cpu_relax();
4372 +
4373 +               if  (ret < 0) {
4374 +                       printk("i/o failed! err %d\n", ret);
4375 +                       break;
4376 +               }
4377 +
4378 +               len -= last_transfer;
4379 +               buf += last_transfer;
4380 +               transferred += last_transfer;
4381 +       }
4382 +       return (ret < 0) ? ret : transferred;
4383 +}
4384 +
4385 +/* default writeout routine for single dump device */
4386 +/* writes out the dump data ensuring enough space is left for the end marker */
4387 +int dump_generic_write_buffer(void *buf, unsigned long len)
4388 +{
4389 +       long written = 0;
4390 +       int err = 0;
4391 +
4392 +       /* probe for space: seek past the intended end plus end-marker headroom */
4393 +       if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len + 
4394 +                       2*DUMP_BUFFER_SIZE)) < 0) {
4395 +               printk("dump_write_buffer: insufficient space after offset 0x%llx\n",
4396 +                       dump_config.dumper->curr_offset);
4397 +               return err;
4398 +       }
4399 +       /* alignment check would happen as a side effect of this */
4400 +       if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
4401 +               return err; 
4402 +
4403 +       written = dump_ll_write(buf, len);
4404 +
4405 +       /* all or none */
4406 +
4407 +       if (written < len)
4408 +               written = (written < 0) ? written : -ENOSPC;
4409 +       else
4410 +               dump_config.dumper->curr_offset += len;
4411 +
4412 +       return written;
4413 +}
4414 +
4415 +int dump_generic_configure(unsigned long devid)
4416 +{
4417 +       struct dump_dev *dev = dump_config.dumper->dev;
4418 +       struct dump_data_filter *filter;
4419 +       void *buf;
4420 +       int ret = 0;
4421 +
4422 +       /* Allocate the dump buffer and initialize dumper state */
4423 +       /* Assume that we get aligned addresses */
4424 +       if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
4425 +               return -ENOMEM;
4426 +
4427 +       if ((unsigned long)buf & (PAGE_SIZE - 1)) {
4428 +               /* sanity check for page aligned address */
4429 +               dump_free_mem(buf);
4430 +               return -ENOMEM; /* fixme: better error code */
4431 +       }
4432 +
4433 +       /* Initialize the rest of the fields */
4434 +       dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
4435 +       dumper_reset();
4436 +
4437 +       /* Open the dump device (don't leak the buffer on failure) */
4438 +       if (!dev) {
+               dump_free_mem(buf);
+               dump_config.dumper->dump_buf = NULL;
4439 +               return -ENODEV;
+       }
4440 +
4441 +       if ((ret = dev->ops->open(dev, devid))) {
+               dump_free_mem(buf);
+               dump_config.dumper->dump_buf = NULL;
4442 +               return ret;
4443 +       }
4444 +
4445 +       /* Initialize the memory ranges in the dump filter */
4446 +       for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
4447 +               if (!filter->start && !filter->end) {
4448 +                       filter->start = 0;
4449 +                       filter->end = num_physpages << PAGE_SHIFT;
4450 +               }
4451 +       }
4452 +
4453 +       return 0;
4454 +}
4455 +
4456 +int dump_generic_unconfigure(void)
4457 +{
4458 +       struct dump_dev *dev = dump_config.dumper->dev;
4459 +       void *buf = dump_config.dumper->dump_buf;
4460 +       int ret = 0;
4461 +
4462 +       pr_debug("Generic unconfigure\n");
4463 +       /* Close the dump device */
4464 +       if (dev && (ret = dev->ops->release(dev)))
4465 +               return ret;
4466 +
4467 +       printk("Closed dump device\n");
4468 +       
4469 +       if (buf)
4470 +               dump_free_mem((buf - DUMP_PAGE_SIZE));
4471 +
4472 +       dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
4473 +       pr_debug("Released dump buffer\n");
4474 +
4475 +       return 0;
4476 +}
4477 +
4478 +
4479 +/* Set up the default dump scheme */
4480 +
4481 +struct dump_scheme_ops dump_scheme_singlestage_ops = {
4482 +       .configure      = dump_generic_configure,
4483 +       .unconfigure    = dump_generic_unconfigure,
4484 +       .sequencer      = dump_generic_sequencer,
4485 +       .iterator       = dump_page_iterator,
4486 +       .save_data      = dump_generic_save_data,
4487 +       .skip_data      = dump_generic_skip_data,
4488 +       .write_buffer   = dump_generic_write_buffer,
4489 +};
4490 +
4491 +struct dump_scheme dump_scheme_singlestage = {
4492 +       .name           = "single-stage",
4493 +       .ops            = &dump_scheme_singlestage_ops
4494 +};
4495 +
4496 +/* The single stage dumper comprising all these */
4497 +struct dumper dumper_singlestage = {
4498 +       .name           = "single-stage",
4499 +       .scheme         = &dump_scheme_singlestage,
4500 +       .fmt            = &dump_fmt_lcrash,
4501 +       .compress       = &dump_none_compression,
4502 +       .filter         = dump_filter_table,
4503 +       .dev            = NULL,
4504 +};             
4505 +
4506 --- linux-2.5.69/drivers/dump/dump_setup.c.lkcdbase     Mon Jun  2 17:29:49 2003
4507 +++ linux-2.5.69/drivers/dump/dump_setup.c      Tue Apr 29 03:37:19 2003
4508 @@ -0,0 +1,803 @@
4509 +/*
4510 + * Standard kernel function entry points for Linux crash dumps.
4511 + *
4512 + * Created by: Matt Robinson (yakker@sourceforge.net)
4513 + * Contributions from SGI, IBM, HP, MCL, and others.
4514 + *
4515 + * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
4516 + * Copyright (C) 2000 - 2002 TurboLinux, Inc.  All rights reserved.
4517 + * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
4518 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
4519 + *
4520 + * This code is released under version 2 of the GNU GPL.
4521 + */
4522 +
4523 +/*
4524 + * -----------------------------------------------------------------------
4525 + *
4526 + * DUMP HISTORY
4527 + *
4528 + * This dump code goes back to SGI's first attempts at dumping system
4529 + * memory on SGI systems running IRIX.  A few developers at SGI needed
4530 + * a way to take this system dump and analyze it, and created 'icrash',
4531 + * or IRIX Crash.  The mechanism (the dumps and 'icrash') was used
4532 + * by support people to generate crash reports when a system failure
4533 + * occurred.  This was vital for large system configurations that
4534 + * couldn't apply patch after patch after fix just to hope that the
4535 + * problems would go away.  So the system memory, along with the crash
4536 + * dump analyzer, allowed support people to quickly figure out what the
4537 + * problem was on the system with the crash dump.
4538 + *
4539 + * In comes Linux.  SGI started moving towards the open source community,
4540 + * and upon doing so, SGI wanted to take its support utilities into Linux
4541 + * with the hopes that they would end up in the kernel and user space to
4542 + * be used by SGI's customers buying SGI Linux systems.  One of the first
4543 + * few products to be open sourced by SGI was LKCD, or Linux Kernel Crash
4544 + * Dumps.  LKCD comprises a patch to the kernel to enable system
4545 + * dumping, along with 'lcrash', or Linux Crash, to analyze the system
4546 + * memory dump.  A few additional system scripts and kernel modifications
4547 + * are also included to make the dump mechanism and dump data easier to
4548 + * process and use.
4549 + *
4550 + * As soon as LKCD was released into the open source community, a number
4551 + * of larger companies started to take advantage of it.  Today, there are
4552 + * many community members that contribute to LKCD, and it continues to
4553 + * flourish and grow as an open source project.
4554 + */
4555 +
4556 +/*
4557 + * DUMP TUNABLES
4558 + *
4559 + * This is the list of system tunables (via /proc) that are available
4560 + * for Linux systems.  All the read, write, etc., functions are listed
4561 + * here.  Currently, there are a few different tunables for dumps:
4562 + *
4563 + * dump_device (used to be dumpdev):
4564 + *     The device to which the memory pages are dumped.  This
4565 + *     may be set to the primary swap partition for disruptive dumps,
4566 + *     and must be an unused partition for non-disruptive dumps.
4567 + *     Todo: In the case of network dumps, this may be interpreted 
4568 + *     as the IP address of the netdump server to connect to.
4569 + *
4570 + * dump_compress (used to be dump_compress_pages):
4571 + *     This is the flag which indicates which compression mechanism
4572 + *     to use.  This is a BITMASK, not an index (0,1,2,4,8,16,etc.).
4573 + *     This is the current set of values:
4574 + *
4575 + *     0: DUMP_COMPRESS_NONE -- Don't compress any pages.
4576 + *     1: DUMP_COMPRESS_RLE  -- This uses RLE compression.
4577 + *     2: DUMP_COMPRESS_GZIP -- This uses GZIP compression.
4578 + *
4579 + * dump_level:
4580 + *     The amount of effort the dump module should make to save
4581 + *     information for post crash analysis.  This value is now
4582 + *     a BITMASK value, not an index:
4583 + *
4584 + *     0:   Do nothing, no dumping. (DUMP_LEVEL_NONE)
4585 + *
4586 + *     1:   Print out the dump information to the dump header, and
4587 + *          write it out to the dump_device. (DUMP_LEVEL_HEADER)
4588 + *
4589 + *     2:   Write out the dump header and all kernel memory pages.
4590 + *          (DUMP_LEVEL_KERN)
4591 + *
4592 + *     4:   Write out the dump header and all kernel and user
4593 + *          memory pages.  (DUMP_LEVEL_USED)
4594 + *
4595 + *     8:   Write out the dump header and all conventional/cached 
4596 + *         memory (RAM) pages in the system (kernel, user, free).  
4597 + *         (DUMP_LEVEL_ALL_RAM)
4598 + *
4599 + *    16:   Write out everything, including non-conventional memory
4600 + *         like firmware, proms, I/O registers, uncached memory.
4601 + *         (DUMP_LEVEL_ALL)
4602 + *
4603 + *     The dump_level will default to 1.
4604 + *
4605 + * dump_flags:
4606 + *     These are the flags to use when talking about dumps.  There
4607 + *     are lots of possibilities.  This is a BITMASK value, not an index.
4608 + * 
4609 + * -----------------------------------------------------------------------
4610 + */
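
Given the sysctl tables registered further down in this file, these tunables surface as files under /proc/sys/kernel/dump/. A short userspace sketch of driving them from C -- the path is inferred from the 'kernel'/'dump' ctl_table nesting below, the value is stored as-is by proc_doulonghex(), and error handling is kept minimal:

    #include <stdio.h>

    int main(void)
    {
            char old[32];
            FILE *f = fopen("/proc/sys/kernel/dump/level", "r+");

            if (!f)
                    return 1;
            if (fgets(old, sizeof(old), f))
                    printf("current level: %s", old);  /* printed in hex, e.g. 0x1 */
            rewind(f);
            fputs("0x2\n", f);      /* raw mask value, written back verbatim */
            return fclose(f) ? 1 : 0;
    }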
4611 +
4612 +#include <linux/kernel.h>
4613 +#include <linux/delay.h>
4614 +#include <linux/reboot.h>
4615 +#include <linux/fs.h>
4616 +#include <linux/dump.h>
4617 +#include "dump_methods.h"
4618 +#include <linux/proc_fs.h>
4619 +#include <linux/module.h>
4620 +#include <linux/utsname.h>
4621 +#include <linux/highmem.h>
4622 +#include <linux/major.h>
4623 +#include <linux/sysrq.h>
4624 +#include <linux/sysctl.h>
4625 +#include <linux/nmi.h>
4626 +
4627 +#include <asm/hardirq.h>
4628 +#include <asm/uaccess.h>
4629 +
4630 +/*
4631 + * -----------------------------------------------------------------------
4632 + *                         V A R I A B L E S
4633 + * -----------------------------------------------------------------------
4634 + */
4635 +
4636 +/* Dump tunables */
4637 +struct dump_config dump_config = {
4638 +       .level          = 0,
4639 +       .flags          = 0,
4640 +       .dump_device    = 0,
4641 +       .dump_addr      = 0,
4642 +       .dumper         = NULL
4643 +};
4644 +
4645 +
4646 +/* Global variables used in dump.h */
4647 +/* degree of system freeze when dumping */
4648 +enum dump_silence_levels dump_silence_level = DUMP_HARD_SPIN_CPUS;      
4649 +
4650 +/* Other global fields */
4651 +extern struct __dump_header dump_header; 
4652 +struct dump_dev *dump_dev = NULL;  /* Active dump device                   */
4653 +static int dump_compress = 0;
4654 +
4655 +static u16 dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize);
4656 +struct __dump_compress dump_none_compression = {
4657 +       .compress_type  = DUMP_COMPRESS_NONE,
4658 +       .compress_func  = dump_compress_none,
4659 +       .compress_name  = "none",
4660 +};
4661 +
4662 +/* our device operations and functions */
4663 +static int dump_ioctl(struct inode *i, struct file *f,
4664 +       unsigned int cmd, unsigned long arg);
4665 +
4666 +static struct file_operations dump_fops = {
4667 +       .ioctl          =       dump_ioctl,
4668 +};
4669 +
4670 +/* static variables                                                    */
4671 +static int dump_okay = 0;              /* can we dump out to disk?     */
4672 +static spinlock_t dump_lock = SPIN_LOCK_UNLOCKED;
4673 +
4674 +/* used for dump compressors */
4675 +static struct list_head dump_compress_list = LIST_HEAD_INIT(dump_compress_list);
4676 +
4677 +/* list of registered dump targets */
4678 +static struct list_head dump_target_list = LIST_HEAD_INIT(dump_target_list);
4679 +
4680 +/* lkcd info structure -- this is used by lcrash for basic system data     */
4681 +struct __lkcdinfo lkcdinfo = {
4682 +       .ptrsz          = (sizeof(void *) * 8),
4683 +#if defined(__LITTLE_ENDIAN) 
4684 +       .byte_order     = __LITTLE_ENDIAN,
4685 +#else
4686 +       .byte_order     = __BIG_ENDIAN,
4687 +#endif
4688 +       .page_shift     = PAGE_SHIFT,
4689 +       .page_size      = PAGE_SIZE,
4690 +       .page_mask      = PAGE_MASK,
4691 +       .page_offset    = PAGE_OFFSET,
4692 +};
4693 +
4694 +/*
4695 + * -----------------------------------------------------------------------
4696 + *            / P R O C   T U N A B L E   F U N C T I O N S
4697 + * -----------------------------------------------------------------------
4698 + */
4699 +
4700 +static int proc_dump_device(ctl_table *ctl, int write, struct file *f,
4701 +                           void *buffer, size_t *lenp);
4702 +
4703 +static int proc_doulonghex(ctl_table *ctl, int write, struct file *f,
4704 +                           void *buffer, size_t *lenp);
4705 +/*
4706 + * sysctl-tuning infrastructure.
4707 + */
4708 +static ctl_table dump_table[] = {
4709 +       { .ctl_name = CTL_DUMP_LEVEL,
4710 +         .procname = DUMP_LEVEL_NAME, 
4711 +         .data = &dump_config.level,    
4712 +         .maxlen = sizeof(int),
4713 +         .mode = 0644,
4714 +         .proc_handler = proc_doulonghex, },
4715 +
4716 +       { .ctl_name = CTL_DUMP_FLAGS,
4717 +         .procname = DUMP_FLAGS_NAME,
4718 +         .data = &dump_config.flags,   
4719 +         .maxlen = sizeof(int),
4720 +         .mode = 0644,
4721 +         .proc_handler = proc_doulonghex, },
4722 +
4723 +       { .ctl_name = CTL_DUMP_COMPRESS,
4724 +         .procname = DUMP_COMPRESS_NAME,
4725 +         .data = &dump_compress, /* FIXME */
4726 +         .maxlen = sizeof(int),
4727 +         .mode = 0644,
4728 +         .proc_handler = proc_dointvec, },
4729 +         
4730 +       { .ctl_name = CTL_DUMP_DEVICE,
4731 +         .procname = DUMP_DEVICE_NAME,
4732 +         .mode = 0644,
4733 +         .data = &dump_config.dump_device, /* FIXME */
4734 +         .maxlen = sizeof(int),
4735 +         .proc_handler = proc_dump_device },
4736 +
4737 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
4738 +       { .ctl_name = CTL_DUMP_ADDR,
4739 +         .procname = DUMP_ADDR_NAME,
4740 +         .mode = 0444,
4741 +         .data = &dump_config.dump_addr,
4742 +         .maxlen = sizeof(unsigned long),
4743 +         .proc_handler = proc_doulonghex },
4744 +#endif
4745 +
4746 +       { 0, }
4747 +};
4748 +
4749 +static ctl_table dump_root[] = {
4750 +       { .ctl_name = KERN_DUMP,
4751 +         .procname = "dump",
4752 +         .mode = 0555, 
4753 +         .child = dump_table },
4754 +       { 0, }
4755 +};
4756 +
4757 +static ctl_table kernel_root[] = {
4758 +       { .ctl_name = CTL_KERN,
4759 +         .procname = "kernel",
4760 +         .mode = 0555,
4761 +         .child = dump_root, },
4762 +       { 0, }
4763 +};
4764 +
4765 +static struct ctl_table_header *sysctl_header;
4766 +
4767 +/*
4768 + * -----------------------------------------------------------------------
4769 + *              C O M P R E S S I O N   F U N C T I O N S
4770 + * -----------------------------------------------------------------------
4771 + */
4772 +
4773 +/*
4774 + * Name: dump_compress_none()
4775 + * Func: Don't do any compression, period.
4776 + */
4777 +static u16
4778 +dump_compress_none(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
4779 +{
4780 +       /* just return the old size */
4781 +       return oldsize;
4782 +}
4783 +
4784 +
4785 +/*
4786 + * Name: dump_execute()
4787 + * Func: Execute the dumping process.  This makes sure all the appropriate
4788 + *       fields are updated correctly, and calls dump_generic_execute(),
4789 + *       which does the real work.
4790 + */
4791 +void
4792 +dump_execute(const char *panic_str, const struct pt_regs *regs)
4793 +{
4794 +       int state = -1;
4795 +       unsigned long flags;
4796 +
4797 +       /* make sure we can dump */
4798 +       if (!dump_okay) {
4799 +               pr_info("LKCD not yet configured, can't take dump now\n");
4800 +               return;
4801 +       }
4802 +
4803 +       /* Exclude multiple dumps at the same time,
4804 +        * and disable interrupts; some drivers may re-enable
4805 +        * interrupts from within silence().
4806 +        *
4807 +        * Try to acquire the spin lock. If successful, leave preempt
4808 +        * and interrupts disabled.  See spin_lock_irqsave in spinlock.h
4809 +        */
4810 +       local_irq_save(flags);
4811 +       if (!spin_trylock(&dump_lock)) {
4812 +               local_irq_restore(flags);
4813 +               pr_info("LKCD dump already in progress\n");
4814 +               return;
4815 +       }
4816 +
4817 +       /* Bring the system into the strictest level of quiescing for minimum
4818 +        * drift; dump drivers can soften this as required in dev->ops->silence().
4819 +        */
4820 +       dump_oncpu = smp_processor_id() + 1;
4821 +       dump_silence_level = DUMP_HARD_SPIN_CPUS; 
4822 +
4823 +       state = dump_generic_execute(panic_str, regs);
4824 +       
4825 +       dump_oncpu = 0;
4826 +       spin_unlock_irqrestore(&dump_lock, flags);
4827 +
4828 +       if (state < 0) {
4829 +               printk("Dump Incomplete or failed!\n");
4830 +       } else {
4831 +               printk("Dump Complete; %d dump pages saved.\n", 
4832 +                      dump_header.dh_num_dump_pages);
4833 +       }
4834 +}
4835 +
4836 +/*
4837 + * Name: dump_register_compression()
4838 + * Func: Register a dump compression mechanism.
4839 + */
4840 +void
4841 +dump_register_compression(struct __dump_compress *item)
4842 +{
4843 +       if (item)
4844 +               list_add(&(item->list), &dump_compress_list);
4845 +}
4846 +
4847 +/*
4848 + * Name: dump_unregister_compression()
4849 + * Func: Remove a dump compression mechanism, and re-assign the dump
4850 + *       compression pointer if necessary.
4851 + */
4852 +void
4853 +dump_unregister_compression(int compression_type)
4854 +{
4855 +       struct list_head *tmp;
4856 +       struct __dump_compress *dc;
4857 +
4858 +       /* let's make sure our list is valid */
4859 +       if (compression_type != DUMP_COMPRESS_NONE) {
4860 +               list_for_each(tmp, &dump_compress_list) {
4861 +                       dc = list_entry(tmp, struct __dump_compress, list);
4862 +                       if (dc->compress_type == compression_type) {
4863 +                               list_del(&(dc->list));
4864 +                               break;
4865 +                       }
4866 +               }
4867 +       }
4868 +}
4869 +
4870 +/*
4871 + * Name: dump_compress_init()
4872 + * Func: Initialize (or re-initialize) compression scheme.
4873 + */
4874 +static int
4875 +dump_compress_init(int compression_type)
4876 +{
4877 +       struct list_head *tmp;
4878 +       struct __dump_compress *dc;
4879 +
4880 +       /* look up the requested compression mechanism and make it current */
4881 +       list_for_each(tmp, &dump_compress_list) {
4882 +               dc = list_entry(tmp, struct __dump_compress, list);
4883 +               if (dc->compress_type == compression_type) {
4884 +                       dump_config.dumper->compress = dc;
4885 +                       dump_compress = compression_type;
4886 +                       pr_debug("Dump Compress %s\n", dc->compress_name);
4887 +                       return 0;
4888 +               }
4889 +       }
4890 +
4891 +       /* 
4892 +        * nothing on the list -- return ENODATA to indicate an error 
4893 +        *
4894 +        * NB: 
4895 +        *      EAGAIN: reports "Resource temporarily unavailable" which
4896 +        *              isn't very enlightening.
4897 +        */
4898 +       printk("compression_type:%d not found\n", compression_type);
4899 +
4900 +       return -ENODATA;
4901 +}
4902 +
4903 +static int
4904 +dumper_setup(unsigned long flags, unsigned long devid)
4905 +{
4906 +       int ret = 0;
4907 +
4908 +       /* unconfigure old dumper if it exists */
4909 +       dump_okay = 0;
4910 +       if (dump_config.dumper) {
4911 +               pr_debug("Unconfiguring current dumper\n");
4912 +               dump_unconfigure();
4913 +       }
4914 +       /* set up new dumper */
4915 +       if (dump_config.flags & DUMP_FLAGS_SOFTBOOT) {
4916 +               printk("Configuring softboot-based dump\n");
4917 +#ifdef CONFIG_CRASH_DUMP_MEMDEV
4918 +               dump_config.dumper = &dumper_stage1; 
4919 +#else
4920 +               printk("Requires CONFIG_CRASH_DUMP_MEMDEV. Can't proceed.\n");
4921 +               return -1;
4922 +#endif
4923 +       } else {
4924 +               dump_config.dumper = &dumper_singlestage;
4925 +       }       
4926 +       dump_config.dumper->dev = dump_dev;
4927 +
4928 +       ret = dump_configure(devid);
4929 +       if (!ret) {
4930 +               dump_okay = 1;
4931 +               pr_debug("%s dumper set up for dev 0x%lx\n", 
4932 +                       dump_config.dumper->name, devid);
4933 +               dump_config.dump_device = devid;
4934 +       } else {
4935 +               printk("%s dumper setup failed for dev 0x%lx\n",
4936 +                      dump_config.dumper->name, devid);
4937 +               dump_config.dumper = NULL;
4938 +       }
4939 +       return ret;
4940 +}
4941 +
4942 +static int
4943 +dump_target_init(int target)
4944 +{
4945 +       char type[20];
4946 +       struct list_head *tmp;
4947 +       struct dump_dev *dev;
4948 +       
4949 +       switch (target) {
4950 +               case DUMP_FLAGS_DISKDUMP:
4951 +                       strcpy(type, "blockdev"); break;
4952 +               case DUMP_FLAGS_NETDUMP:
4953 +                       strcpy(type, "networkdev"); break;
4954 +               default:
4955 +                       return -1;
4956 +       }
4957 +
4958 +       /*
4959 +        * This is a bit awkward: we generate a string from the flag
4960 +        * and match it with strcmp() because 'struct dump_dev' carries
4961 +        * a 'type_name' string rather than an integer 'type'.
4962 +        */
4963 +       list_for_each(tmp, &dump_target_list) {
4964 +               dev = list_entry(tmp, struct dump_dev, list);
4965 +               if (strcmp(type, dev->type_name) == 0) {
4966 +                       dump_dev = dev;
4967 +                       return 0;
4968 +               }
4969 +       }
4970 +       return -1;
4971 +}
4972 +
4973 +/*
4974 + * Name: dump_ioctl()
4975 + * Func: Allow all dump tunables through a standard ioctl() mechanism.
4976 + *       This is far better than before, where we'd go through /proc,
4977 + *       because now this will work for multiple OS and architectures.
4978 + */
4979 +static int
4980 +dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
4981 +{
4982 +       /* check capabilities */
4983 +       if (!capable(CAP_SYS_ADMIN))
4984 +               return -EPERM;
4985 +
4986 +       if (!dump_config.dumper && cmd == DIOSDUMPCOMPRESS)
4987 +               /* dump device must be configured first */
4988 +               return -ENODEV;
4989 +
4990 +       /*
4991 +        * This is the main mechanism for controlling get/set data
4992 +        * for various dump device parameters.  The real trick here
4993 +        * is setting the dump device (DIOSDUMPDEV).  That's what
4994 +        * triggers everything else.
4995 +        */
4996 +       switch (cmd) {
4997 +       case DIOSDUMPDEV:       /* set dump_device */
4998 +               pr_debug("Configuring dump device\n"); 
4999 +               if (!(f->f_flags & O_RDWR))
5000 +                       return -EPERM;
5001 +
5002 +               __dump_open();
5003 +               return dumper_setup(dump_config.flags, arg);
5004 +
5005 +               
5006 +       case DIOGDUMPDEV:       /* get dump_device */
5007 +               return put_user((long)dump_config.dump_device, (long *)arg);
5008 +
5009 +       case DIOSDUMPLEVEL:     /* set dump_level */
5010 +               if (!(f->f_flags & O_RDWR))
5011 +                       return -EPERM;
5012 +
5017 +               /* Fixme: clean this up (cases fall through, accumulating mask bits) */
5018 +               dump_config.level = 0;
5019 +               switch ((int)arg) {
5020 +                       case DUMP_LEVEL_ALL:
5021 +                       case DUMP_LEVEL_ALL_RAM:
5022 +                               dump_config.level |= DUMP_MASK_UNUSED;
5023 +                       case DUMP_LEVEL_USED:
5024 +                               dump_config.level |= DUMP_MASK_USED;
5025 +                       case DUMP_LEVEL_KERN:
5026 +                               dump_config.level |= DUMP_MASK_KERN;
5027 +                       case DUMP_LEVEL_HEADER:
5028 +                               dump_config.level |= DUMP_MASK_HEADER;
5029 +                       case DUMP_LEVEL_NONE:
5030 +                               break;
5031 +                       default:
5032 +                               return (-EINVAL);
5033 +                       }
5034 +               pr_debug("Dump Level 0x%lx\n", dump_config.level);
5035 +               break;
5036 +
5037 +       case DIOGDUMPLEVEL:     /* get dump_level */
5038 +               /* fixme: handle conversion */
5039 +               return put_user((long)dump_config.level, (long *)arg);
5040 +
5041 +               
5042 +       case DIOSDUMPFLAGS:     /* set dump_flags */
5043 +               /* check flags */
5044 +               if (!(f->f_flags & O_RDWR))
5045 +                       return -EPERM;
5046 +
5051 +               if (dump_target_init(arg & DUMP_FLAGS_TARGETMASK) < 0)
5052 +                       return -EINVAL; /* return proper error */
5053 +
5054 +               dump_config.flags = arg;
5055 +               
5056 +               pr_debug("Dump Flags 0x%lx\n", dump_config.flags);
5057 +               break;
5058 +               
5059 +       case DIOGDUMPFLAGS:     /* get dump_flags */
5060 +               return put_user((long)dump_config.flags, (long *)arg);
5061 +
5062 +       case DIOSDUMPCOMPRESS:  /* set the dump_compress status */
5063 +               if (!(f->f_flags & O_RDWR))
5064 +                       return -EPERM;
5065 +
5066 +               return dump_compress_init((int)arg);
5067 +
5068 +       case DIOGDUMPCOMPRESS:  /* get the dump_compress status */
5069 +               return put_user((long)(dump_config.dumper ? 
5070 +                       dump_config.dumper->compress->compress_type : 0), 
5071 +                       (long *)arg);
5072 +                       
5073 +       default:
5074 +               /* 
5075 +                * these are network dump specific ioctls, let the
5076 +                * module handle them.
5077 +                */
5078 +               return dump_dev_ioctl(cmd, arg);
5079 +       }
5080 +       return 0;
5081 +}
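
The same controls are reachable from userspace through the character device that dump_init() registers below. A sketch, assuming a /dev/dump node has been created for CRASH_DUMP_MAJOR -- the node name is an assumption, and flags are set first because DIOSDUMPDEV configures the dumper with whatever flags are current:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dump.h>         /* DIOSxxx ioctls, DUMP_* constants */

    int setup_disk_dump(unsigned long devid)
    {
            int fd = open("/dev/dump", O_RDWR);     /* setters require O_RDWR */

            if (fd < 0)
                    return -1;
            if (ioctl(fd, DIOSDUMPFLAGS, DUMP_FLAGS_DISKDUMP) < 0 ||
                ioctl(fd, DIOSDUMPDEV, devid) < 0 ||    /* dev_t of target */
                ioctl(fd, DIOSDUMPLEVEL, DUMP_LEVEL_KERN) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }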
5082 +
5083 +/*
5084 + * Handle special cases for dump_device:
5085 + * changing the dump device requires opening the new device
5086 + */
5087 +static int 
5088 +proc_dump_device(ctl_table *ctl, int write, struct file *f,
5089 +                void *buffer, size_t *lenp)
5090 +{
5091 +       int *valp = ctl->data;
5092 +       int oval = *valp;
5093 +       int ret = -EPERM;
5094 +
5095 +       /* same permission checks as ioctl */
5096 +       if (capable(CAP_SYS_ADMIN)) {
5097 +               ret = proc_doulonghex(ctl, write, f, buffer, lenp);
5098 +               if (ret == 0 && write && *valp != oval) {
5099 +                       /* need to restore old value to close properly */
5100 +                       dump_config.dump_device = (dev_t) oval;
5101 +                       __dump_open();
5102 +                       ret = dumper_setup(dump_config.flags, (dev_t) *valp);
5103 +               }
5104 +       }
5105 +
5106 +       return ret;
5107 +}
5108 +
5109 +/* All for the want of a proc_do_xxx routine which prints values in hex */
5110 +static int 
5111 +proc_doulonghex(ctl_table *ctl, int write, struct file *f,
5112 +                void *buffer, size_t *lenp)
5113 +{
5114 +#define TMPBUFLEN 20
5115 +       unsigned long *i;
5116 +       size_t len, left;
5117 +       char buf[TMPBUFLEN];
5118 +
5119 +       if (!ctl->data || !ctl->maxlen || !*lenp || (f->f_pos)) {
5120 +               *lenp = 0;
5121 +               return 0;
5122 +       }
5123 +       
5124 +       i = (unsigned long *) ctl->data;
5125 +       left = *lenp;
5126 +       
+       /* accept a decimal or hex (0x...) value on write; proc_dump_device
+        * relies on this updating ctl->data before it reconfigures
+        */
+       if (write) {
+               if (left > TMPBUFLEN - 1)
+                       return -EINVAL;
+               if (copy_from_user(buf, buffer, left))
+                       return -EFAULT;
+               buf[left] = '\0';
+               *i = simple_strtoul(buf, NULL, 0);
+               f->f_pos += *lenp;
+               return 0;
+       }
+
5127 +       sprintf(buf, "0x%lx\n", (*i));
5128 +       len = strlen(buf);
5129 +       if (len > left)
5130 +               len = left;
5131 +       if(copy_to_user(buffer, buf, len))
5132 +               return -EFAULT;
5133 +       
5134 +       left -= len;
5135 +       *lenp -= left;
5136 +       f->f_pos += *lenp;
5137 +       return 0;
5138 +}
5139 +
5140 +/*
5141 + * -----------------------------------------------------------------------
5142 + *                     I N I T   F U N C T I O N S
5143 + * -----------------------------------------------------------------------
5144 + */
5145 +
5146 +/*
5147 + * These register and unregister routines are exported for modules
5148 + * to register their dump drivers (like block, net etc)
5149 + */
5150 +int
5151 +dump_register_device(struct dump_dev *ddev)
5152 +{
5153 +       struct list_head *tmp;
5154 +       struct dump_dev *dev;
5155 +
5156 +       list_for_each(tmp, &dump_target_list) {
5157 +               dev = list_entry(tmp, struct dump_dev, list);
5158 +               if (strcmp(ddev->type_name, dev->type_name) == 0) {
5159 +                       printk("Target type %s already registered\n",
5160 +                                       dev->type_name);
5161 +                       return -1; /* return proper error */
5162 +               }
5163 +       }
5164 +       list_add(&(ddev->list), &dump_target_list);
5165 +       
5166 +       return 0;
5167 +}
5168 +
5169 +void
5170 +dump_unregister_device(struct dump_dev *ddev)
5171 +{
5172 +       list_del(&(ddev->list));
5173 +       if (ddev != dump_dev)
5174 +               return;
5175 +
5176 +       dump_okay = 0;
5177 +
5178 +       if (dump_config.dumper)
5179 +               dump_unconfigure();
5180 +
5181 +       dump_config.flags &= ~DUMP_FLAGS_TARGETMASK;
5183 +       dump_dev = NULL;
5184 +       dump_config.dumper = NULL;
5185 +}
5186 +
5187 +static int panic_event(struct notifier_block *this, unsigned long event,
5188 +                      void *ptr)
5189 +{
5190 +       struct pt_regs regs;
5191 +
5192 +       get_current_regs(&regs);
5193 +       dump_execute((const char *)ptr, &regs);
5194 +       return 0;
5195 +}
5196 +
5197 +extern struct notifier_block *panic_notifier_list;
5199 +static struct notifier_block panic_block = {
5200 +       .notifier_call = panic_event,
5201 +};
5202 +
5203 +#ifdef CONFIG_MAGIC_SYSRQ
5204 +/* Sysrq handler */
5205 +static void sysrq_handle_crashdump(int key, struct pt_regs *pt_regs,
5206 +               struct tty_struct *tty) {
5207 +       dump_execute("sysrq", pt_regs);
5208 +}
5209 +
5210 +static struct sysrq_key_op sysrq_crashdump_op = {
5211 +       .handler        =       sysrq_handle_crashdump,
5212 +       .help_msg       =       "Dump",
5213 +       .action_msg     =       "Starting crash dump",
5214 +};
5215 +#endif
5216 +
5217 +static inline void
5218 +dump_sysrq_register(void) 
5219 +{
5220 +#ifdef CONFIG_MAGIC_SYSRQ
5221 +       __sysrq_lock_table();
5222 +       __sysrq_put_key_op(DUMP_SYSRQ_KEY, &sysrq_crashdump_op);
5223 +       __sysrq_unlock_table();
5224 +#endif
5225 +}
5226 +
5227 +static inline void
5228 +dump_sysrq_unregister(void)
5229 +{
5230 +#ifdef CONFIG_MAGIC_SYSRQ
5231 +       __sysrq_lock_table();
5232 +       if (__sysrq_get_key_op(DUMP_SYSRQ_KEY) == &sysrq_crashdump_op)
5233 +               __sysrq_put_key_op(DUMP_SYSRQ_KEY, NULL);
5234 +       __sysrq_unlock_table();
5235 +#endif
5236 +}
5237 +
5238 +/*
5239 + * Name: dump_init()
5240 + * Func: Initialize the dump process.  This will set up any architecture
5241 + *       dependent code.  The key point is that we need the memory offsets before
5242 + *       the page table is initialized, because the base memory offset
5243 + *       is changed after paging_init() is called.
5244 + */
5245 +static int __init
5246 +dump_init(void)
5247 +{
5248 +       struct sysinfo info;
5249 +
5250 +       /* try to create our dump device */
5251 +       if (register_chrdev(CRASH_DUMP_MAJOR, "dump", &dump_fops)) {
5252 +               printk("cannot register dump character device!\n");
5253 +               return -EBUSY;
5254 +       }
5255 +
5256 +       __dump_init((u64)PAGE_OFFSET);
5257 +
5258 +       /* set the dump_compression_list structure up */
5259 +       dump_register_compression(&dump_none_compression);
5260 +
5261 +       /* grab the total memory size now (not if/when we crash) */
5262 +       si_meminfo(&info);
5263 +
5264 +       /* set the memory size */
5265 +       dump_header.dh_memory_size = (u64)info.totalram;
5266 +
5267 +       sysctl_header = register_sysctl_table(kernel_root, 0);
5268 +       dump_sysrq_register();
5269 +
5270 +       notifier_chain_register(&panic_notifier_list, &panic_block);
5271 +       dump_function_ptr = dump_execute;
5272 +
5273 +       pr_info("Crash dump driver initialized.\n");
5274 +       return 0;
5275 +}
5276 +
5277 +static void __exit
5278 +dump_cleanup(void)
5279 +{
5280 +       dump_okay = 0;
5281 +
5282 +       if (dump_config.dumper)
5283 +               dump_unconfigure();
5284 +
5285 +       /* arch-specific cleanup routine */
5286 +       __dump_cleanup();
5287 +
5288 +       /* ignore errors while unregistering -- we can't do anything about them */
5289 +       unregister_sysctl_table(sysctl_header);
5290 +       unregister_chrdev(CRASH_DUMP_MAJOR, "dump");
5291 +       dump_sysrq_unregister();
5292 +       notifier_chain_unregister(&panic_notifier_list, &panic_block);
5293 +       dump_function_ptr = NULL;
5294 +}
5295 +
5296 +EXPORT_SYMBOL(dump_register_compression);
5297 +EXPORT_SYMBOL(dump_unregister_compression);
5298 +EXPORT_SYMBOL(dump_register_device);
5299 +EXPORT_SYMBOL(dump_unregister_device);
5300 +EXPORT_SYMBOL(dump_config);
5301 +EXPORT_SYMBOL(dump_silence_level);
5302 +
5303 +EXPORT_SYMBOL(__dump_irq_enable);
5304 +EXPORT_SYMBOL(__dump_irq_restore);
5305 +
5306 +MODULE_AUTHOR("Matt D. Robinson <yakker@sourceforge.net>");
5307 +MODULE_DESCRIPTION("Linux Kernel Crash Dump (LKCD) driver");
5308 +MODULE_LICENSE("GPL");
5309 +
5310 +module_init(dump_init);
5311 +module_exit(dump_cleanup);
5312 --- linux-2.5.69/include/linux/dumpdev.h.lkcdbase       Mon Jun  2 17:28:52 2003
5313 +++ linux-2.5.69/include/linux/dumpdev.h        Mon Jun  2 17:31:01 2003
5314 @@ -0,0 +1,161 @@
5315 +/*
5316 + * Generic dump device interfaces for flexible system dump 
5317 + * (enables variation of dump target types, e.g. disk, network, memory)
5318 + *
5319 + * These interfaces have evolved based on discussions on lkcd-devel. 
5320 + * Eventually the intent is to support primary and secondary or 
5321 + * alternate targets registered at the same time, with scope for 
5322 + * situation based failover or multiple dump devices used for parallel 
5323 + * dump i/o.
5324 + *
5325 + * Started: Oct 2002 - Suparna Bhattacharya (suparna@in.ibm.com)
5326 + *
5327 + * Copyright (C) 2001 - 2002 Matt D. Robinson.  All rights reserved.
5328 + * Copyright (C) 2002 International Business Machines Corp. 
5329 + *
5330 + * This code is released under version 2 of the GNU GPL.
5331 + */
5332 +
5333 +#ifndef _LINUX_DUMPDEV_H
5334 +#define _LINUX_DUMPDEV_H
5335 +
5336 +#include <linux/kernel.h>
5337 +#include <linux/wait.h>
5338 +#include <linux/bio.h>
5339 +
5340 +/* Determined by the dump target (device) type */
5341 +
5342 +struct dump_dev;
5343 +
5344 +struct dump_dev_ops {
5345 +       int (*open)(struct dump_dev *, unsigned long); /* configure */
5346 +       int (*release)(struct dump_dev *); /* unconfigure */
5347 +       int (*silence)(struct dump_dev *); /* when dump starts */
5348 +       int (*resume)(struct dump_dev *); /* when dump is over */
5349 +       int (*seek)(struct dump_dev *, loff_t);
5350 +       /* trigger a write (async in nature typically) */
5351 +       int (*write)(struct dump_dev *, void *, unsigned long);
5352 +       /* not usually used during dump, but option available */
5353 +       int (*read)(struct dump_dev *, void *, unsigned long);
5354 +       /* use to poll for completion */
5355 +       int (*ready)(struct dump_dev *, void *); 
5356 +       int (*ioctl)(struct dump_dev *, unsigned int, unsigned long);
5357 +};
5358 +
5359 +struct dump_dev {
5360 +       char type_name[32]; /* block, net-poll etc */
5361 +       unsigned long device_id; /* interpreted differently for various types */
5362 +       struct dump_dev_ops *ops;
5363 +       struct list_head list;
5364 +       loff_t curr_offset;
5365 +};
5366 +
5367 +/*
5368 + * dump_dev type variations: 
5369 + */
5370 +
5371 +/* block */
5372 +struct dump_blockdev {
5373 +       struct dump_dev ddev;
5374 +       kdev_t kdev_id;
5375 +       struct block_device *bdev;
5376 +       struct bio *bio;
5377 +       loff_t start_offset;
5378 +       loff_t limit;
5379 +       int err;
5380 +};
5381 +
5382 +static inline struct dump_blockdev *DUMP_BDEV(struct dump_dev *dev)
5383 +{
5384 +       return container_of(dev, struct dump_blockdev, ddev);
5385 +}
5386 +
5387 +
5388 +/* mem  - for internal use by soft-boot based dumper */
5389 +struct dump_memdev {
5390 +       struct dump_dev ddev;
5391 +       unsigned long indirect_map_root;
5392 +       unsigned long nr_free;
5393 +       struct page *curr_page;
5394 +       unsigned long *curr_map;
5395 +       unsigned long curr_map_offset;
5396 +       unsigned long last_offset;
5397 +       unsigned long last_used_offset;
5398 +       unsigned long last_bs_offset;
5399 +};     
5400 +
5401 +static inline struct dump_memdev *DUMP_MDEV(struct dump_dev *dev)
5402 +{
5403 +       return container_of(dev, struct dump_memdev, ddev);
5404 +}
5405 +
5406 +/* Todo/future - meant for raw dedicated interfaces e.g. mini-ide driver */
5407 +struct dump_rdev {
5408 +       struct dump_dev ddev;
5409 +       char name[32];
5410 +       int (*reset)(struct dump_rdev *, unsigned int, 
5411 +               unsigned long);
5412 +       /* ... to do ... */
5413 +};
5414 +
5415 +/* just to get the size right when saving config across a soft-reboot */
5416 +struct dump_anydev {
5417 +       union {
5418 +               struct dump_blockdev bddev;
5419 +               /* .. add other types here .. */
5420 +       };
5421 +};
5422 +
5423 +
5424 +
5425 +/* Dump device / target operation wrappers */
5426 +/* These assume that dump_dev is initialized to dump_config.dumper->dev */
5427 +
5428 +extern struct dump_dev *dump_dev;
5429 +
5430 +static inline int dump_dev_open(unsigned long arg)
5431 +{
5432 +       return dump_dev->ops->open(dump_dev, arg);
5433 +}
5434 +
5435 +static inline int dump_dev_release(void)
5436 +{
5437 +       return dump_dev->ops->release(dump_dev);
5438 +}
5439 +
5440 +static inline int dump_dev_silence(void)
5441 +{
5442 +       return dump_dev->ops->silence(dump_dev);
5443 +}
5444 +
5445 +static inline int dump_dev_resume(void)
5446 +{
5447 +       return dump_dev->ops->resume(dump_dev);
5448 +}
5449 +
5450 +static inline int dump_dev_seek(loff_t offset)
5451 +{
5452 +       return dump_dev->ops->seek(dump_dev, offset);
5453 +}
5454 +
5455 +static inline int dump_dev_write(void *buf, unsigned long len)
5456 +{
5457 +       return dump_dev->ops->write(dump_dev, buf, len);
5458 +}
5459 +
5460 +static inline int dump_dev_ready(void *buf)
5461 +{
5462 +       return dump_dev->ops->ready(dump_dev, buf);
5463 +}
5464 +
5465 +static inline int dump_dev_ioctl(unsigned int cmd, unsigned long arg)
5466 +{
5467 +       if (!dump_dev->ops->ioctl)
5468 +               return -EINVAL;
5469 +       return dump_dev->ops->ioctl(dump_dev, cmd, arg);
5470 +}
5471 +
5472 +extern int dump_register_device(struct dump_dev *);
5473 +extern void dump_unregister_device(struct dump_dev *);
5474 +
5475 +#endif /*  _LINUX_DUMPDEV_H */
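
Putting the pieces of this header together: a new target type embeds struct dump_dev in its own structure (as dump_blockdev does), fills in a dump_dev_ops, and registers itself. A skeletal sketch with hypothetical function names (the stub bodies are elided; each op follows the signatures declared above):

    static struct dump_dev_ops example_ops = {
            .open           = example_open,         /* configure */
            .release        = example_release,      /* unconfigure */
            .silence        = example_silence,      /* quiesce when dump starts */
            .resume         = example_resume,
            .seek           = example_seek,
            .write          = example_write,
            .ready          = example_ready,        /* poll for completion */
    };

    static struct dump_dev example_dev = {
            .type_name      = "exampledev",         /* matched by dump_target_init() */
            .ops            = &example_ops,
    };

    static int __init example_target_init(void)
    {
            return dump_register_device(&example_dev);
    }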
5476 --- linux-2.5.69/include/linux/dump.h.lkcdbase  Mon Jun  2 17:28:56 2003
5477 +++ linux-2.5.69/include/linux/dump.h   Mon Jun  2 17:31:01 2003
5478 @@ -0,0 +1,376 @@
5479 +/*
5480 + * Kernel header file for Linux crash dumps.
5481 + *
5482 + * Created by: Matt Robinson (yakker@sgi.com)
5483 + * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
5484 + *
5485 + * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
5486 + * Copyright 2001 - 2002 Matt D. Robinson.  All rights reserved.
5487 + * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
5488 + *
5489 + * Most of this is the same old stuff from vmdump.h, except now we're
5490 + * actually a stand-alone driver plugged into the block layer interface,
5491 + * with the exception that we now allow for compression modes externally
5492 + * loaded (e.g., someone can come up with their own).
5493 + *
5494 + * This code is released under version 2 of the GNU GPL.
5495 + */
5496 +
5497 +/* This header file includes all structure definitions for crash dumps. */
5498 +#ifndef _DUMP_H
5499 +#define _DUMP_H
5500 +
5501 +#if defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE)
5502 +
5503 +#include <linux/list.h>
5504 +#include <linux/notifier.h>
5505 +#include <linux/dumpdev.h>
5506 +
5507 +/* 
5508 + * Predefine default DUMP_PAGE constants, asm header may override.
5509 + *
5510 + * On ia64 discontinuous memory systems it's possible for the memory
5511 + * banks to stop at 2**12 page alignments, the smallest possible page
5512 + * size. But the system page size, PAGE_SIZE, is in fact larger.
5513 + */
5514 +#define DUMP_PAGE_SHIFT        PAGE_SHIFT
5515 +#define DUMP_PAGE_MASK         PAGE_MASK
5516 +#define DUMP_PAGE_ALIGN(addr)  PAGE_ALIGN(addr)
5517 +#define DUMP_HEADER_OFFSET     PAGE_SIZE
5518 +
5519 +/* Keep DUMP_PAGE_SIZE constant at 4K (1 << 12);
5520 + * it may then differ from PAGE_SIZE.
5521 + */
5522 +#define DUMP_PAGE_SIZE         4096
5523 +
5524 +/* 
5525 + * Predefined default memcpy() to use when copying memory to the dump buffer.
5526 + *
5527 + * On ia64 there is a heads-up function that can be called to let the PROM
5528 + * machine check monitor know that the current activity is risky and it should
5529 + * ignore the fault (nofault). In this case the ia64 header will redefine this
5530 + * macro to __dump_memcpy() and use its arch-specific version.
5531 + */
5532 +#define DUMP_memcpy            memcpy
5533 +
5534 +/* necessary header files */
5535 +#include <asm/dump.h>                  /* for architecture-specific header */
5536 +
5537 +/* 
5538 + * Size of the buffer that's used to hold:
5539 + *
5540 + *     1. the dump header (padded to fill the complete buffer)
5541 + *     2. the possibly compressed page headers and data
5542 + */
5543 +#define DUMP_BUFFER_SIZE       (64 * 1024)  /* size of dump buffer         */
5544 +#define DUMP_HEADER_SIZE       DUMP_BUFFER_SIZE
5545 +
5546 +/* standard header definitions */
5547 +#define DUMP_MAGIC_NUMBER      0xa8190173618f23edULL  /* dump magic number */
5548 +#define DUMP_MAGIC_LIVE                0xa8190173618f23cdULL  /* live magic number */
5549 +#define DUMP_VERSION_NUMBER    0x8     /* dump version number              */
5550 +#define DUMP_PANIC_LEN         0x100   /* dump panic string length         */
5551 +
5552 +/* dump levels - type specific stuff added later -- add as necessary */
5553 +#define DUMP_LEVEL_NONE                0x0     /* no dumping at all -- just bail   */
5554 +#define DUMP_LEVEL_HEADER      0x1     /* kernel dump header only          */
5555 +#define DUMP_LEVEL_KERN                0x2     /* dump header and kernel pages     */
5556 +#define DUMP_LEVEL_USED                0x4     /* dump header, kernel/user pages   */
5557 +#define DUMP_LEVEL_ALL_RAM     0x8     /* dump header, all RAM pages       */
5558 +#define DUMP_LEVEL_ALL         0x10    /* dump all memory RAM and firmware */
5559 +
5560 +
5561 +/* dump compression options -- add as necessary */
5562 +#define DUMP_COMPRESS_NONE     0x0     /* don't compress this dump         */
5563 +#define DUMP_COMPRESS_RLE      0x1     /* use RLE compression              */
5564 +#define DUMP_COMPRESS_GZIP     0x2     /* use GZIP compression             */
5565 +
5566 +/* dump flags - any dump-type specific flags -- add as necessary */
5567 +#define DUMP_FLAGS_NONE                0x0     /* no flags are set for this dump   */
5568 +#define DUMP_FLAGS_SOFTBOOT    0x2     /* 2 stage soft-boot based dump     */
5569 +
5570 +#define DUMP_FLAGS_TARGETMASK  0xf0000000 /* handle special case targets   */
5571 +#define DUMP_FLAGS_DISKDUMP    0x80000000 /* dump to local disk            */
5572 +#define DUMP_FLAGS_NETDUMP     0x40000000 /* dump over the network         */
5573 +
5574 +/* dump header flags -- add as necessary */
5575 +#define DUMP_DH_FLAGS_NONE     0x0     /* no flags set (error condition!)  */
5576 +#define DUMP_DH_RAW            0x1     /* raw page (no compression)        */
5577 +#define DUMP_DH_COMPRESSED     0x2     /* page is compressed               */
5578 +#define DUMP_DH_END            0x4     /* end marker on a full dump        */
5579 +#define DUMP_DH_TRUNCATED      0x8     /* dump is incomplete               */
5580 +#define DUMP_DH_TEST_PATTERN   0x10    /* dump page is a test pattern      */
5581 +#define DUMP_DH_NOT_USED       0x20    /* 1st bit not used in flags        */
5582 +
5583 +/* names for various dump parameters under /proc/sys/kernel */
5584 +#define DUMP_ROOT_NAME         "sys/dump"
5585 +#define DUMP_DEVICE_NAME       "device"
5586 +#define DUMP_COMPRESS_NAME     "compress"
5587 +#define DUMP_LEVEL_NAME                "level"
5588 +#define DUMP_FLAGS_NAME                "flags"
5589 +#define DUMP_ADDR_NAME         "addr"
5590 +
5591 +#define DUMP_SYSRQ_KEY         'd'     /* key to use for MAGIC_SYSRQ key   */
5592 +
5593 +/* CTL_DUMP names: */
5594 +enum
5595 +{
5596 +       CTL_DUMP_DEVICE=1,
5597 +       CTL_DUMP_COMPRESS=2,
5598 +       CTL_DUMP_LEVEL=3,
5599 +       CTL_DUMP_FLAGS=4,
5600 +       CTL_DUMP_ADDR=5,
5601 +       CTL_DUMP_TEST=6,
5602 +};
5603 +
5604 +
5605 +/* buffer size for a gzip-compressed page -- slightly larger than DUMP_PAGE_SIZE to leave room for expansion */
5606 +#define DUMP_DPC_PAGE_SIZE     (DUMP_PAGE_SIZE + 512)
5607 +
5608 +/* dump ioctl() control options */
5609 +#define DIOSDUMPDEV            1       /* set the dump device              */
5610 +#define DIOGDUMPDEV            2       /* get the dump device              */
5611 +#define DIOSDUMPLEVEL          3       /* set the dump level               */
5612 +#define DIOGDUMPLEVEL          4       /* get the dump level               */
5613 +#define DIOSDUMPFLAGS          5       /* set the dump flag parameters     */
5614 +#define DIOGDUMPFLAGS          6       /* get the dump flag parameters     */
5615 +#define DIOSDUMPCOMPRESS       7       /* set the dump compress level      */
5616 +#define DIOGDUMPCOMPRESS       8       /* get the dump compress level      */
5617 +
5618 +/* these ioctls are used only by netdump module */
5619 +#define DIOSTARGETIP           9       /* set the target m/c's ip          */
5620 +#define DIOGTARGETIP           10      /* get the target m/c's ip          */
5621 +#define DIOSTARGETPORT         11      /* set the target m/c's port        */
5622 +#define DIOGTARGETPORT         12      /* get the target m/c's port        */
5623 +#define DIOSSOURCEPORT         13      /* set the source m/c's port        */
5624 +#define DIOGSOURCEPORT         14      /* get the source m/c's port        */
5625 +#define DIOSETHADDR            15      /* set ethernet address             */
5626 +#define DIOGETHADDR            16      /* get ethernet address             */
5627 +
5628 +/*
5629 + * Structure: __dump_header
5630 + *  Function: This is the header dumped at the top of every valid crash
5631 + *            dump.  
5632 + */
5633 +struct __dump_header {
5634 +       /* the dump magic number -- unique to verify dump is valid */
5635 +       u64     dh_magic_number;
5636 +
5637 +       /* the version number of this dump */
5638 +       u32     dh_version;
5639 +
5640 +       /* the size of this header (in case we can't read it) */
5641 +       u32     dh_header_size;
5642 +
5643 +       /* the level of this dump (just a header?) */
5644 +       u32     dh_dump_level;
5645 +
5646 +       /* 
5647 +        * We assume dump_page_size to be 4K in every case.
5648 +        * Store here the configurable system page size (4K, 8K, 16K, etc.) 
5649 +        */
5650 +       u32     dh_page_size;
5651 +
5652 +       /* the size of all physical memory */
5653 +       u64     dh_memory_size;
5654 +
5655 +       /* the start of physical memory */
5656 +       u64     dh_memory_start;
5657 +
5658 +       /* the end of physical memory */
5659 +       u64     dh_memory_end;
5660 +
5661 +       /* the number of hardware/physical pages in this dump specifically */
5662 +       u32     dh_num_dump_pages;
5663 +
5664 +       /* the panic string, if available */
5665 +       char    dh_panic_string[DUMP_PANIC_LEN];
5666 +
5667 +       /* timeval depends on architecture, two long values */
5668 +       struct {
5669 +               u64 tv_sec;
5670 +               u64 tv_usec;
5671 +       } dh_time; /* the time of the system crash */
5672 +
5673 +       /* the NEW utsname (uname) information -- in character form */
5674 +       /* we do this so we don't have to include utsname.h         */
5675 +       /* plus it helps us be more architecture independent        */
5676 +       /* now maybe one day soon they'll make the [65] a #define!  */
5677 +       char    dh_utsname_sysname[65];
5678 +       char    dh_utsname_nodename[65];
5679 +       char    dh_utsname_release[65];
5680 +       char    dh_utsname_version[65];
5681 +       char    dh_utsname_machine[65];
5682 +       char    dh_utsname_domainname[65];
5683 +
5684 +       /* the address of current task (OLD = void *, NEW = u64) */
5685 +       u64     dh_current_task;
5686 +
5687 +       /* what type of compression we're using in this dump (if any) */
5688 +       u32     dh_dump_compress;
5689 +
5690 +       /* any additional flags */
5691 +       u32     dh_dump_flags;
5692 +
5693 +       /* the dump device */
5694 +       u32     dh_dump_device;
5695 +} __attribute__((packed));
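
Because the header is packed and carries its own magic, a post-mortem tool can cheaply check whether what it is looking at is a dump. A hedged userspace sketch -- it assumes the definitions above have been mirrored into a userspace header and that the caller has already seeked to where the header was written:

    #include <stdio.h>

    /* returns 1 if the block at the current file position looks like a dump */
    int looks_like_dump(FILE *f)
    {
            struct __dump_header dh;

            if (fread(&dh, sizeof(dh), 1, f) != 1)
                    return 0;
            if (dh.dh_magic_number != DUMP_MAGIC_NUMBER &&
                dh.dh_magic_number != DUMP_MAGIC_LIVE)
                    return 0;
            printf("v%u dump, level 0x%x: %.*s\n", dh.dh_version,
                   dh.dh_dump_level, DUMP_PANIC_LEN, dh.dh_panic_string);
            return 1;
    }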
5696 +
5697 +/*
5698 + * Structure: __dump_page
5699 + *  Function: To act as the header associated to each physical page of
5700 + *            memory saved in the system crash dump.  This allows for
5701 + *            easy reassembly of each crash dump page.  The address bits
5702 + *            are split to make things easier for 64-bit/32-bit system
5703 + *            conversions.
5704 + *
5705 + * The dp_address and dp_flags values are landmarks that are helpful when
5706 + * looking at a hex dump of /dev/vmdump.
5707 + */
5708 +struct __dump_page {
5709 +       /* the address of this dump page */
5710 +       u64     dp_address;
5711 +
5712 +       /* the size of this dump page */
5713 +       u32     dp_size;
5714 +
5715 +       /* flags (currently DUMP_DH_COMPRESSED, DUMP_DH_RAW or DUMP_DH_END) */
5716 +       u32     dp_flags;
5717 +} __attribute__((packed));
5718 +
5719 +/*
5720 + * Structure: __lkcdinfo
5721 + * Function:  This structure contains information needed for the lkcdutils
5722 + *            package (particularly lcrash) to determine what information is
5723 + *            associated to this kernel, specifically.
5724 + */
5725 +struct __lkcdinfo {
5726 +       int     arch;
5727 +       int     ptrsz;
5728 +       int     byte_order;
5729 +       int     linux_release;
5730 +       int     page_shift;
5731 +       int     page_size;
5732 +       u64     page_mask;
5733 +       u64     page_offset;
5734 +       int     stack_offset;
5735 +};
5736 +
5737 +#ifdef __KERNEL__
5738 +
5739 +/*
5740 + * Structure: __dump_compress
5741 + *  Function: This is what an individual compression mechanism can use
5742 + *            to plug in their own compression techniques.  It's always
5743 + *            best to build these as individual modules so that people
5744 + *            can put in whatever they want.
5745 + */
5746 +struct __dump_compress {
5747 +       /* the list_head structure for list storage */
5748 +       struct list_head list;
5749 +
5750 +       /* the type of compression to use (DUMP_COMPRESS_XXX) */
5751 +       int compress_type;
5752 +       const char *compress_name;
5753 +
5754 +       /* the compression function to call */
5755 +       u16 (*compress_func)(const u8 *, u16, u8 *, u16);
5756 +};
5757 +
5758 +/* functions for dump compression registration */
5759 +extern void dump_register_compression(struct __dump_compress *);
5760 +extern void dump_unregister_compression(int);
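
A compression module therefore only has to provide a compress_func and register the structure at init time. A sketch with a hypothetical type id -- a real module would claim a new DUMP_COMPRESS_* bit and actually compress; returning oldsize, as dump_compress_none does, leaves the page stored raw:

    #define DUMP_COMPRESS_EXAMPLE  0x4     /* hypothetical new type bit */

    static u16 example_compress(const u8 *old, u16 oldsize, u8 *new, u16 newsize)
    {
            /* compress oldsize bytes from old into new (at most newsize
             * bytes) and return the compressed length
             */
            return oldsize;
    }

    static struct __dump_compress example_compression = {
            .compress_type  = DUMP_COMPRESS_EXAMPLE,
            .compress_func  = example_compress,
            .compress_name  = "example",
    };

    static int __init example_compress_init(void)
    {
            dump_register_compression(&example_compression);
            return 0;
    }

    static void __exit example_compress_exit(void)
    {
            dump_unregister_compression(DUMP_COMPRESS_EXAMPLE);
    }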
5761 +
5762 +/*
5763 + * Structure dump_mbank[]:
5764 + *
5765 + * For CONFIG_DISCONTIGMEM systems this array specifies the
5766 + * memory banks/chunks that need to be dumped after a panic.
5767 + *
5768 + * For classic systems it specifies a single set of pages from
5769 + * 0 to max_mapnr.
5770 + */
5771 +struct __dump_mbank {
5772 +       u64     start;
5773 +       u64     end;
5774 +       int     type;
5775 +       int     pad1;
5776 +       long    pad2;
5777 +};
5778 +
5779 +#define DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY            1
5780 +#define DUMP_MBANK_TYPE_OTHER                          2
5781 +
5782 +#define MAXCHUNKS 256
5783 +extern int dump_mbanks;
5784 +extern struct __dump_mbank dump_mbank[MAXCHUNKS];
5785 +
5786 +/* notification event codes */
5787 +#define DUMP_BEGIN             0x0001  /* dump beginning */
5788 +#define DUMP_END               0x0002  /* dump ending */
5789 +
5790 +/* Scheduler soft spin control.
5791 + *
5792 + * 0 - no dump in progress
5793 + * 1 - cpu0 is dumping, ...
5794 + */
5795 +extern unsigned long dump_oncpu;
5796 +extern void dump_execute(const char *, const struct pt_regs *);
5797 +
5798 +/*
5799 + *     Notifier list for kernel code which wants to be called
5800 + *     at kernel dump. 
5801 + */
5802 +extern struct notifier_block *dump_notifier_list;
5803 +static inline int register_dump_notifier(struct notifier_block *nb)
5804 +{
5805 +       return notifier_chain_register(&dump_notifier_list, nb);
5806 +}
5807 +static inline int unregister_dump_notifier(struct notifier_block * nb)
5808 +{
5809 +       return notifier_chain_unregister(&dump_notifier_list, nb);
5810 +}
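
A sketch of a client of this hook (hypothetical names; judging from the DUMP_BEGIN/DUMP_END codes above, the event argument distinguishes the start and end of a dump):

    static int example_dump_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
    {
            if (event == DUMP_BEGIN)
                    printk("dump starting\n");
            else if (event == DUMP_END)
                    printk("dump finished\n");
            return 0;
    }

    static struct notifier_block example_dump_nb = {
            .notifier_call  = example_dump_event,
    };

    /* in module init:  register_dump_notifier(&example_dump_nb); */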
5811 +
5812 +extern void (*dump_function_ptr)(const char *, const struct pt_regs *);
5813 +static inline void dump(char * str, struct pt_regs * regs)
5814 +{
5815 +       if (dump_function_ptr)
5816 +               dump_function_ptr(str, regs);
5817 +}
5818 +
5819 +/*
5820 + * Common Arch Specific Functions should be declared here.
5821 + * This allows the C compiler to detect discrepancies.
5822 + */
5823 +extern void    __dump_open(void);
5824 +extern void    __dump_cleanup(void);
5825 +extern void    __dump_init(u64);
5826 +extern void    __dump_save_regs(struct pt_regs *, const struct pt_regs *);
5827 +extern int     __dump_configure_header(const struct pt_regs *);
5828 +extern void    __dump_irq_enable(void);
5829 +extern void    __dump_irq_restore(void);
5830 +extern int     __dump_page_valid(unsigned long index);
5831 +#ifdef CONFIG_SMP
5832 +extern void    __dump_save_other_cpus(void);
5833 +#else
5834 +#define        __dump_save_other_cpus()
5835 +#endif
5836 +
5837 +/* to track all used (compound + zero order) pages */
5838 +#define PageInuse(p)   (PageCompound(p) || page_count(p))
5839 +
5840 +#endif /* __KERNEL__ */
5841 +
5842 +#else  /* !CONFIG_CRASH_DUMP */
5843 +
5844 +/* If not configured then make code disappear! */
5845 +#define register_dump_watchdog(x)      do { } while(0)
5846 +#define unregister_dump_watchdog(x)    do { } while(0)
5847 +#define register_dump_notifier(x)      do { } while(0)
5848 +#define unregister_dump_notifier(x)    do { } while(0)
5849 +#define dump_in_progress()             0
5850 +#define dump(x, y)                     do { } while(0)
5851 +
5852 +#endif /* !CONFIG_CRASH_DUMP */
5853 +
5854 +#endif /* _DUMP_H */
5855 --- linux-2.5.69/include/linux/dump_netdev.h.lkcdbase   Mon Jun  2 17:29:01 2003
5856 +++ linux-2.5.69/include/linux/dump_netdev.h    Mon Jun  2 17:31:01 2003
5857 @@ -0,0 +1,80 @@
5858 +/*
5859 + *  linux/drivers/net/netconsole.h
5860 + *
5861 + *  Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
5862 + *
5863 + *  This file contains the implementation of an IRQ-safe, crash-safe
5864 + *  kernel console implementation that outputs kernel messages to the
5865 + *  network.
5866 + *
5867 + * Modification history:
5868 + *
5869 + * 2001-09-17    started by Ingo Molnar.
5870 + */
5871 +
5872 +/****************************************************************
5873 + *      This program is free software; you can redistribute it and/or modify
5874 + *      it under the terms of the GNU General Public License as published by
5875 + *      the Free Software Foundation; either version 2, or (at your option)
5876 + *      any later version.
5877 + *
5878 + *      This program is distributed in the hope that it will be useful,
5879 + *      but WITHOUT ANY WARRANTY; without even the implied warranty of
5880 + *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
5881 + *      GNU General Public License for more details.
5882 + *
5883 + *      You should have received a copy of the GNU General Public License
5884 + *      along with this program; if not, write to the Free Software
5885 + *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
5886 + *
5887 + ****************************************************************/
5888 +
5889 +#define NETCONSOLE_VERSION 0x03
5890 +
5891 +enum netdump_commands {
5892 +       COMM_NONE = 0,
5893 +       COMM_SEND_MEM = 1,
5894 +       COMM_EXIT = 2,
5895 +       COMM_REBOOT = 3,
5896 +       COMM_HELLO = 4,
5897 +       COMM_GET_NR_PAGES = 5,
5898 +       COMM_GET_PAGE_SIZE = 6,
5899 +       COMM_START_NETDUMP_ACK = 7,
5900 +       COMM_GET_REGS = 8,
5901 +       COMM_GET_MAGIC = 9,
5902 +       COMM_START_WRITE_NETDUMP_ACK = 10,
5903 +};
5904 +
5905 +typedef struct netdump_req_s {
5906 +       u64 magic;
5907 +       u32 nr;
5908 +       u32 command;
5909 +       u32 from;
5910 +       u32 to;
5911 +} req_t;
5912 +
5913 +enum netdump_replies {
5914 +       REPLY_NONE = 0,
5915 +       REPLY_ERROR = 1,
5916 +       REPLY_LOG = 2,
5917 +       REPLY_MEM = 3,
5918 +       REPLY_RESERVED = 4,
5919 +       REPLY_HELLO = 5,
5920 +       REPLY_NR_PAGES = 6,
5921 +       REPLY_PAGE_SIZE = 7,
5922 +       REPLY_START_NETDUMP = 8,
5923 +       REPLY_END_NETDUMP = 9,
5924 +       REPLY_REGS = 10,
5925 +       REPLY_MAGIC = 11,
5926 +       REPLY_START_WRITE_NETDUMP = 12,
5927 +};
5928 +
5929 +typedef struct netdump_reply_s {
5930 +       u32 nr;
5931 +       u32 code;
5932 +       u32 info;
5933 +} reply_t;
5934 +
5935 +#define HEADER_LEN (1 + sizeof(reply_t))
5936 +
5937 +
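/*
 * Illustrative sketch, not part of the patch: how a netdump client
 * might fill in a request using the protocol definitions above.
 * Field usage is inferred from the structure layout; the helper
 * function itself is hypothetical.
 */
static void example_fill_request(req_t *req, u64 session_magic, u32 seq)
{
	req->magic   = session_magic;		/* validated by the peer */
	req->nr      = seq;			/* request sequence number */
	req->command = COMM_GET_NR_PAGES;	/* ask for total page count */
	req->from    = 0;			/* unused for this command */
	req->to      = 0;
}
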
5938 --- linux-2.5.69/include/asm-i386/dump.h.lkcdbase       Mon Jun  2 17:28:47 2003
5939 +++ linux-2.5.69/include/asm-i386/dump.h        Mon Jun  2 17:31:10 2003
5940 @@ -0,0 +1,93 @@
5941 +/*
5942 + * Kernel header file for Linux crash dumps.
5943 + *
5944 + * Created by: Matt Robinson (yakker@sgi.com)
5945 + *
5946 + * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
5947 + *
5948 + * This code is released under version 2 of the GNU GPL.
5949 + */
5950 +
5951 +/* This header file holds the architecture specific crash dump header */
5952 +#ifndef _ASM_DUMP_H
5953 +#define _ASM_DUMP_H
5954 +
5955 +/* necessary header files */
5956 +#include <asm/ptrace.h>
5957 +#include <asm/page.h>
5958 +#include <linux/threads.h>
5959 +#include <linux/mm.h>
5960 +
5961 +/* definitions */
5962 +#define DUMP_ASM_MAGIC_NUMBER  0xdeaddeadULL   /* magic number            */
5963 +#define DUMP_ASM_VERSION_NUMBER        0x3     /* version number          */
5964 +
5965 +/* max number of cpus */
5966 +#define DUMP_MAX_NUM_CPUS 32
5967 +
5968 +/*
5969 + * Structure: __dump_header_asm
5970 + *  Function: This is the header for architecture-specific stuff.  It
5971 + *            follows right after the dump header.
5972 + */
5973 +struct __dump_header_asm {
5974 +       /* the dump magic number -- unique to verify dump is valid */
5975 +       u64             dha_magic_number;
5976 +
5977 +       /* the version number of this dump */
5978 +       u32             dha_version;
5979 +
5980 +       /* the size of this header (in case we can't read it) */
5981 +       u32             dha_header_size;
5982 +
5983 +       /* the esp for i386 systems */
5984 +       u32             dha_esp;
5985 +
5986 +       /* the eip for i386 systems */
5987 +       u32             dha_eip;
5988 +
5989 +       /* the dump registers */
5990 +       struct pt_regs  dha_regs;
5991 +
5992 +       /* smp specific */
5993 +       u32             dha_smp_num_cpus;
5994 +       u32             dha_dumping_cpu;
5995 +       struct pt_regs  dha_smp_regs[DUMP_MAX_NUM_CPUS];
5996 +       u32             dha_smp_current_task[DUMP_MAX_NUM_CPUS];
5997 +       u32             dha_stack[DUMP_MAX_NUM_CPUS];
5998 +       u32             dha_stack_ptr[DUMP_MAX_NUM_CPUS];
5999 +} __attribute__((packed));
6000 +
6001 +#ifdef __KERNEL__
6002 +
6003 +extern struct __dump_header_asm dump_header_asm;
6004 +
6005 +#ifdef CONFIG_SMP
6006 +extern unsigned long irq_affinity[];
6007 +extern int (*dump_ipi_function_ptr)(struct pt_regs *);
6008 +extern void dump_send_ipi(void);
6009 +#else
6010 +#define dump_send_ipi() do { } while(0)
6011 +#endif
6012 +
6013 +static inline void get_current_regs(struct pt_regs *regs)
6014 +{
6015 +       __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
6016 +       __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
6017 +       __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
6018 +       __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
6019 +       __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
6020 +       __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
6021 +       __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
6022 +       __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
6023 +       __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
6024 +       __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
6025 +       __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
6026 +       __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
6027 +       __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
6028 +       regs->eip = (unsigned long)current_text_addr();
6029 +}
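
/*
 * Illustrative sketch, not part of the patch: one way the header
 * defined above might be populated for the invoking CPU. The wrapper
 * name is hypothetical; the u32 casts follow the header's 32-bit
 * field layout, and <linux/sched.h> is assumed for current.
 */
static inline void example_save_cpu_state(int cpu)
{
	get_current_regs(&dump_header_asm.dha_smp_regs[cpu]);
	dump_header_asm.dha_smp_current_task[cpu] =
		(u32)(unsigned long)current;
}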
6030 +
6031 +#endif /* __KERNEL__ */
6032 +
6033 +#endif /* _ASM_DUMP_H */
6034 --- linux-2.5.69/init/kerntypes.c.lkcdbase      Mon Jun  2 17:29:10 2003
6035 +++ linux-2.5.69/init/kerntypes.c       Mon Jun  2 17:29:06 2003
6036 @@ -0,0 +1,31 @@
6037 +/*
6038 + * kerntypes.c
6039 + *
6040 + * Copyright (C) 2000 Tom Morano (tjm@sgi.com) and
6041 + *                    Matt D. Robinson (yakker@alacritech.com)
6042 + *
6043 + * Dummy module that includes headers for all kernel types of interest. 
6044 + * The kernel type information is used by the lcrash utility when 
6045 + * analyzing system crash dumps or the live system. Using the type 
6046 + * information for the running system, rather than kernel header files,
6047 + * makes for a more flexible and robust analysis tool.
6048 + *
6049 + * This source code is released under version 2 of the GNU GPL.
6050 + */
6051 +
6052 +#include <linux/compile.h>
6053 +#include <linux/module.h>
6054 +#include <linux/mm.h>
6055 +#include <linux/config.h>
6056 +#include <linux/utsname.h>
6057 +#include <linux/dump.h>
6058 +
6059 +#ifdef LINUX_COMPILE_VERSION_ID_TYPE
6060 +/* Define version type for version validation of dump and kerntypes */
6061 +LINUX_COMPILE_VERSION_ID_TYPE;
6062 +#endif
6063 +
6064 +void
6065 +kerntypes_dummy(void)
6066 +{
6067 +}
6068 --- linux-2.5.69/drivers/dump/dump_methods.h.lkcdbase   Mon Jun  2 17:56:12 2003
6069 +++ linux-2.5.69/drivers/dump/dump_methods.h    Mon Jun  2 17:55:51 2003
6070 @@ -0,0 +1,348 @@
6071 +/*
6072 + * Generic interfaces for flexible system dump 
6073 + *
6074 + * Started: Oct 2002 -  Suparna Bhattacharya (suparna@in.ibm.com)
6075 + *
6076 + * Copyright (C) 2002 International Business Machines Corp. 
6077 + *
6078 + * This code is released under version 2 of the GNU GPL.
6079 + */
6080 +
6081 +#ifndef _LINUX_DUMP_METHODS_H
6082 +#define _LINUX_DUMP_METHODS_H
6083 +
6084 +/*
6085 + * Inspired by Matt Robinson's suggestion of introducing dump 
6086 + * methods as a way to enable different crash dump facilities to 
6087 + * coexist where each employs its own scheme or dumping policy.
6088 + *
6089 + * The code here creates a framework for flexible dump by defining 
6090 + * a set of methods and providing associated helpers that differentiate
6091 + * between the underlying mechanism (how to dump), overall scheme 
6092 + * (sequencing of stages and data dumped and associated quiescing), 
6093 + * output format (what the dump output looks like), target type 
6094 + * (where to save the dump; see dumpdev.h), and selection policy 
6095 + * (state/data to dump).
6096 + * 
6097 + * These sets of interfaces can be mixed and matched to build a
6098 + * dumper suitable for a given situation, allowing for
6099 + * flexibility as well as an appropriate degree of code reuse.
6100 + * For example, all features and options of lkcd (including
6101 + * granular selective dumping in the near future) should be
6102 + * available even when, say, the 2-stage soft-boot based mechanism
6103 + * is used for taking disruptive dumps.
6104 + *
6105 + * Todo: Additionally, modules or drivers may supply their own
6106 + * custom dumpers that extend the dump with module-specific
6107 + * information or hardware state, and may even tweak the
6108 + * mechanism when it comes to saving state relevant to
6109 + * them.
6110 + */
6111 +
6112 +#include <linux/sched.h>
6113 +#include <linux/slab.h>
6114 +#include <linux/highmem.h>
6115 +#include <linux/dumpdev.h>
6116 +
6117 +#define MAX_PASSES     6
6118 +#define MAX_DEVS       4
6119 +
6120 +
6121 +/* To customise selection of pages to be dumped in a given pass/group */
6122 +struct dump_data_filter {
6123 +       char name[32];
6124 +       int (*selector)(int, unsigned long, unsigned long);
6125 +       ulong level_mask; /* dump level(s) for which this filter applies */
6126 +       loff_t start, end; /* location range applicable */
6127 +};
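
/*
 * Illustrative sketch, not part of the patch: the shape of a selector
 * callback, inferred from the function-pointer signature above. The
 * exact meaning of the arguments (assumed here to be the pass number
 * and a location/size pair) is an assumption.
 */
static int example_selector(int pass, unsigned long loc, unsigned long sz)
{
	return 1;	/* select everything in every applicable pass */
}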
6128 +
6129 +
6130 +/* 
6131 + * Determined by the kind of dump mechanism and appropriate 
6132 + * overall scheme 
6133 + */ 
6134 +struct dump_scheme_ops {
6135 +       /* sets aside memory, inits data structures etc */
6136 +       int (*configure)(unsigned long devid); 
6137 +       /* releases resources */
6138 +       int (*unconfigure)(void); 
6139 +
6140 +       /* ordering of passes, invoking iterator */
6141 +       int (*sequencer)(void); 
6142 +        /* iterates over system data, selects and acts on data to dump */
6143 +       int (*iterator)(int, int (*)(unsigned long, unsigned long), 
6144 +               struct dump_data_filter *); 
6145 +        /* action when data is selected for dump */
6146 +       int (*save_data)(unsigned long, unsigned long); 
6147 +        /* action when data is to be excluded from dump */
6148 +       int (*skip_data)(unsigned long, unsigned long); 
6149 +       /* policies for space, multiple dump devices etc */
6150 +       int (*write_buffer)(void *, unsigned long); 
6151 +};
6152 +
6153 +struct dump_scheme {
6154 +       /* the name serves as an anchor to locate the scheme after reboot */
6155 +       char name[32]; 
6156 +       struct dump_scheme_ops *ops;
6157 +       struct list_head list;
6158 +};
6159 +
6160 +/* Quiescing/Silence levels (controls IPI callback behaviour) */
6161 +extern enum dump_silence_levels {
6162 +       DUMP_SOFT_SPIN_CPUS     = 1,
6163 +       DUMP_HARD_SPIN_CPUS     = 2,
6164 +       DUMP_HALT_CPUS          = 3,
6165 +} dump_silence_level;
6166 +
6167 +/* determined by the dump (file) format */
6168 +struct dump_fmt_ops {
6169 +       /* build header */
6170 +       int (*configure_header)(const char *, const struct pt_regs *); 
6171 +       int (*update_header)(void); /* update header and write it out */
6172 +       /* save curr context  */
6173 +       void (*save_context)(int, const struct pt_regs *, 
6174 +               struct task_struct *); 
6175 +       /* typically called by the save_data action */
6176 +       /* add formatted data to the dump buffer */
6177 +       int (*add_data)(unsigned long, unsigned long); 
6178 +       int (*update_end_marker)(void);
6179 +};
6180 +
6181 +struct dump_fmt {
6182 +       unsigned long magic; 
6183 +       char name[32];  /* lcrash, crash, elf-core etc */
6184 +       struct dump_fmt_ops *ops;
6185 +       struct list_head list;
6186 +};
6187 +
6188 +/* 
6189 + * Modules will be able to add their own data capture schemes by
6190 + * registering their own dumpers. Typically they would use the 
6191 + * primary dumper as a template and tune it with their routines.
6192 + * Still Todo.
6193 + */
6194 +
6195 +/* The combined dumper profile (mechanism, scheme, dev, fmt) */
6196 +struct dumper {
6197 +       char name[32]; /* singlestage, overlay (stg1), passthru(stg2), pull */
6198 +       struct dump_scheme *scheme;
6199 +       struct dump_fmt *fmt;
6200 +       struct __dump_compress *compress;
6201 +       struct dump_data_filter *filter;
6202 +       struct dump_dev *dev; 
6203 +       /* state valid only for active dumper(s) - per instance */
6204 +       /* run time state/context */
6205 +       int curr_pass;
6206 +       unsigned long count;
6207 +       loff_t curr_offset; /* current logical offset into dump device */
6208 +       loff_t curr_loc; /* current memory location */
6209 +       void *curr_buf; /* current position in the dump buffer */
6210 +       void *dump_buf; /* starting addr of dump buffer */
6211 +       int header_dirty; /* whether the header needs to be written out */
6212 +       int header_len; 
6213 +       struct list_head dumper_list; /* links to other dumpers */
6214 +};     
6215 +
6216 +/* Starting point to get to the current configured state */
6217 +struct dump_config {
6218 +       ulong level;
6219 +       ulong flags;
6220 +       struct dumper *dumper;
6221 +       unsigned long dump_device;
6222 +       unsigned long dump_addr; /* relevant only for in-memory dumps */
6223 +       struct list_head dump_dev_list;
6224 +};     
6225 +
6226 +extern struct dump_config dump_config;
6227 +
6228 +/* Used to save the dump config across a reboot for 2-stage dumps: 
6229 + * 
6230 + * Note: The scheme, format, compression and device type should be
6231 + * registered at bootup for this config to be sharable across a soft-boot.
6232 + * The function addresses may have changed and become invalid, and
6233 + * need to be set up again.
6234 + */
6235 +struct dump_config_block {
6236 +       u64 magic; /* for a quick sanity check after reboot */
6237 +       struct dump_memdev memdev; /* handle to dump stored in memory */
6238 +       struct dump_config config;
6239 +       struct dumper dumper;
6240 +       struct dump_scheme scheme;
6241 +       struct dump_fmt fmt;
6242 +       struct __dump_compress compress;
6243 +       struct dump_data_filter filter_table[MAX_PASSES];
6244 +       struct dump_anydev dev[MAX_DEVS]; /* target dump device */
6245 +};
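
/*
 * Illustrative sketch, not part of the patch: the kind of post-reboot
 * sanity check the magic field above enables. The helper and its
 * expected-magic parameter are hypothetical.
 */
static inline int example_config_block_valid(struct dump_config_block *blk,
	u64 expected_magic)
{
	/* function pointers inside are stale and must be re-registered */
	return blk->magic == expected_magic;
}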
6246 +
6247 +
6248 +/* Wrappers that invoke the methods for the current (active) dumper */
6249 +
6250 +/* Scheme operations */
6251 +
6252 +static inline int dump_sequencer(void)
6253 +{
6254 +       return dump_config.dumper->scheme->ops->sequencer();
6255 +}
6256 +
6257 +static inline int dump_iterator(int pass, int (*action)(unsigned long, 
6258 +       unsigned long), struct dump_data_filter *filter)
6259 +{
6260 +       return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
6261 +}
6262 +
6263 +#define dump_save_data dump_config.dumper->scheme->ops->save_data
6264 +#define dump_skip_data dump_config.dumper->scheme->ops->skip_data
6265 +
6266 +static inline int dump_write_buffer(void *buf, unsigned long len)
6267 +{
6268 +       return dump_config.dumper->scheme->ops->write_buffer(buf, len);
6269 +}
6270 +
6271 +static inline int dump_configure(unsigned long devid)
6272 +{
6273 +       return dump_config.dumper->scheme->ops->configure(devid);
6274 +}
6275 +
6276 +static inline int dump_unconfigure(void)
6277 +{
6278 +       return dump_config.dumper->scheme->ops->unconfigure();
6279 +}
6280 +
6281 +/* Format operations */
6282 +
6283 +static inline int dump_configure_header(const char *panic_str, 
6284 +       const struct pt_regs *regs)
6285 +{
6286 +       return dump_config.dumper->fmt->ops->configure_header(panic_str, regs);
6287 +}
6288 +
6289 +static inline void dump_save_context(int cpu, const struct pt_regs *regs, 
6290 +               struct task_struct *tsk)
6291 +{
6292 +       dump_config.dumper->fmt->ops->save_context(cpu, regs, tsk);
6293 +}
6294 +
6295 +static inline int dump_save_this_cpu(const struct pt_regs *regs)
6296 +{
6297 +       int cpu = smp_processor_id();
6298 +
6299 +       dump_save_context(cpu, regs, current);
6300 +       return 1;
6301 +}
6302 +
6303 +static inline int dump_update_header(void)
6304 +{
6305 +       return dump_config.dumper->fmt->ops->update_header();
6306 +}
6307 +
6308 +static inline int dump_update_end_marker(void)
6309 +{
6310 +       return dump_config.dumper->fmt->ops->update_end_marker();
6311 +}
6312 +
6313 +static inline int dump_add_data(unsigned long loc, unsigned long sz)
6314 +{
6315 +       return dump_config.dumper->fmt->ops->add_data(loc, sz);
6316 +}
6317 +
6318 +/* Compression operation */
6319 +static inline int dump_compress_data(char *src, int slen, char *dst)
6320 +{
6321 +       return dump_config.dumper->compress->compress_func(src, slen, 
6322 +               dst, DUMP_DPC_PAGE_SIZE);
6323 +}
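
/*
 * Illustrative sketch, not part of the patch: the rough sequence an
 * execute path is expected to follow using the wrappers above (the
 * real entry point is dump_generic_execute(), declared further down).
 * The non-zero-on-failure error convention is an assumption, and
 * error handling is simplified.
 */
static inline int example_execute(const char *panic_str,
	const struct pt_regs *regs)
{
	int err;

	err = dump_configure_header(panic_str, regs);
	if (err)
		return err;
	dump_save_this_cpu(regs);		/* record invoking CPU context */
	err = dump_sequencer();			/* run the configured passes */
	if (err)
		return err;
	return dump_update_end_marker();	/* finalize the dump */
}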
6324 +
6325 +
6326 +/* Prototypes of some default implementations of dump methods */
6327 +
6328 +extern struct __dump_compress dump_none_compression;
6329 +
6330 +/* Default scheme methods (dump_scheme.c) */
6331 +
6332 +extern int dump_generic_sequencer(void);
6333 +extern int dump_page_iterator(int pass, int (*action)(unsigned long,
6334 +       unsigned long), struct dump_data_filter *filter);
6335 +extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
6336 +extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
6337 +extern int dump_generic_write_buffer(void *buf, unsigned long len);
6338 +extern int dump_generic_configure(unsigned long);
6339 +extern int dump_generic_unconfigure(void);
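
/*
 * Illustrative sketch, not part of the patch: wiring the default
 * single-stage methods declared above into a dump_scheme_ops table.
 * The variable name is hypothetical; the signatures match the
 * prototypes above.
 */
static struct dump_scheme_ops example_singlestage_ops = {
	.configure	= dump_generic_configure,
	.unconfigure	= dump_generic_unconfigure,
	.sequencer	= dump_generic_sequencer,
	.iterator	= dump_page_iterator,
	.save_data	= dump_generic_save_data,
	.skip_data	= dump_generic_skip_data,
	.write_buffer	= dump_generic_write_buffer,
};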
6340 +
6341 +/* Default scheme template */
6342 +extern struct dump_scheme dump_scheme_singlestage;
6343 +
6344 +/* Default dump format methods */
6345 +
6346 +extern int dump_lcrash_configure_header(const char *panic_str, 
6347 +       const struct pt_regs *regs);
6348 +extern void dump_lcrash_save_context(int  cpu, const struct pt_regs *regs, 
6349 +       struct task_struct *tsk);
6350 +extern int dump_generic_update_header(void);
6351 +extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
6352 +extern int dump_lcrash_update_end_marker(void);
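
/*
 * Illustrative sketch, not part of the patch: the corresponding
 * dump_fmt_ops wiring for the lcrash format methods declared above.
 * The variable name is hypothetical.
 */
static struct dump_fmt_ops example_lcrash_ops = {
	.configure_header	= dump_lcrash_configure_header,
	.update_header		= dump_generic_update_header,
	.save_context		= dump_lcrash_save_context,
	.add_data		= dump_lcrash_add_data,
	.update_end_marker	= dump_lcrash_update_end_marker,
};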
6353 +
6354 +/* Default format (lcrash) template */
6355 +extern struct dump_fmt dump_fmt_lcrash;
6356 +
6357 +/* Default dump selection filter table */
6358 +
6359 +/* 
6360 + * Entries are listed in order of importance and correspond to passes.
6361 + * The last entry (with a level_mask of zero) typically reflects data that
6362 + * won't be dumped -- this may, for example, be used to identify data
6363 + * that is certain to be skipped, so the corresponding memory areas can be
6364 + * utilized as scratch space.
6365 + */   
6366 +extern struct dump_data_filter dump_filter_table[];
6367 +
6368 +/* Some pre-defined dumpers */
6369 +extern struct dumper dumper_singlestage;
6370 +extern struct dumper dumper_stage1;
6371 +extern struct dumper dumper_stage2;
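
/*
 * Illustrative sketch, not part of the patch: how the scheme, format,
 * compression and filter pieces combine into a dumper profile like the
 * pre-defined ones above. The initializer and its name are
 * hypothetical; the referenced objects are all declared in this header.
 */
static struct dumper example_dumper = {
	.name		= "singlestage",
	.scheme		= &dump_scheme_singlestage,	/* sequencing/iteration */
	.fmt		= &dump_fmt_lcrash,		/* output layout */
	.compress	= &dump_none_compression,	/* no compression */
	.filter		= dump_filter_table,		/* per-pass selection */
};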
6372 +
6373 +/* These are temporary */
6374 +#define DUMP_MASK_HEADER       DUMP_LEVEL_HEADER
6375 +#define DUMP_MASK_KERN         DUMP_LEVEL_KERN
6376 +#define DUMP_MASK_USED         DUMP_LEVEL_USED
6377 +#define DUMP_MASK_UNUSED       DUMP_LEVEL_ALL_RAM
6378 +#define DUMP_MASK_REST         0 /* dummy for now */
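
/*
 * Illustrative sketch, not part of the patch: the shape of a filter
 * table using the masks above. Selector callbacks are omitted for
 * brevity; a real table (see dump_filter_table above) supplies them.
 */
static struct dump_data_filter example_filter_table[] = {
	{ .name = "kern", .level_mask = DUMP_MASK_KERN },	/* pass 0 */
	{ .name = "used", .level_mask = DUMP_MASK_USED },	/* pass 1 */
	{ .name = "rest", .level_mask = DUMP_MASK_REST },	/* mask 0: skipped */
};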
6379 +
6380 +/* Helpers - move these to dump.h later ? */
6381 +
6382 +int dump_generic_execute(const char *panic_str, const struct pt_regs *regs);
6383 +extern int dump_ll_write(void *buf, unsigned long len); 
6384 +int dump_check_and_free_page(struct dump_memdev *dev, struct page *page);
6385 +
6386 +static inline void dumper_reset(void)
6387 +{
6388 +       dump_config.dumper->curr_buf = dump_config.dumper->dump_buf;
6389 +       dump_config.dumper->curr_loc = 0;
6390 +       dump_config.dumper->curr_offset = 0;
6391 +       dump_config.dumper->count = 0;
6392 +       dump_config.dumper->curr_pass = 0;
6393 +}
6394 +
6395 +/* 
6396 + * May later be moulded to perform boot-time allocations so we can dump 
6397 + * earlier during bootup 
6398 + */
6399 +static inline void *dump_alloc_mem(unsigned long size)
6400 +{
6401 +       return kmalloc(size, GFP_KERNEL);
6402 +}
6403 +
6404 +static inline void dump_free_mem(void *buf)
6405 +{
6406 +       struct page *page;
6407 +
6408 +       /* ignore reserved pages (e.g. post soft boot stage) */
6409 +       if (buf && (page = virt_to_page(buf))) {
6410 +               if (PageReserved(page))
6411 +                       return;
6412 +       }
6413 +
6414 +       kfree(buf);
6415 +}
6416 +
6417 +
6418 +#endif /*  _LINUX_DUMP_METHODS_H */