-+#endif /* _MIPS64_KERNTYPES_H */
-Index: linux-2.6.10/net/Kconfig
-===================================================================
---- linux-2.6.10.orig/net/Kconfig 2005-04-05 16:29:27.896349784 +0800
-+++ linux-2.6.10/net/Kconfig 2005-04-05 16:47:53.895212400 +0800
-@@ -632,7 +632,7 @@
- endmenu
-
- config NETPOLL
-- def_bool NETCONSOLE
-+ def_bool NETCONSOLE || CRASH_DUMP_NETDEV
-
- config NETPOLL_RX
- bool "Netpoll support for trapping incoming packets"
-Index: linux-2.6.10/scripts/mkcompile_h
-===================================================================
---- linux-2.6.10.orig/scripts/mkcompile_h 2004-12-25 05:35:50.000000000 +0800
-+++ linux-2.6.10/scripts/mkcompile_h 2005-04-05 16:47:53.950204040 +0800
-@@ -33,7 +33,7 @@
-
- UTS_LEN=64
- UTS_TRUNCATE="sed -e s/\(.\{1,$UTS_LEN\}\).*/\1/"
--
-+LINUX_COMPILE_VERSION_ID="__linux_compile_version_id__`hostname | tr -c '[0-9A-Za-z\n]' '__'`_`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'`"
- # Generate a temporary compile.h
-
- ( echo /\* This file is auto generated, version $VERSION \*/
-@@ -55,6 +55,8 @@
- fi
-
- echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | tail -n 1`\"
-+ echo \#define LINUX_COMPILE_VERSION_ID $LINUX_COMPILE_VERSION_ID
-+ echo \#define LINUX_COMPILE_VERSION_ID_TYPE typedef char* "$LINUX_COMPILE_VERSION_ID""_t"
- ) > .tmpcompile
-
- # Only replace the real compile.h if the new one is different,
-Index: linux-2.6.10/mm/bootmem.c
-===================================================================
---- linux-2.6.10.orig/mm/bootmem.c 2004-12-25 05:34:30.000000000 +0800
-+++ linux-2.6.10/mm/bootmem.c 2005-04-05 16:47:53.903211184 +0800
-@@ -26,6 +26,7 @@
- */
- unsigned long max_low_pfn;
- unsigned long min_low_pfn;
-+EXPORT_SYMBOL(min_low_pfn);
- unsigned long max_pfn;
-
- EXPORT_SYMBOL(max_pfn); /* This is exported so
-@@ -284,6 +285,7 @@
- if (j + 16 < BITS_PER_LONG)
- prefetchw(page + j + 16);
- __ClearPageReserved(page + j);
-+ set_page_count(page + j, 1);
- }
- __free_pages(page, ffs(BITS_PER_LONG)-1);
- i += BITS_PER_LONG;
-Index: linux-2.6.10/mm/page_alloc.c
-===================================================================
---- linux-2.6.10.orig/mm/page_alloc.c 2005-04-05 16:29:28.218300840 +0800
-+++ linux-2.6.10/mm/page_alloc.c 2005-04-05 16:47:53.902211336 +0800
-@@ -47,6 +47,11 @@
- EXPORT_SYMBOL(totalram_pages);
- EXPORT_SYMBOL(nr_swap_pages);
-
-+#ifdef CONFIG_CRASH_DUMP_MODULE
-+/* This symbol has to be exported so that modules can use the 'for_each_pgdat' macro. */
-+EXPORT_SYMBOL(pgdat_list);
-+#endif
-+
- /*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
-@@ -281,8 +286,11 @@
- arch_free_page(page, order);
-
- mod_page_state(pgfree, 1 << order);
-- for (i = 0 ; i < (1 << order) ; ++i)
-+ for (i = 0 ; i < (1 << order) ; ++i){
-+ if (unlikely(i))
-+ __put_page(page + i);
- free_pages_check(__FUNCTION__, page + i);
-+ }
- list_add(&page->lru, &list);
- kernel_map_pages(page, 1<<order, 0);
- free_pages_bulk(page_zone(page), 1, &list, order);
-@@ -322,44 +330,34 @@
- return page;
- }
-
--static inline void set_page_refs(struct page *page, int order)
--{
--#ifdef CONFIG_MMU
-- set_page_count(page, 1);
--#else
-- int i;
--
-- /*
-- * We need to reference all the pages for this order, otherwise if
-- * anyone accesses one of the pages with (get/put) it will be freed.
-- */
-- for (i = 0; i < (1 << order); i++)
-- set_page_count(page+i, 1);
--#endif /* CONFIG_MMU */
--}
--
- /*
- * This page is about to be returned from the page allocator
- */
--static void prep_new_page(struct page *page, int order)
-+static void prep_new_page(struct page *_page, int order)
- {
-- if (page->mapping || page_mapped(page) ||
-- (page->flags & (
-- 1 << PG_private |
-- 1 << PG_locked |
-- 1 << PG_lru |
-- 1 << PG_active |
-- 1 << PG_dirty |
-- 1 << PG_reclaim |
-- 1 << PG_swapcache |
-- 1 << PG_writeback )))
-+ int i;
-+
-+ for(i = 0; i < (1 << order); i++){
-+ struct page *page = _page + i;
-+
-+ if (page->mapping || page_mapped(page) ||
-+ (page->flags & (
-+ 1 << PG_private |
-+ 1 << PG_locked |
-+ 1 << PG_lru |
-+ 1 << PG_active |
-+ 1 << PG_dirty |
-+ 1 << PG_reclaim |
-+ 1 << PG_swapcache |
-+ 1 << PG_writeback )))
- bad_page(__FUNCTION__, page);
-
-- page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-- 1 << PG_referenced | 1 << PG_arch_1 |
-- 1 << PG_checked | 1 << PG_mappedtodisk);
-- page->private = 0;
-- set_page_refs(page, order);
-+ page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-+ 1 << PG_referenced | 1 << PG_arch_1 |
-+ 1 << PG_checked | 1 << PG_mappedtodisk);
-+ page->private = 0;
-+ set_page_count(page, 1);
-+ }
- }
-
- /*
-Index: linux-2.6.10/kernel/sched.c
-===================================================================
---- linux-2.6.10.orig/kernel/sched.c 2005-04-05 16:29:30.335978904 +0800
-+++ linux-2.6.10/kernel/sched.c 2005-04-05 16:47:53.901211488 +0800
-@@ -54,6 +54,10 @@
- #define cpu_to_node_mask(cpu) (cpu_online_map)
- #endif
-
-+/* used to soft spin in sched while dump is in progress */
-+unsigned long dump_oncpu;
-+EXPORT_SYMBOL(dump_oncpu);
-+
- /*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
-@@ -184,109 +188,6 @@
- #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
- < (long long) (sd)->cache_hot_time)
-
--/*
-- * These are the runqueue data structures:
-- */
--
--#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
--
--typedef struct runqueue runqueue_t;
--
--struct prio_array {
-- unsigned int nr_active;
-- unsigned long bitmap[BITMAP_SIZE];
-- struct list_head queue[MAX_PRIO];
--};
--
--/*
-- * This is the main, per-CPU runqueue data structure.
-- *
-- * Locking rule: those places that want to lock multiple runqueues
-- * (such as the load balancing or the thread migration code), lock
-- * acquire operations must be ordered by ascending &runqueue.
-- */
--struct runqueue {
-- spinlock_t lock;
--
-- /*
-- * nr_running and cpu_load should be in the same cacheline because
-- * remote CPUs use both these fields when doing load calculation.
-- */
-- unsigned long nr_running;
--#ifdef CONFIG_SMP
-- unsigned long cpu_load;
--#endif
-- unsigned long long nr_switches;
--
-- /*
-- * This is part of a global counter where only the total sum
-- * over all CPUs matters. A task can increase this counter on
-- * one CPU and if it got migrated afterwards it may decrease
-- * it on another CPU. Always updated under the runqueue lock:
-- */
-- unsigned long nr_uninterruptible;
--
-- unsigned long expired_timestamp;
-- unsigned long long timestamp_last_tick;
-- task_t *curr, *idle;
-- struct mm_struct *prev_mm;
-- prio_array_t *active, *expired, arrays[2];
-- int best_expired_prio;
-- atomic_t nr_iowait;
--
--#ifdef CONFIG_SMP
-- struct sched_domain *sd;
--
-- /* For active balancing */
-- int active_balance;
-- int push_cpu;
--
-- task_t *migration_thread;
-- struct list_head migration_queue;
--#endif
--
--#ifdef CONFIG_SCHEDSTATS
-- /* latency stats */
-- struct sched_info rq_sched_info;
--
-- /* sys_sched_yield() stats */
-- unsigned long yld_exp_empty;
-- unsigned long yld_act_empty;
-- unsigned long yld_both_empty;
-- unsigned long yld_cnt;
--
-- /* schedule() stats */
-- unsigned long sched_noswitch;
-- unsigned long sched_switch;
-- unsigned long sched_cnt;
-- unsigned long sched_goidle;
--
-- /* pull_task() stats */
-- unsigned long pt_gained[MAX_IDLE_TYPES];
-- unsigned long pt_lost[MAX_IDLE_TYPES];
--
-- /* active_load_balance() stats */
-- unsigned long alb_cnt;
-- unsigned long alb_lost;
-- unsigned long alb_gained;
-- unsigned long alb_failed;
--
-- /* try_to_wake_up() stats */
-- unsigned long ttwu_cnt;
-- unsigned long ttwu_attempts;
-- unsigned long ttwu_moved;
--
-- /* wake_up_new_task() stats */
-- unsigned long wunt_cnt;
-- unsigned long wunt_moved;
--
-- /* sched_migrate_task() stats */
-- unsigned long smt_cnt;
--
-- /* sched_balance_exec() stats */
-- unsigned long sbe_cnt;
--#endif
--};
-
- static DEFINE_PER_CPU(struct runqueue, runqueues);
-
-@@ -2535,6 +2436,15 @@
- unsigned long run_time;
- int cpu, idx;
-
-+ /*
-+	 * If a crash dump is in progress, the other CPUs
-+ * need to wait until it completes.
-+ * NB: this code is optimized away for kernels without
-+ * dumping enabled.
-+ */
-+ if (unlikely(dump_oncpu))
-+ goto dump_scheduling_disabled;
-+
- /*
- * Test if we are atomic. Since do_exit() needs to call into
- * schedule() atomically, we ignore that path for now.
-@@ -2698,6 +2608,16 @@
- preempt_enable_no_resched();
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
- goto need_resched;
-+
-+ return;
-+
-+ dump_scheduling_disabled:
-+ /* allow scheduling only if this is the dumping cpu */
-+ if (dump_oncpu != smp_processor_id()+1) {
-+ while (dump_oncpu)
-+ cpu_relax();
-+ }
-+ return;
- }
-
- EXPORT_SYMBOL(schedule);
-Index: linux-2.6.10/kernel/panic.c
-===================================================================
---- linux-2.6.10.orig/kernel/panic.c 2004-12-25 05:35:29.000000000 +0800
-+++ linux-2.6.10/kernel/panic.c 2005-04-05 16:47:53.898211944 +0800
-@@ -18,12 +18,17 @@
- #include <linux/sysrq.h>
- #include <linux/interrupt.h>
- #include <linux/nmi.h>
-+#ifdef CONFIG_KEXEC
-+#include <linux/kexec.h>
-+#endif
-
- int panic_timeout;
- int panic_on_oops;
- int tainted;
-+void (*dump_function_ptr)(const char *, const struct pt_regs *) = 0;
-
- EXPORT_SYMBOL(panic_timeout);
-+EXPORT_SYMBOL(dump_function_ptr);
-
- struct notifier_block *panic_notifier_list;
-
-@@ -71,11 +76,12 @@
- printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
- bust_spinlocks(0);
-
-+ notifier_call_chain(&panic_notifier_list, 0, buf);
-+
- #ifdef CONFIG_SMP
- smp_send_stop();
- #endif
-
-- notifier_call_chain(&panic_notifier_list, 0, buf);
-
- if (!panic_blink)
- panic_blink = no_blink;
-@@ -87,6 +93,18 @@
- * We can't use the "normal" timers since we just panicked..
- */
- printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
-+#ifdef CONFIG_KEXEC
-+{
-+ struct kimage *image;
-+ image = xchg(&kexec_image, 0);
-+ if (image) {
-+ printk(KERN_EMERG "by starting a new kernel ..\n");
-+ mdelay(panic_timeout*1000);
-+ machine_kexec(image);
-+ }