Index: linux-2.4.24/mm/slab.c
===================================================================
--- linux-2.4.24.orig/mm/slab.c 2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/mm/slab.c 2004-07-17 08:02:02.000000000 -0400
+#include <linux/vmalloc.h>
  * Parameters for kmem_cache_reap
+#ifdef CONFIG_DEBUG_UAF
+void *uaf_alloc(kmem_cache_t *, int gfp_mask);
+int uaf_cache_free(kmem_cache_t *, void *addr);
+int uaf_free(void *addr);
+struct page *uaf_vaddr_to_page(void *obj);
@@ -1340,6 +1348,20 @@
         unsigned long save_flags;
+#ifdef CONFIG_DEBUG_UAF
+        /* try the UAF allocator first */
+        objp = uaf_alloc(cachep, flags);
+                        unsigned long ctor_flags;
+                        ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+                        if (!(flags & __GFP_WAIT))
+                                ctor_flags |= SLAB_CTOR_ATOMIC;
+                        cachep->ctor(objp, cachep, ctor_flags);
         kmem_cache_alloc_head(cachep, flags);
         local_irq_save(save_flags);
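
Because the hook above hands out an object that never went through slab's normal
initialization path, it has to invoke the cache constructor by hand, deriving the
ctor flags from the gfp mask the same way slab itself does. A minimal userspace
sketch of just that derivation; the flag values below are illustrative
placeholders, not the kernel's actual constants:

#include <stdio.h>

#define __GFP_WAIT            0x10UL   /* placeholder value */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL  /* placeholder value */
#define SLAB_CTOR_ATOMIC      0x002UL  /* placeholder value */

static unsigned long ctor_flags_for(unsigned long gfp_mask)
{
        unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;

        /* no __GFP_WAIT in the mask: the constructor must not sleep */
        if (!(gfp_mask & __GFP_WAIT))
                ctor_flags |= SLAB_CTOR_ATOMIC;
        return ctor_flags;
}

int main(void)
{
        printf("atomic ctx -> ctor flags 0x%lx\n", ctor_flags_for(0));
        printf("sleepable  -> ctor flags 0x%lx\n", ctor_flags_for(__GFP_WAIT));
        return 0;
}
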
@@ -1434,13 +1456,17 @@
         if (cachep->flags & SLAB_RED_ZONE) {
                 objp -= BYTES_PER_WORD;
-                if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
+                if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2) {
                         /* Either write before start, or a double free. */
+                        printk("inconsistency in %s\n", cachep->name);
                 if (xchg((unsigned long *)(objp+cachep->objsize -
-                                BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
+                                BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2) {
                         /* Either write past end, or a double free. */
+                        printk("inconsistency in %s\n", cachep->name);
         if (cachep->flags & SLAB_POISON)
                 kmem_poison_obj(cachep, objp);
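
The red-zone test relies on the guard word before and after the object cycling
between two magic values: one while the object is live, another once it has been
freed, so the xchg() both detects the problem and rearms the guard. A
single-threaded userspace model of that state machine; the magic values here are
placeholders for whatever mm/slab.c defines:

#include <assert.h>
#include <stdio.h>

#define RED_MAGIC1 0x5A2CF071UL  /* guard value while free   (placeholder) */
#define RED_MAGIC2 0x170FC2A5UL  /* guard value while in use (placeholder) */

/* single-threaded stand-in for the kernel's xchg() */
static unsigned long xchg_word(unsigned long *p, unsigned long v)
{
        unsigned long old = *p;
        *p = v;
        return old;
}

int main(void)
{
        unsigned long guard = RED_MAGIC2;  /* as set at allocation time */

        /* first free: guard still holds the "in use" magic, all is well */
        assert(xchg_word(&guard, RED_MAGIC1) == RED_MAGIC2);
        /* second free: guard already holds RED_MAGIC1 -- this is the
         * "inconsistency" the hunk above reports */
        assert(xchg_word(&guard, RED_MAGIC1) != RED_MAGIC2);
        puts("double free caught by the guard-word check");
        return 0;
}
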
@@ -1576,6 +1602,10 @@
 void kmem_cache_free (kmem_cache_t *cachep, void *objp)
+#ifdef CONFIG_DEBUG_UAF
+        if (uaf_cache_free(cachep, objp))
         CHECK_PAGE(virt_to_page(objp));
         if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
@@ -1601,6 +1631,10 @@
+#ifdef CONFIG_DEBUG_UAF
+        if (uaf_free((void *) objp))
         local_irq_save(flags);
         CHECK_PAGE(virt_to_page(objp));
         c = GET_PAGE_CACHE(virt_to_page(objp));
@@ -2076,3 +2110,478 @@
+#ifdef CONFIG_DEBUG_UAF
+#define MAX_UAF_OBJ_SIZE        8       /* in pages */
+#define UAF_ASSERT(xxx)         do { if (!(xxx)) BUG(); } while (0)
+#define uaf_printk(fmt,a...)    printk(fmt, ##a)
+#define uaf_printk(fmt,a...)
+        atomic_t uaf_allocated;
+        atomic_t uaf_allocations;
+        atomic_t uaf_failed;
+static int uaf_max = 8192;
+static void *uaf_bitmap = NULL;
+static spinlock_t uaf_lock;
+static int uaf_last_found = 0;
+static int uaf_used = 0;
+static struct vm_struct *uaf_area = NULL;
+static struct uaf_stats uaf_stats[MAX_UAF_OBJ_SIZE + 1];
+static int __init uaf_setup(char *str)
+        uaf_max = simple_strtoul(str, NULL, 0);
+__setup("uaf=", uaf_setup);
+        printk("UAF: total vmalloc space: %lu bytes\n",
+               VMALLOC_END - VMALLOC_START);
+        uaf_area = get_vm_area(PAGE_SIZE * uaf_max, VM_ALLOC);
+                printk(KERN_ALERT "UAF: can't reserve %lu bytes in KVA\n",
+                       PAGE_SIZE * uaf_max);
+        printk("UAF: reserved %lu bytes in KVA at 0x%p\n",
+               PAGE_SIZE * uaf_max, uaf_area->addr);
+        /* how many bytes do we need to track space usage? */
+        size = uaf_max / 8 + 8;
+        uaf_bitmap = vmalloc(size);
+                       "UAF: can't allocate %d bytes for bitmap\n", size);
+        memset(uaf_bitmap, 0, size);
+        spin_lock_init(&uaf_lock);
+        memset(uaf_stats, 0, sizeof(uaf_stats));
+        printk("UAF: allocated %d bytes for bitmap\n", size);
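
One bit in the bitmap tracks one page of the reserved KVA window, so the bitmap
costs uaf_max / 8 bytes plus a little slack (the + 8 looks like padding so the
word-wise bit searches cannot run off the end, though the patch does not say so
explicitly). A quick check of the arithmetic with the default uaf_max, assuming
a 4 KB page:

#include <stdio.h>

int main(void)
{
        int uaf_max = 8192;             /* default: 8192 pages  */
        int size = uaf_max / 8 + 8;     /* bytes of bitmap      */

        /* -> "8192 pages -> 1032 byte bitmap (32768 KB of KVA tracked)" */
        printf("%d pages -> %d byte bitmap (%lu KB of KVA tracked)\n",
               uaf_max, size, (unsigned long) uaf_max * 4096 / 1024);
        return 0;
}
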
+static int uaf_find(int len)
+        int new_last_found = -1;
+        j = uaf_last_found;
+                i = find_next_zero_bit(uaf_bitmap, uaf_max, j);
+                if (i >= uaf_max) {
+                        /* wrap around and retry from bit 0 */
+                                /* this is already the second pass: nothing free */
+                        i = find_next_zero_bit(uaf_bitmap, uaf_max, 0);
+                /* remember the hit for subsequent searches */
+                if (new_last_found == -1)
+                        new_last_found = uaf_last_found = i;
+                UAF_ASSERT(new_last_found < uaf_max);
+                 * OK, we found the first zero bit;
+                 * now try to find the requested run of contiguous zero bits.
+                /* FIXME: implement multipage allocation! */
+                j = find_next_bit(uaf_bitmap, uaf_max, i);
+                if (++loop2 > 10000) {
+                        printk("ALERT: loop2=%d\n", loop2);
+        } while (j - i < len);
+        if (new_last_found == -1)
+                uaf_last_found = i + 1;
+        if (uaf_last_found >= uaf_max)
+                uaf_last_found = 0;
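
The search is first-fit with a rotating cursor: start at uaf_last_found, locate
a zero bit, accept it only if the following len bits are also zero, and wrap to
bit 0 at most once before giving up. A compact userspace model of the same loop
structure; the bit helpers are simplified linear stand-ins for the kernel's
find_next_zero_bit()/find_next_bit(), and the end-of-bitmap handling is
deliberately naive:

#include <stdio.h>

#define NBITS 64
static unsigned char bitmap[NBITS / 8];

static int test_bit(int n) { return (bitmap[n / 8] >> (n % 8)) & 1; }
static void set_bit(int n) { bitmap[n / 8] |= 1 << (n % 8); }

/* linear stand-ins for find_next_zero_bit()/find_next_bit() */
static int next_zero(int from)
{
        while (from < NBITS && test_bit(from))
                from++;
        return from;
}

static int next_set(int from)
{
        while (from < NBITS && !test_bit(from))
                from++;
        return from;
}

/* first-fit search for `len` contiguous zero bits starting at `start`,
 * wrapping to bit 0 at most once -- the shape of uaf_find() above */
static int find_run(int len, int start)
{
        int i = next_zero(start), wrapped = 0;

        for (;;) {
                if (i >= NBITS) {
                        if (wrapped++)
                                return -1;      /* second pass: nothing free */
                        i = next_zero(0);
                        continue;
                }
                if (next_set(i) - i >= len)     /* run long enough? */
                        return i;
                i = next_zero(next_set(i));     /* skip the blocking set bit */
        }
}

int main(void)
{
        int b;

        for (b = 0; b < 10; b++)
                set_bit(b);                     /* bits 0..9 busy        */
        set_bit(12);                            /* hole of 2 at 10..11   */
        printf("3-bit run found at bit %d\n", find_run(3, 0));  /* -> 13 */
        return 0;
}
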
+extern int __vmalloc_area_pages(unsigned long address, unsigned long size,
+                                int gfp_mask, pgprot_t prot,
+                                struct page ***pages);
+void *uaf_alloc(kmem_cache_t *cachep, int gfp_mask)
+        struct page *ptrs[MAX_UAF_OBJ_SIZE];
+        int size = cachep->objsize;
+        struct page **pages;
+        unsigned long flags;
+        unsigned long addr;
+        int i, j, err = -2000;
+        if (uaf_bitmap == NULL)
+        if (!(cachep->flags & SLAB_USE_UAF))
+        pages = (struct page **) ptrs;
+        size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+        /* FIXME: implement multipage allocation! */
+        if (size > MAX_UAF_OBJ_SIZE) {
+                printk(KERN_ALERT "size is too big: %d\n", size);
+        if (uaf_used == uaf_max) {
+                uaf_printk("UAF: space exhausted!\n");
+                atomic_inc(&uaf_stats[size].uaf_failed);
+        spin_lock_irqsave(&uaf_lock, flags);
+        i = uaf_find(size);
+                spin_unlock_irqrestore(&uaf_lock, flags);
+                atomic_inc(&uaf_stats[size].uaf_failed);
+        for (j = 0; j < size; j++) {
+                UAF_ASSERT(!test_bit(i + j, uaf_bitmap));
+                set_bit(i + j, uaf_bitmap);
+        spin_unlock_irqrestore(&uaf_lock, flags);
+        addr = ((unsigned long) uaf_area->addr) + (PAGE_SIZE * i);
+        uaf_printk("UAF: found %d/%d, base 0x%p, map at 0x%lx: ", i,
+                   size, uaf_area->addr, addr);
+        /* OK, we've found free space; now allocate the pages */
+        memset(pages, 0, sizeof(struct page *) * MAX_UAF_OBJ_SIZE);
+        for (j = 0; j < size; j++) {
+                pages[j] = alloc_page(gfp_mask);
+                if (pages[j] == NULL)
+                uaf_printk("0x%p ", pages[j]);
+        /* map the freshly allocated pages */
+        err = __vmalloc_area_pages(addr, PAGE_SIZE * size, gfp_mask,
+                                   PAGE_KERNEL, &pages);
+        pages = (struct page **) ptrs;
+        /* store the slab cache pointer in the first page */
+        ptrs[0]->list.next = (void *) cachep;
+        uaf_printk(" -> 0x%lx\n", addr);
+        atomic_inc(&uaf_stats[size].uaf_allocated);
+        atomic_inc(&uaf_stats[size].uaf_allocations);
+        if (!in_interrupt() && !in_softirq())
+        size = cachep->objsize;
+        if (size < PAGE_SIZE)
+                memset((char *) addr + size, 0xa7, PAGE_SIZE - size);
+        return (void *) addr;
+        printk(KERN_ALERT "can't map pages: %d\n", err);
+        for (j = 0; j < size; j++)
+                        __free_page(pages[j]);
+        /* page allocation failed: release the bitmap bits we claimed */
+        spin_lock_irqsave(&uaf_lock, flags);
+        for (j = 0; j < size; j++) {
+                clear_bit(i + j, uaf_bitmap);
+        spin_unlock_irqrestore(&uaf_lock, flags);
+        atomic_inc(&uaf_stats[size].uaf_failed);
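
Two details of uaf_alloc() worth spelling out: slot i of the bitmap corresponds
to the virtual address uaf_area->addr + PAGE_SIZE * i, and for objects smaller
than a page the remainder of the page is filled with the 0xa7 poison byte so the
free path can detect overruns. The arithmetic in isolation; the base address and
object size below are invented for illustration:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define POISON    0xa7

int main(void)
{
        unsigned long base = 0xe0000000UL;  /* uaf_area->addr (example)     */
        int i = 13;                         /* slot returned by uaf_find()  */
        size_t objsize = 192;               /* cachep->objsize (example)    */
        unsigned char page[PAGE_SIZE];

        unsigned long addr = base + PAGE_SIZE * i;
        printf("slot %d maps at 0x%lx\n", i, addr);     /* 0xe000d000 */

        /* tail poison for a sub-page object, as in uaf_alloc() */
        memset(page + objsize, POISON, PAGE_SIZE - objsize);
        printf("poisoned %lu tail bytes\n",
               (unsigned long) (PAGE_SIZE - objsize));
        return 0;
}
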
+extern void free_area_pmd(pgd_t *dir, unsigned long address,
+                          unsigned long size);
+static void uaf_unmap(unsigned long address, unsigned long size)
+        unsigned long end = (address + size);
+        dir = pgd_offset_k(address);
+                free_area_pmd(dir, address, end - address);
+                address = (address + PGDIR_SIZE) & PGDIR_MASK;
+        } while (address && (address < end));
+         * we must not call smp_call_function() with interrupts disabled,
+         * otherwise we can deadlock
+        if (!in_interrupt() && !in_softirq())
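
uaf_unmap() walks the kernel page tables one page directory at a time, and the
update expression advances the cursor to the start of the next directory, which
is also why the cross-CPU TLB flush that follows must only run with interrupts
enabled. The stepping arithmetic worked through with classic i386 non-PAE
constants, assumed here for illustration:

#include <stdio.h>

#define PGDIR_SHIFT 22                          /* i386 non-PAE: 4 MB pgd */
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE - 1))

int main(void)
{
        unsigned long address = 0xe0001000UL;   /* mid-directory address */

        address = (address + PGDIR_SIZE) & PGDIR_MASK;
        printf("next pgd boundary: 0x%lx\n", address);  /* 0xe0400000 */
        return 0;
}
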
+ * returns 1 if the free was successful
+int uaf_cache_free(kmem_cache_t *cachep, void *addr)
+        struct page *pages[MAX_UAF_OBJ_SIZE];
+        int size = cachep->objsize;
+        unsigned long flags;
+        if (cachep->flags & SLAB_USE_UAF)
+                uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
+        size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+        if (size > MAX_UAF_OBJ_SIZE)
+        if (uaf_bitmap == NULL)
+        /* first, check whether the address is in UAF space at all */
+        if ((unsigned) addr < (unsigned) uaf_area->addr ||
+            (unsigned) addr >= (unsigned) uaf_area->addr + uaf_area->size)
+        uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
+        /* calculate the placement in the bitmap */
+        i = (unsigned) addr - (unsigned) uaf_area->addr;
+        UAF_ASSERT(i >= 0);
+        /* check against double-free */
+        spin_lock_irqsave(&uaf_lock, flags);
+        for (j = 0; j < size; j++) {
+                /* the corresponding bit must still be set */
+                unsigned long address;
+                UAF_ASSERT(i + j >= 0 && i + j < uaf_max);
+                BUG_ON(!test_bit(i + j, uaf_bitmap));
+                address = ((unsigned long) addr) + (PAGE_SIZE * j);
+                pages[j] = vmalloc_to_page((void *) address);
+                BUG_ON(pages[j] == NULL);
+                /* now release the space in the UAF bitmap */
+                clear_bit(i + j, uaf_bitmap);
+        spin_unlock_irqrestore(&uaf_lock, flags);
+        /* check the poison bytes */
+        if (cachep->objsize < PAGE_SIZE) {
+                unsigned char *a = (void *) addr;
+                for (i = 0; i < PAGE_SIZE - cachep->objsize; i++)
+                        if (a[cachep->objsize + i] != 0xa7) {
+                                printk("corruption(0x%x) at %u in %s/0x%p\n",
+                                       (unsigned) a[cachep->objsize + i],
+                                       cachep->objsize + i, cachep->name, addr);
+        UAF_ASSERT(((unsigned long) addr & ~PAGE_MASK) == 0UL);
+        /* calculate the placement in the bitmap */
+        i = (unsigned) addr - (unsigned) uaf_area->addr;
+        UAF_ASSERT(i >= 0);
+        uaf_unmap((unsigned long) addr, PAGE_SIZE * size);
+        /* free all the pages */
+        for (j = 0; j < size; j++)
+                __free_page(pages[j]);
+        atomic_dec(&uaf_stats[size].uaf_allocated);
+        uaf_printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
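
On the free side every tail byte must still hold 0xa7; any other value means
something wrote past the end of the object while the mapping was live. A
self-contained model of the scan; the object size and the injected overrun are
invented:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define POISON    0xa7

int main(void)
{
        unsigned char page[PAGE_SIZE];
        int objsize = 192, i;

        memset(page + objsize, POISON, PAGE_SIZE - objsize);
        page[objsize + 7] = 0x42;               /* simulate an overrun */

        /* the scan from uaf_cache_free() above, minus cache bookkeeping */
        for (i = 0; i < PAGE_SIZE - objsize; i++)
                if (page[objsize + i] != POISON)
                        printf("corruption(0x%x) at %u\n",
                               page[objsize + i], objsize + i);
        return 0;
}
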
+struct page *uaf_vaddr_to_page(void *obj)
+        if (uaf_bitmap == NULL)
+        /* first, check whether the address is in UAF space at all */
+        if ((unsigned) obj < (unsigned) uaf_area->addr ||
+            (unsigned) obj >= (unsigned) uaf_area->addr + uaf_area->size)
+        return vmalloc_to_page(obj);
+int uaf_free(void *obj)
+        struct page *page = uaf_vaddr_to_page((void *) obj);
+        c = GET_PAGE_CACHE(page);
+        return uaf_cache_free(c, (void *) obj);
+int uaf_is_allocated(void *obj)
+        unsigned long addr = (unsigned long) obj;
+        if (uaf_bitmap == NULL)
+        /* first, check whether the address is in UAF space at all */
+        if (addr < (unsigned long) uaf_area->addr ||
+            addr >= (unsigned long) uaf_area->addr + uaf_area->size)
+        /* calculate the placement in the bitmap */
+        i = (unsigned) addr - (unsigned) uaf_area->addr;
+        return test_bit(i, uaf_bitmap);
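
All three helpers share one translation: an address belongs to the UAF window iff
it lies between uaf_area->addr and uaf_area->addr + uaf_area->size, and its
bitmap slot is the byte offset from the base scaled down to pages (the scaling
step falls in lines elided above). A standalone sketch with invented window
bounds:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long base = 0xe0000000UL;      /* uaf_area->addr (example) */
        unsigned long size = 8192 * PAGE_SIZE;  /* uaf_area->size (example) */
        unsigned long addr = 0xe000d000UL;

        if (addr < base || addr >= base + size) {
                printf("not a UAF address\n");
                return 0;
        }
        printf("slot = %lu\n", (addr - base) / PAGE_SIZE);      /* -> 13 */
        return 0;
}
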
+static void *uaf_s_start(struct seq_file *m, loff_t *pos)
+                seq_printf(m, "size(pgs) allocated failed allocations. "
+                           "%d reserved, %d in use, %d last\n"
+                           "start 0x%p, size %lu, bitmap 0x%p\n"
+                           "VMALLOC_START 0x%lx, VMALLOC_END 0x%lx\n",
+                           uaf_max, uaf_used, uaf_last_found,
+                           uaf_area->addr, uaf_area->size,
+                           uaf_bitmap, VMALLOC_START, VMALLOC_END);
+        else if (n > MAX_UAF_OBJ_SIZE)
+static void *uaf_s_next(struct seq_file *m, void *p, loff_t *pos)
+        unsigned long n = *pos;
+        if (n + 1 > MAX_UAF_OBJ_SIZE)
+        return (void *) (n + 1);
+static void uaf_s_stop(struct seq_file *m, void *p)
+static int uaf_s_show(struct seq_file *m, void *p)
+        if (n > MAX_UAF_OBJ_SIZE)
+        seq_printf(m, "%d %d %d %d\n", n,
+                   atomic_read(&uaf_stats[n].uaf_allocated),
+                   atomic_read(&uaf_stats[n].uaf_failed),
+                   atomic_read(&uaf_stats[n].uaf_allocations));
+struct seq_operations uafinfo_op = {
+        .start  = uaf_s_start,
+        .next   = uaf_s_next,
+        .stop   = uaf_s_stop,
+        .show   = uaf_s_show,
+ssize_t uafinfo_write(struct file *file, const char *buffer,
+                      size_t count, loff_t *ppos)
+        char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
+        struct list_head *p;
+        if (count > MAX_SLABINFO_WRITE)
+        if (copy_from_user(kbuf, buffer, count))
+        kbuf[MAX_SLABINFO_WRITE] = '\0';
+        key = strsep(&tmp, " \t\n");
+        if (!strcmp(key, "on"))
+        else if (!strcmp(key, "off"))
+        name = strsep(&tmp, " \t\n");
+        /* find the cache in the chain of caches */
+        down(&cache_chain_sem);
+        list_for_each(p, &cache_chain) {
+                kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+                if (!strcmp(cachep->name, name)) {
+                                printk("UAF: use on %s\n", cachep->name);
+                                cachep->flags |= SLAB_USE_UAF;
+                                printk("UAF: don't use on %s\n", cachep->name);
+                                cachep->flags &= ~SLAB_USE_UAF;
+        up(&cache_chain_sem);
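
Writes to /proc/uafinfo are commands of the form "on <cache-name>" or
"off <cache-name>"; the tokenizing above is plain strsep() over the copied
buffer. A userspace rendering of the same parse, with an invented cache name:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char kbuf[] = "on dentry_cache\n";      /* what the user wrote */
        char *tmp = kbuf, *key, *name;
        int enable;

        key = strsep(&tmp, " \t\n");
        if (!key)
                return 1;
        if (!strcmp(key, "on"))
                enable = 1;
        else if (!strcmp(key, "off"))
                enable = 0;
        else
                return 1;

        name = strsep(&tmp, " \t\n");
        if (!name)
                return 1;
        printf("%s UAF on cache '%s'\n",
               enable ? "enable" : "disable", name);
        return 0;
}
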
Index: linux-2.4.24/mm/vmalloc.c
===================================================================
--- linux-2.4.24.orig/mm/vmalloc.c 2004-07-16 09:24:01.000000000 -0400
+++ linux-2.4.24/mm/vmalloc.c 2004-07-16 13:55:05.000000000 -0400
         } while (address < end);
-static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
+void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
-static inline int __vmalloc_area_pages (unsigned long address,
+int __vmalloc_area_pages (unsigned long address,
Index: linux-2.4.24/mm/page_alloc.c
===================================================================
--- linux-2.4.24.orig/mm/page_alloc.c 2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/mm/page_alloc.c 2004-07-16 13:55:05.000000000 -0400
         arch_free_page(page, order);
+        for (index = 0; index < (1 << order); index++) {
+                BUG_ON(atomic_read(&page[index].count) > 0);
+                BUG_ON(PageSlab(page + index));
          * Yes, think what happens when other parts of the kernel take
          * a reference to a page in order to pin it for io. -ben
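
The two BUG_ON()s assert that every page of the order-sized block going back to
the buddy allocator has a zero reference count and is no longer owned by slab,
the two states a use-after-free typically leaves behind. A userspace model of
the sweep; the struct and flag are simplified stand-ins for struct page:

#include <assert.h>
#include <stdio.h>

struct page_model {
        int count;              /* atomic_t count in the kernel */
        int is_slab;            /* PG_slab bit in page->flags   */
};

int main(void)
{
        struct page_model pages[4] = { {0, 0}, {0, 0}, {0, 0}, {0, 0} };
        int order = 2, index;

        for (index = 0; index < (1 << order); index++) {
                assert(pages[index].count == 0);        /* still referenced? */
                assert(!pages[index].is_slab);          /* still in a slab?  */
        }
        printf("all %d pages clean\n", 1 << order);
        return 0;
}
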
Index: linux-2.4.24/init/main.c
===================================================================
--- linux-2.4.24.orig/init/main.c 2004-07-16 09:24:01.000000000 -0400
+++ linux-2.4.24/init/main.c 2004-07-16 13:55:05.000000000 -0400
 #if defined(CONFIG_SYSVIPC)
+#ifdef CONFIG_DEBUG_UAF
Index: linux-2.4.24/fs/proc/proc_misc.c
===================================================================
--- linux-2.4.24.orig/fs/proc/proc_misc.c 2004-07-16 09:23:51.000000000 -0400
+++ linux-2.4.24/fs/proc/proc_misc.c 2004-07-16 13:55:05.000000000 -0400
         release:        seq_release,
+#ifdef CONFIG_DEBUG_UAF
+extern struct seq_operations uafinfo_op;
+extern ssize_t uafinfo_write(struct file *, const char *, size_t, loff_t *);
+static int uafinfo_open(struct inode *inode, struct file *file)
+        return seq_open(file, &uafinfo_op);
+static struct file_operations proc_uafinfo_operations = {
+        .open           = uafinfo_open,
+        .write          = uafinfo_write,
+        .llseek         = seq_lseek,
+        .release        = seq_release,
 static int kstat_read_proc(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
         create_seq_entry("iomem", 0, &proc_iomem_operations);
         create_seq_entry("partitions", 0, &proc_partitions_operations);
         create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#ifdef CONFIG_DEBUG_UAF
+        create_seq_entry("uafinfo",S_IWUSR|S_IRUGO,&proc_uafinfo_operations);
 #ifdef CONFIG_MODULES
         create_seq_entry("ksyms", 0, &proc_ksyms_operations);
Index: linux-2.4.24/include/linux/slab.h
===================================================================
--- linux-2.4.24.orig/include/linux/slab.h 2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/include/linux/slab.h 2004-07-17 05:26:51.000000000 -0400
 #define SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA          0x00004000UL    /* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL    /* force alignment */
+#define SLAB_USE_UAF            0x00040000UL    /* use UAF allocator */
 /* flags passed to a constructor func */
 #define SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
Index: linux-2.4.24/include/asm-i386/io.h
===================================================================
--- linux-2.4.24.orig/include/asm-i386/io.h 2004-07-16 09:23:54.000000000 -0400
+++ linux-2.4.24/include/asm-i386/io.h 2004-07-17 05:27:02.000000000 -0400
 static inline unsigned long virt_to_phys(volatile void * address)
+#ifdef CONFIG_DEBUG_UAF
+        unsigned long addr = (unsigned long) address;
+        if (vmlist && addr >= VMALLOC_START && addr < VMALLOC_END) {
+                struct page *page = vmalloc_to_page((void *) address);
+                        unsigned long offset = addr & ~PAGE_MASK;
+                        address = page_address(page) + offset;
         return __pa(address);
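
With UAF objects living in vmalloc space, plain __pa() arithmetic no longer
applies, so virt_to_phys() has to find the backing page through the page tables
and re-attach the in-page offset. The offset bookkeeping on its own; the
addresses are invented and the vmalloc_to_page() lookup is faked with a
constant:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long vaddr = 0xe000d2a8UL;     /* vmalloc-space address      */
        unsigned long page_kva = 0xc5f31000UL;  /* page_address(page), faked  */

        unsigned long offset = vaddr & ~PAGE_MASK;      /* keep low 12 bits */
        printf("lowmem alias: 0x%lx\n", page_kva + offset); /* 0xc5f312a8 */
        return 0;
}
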
Index: linux-2.4.24/include/asm-i386/page.h
===================================================================
--- linux-2.4.24.orig/include/asm-i386/page.h 2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/include/asm-i386/page.h 2004-07-17 05:26:19.000000000 -0400
 #define VMALLOC_RESERVE         ((unsigned long)__VMALLOC_RESERVE)
 #define __MAXMEM                (-__PAGE_OFFSET-__VMALLOC_RESERVE)
 #define MAXMEM                  ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
+#ifndef CONFIG_DEBUG_UAF
 #define __pa(x)                 ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)                 ((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define virt_to_page(kaddr)     (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#define __pa(x) ({ \
+        unsigned long __pn, __fr; \
+        __pn = (unsigned long)(x) - PAGE_OFFSET; \
+        __fr = __pn >> PAGE_SHIFT; \
+        if (jiffies > HZ*3 && __fr >= max_mapnr) { \
+                printk("invalid arg __pa(0x%x)" \
+                       " at %s:%d\n", (unsigned) (x), \
+                       __FILE__, __LINE__); \
+#define __va(x) ({ \
+        unsigned long __pn; \
+        __pn = (unsigned long) (x) >> PAGE_SHIFT; \
+        if (jiffies > HZ*3 && __pn >= max_mapnr) { \
+                printk("invalid arg __va(0x%x)" \
+                       " at %s:%d\n", (unsigned) (x), \
+                       __FILE__, __LINE__); \
+        ((void *)((unsigned long)(x) + PAGE_OFFSET)); \
+#define PKMAP_BASE (0xfe000000UL)
+#define virt_to_page(ka) ({ \
+        if ((unsigned)(ka) >= VMALLOC_START && \
+            (unsigned)(ka) < VMALLOC_END) { \
+                _p = vmalloc_to_page((void *)(ka)); \
+                        printk(KERN_ALERT \
+                               "wrong address 0x%x, " \
+                               "VMALLOC_START 0x%x\n", \
+                               (unsigned)VMALLOC_START); \
+                        _p = mem_map + (__pa(ka) >> PAGE_SHIFT); \
+                _p = mem_map + (__pa(ka) >> PAGE_SHIFT); \
 #define VALID_PAGE(page)        ((page - mem_map) < max_mapnr)
 #define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
Index: linux-2.4.24/arch/i386/config.in
===================================================================
--- linux-2.4.24.orig/arch/i386/config.in 2004-07-16 09:33:02.000000000 -0400
+++ linux-2.4.24/arch/i386/config.in 2004-07-16 13:55:05.000000000 -0400
    bool '  Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
    bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
    bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
+   if [ "$CONFIG_DEBUG_SLAB" != "n" ]; then
+      bool '    Debug memory allocations (use-after-free via vmalloced space)' CONFIG_DEBUG_UAF
    bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
    bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
    bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
Index: linux-2.4.24/kernel/ksyms.c
===================================================================
--- linux-2.4.24.orig/kernel/ksyms.c 2004-07-16 09:36:49.000000000 -0400
+++ linux-2.4.24/kernel/ksyms.c 2004-07-16 13:55:05.000000000 -0400
 EXPORT_SYMBOL(kfree);
 EXPORT_SYMBOL(vfree);
 EXPORT_SYMBOL(__vmalloc);
+extern struct vm_struct *vmlist;
+EXPORT_SYMBOL(vmlist);
 EXPORT_SYMBOL(vmalloc_to_page);
 EXPORT_SYMBOL(mem_map);
 arch/i386/config.in     |    3
 fs/proc/proc_misc.c     |   19 +
 include/asm-i386/io.h   |   10
 include/asm-i386/page.h |   51 ++++
 include/linux/slab.h    |    1
 mm/slab.c               |  513 +++++++++++++++++++++++++++++++++++++++++++++++-
 10 files changed, 608 insertions(+), 4 deletions(-)