2 Index: linux-2.4.22-vanilla/mm/slab.c
3 ===================================================================
4 --- linux-2.4.22-vanilla.orig/mm/slab.c 2003-11-17 15:42:13.000000000 +0300
5 +++ linux-2.4.22-vanilla/mm/slab.c 2003-11-18 01:15:35.000000000 +0300
10 +#include <linux/vmalloc.h>
13 * Parameters for kmem_cache_reap
19 +#ifdef CONFIG_DEBUG_UAF
20 +void * uaf_alloc(kmem_cache_t *, int gfp_mask);
21 +int uaf_cache_free(kmem_cache_t *, void *addr);
22 +int uaf_free(void *addr);
23 +struct page *uaf_vaddr_to_page(void *obj);
28 @@ -1340,6 +1348,20 @@
29 unsigned long save_flags;
32 +#ifdef CONFIG_DEBUG_UAF
33 + /* try to use uaf-allocator first */
34 + objp = uaf_alloc(cachep, flags);
37 + unsigned long ctor_flags;
38 + ctor_flags = SLAB_CTOR_CONSTRUCTOR;
39 + if (!(flags & __GFP_WAIT))
40 + ctor_flags |= SLAB_CTOR_ATOMIC;
41 + cachep->ctor(objp, cachep, ctor_flags);
46 kmem_cache_alloc_head(cachep, flags);
48 local_irq_save(save_flags);
49 @@ -1576,6 +1598,10 @@
50 void kmem_cache_free (kmem_cache_t *cachep, void *objp)
53 +#ifdef CONFIG_DEBUG_UAF
54 + if (uaf_cache_free(cachep, objp))
58 CHECK_PAGE(virt_to_page(objp));
59 if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
60 @@ -1601,6 +1627,10 @@
64 +#ifdef CONFIG_DEBUG_UAF
65 + if (uaf_free((void *) objp))
68 local_irq_save(flags);
69 CHECK_PAGE(virt_to_page(objp));
70 c = GET_PAGE_CACHE(virt_to_page(objp));
71 @@ -2075,3 +2105,460 @@
78 +#ifdef CONFIG_DEBUG_UAF
80 +#define MAX_UAF_OBJ_SIZE 8 /* in pages */
81 +#define UAF_ASSERT(xxx) if (!(xxx)) BUG();
84 +#define uaf_printk(fmt,a...) printk(fmt, ##a)
86 +#define uaf_printk(a,...)
90 + atomic_t uaf_allocated;
91 + atomic_t uaf_allocations;
92 + atomic_t uaf_failed;
95 +static int uaf_max = 32768;
96 +static void *uaf_bitmap = NULL;
97 +static spinlock_t uaf_lock;
98 +static int uaf_last_found = 0;
99 +static int uaf_used = 0;
100 +static struct vm_struct *uaf_area = NULL;
101 +static struct uaf_stats uaf_stats[MAX_UAF_OBJ_SIZE + 1];
103 +static int __init uaf_setup(char *str)
105 + uaf_max = simple_strtoul(str, NULL, 0);
109 +__setup("uaf=", uaf_setup);
116 + printk("UAF: total vmalloc-space - %lu\n",
117 + VMALLOC_END - VMALLOC_START);
119 + uaf_area = get_vm_area(PAGE_SIZE * uaf_max, VM_ALLOC);
121 + printk(KERN_ALERT "UAF: can't reserve %lu bytes in KVA\n",
122 + PAGE_SIZE * uaf_max);
126 + printk("UAF: reserved %lu bytes in KVA at 0x%p\n",
127 + PAGE_SIZE * uaf_max, uaf_area->addr);
129 + /* how many bytes we need to track space usage? */
130 + size = uaf_max / 8 + 8;
132 + uaf_bitmap = vmalloc(size);
135 + "UAF: can't allocate %d bytes for bitmap\n", size);
138 + memset(uaf_bitmap, 0, size);
139 + spin_lock_init(&uaf_lock);
140 + memset(uaf_stats, 0, sizeof(uaf_stats));
142 + printk("UAF: allocated %d for bitmap\n", size);
145 +static int uaf_find(int len)
147 + int new_last_found = -1;
151 + j = uaf_last_found;
154 + i = find_next_zero_bit(uaf_bitmap, uaf_max, j);
155 + if (i >= uaf_max) {
156 + /* repeat from 0 */
158 + /* this is 2nd loop and it's useless */
162 + i = find_next_zero_bit(uaf_bitmap, uaf_max, 0);
166 + /* save found num for subsequent searches */
167 + if (new_last_found == -1)
168 + new_last_found = uaf_last_found = i;
169 + UAF_ASSERT(new_last_found < uaf_max);
173 + * OK. found first zero bit.
174 + * now, try to find requested cont. zero-space
177 + /* FIXME: implement multipage allocation! */
181 + j = find_next_bit(uaf_bitmap, uaf_max, i);
182 + if (++loop2 > 10000) {
183 + printk("ALERT: loop2=%d\n", loop2);
187 + } while (j - i < len);
190 + if (new_last_found == -1)
191 + uaf_last_found = i + 1;
192 + if (uaf_last_found >= uaf_max)
193 + uaf_last_found = 0;
197 +extern int __vmalloc_area_pages (unsigned long address, unsigned long size,
198 + int gfp_mask, pgprot_t prot,
199 + struct page ***pages);
200 +void * uaf_alloc(kmem_cache_t *cachep, int gfp_mask)
202 + struct page *ptrs[MAX_UAF_OBJ_SIZE];
203 + int size = cachep->objsize;
204 + struct page **pages;
205 + unsigned long flags;
206 + unsigned long addr;
207 + int i, j, err = -2000;
209 + if (uaf_bitmap == NULL)
212 + if (!(cachep->flags & SLAB_USE_UAF))
215 + pages = (struct page **) ptrs;
216 + size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
217 + /* FIXME: implement multipage allocation! */
220 + if (size > MAX_UAF_OBJ_SIZE) {
221 + printk(KERN_ALERT "size is too big: %d\n", size);
225 + if (uaf_used == uaf_max) {
226 + uaf_printk("UAF: space exhausted!\n");
227 + atomic_inc(&uaf_stats[size].uaf_failed);
232 + spin_lock_irqsave(&uaf_lock, flags);
233 + i = uaf_find(size);
235 + spin_unlock_irqrestore(&uaf_lock, flags);
236 + atomic_inc(&uaf_stats[size].uaf_failed);
239 + for (j = 0; j < size; j++) {
240 + UAF_ASSERT(!test_bit(i + j, uaf_bitmap));
241 + set_bit(i + j, uaf_bitmap);
244 + spin_unlock_irqrestore(&uaf_lock, flags);
246 + addr = ((unsigned long) uaf_area->addr) + (PAGE_SIZE * i);
247 + uaf_printk("UAF: found %d/%d, base 0x%p, map at 0x%lx: ", i,
248 + size, uaf_area->addr, addr);
250 + /* OK. we've found free space, let's allocate pages */
251 + memset(pages, 0, sizeof(struct page *) * MAX_UAF_OBJ_SIZE);
252 + for (j = 0; j < size; j++) {
253 + pages[j] = alloc_page(gfp_mask);
254 + if (pages[j] == NULL)
256 + uaf_printk("0x%p ", pages[j]);
259 + /* time to map just allocated pages */
260 + err = __vmalloc_area_pages(addr, PAGE_SIZE * size, gfp_mask,
261 + PAGE_KERNEL, &pages);
262 + pages = (struct page **) ptrs;
264 + /* put slab cache pointer in first page */
265 + ptrs[0]->list.next = (void *) cachep;
266 + uaf_printk(" -> 0x%lx\n", addr);
267 + atomic_inc(&uaf_stats[size].uaf_allocated);
268 + atomic_inc(&uaf_stats[size].uaf_allocations);
269 + if (!in_interrupt() && !in_softirq())
273 + //printk("UAF: found %d/%d, base 0x%p, map at 0x%lx\n",
274 + // i, cachep->objsize, uaf_area->addr, addr);
275 + return (void *) addr;
279 + printk(KERN_ALERT "can't map pages: %d\n", err);
280 + for (j = 0; j < size; j++)
282 + __free_page(pages[j]);
284 + /* can't find free pages */
285 + spin_lock_irqsave(&uaf_lock, flags);
286 + for (j = 0; j < size; j++) {
287 + clear_bit(i + j, uaf_bitmap);
290 + spin_unlock_irqrestore(&uaf_lock, flags);
291 + atomic_inc(&uaf_stats[size].uaf_failed);
296 +extern void free_area_pmd(pgd_t *dir, unsigned long address,
297 + unsigned long size);
298 +static void uaf_unmap(unsigned long address, unsigned long size)
300 + unsigned long end = (address + size);
303 + dir = pgd_offset_k(address);
306 + free_area_pmd(dir, address, end - address);
307 + address = (address + PGDIR_SIZE) & PGDIR_MASK;
309 + } while (address && (address < end));
312 + * we must not call smp_call_function() with interrupts disabled
313 + * otherwise we can get into deadlock
315 + if (!in_interrupt() && !in_softirq())
322 + * returns 1 if free was successful
324 +int uaf_cache_free(kmem_cache_t *cachep, void *addr)
326 + struct page *pages[MAX_UAF_OBJ_SIZE];
327 + int size = cachep->objsize;
328 + unsigned long flags;
331 + uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
333 + size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
334 + if (size > MAX_UAF_OBJ_SIZE)
337 + if (uaf_bitmap == NULL)
340 + /* first, check if address is in UAF space */
341 + if ((unsigned) addr < (unsigned) uaf_area->addr ||
342 + (unsigned) addr >= (unsigned) uaf_area->addr + uaf_area->size)
345 + UAF_ASSERT(((unsigned long) addr & ~PAGE_MASK) == 0UL);
347 + /* calculate placement in bitmap */
348 + i = (unsigned) addr - (unsigned) uaf_area->addr;
349 + UAF_ASSERT(i >= 0);
352 + /* collect all the pages */
353 + uaf_printk("free/unmap %d pages: ", size);
354 + /* NOTE: we don't need page_table_lock here. bits in the bitmap
355 + * protect those pte's from being reused */
356 + for (j = 0; j < size; j++) {
357 + unsigned long address;
358 + address = ((unsigned long) addr) + (PAGE_SIZE * j);
359 + pages[j] = vmalloc_to_page((void *) address);
360 + uaf_printk("0x%lx->0x%p ", address, pages[j]);
364 + uaf_unmap((unsigned long) addr, PAGE_SIZE * size);
365 + /* free all the pages */
366 + for (j = 0; j < size; j++)
367 + __free_page(pages[j]);
369 + spin_lock_irqsave(&uaf_lock, flags);
370 + for (j = 0; j < size; j++) {
371 + /* now check if the corresponding bit is set */
372 + UAF_ASSERT(i+j >= 0 && i+j < uaf_max);
373 + UAF_ASSERT(test_bit(i+j, uaf_bitmap));
375 + /* now free space in UAF */
376 + clear_bit(i+j, uaf_bitmap);
379 + spin_unlock_irqrestore(&uaf_lock, flags);
381 + atomic_dec(&uaf_stats[size].uaf_allocated);
383 + uaf_printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
384 + //printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
389 +struct page *uaf_vaddr_to_page(void *obj)
391 + if (uaf_bitmap == NULL)
394 + /* first, check if address is in UAF space */
395 + if ((unsigned) obj < (unsigned) uaf_area->addr ||
396 + (unsigned) obj >= (unsigned) uaf_area->addr + uaf_area->size)
399 + return vmalloc_to_page(obj);
402 +int uaf_free(void *obj)
404 + struct page *page = uaf_vaddr_to_page((void *) obj);
410 + c = GET_PAGE_CACHE(page);
411 + return uaf_cache_free(c, (void *) obj);
414 +int uaf_is_allocated(void *obj)
416 + unsigned long addr = (unsigned long) obj;
419 + if (uaf_bitmap == NULL)
423 + /* first, check if address is in UAF space */
424 + if (addr < (unsigned long) uaf_area->addr ||
425 + addr >= (unsigned long) uaf_area->addr + uaf_area->size)
428 + /* calculate placement in bitmap */
429 + i = (unsigned) addr - (unsigned) uaf_area->addr;
431 + return test_bit(i, uaf_bitmap);
434 +static void *uaf_s_start(struct seq_file *m, loff_t *pos)
439 + seq_printf(m, "size(pgs) allocated failed allocations. "
440 + "%d reserved, %d in use, %d last\n",
441 + uaf_max, uaf_used, uaf_last_found);
442 + else if (n > MAX_UAF_OBJ_SIZE)
449 +static void *uaf_s_next(struct seq_file *m, void *p, loff_t *pos)
451 + unsigned long n = *pos;
453 + if (n + 1 > MAX_UAF_OBJ_SIZE)
455 + return (void *) (n + 1);
458 +static void uaf_s_stop(struct seq_file *m, void *p)
462 +static int uaf_s_show(struct seq_file *m, void *p)
466 + if (n > MAX_UAF_OBJ_SIZE)
468 + seq_printf(m, "%d %d %d %d\n", n,
469 + atomic_read(&uaf_stats[n].uaf_allocated),
470 + atomic_read(&uaf_stats[n].uaf_failed),
471 + atomic_read(&uaf_stats[n].uaf_allocations));
475 +struct seq_operations uafinfo_op = {
476 + .start = uaf_s_start,
477 + .next = uaf_s_next,
478 + .stop = uaf_s_stop,
479 + .show = uaf_s_show,
482 +ssize_t uafinfo_write(struct file *file, const char *buffer,
483 + size_t count, loff_t *ppos)
485 + char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
488 + struct list_head *p;
490 + if (count > MAX_SLABINFO_WRITE)
492 + if (copy_from_user(&kbuf, buffer, count))
494 + kbuf[MAX_SLABINFO_WRITE] = '\0';
497 + key = strsep(&tmp, " \t\n");
500 + if (!strcmp(key, "on"))
502 + else if (!strcmp(key, "off"))
507 + name = strsep(&tmp, " \t\n");
511 + /* Find the cache in the chain of caches. */
512 + down(&cache_chain_sem);
513 + list_for_each(p,&cache_chain) {
514 + kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
516 + if (!strcmp(cachep->name, name)) {
518 + printk("UAF: use on %s\n", cachep->name);
519 + cachep->flags |= SLAB_USE_UAF;
521 + printk("UAF: dont use on %s\n", cachep->name);
522 + cachep->flags &= ~SLAB_USE_UAF;
527 + up(&cache_chain_sem);
532 Index: linux-2.4.22-vanilla/init/main.c
533 ===================================================================
534 --- linux-2.4.22-vanilla.orig/init/main.c 2003-11-03 23:22:13.000000000 +0300
535 +++ linux-2.4.22-vanilla/init/main.c 2003-11-18 01:06:45.000000000 +0300
537 * make syscalls (and thus be locked).
540 +#ifdef CONFIG_DEBUG_UAF
546 Index: linux-2.4.22-vanilla/fs/proc/proc_misc.c
547 ===================================================================
548 --- linux-2.4.22-vanilla.orig/fs/proc/proc_misc.c 2003-11-03 23:22:11.000000000 +0300
549 +++ linux-2.4.22-vanilla/fs/proc/proc_misc.c 2003-11-18 01:06:45.000000000 +0300
551 release: seq_release,
554 +#ifdef CONFIG_DEBUG_UAF
555 +extern struct seq_operations uafinfo_op;
556 +extern ssize_t uafinfo_write(struct file *, const char *, size_t, loff_t *);
557 +static int uafinfo_open(struct inode *inode, struct file *file)
559 + return seq_open(file, &uafinfo_op);
561 +static struct file_operations proc_uafinfo_operations = {
562 + .open = uafinfo_open,
564 + .write = uafinfo_write,
565 + .llseek = seq_lseek,
566 + .release = seq_release,
570 static int kstat_read_proc(char *page, char **start, off_t off,
571 int count, int *eof, void *data)
574 create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
575 create_seq_entry("partitions", 0, &proc_partitions_operations);
576 create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
577 +#ifdef CONFIG_DEBUG_UAF
578 + create_seq_entry("uafinfo",S_IWUSR|S_IRUGO,&proc_uafinfo_operations);
580 #ifdef CONFIG_MODULES
581 create_seq_entry("ksyms", 0, &proc_ksyms_operations);
583 Index: linux-2.4.22-vanilla/include/linux/slab.h
584 ===================================================================
585 --- linux-2.4.22-vanilla.orig/include/linux/slab.h 2003-11-17 15:42:13.000000000 +0300
586 +++ linux-2.4.22-vanilla/include/linux/slab.h 2003-11-18 02:14:40.000000000 +0300
588 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
589 #define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
590 #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
591 +#define SLAB_USE_UAF 0x00040000UL /* use UAF allocator */
593 /* flags passed to a constructor func */
594 #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */
595 Index: linux-2.4.22-vanilla/arch/i386/config.in
596 ===================================================================
597 --- linux-2.4.22-vanilla.orig/arch/i386/config.in 2003-11-03 23:22:06.000000000 +0300
598 +++ linux-2.4.22-vanilla/arch/i386/config.in 2003-11-18 01:06:45.000000000 +0300
600 bool ' Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
601 bool ' Debug high memory support' CONFIG_DEBUG_HIGHMEM
602 bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
603 + if [ "$CONFIG_DEBUG_SLAB" != "n" ]; then
604 + bool ' Debug memory allocations (use-after-free via vmalloced space)' CONFIG_DEBUG_UAF
606 bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
607 bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
608 bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
609 Index: linux-2.4.22-vanilla/mm/vmalloc.c
610 ===================================================================
611 --- linux-2.4.22-vanilla.orig/mm/vmalloc.c 2003-11-03 23:22:13.000000000 +0300
612 +++ linux-2.4.22-vanilla/mm/vmalloc.c 2003-11-18 01:06:45.000000000 +0300
614 } while (address < end);
617 -static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
618 +void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
626 -static inline int __vmalloc_area_pages (unsigned long address,
627 +int __vmalloc_area_pages (unsigned long address,
631 Index: linux-2.4.22-vanilla/include/asm-i386/io.h
632 ===================================================================
633 --- linux-2.4.22-vanilla.orig/include/asm-i386/io.h 2003-11-17 14:58:37.000000000 +0300
634 +++ linux-2.4.22-vanilla/include/asm-i386/io.h 2003-11-18 02:26:42.000000000 +0300
637 static inline unsigned long virt_to_phys(volatile void * address)
639 +#ifdef CONFIG_DEBUG_UAF
640 + unsigned long addr = (unsigned long) address;
641 + if (vmlist && addr >= VMALLOC_START && addr < VMALLOC_END) {
642 + struct page *page = vmalloc_to_page((void *) address);
644 + unsigned long offset = addr & ~PAGE_MASK;
645 + address = page_address(page) + offset;
649 return __pa(address);
652 Index: linux-2.4.22-vanilla/include/asm-i386/page.h
653 ===================================================================
654 --- linux-2.4.22-vanilla.orig/include/asm-i386/page.h 2003-11-03 23:51:46.000000000 +0300
655 +++ linux-2.4.22-vanilla/include/asm-i386/page.h 2003-11-18 02:14:38.000000000 +0300
657 #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
658 #define __MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
659 #define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
661 +#ifndef CONFIG_DEBUG_UAF
662 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
663 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
664 #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
666 +#define __pa(x) ({ \
667 + unsigned long __pn, __fr; \
668 + __pn = (unsigned long)(x)-PAGE_OFFSET; \
669 + __fr = __pn >> PAGE_SHIFT; \
670 + if (jiffies > HZ*3 && __fr >= max_mapnr) { \
671 + printk("invalid arg __pa(0x%x)" \
672 + " at %s:%d\n", (unsigned) (x), \
673 + __FILE__, __LINE__); \
679 +#define __va(x) ({ \
680 + unsigned long __pn; \
681 + __pn = (unsigned long) (x) >> PAGE_SHIFT; \
682 + if (jiffies > HZ*3 && __pn >= max_mapnr) { \
683 + printk("invalid arg __va(0x%x)" \
684 + " at %s:%d\n", (unsigned) (x), \
685 + __FILE__, __LINE__); \
688 + ((void *)((unsigned long)(x) + PAGE_OFFSET)); \
691 +#define virt_to_page(ka) ({ \
693 + if ((unsigned long)(ka) >= VMALLOC_START) { \
694 + _p = vmalloc_to_page((void *)(ka)); \
697 + _p = mem_map+(__pa(ka) >> PAGE_SHIFT); \
703 #define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
705 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
708 arch/i386/config.in | 3
709 fs/proc/proc_misc.c | 19 +
710 include/asm-i386/io.h | 10
711 include/asm-i386/page.h | 40 +++
712 include/linux/slab.h | 1
714 mm/slab.c | 487 ++++++++++++++++++++++++++++++++++++++++++++++++
716 8 files changed, 565 insertions(+), 2 deletions(-)