%patch
Index: linux-2.4.24/mm/slab.c
===================================================================
--- linux-2.4.24.orig/mm/slab.c	2004-02-06 11:15:22.000000000 +0300
+++ linux-2.4.24/mm/slab.c	2004-02-07 00:42:38.000000000 +0300
@@ -97,6 +97,8 @@
 #define	FORCED_DEBUG	0
 #endif
 
+#include <linux/vmalloc.h>
+
 /*
  * Parameters for kmem_cache_reap
  */
@@ -825,6 +827,12 @@
 
 	return cachep;
 }
+#ifdef CONFIG_DEBUG_UAF
+void * uaf_alloc(kmem_cache_t *, int gfp_mask);
+int uaf_cache_free(kmem_cache_t *, void *addr);
+int uaf_free(void *addr);
+struct page *uaf_vaddr_to_page(void *obj);
+#endif
 
 #if DEBUG
 /*
@@ -1342,6 +1350,20 @@
 	unsigned long save_flags;
 	void* objp;
 
+#ifdef CONFIG_DEBUG_UAF
+	/* try to use uaf-allocator first */
+	objp = uaf_alloc(cachep, flags);
+	if (objp) {
+		if (cachep->ctor) {
+			unsigned long ctor_flags;
+			ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+			if (!(flags & __GFP_WAIT))
+				ctor_flags |= SLAB_CTOR_ATOMIC;
+			cachep->ctor(objp, cachep, ctor_flags);
+		}
+		return objp;
+	}
+#endif
 	kmem_cache_alloc_head(cachep, flags);
try_again:
 	local_irq_save(save_flags);
@@ -1436,13 +1458,17 @@
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		objp -= BYTES_PER_WORD;
-		if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
+		if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2) {
 			/* Either write before start, or a double free. */
+			printk("inconsistency at start of %s\n", cachep->name);
 			BUG();
+		}
 		if (xchg((unsigned long *)(objp+cachep->objsize -
-				BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
+				BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2) {
 			/* Either write past end, or a double free. */
+			printk("inconsistency at end of %s\n", cachep->name);
 			BUG();
+		}
 	}
 	if (cachep->flags & SLAB_POISON)
 		kmem_poison_obj(cachep, objp);
@@ -1578,6 +1604,10 @@
 void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 {
 	unsigned long flags;
+#ifdef CONFIG_DEBUG_UAF
+	if (uaf_cache_free(cachep, objp))
+		return;
+#endif
 #if DEBUG
 	CHECK_PAGE(virt_to_page(objp));
 	if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
@@ -1603,6 +1633,10 @@
 
 	if (!objp)
 		return;
+#ifdef CONFIG_DEBUG_UAF
+	if (uaf_free((void *) objp))
+		return;
+#endif
 	local_irq_save(flags);
 	CHECK_PAGE(virt_to_page(objp));
 	c = GET_PAGE_CACHE(virt_to_page(objp));
@@ -2078,3 +2112,471 @@
 #endif
 }
 #endif
+
+
+
+#ifdef CONFIG_DEBUG_UAF
+
+#define MAX_UAF_OBJ_SIZE	8	/* in pages */
+#define UAF_ASSERT(xxx)		if (!(xxx)) BUG();
+#define UAF_DEBUG__
+#ifdef UAF_DEBUG
+#define uaf_printk(fmt,a...)	printk(fmt, ##a)
+#else
+#define uaf_printk(a...)
+#endif
+
+struct uaf_stats {
+	atomic_t uaf_allocated;
+	atomic_t uaf_allocations;
+	atomic_t uaf_failed;
+};
+
+static int uaf_max = 32768;
+static void *uaf_bitmap = NULL;
+static spinlock_t uaf_lock;
+static int uaf_last_found = 0;
+static int uaf_used = 0;
+static struct vm_struct *uaf_area = NULL;
+static struct uaf_stats uaf_stats[MAX_UAF_OBJ_SIZE + 1];
+
+static int __init uaf_setup(char *str)
+{
+	uaf_max = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+
+__setup("uaf=", uaf_setup);
+
+
+void uaf_init(void)
+{
+	int size;
+
+	printk("UAF: total vmalloc-space - %lu\n",
+	       VMALLOC_END - VMALLOC_START);
+
+	uaf_area = get_vm_area(PAGE_SIZE * uaf_max, VM_ALLOC);
+	if (!uaf_area) {
+		printk(KERN_ALERT "UAF: can't reserve %lu bytes in KVA\n",
+		       PAGE_SIZE * uaf_max);
+		return;
+	}
+
+	printk("UAF: reserved %lu bytes in KVA at 0x%p\n",
+	       PAGE_SIZE * uaf_max, uaf_area->addr);
+
+	/* how many bytes do we need to track the space usage? */
+	size = uaf_max / 8 + 8;
+
+	uaf_bitmap = vmalloc(size);
+	if (!uaf_bitmap) {
+		printk(KERN_ALERT
+		       "UAF: can't allocate %d bytes for bitmap\n", size);
+		return;
+	}
+	memset(uaf_bitmap, 0, size);
+	spin_lock_init(&uaf_lock);
+	memset(uaf_stats, 0, sizeof(uaf_stats));
+
+	printk("UAF: allocated %d bytes for the bitmap\n", size);
+}
+
+static int uaf_find(int len)
+{
+	int new_last_found = -1;
+	int loop = 0;
+	int i, j;
+
+	j = uaf_last_found;
+
+	do {
+		i = find_next_zero_bit(uaf_bitmap, uaf_max, j);
+		if (i >= uaf_max) {
+			/* repeat from 0 */
+			if (++loop > 1) {
+				/* this is the 2nd loop and it's useless */
+				return -1;
+			}
+
+			i = find_next_zero_bit(uaf_bitmap, uaf_max, 0);
+			if (i >= uaf_max)
+				return -1;
+
+			/* save the found num for subsequent searches */
+			if (new_last_found == -1)
+				new_last_found = uaf_last_found = i;
+			UAF_ASSERT(new_last_found < uaf_max);
+		}
+
+		/*
+		 * OK, found the first zero bit.
+		 * Now try to find the requested contiguous zero space.
+		 */
+
+		/* FIXME: implement multipage allocation! */
+		break;
+
+		/*
+		j = find_next_bit(uaf_bitmap, uaf_max, i);
+		if (++loop2 > 10000) {
+			printk("ALERT: loop2=%d\n", loop2);
+			return -1;
+		}
+		*/
+	} while (j - i < len);
+
+	/* found! */
+	if (new_last_found == -1)
+		uaf_last_found = i + 1;
+	if (uaf_last_found >= uaf_max)
+		uaf_last_found = 0;
+	return i;
+}
+
+extern int __vmalloc_area_pages (unsigned long address, unsigned long size,
+				 int gfp_mask, pgprot_t prot,
+				 struct page ***pages);
+void * uaf_alloc(kmem_cache_t *cachep, int gfp_mask)
+{
+	struct page *ptrs[MAX_UAF_OBJ_SIZE];
+	int size = cachep->objsize;
+	struct page **pages;
+	unsigned long flags;
+	unsigned long addr;
+	int i, j, err = -2000;
+
+	if (uaf_bitmap == NULL)
+		return NULL;
+
+	if (!(cachep->flags & SLAB_USE_UAF))
+		return NULL;
+
+	pages = (struct page **) ptrs;
+	size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+	/* FIXME: implement multipage allocation! */
+	if (size > 1)
+		return NULL;
+	if (size > MAX_UAF_OBJ_SIZE) {
+		printk(KERN_ALERT "size is too big: %d\n", size);
+		return NULL;
+	}
+
+	if (uaf_used == uaf_max) {
+		uaf_printk("UAF: space exhausted!\n");
+		atomic_inc(&uaf_stats[size].uaf_failed);
+		return NULL;
+	}
+
+
+	spin_lock_irqsave(&uaf_lock, flags);
+	i = uaf_find(size);
+	if (i < 0) {
+		spin_unlock_irqrestore(&uaf_lock, flags);
+		atomic_inc(&uaf_stats[size].uaf_failed);
+		return NULL;
+	}
+	for (j = 0; j < size; j++) {
+		UAF_ASSERT(!test_bit(i + j, uaf_bitmap));
+		set_bit(i + j, uaf_bitmap);
+		uaf_used++;
+	}
+	spin_unlock_irqrestore(&uaf_lock, flags);
+
+	addr = ((unsigned long) uaf_area->addr) + (PAGE_SIZE * i);
+	uaf_printk("UAF: found %d/%d, base 0x%p, map at 0x%lx: ", i,
+		   size, uaf_area->addr, addr);
+
+	/* OK, we've found free space; now allocate the pages */
+	memset(pages, 0, sizeof(struct page *) * MAX_UAF_OBJ_SIZE);
+	for (j = 0; j < size; j++) {
+		pages[j] = alloc_page(gfp_mask);
+		if (pages[j] == NULL)
+			goto nomem;
+		uaf_printk("0x%p ", pages[j]);
+	}
+
+	/* time to map the just-allocated pages */
+	err = __vmalloc_area_pages(addr, PAGE_SIZE * size, gfp_mask,
+				   PAGE_KERNEL, &pages);
+	pages = (struct page **) ptrs;
+	if (err == 0) {
+		/* put the slab cache pointer in the first page */
+		ptrs[0]->list.next = (void *) cachep;
+		uaf_printk(" -> 0x%lx\n", addr);
+		atomic_inc(&uaf_stats[size].uaf_allocated);
+		atomic_inc(&uaf_stats[size].uaf_allocations);
+		if (!in_interrupt() && !in_softirq())
+			flush_tlb_all();
+		else
+			local_flush_tlb();
+		size = cachep->objsize;
+		if (size < PAGE_SIZE)
+			memset((char *) addr + size, 0xa7, PAGE_SIZE - size);
+		return (void *) addr;
+	}
+
+nomem:
+	printk(KERN_ALERT "can't map pages: %d\n", err);
+	for (j = 0; j < size; j++)
+		if (pages[j])
+			__free_page(pages[j]);
+
+	/* couldn't allocate or map the pages; release the space again */
+	spin_lock_irqsave(&uaf_lock, flags);
+	for (j = 0; j < size; j++) {
+		clear_bit(i + j, uaf_bitmap);
+		uaf_used--;
+	}
+	spin_unlock_irqrestore(&uaf_lock, flags);
+	atomic_inc(&uaf_stats[size].uaf_failed);
+
+	return NULL;
+}
+
+extern void free_area_pmd(pgd_t *dir, unsigned long address,
+			  unsigned long size);
+static void uaf_unmap(unsigned long address, unsigned long size)
+{
+	unsigned long end = (address + size);
+	pgd_t *dir;
+
+	dir = pgd_offset_k(address);
+	flush_cache_all();
+	do {
+		free_area_pmd(dir, address, end - address);
+		address = (address + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	} while (address && (address < end));
+
+	/*
+	 * we must not call smp_call_function() with interrupts disabled,
+	 * otherwise we can get into a deadlock
+	 */
+	if (!in_interrupt() && !in_softirq())
+		flush_tlb_all();
+	else
+		local_flush_tlb();
+}
+
+/*
+ * returns 1 if the free was successful
+ */
+int uaf_cache_free(kmem_cache_t *cachep, void *addr)
+{
+	struct page *pages[MAX_UAF_OBJ_SIZE];
+	int size = cachep->objsize;
+	unsigned long flags;
+	int i, j;
+
+	uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
+
+	size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+	if (size > MAX_UAF_OBJ_SIZE)
+		return 0;
+
+	if (uaf_bitmap == NULL)
+		return 0;
+
+	/* first, check if the address is in UAF space */
+	if ((unsigned) addr < (unsigned) uaf_area->addr ||
+	    (unsigned) addr >= (unsigned) uaf_area->addr + uaf_area->size)
+		return 0;
+
+	if (cachep->objsize < PAGE_SIZE) {
+		unsigned char *a = (void *) addr;
+		for (i = 0; i < PAGE_SIZE - cachep->objsize; i++)
+			if (a[cachep->objsize + i] != 0xa7) {
+				printk("corruption(0x%x) at %u in %s/0x%p\n",
+				       (unsigned) a[cachep->objsize + i],
+				       cachep->objsize + i, cachep->name, addr);
+				BUG();
+			}
+	}
+	UAF_ASSERT(((unsigned long) addr & ~PAGE_MASK) == 0UL);
+
+	/* calculate the placement in the bitmap */
+	i = (unsigned) addr - (unsigned) uaf_area->addr;
+	UAF_ASSERT(i >= 0);
+	i = i / PAGE_SIZE;
+
+	/* collect all the pages */
+	uaf_printk("free/unmap %d pages: ", size);
+	/* NOTE: we don't need page_table_lock here; the bits in the
+	 * bitmap protect those PTEs from being reused */
+	for (j = 0; j < size; j++) {
+		unsigned long address;
+		address = ((unsigned long) addr) + (PAGE_SIZE * j);
+		pages[j] = vmalloc_to_page((void *) address);
+		uaf_printk("0x%lx->0x%p ", address, pages[j]);
+	}
+	uaf_printk("\n");
+
+	uaf_unmap((unsigned long) addr, PAGE_SIZE * size);
+	/* free all the pages */
+	for (j = 0; j < size; j++)
+		__free_page(pages[j]);
+
+	spin_lock_irqsave(&uaf_lock, flags);
+	for (j = 0; j < size; j++) {
+		/* check that the corresponding bit is set */
+		UAF_ASSERT(i+j >= 0 && i+j < uaf_max);
+		UAF_ASSERT(test_bit(i+j, uaf_bitmap));
+
+		/* now free the space in UAF */
+		clear_bit(i+j, uaf_bitmap);
+		uaf_used--;
+	}
+	spin_unlock_irqrestore(&uaf_lock, flags);
+
+	atomic_dec(&uaf_stats[size].uaf_allocated);
+
+	uaf_printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
+	//printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
+
+	return 1;
+}
+
+struct page *uaf_vaddr_to_page(void *obj)
+{
+	if (uaf_bitmap == NULL)
+		return NULL;
+
+	/* first, check if the address is in UAF space */
+	if ((unsigned) obj < (unsigned) uaf_area->addr ||
+	    (unsigned) obj >= (unsigned) uaf_area->addr + uaf_area->size)
+		return NULL;
+
+	return vmalloc_to_page(obj);
+}
+
+int uaf_free(void *obj)
+{
+	struct page *page = uaf_vaddr_to_page((void *) obj);
+	kmem_cache_t *c;
+
+	if (!page)
+		return 0;
+
+	c = GET_PAGE_CACHE(page);
+	return uaf_cache_free(c, (void *) obj);
+}
+
+int uaf_is_allocated(void *obj)
+{
+	unsigned long addr = (unsigned long) obj;
+	int i;
+
+	if (uaf_bitmap == NULL)
+		return 0;
+
+	addr &= PAGE_MASK;
+	/* first, check if the address is in UAF space */
+	if (addr < (unsigned long) uaf_area->addr ||
+	    addr >= (unsigned long) uaf_area->addr + uaf_area->size)
+		return 0;
+
+	/* calculate the placement in the bitmap */
+	i = (unsigned) addr - (unsigned) uaf_area->addr;
+	i = i / PAGE_SIZE;
+	return test_bit(i, uaf_bitmap);
+}
+
+static void *uaf_s_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t n = *pos;
+
+	if (!n)
+		seq_printf(m, "size(pgs) allocated failed allocations. "
+			   "%d reserved, %d in use, %d last\n",
+			   uaf_max, uaf_used, uaf_last_found);
+	else if (n > MAX_UAF_OBJ_SIZE)
+		return NULL;
+
+	*pos = 1;
+	return (void *) 1;
+}
+
+static void *uaf_s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	unsigned long n = *pos;
+	++*pos;
+	if (n + 1 > MAX_UAF_OBJ_SIZE)
+		return NULL;
+	return (void *) (n + 1);
+}
+
+static void uaf_s_stop(struct seq_file *m, void *p)
+{
+}
+
+static int uaf_s_show(struct seq_file *m, void *p)
+{
+	int n = (int) p;
+
+	if (n > MAX_UAF_OBJ_SIZE)
+		return 0;
+	seq_printf(m, "%d %d %d %d\n", n,
+		   atomic_read(&uaf_stats[n].uaf_allocated),
+		   atomic_read(&uaf_stats[n].uaf_failed),
+		   atomic_read(&uaf_stats[n].uaf_allocations));
+	return 0;
+}
+
+struct seq_operations uafinfo_op = {
+	.start	= uaf_s_start,
+	.next	= uaf_s_next,
+	.stop	= uaf_s_stop,
+	.show	= uaf_s_show,
+};
+
+ssize_t uafinfo_write(struct file *file, const char *buffer,
+		      size_t count, loff_t *ppos)
+{
+	char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+	char *key, *name;
+	int res;
+	struct list_head *p;
+
+	if (count > MAX_SLABINFO_WRITE)
+		return -EINVAL;
+	if (copy_from_user(&kbuf, buffer, count))
+		return -EFAULT;
+	kbuf[MAX_SLABINFO_WRITE] = '\0';
+
+	tmp = kbuf;
+	key = strsep(&tmp, " \t\n");
+	if (!key)
+		return -EINVAL;
+	if (!strcmp(key, "on"))
+		res = 1;
+	else if (!strcmp(key, "off"))
+		res = 0;
+	else
+		return -EINVAL;
+
+	name = strsep(&tmp, " \t\n");
+	if (!name)
+		return -EINVAL;
+
+	/* Find the cache in the chain of caches. */
+	down(&cache_chain_sem);
+	list_for_each(p,&cache_chain) {
+		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+
+		if (!strcmp(cachep->name, name)) {
+			if (res) {
+				printk("UAF: use on %s\n", cachep->name);
+				cachep->flags |= SLAB_USE_UAF;
+			} else {
+				printk("UAF: don't use on %s\n", cachep->name);
+				cachep->flags &= ~SLAB_USE_UAF;
+			}
+			break;
+		}
+	}
+	up(&cache_chain_sem);
+	return count;
+}
+#endif
+
Index: linux-2.4.24/mm/vmalloc.c
===================================================================
--- linux-2.4.24.orig/mm/vmalloc.c	2004-01-10 17:05:20.000000000 +0300
+++ linux-2.4.24/mm/vmalloc.c	2004-02-06 11:17:09.000000000 +0300
@@ -53,7 +53,7 @@
 	} while (address < end);
 }
 
-static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
+void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
 {
 	pmd_t * pmd;
 	unsigned long end;
@@ -152,7 +152,7 @@
 	return 0;
 }
 
-static inline int __vmalloc_area_pages (unsigned long address,
+int __vmalloc_area_pages (unsigned long address,
 					unsigned long size,
 					int gfp_mask,
 					pgprot_t prot,
Index: linux-2.4.24/init/main.c
===================================================================
--- linux-2.4.24.orig/init/main.c	2004-01-10 17:05:59.000000000 +0300
+++ linux-2.4.24/init/main.c	2004-02-06 11:17:43.000000000 +0300
@@ -437,6 +437,9 @@
 #if defined(CONFIG_SYSVIPC)
 	ipc_init();
 #endif
+#ifdef CONFIG_DEBUG_UAF
+	uaf_init();
+#endif
 	rest_init();
 }
 
Index: linux-2.4.24/fs/proc/proc_misc.c
===================================================================
--- linux-2.4.24.orig/fs/proc/proc_misc.c	2004-01-10 17:05:55.000000000 +0300
+++ linux-2.4.24/fs/proc/proc_misc.c	2004-02-06 11:35:27.000000000 +0300
@@ -303,6 +303,22 @@
 	release:	seq_release,
 };
 
+#ifdef CONFIG_DEBUG_UAF
+extern struct seq_operations uafinfo_op;
+extern ssize_t uafinfo_write(struct file *, const char *, size_t, loff_t *);
+static int uafinfo_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &uafinfo_op);
+}
+static struct file_operations proc_uafinfo_operations = {
+	.open		= uafinfo_open,
+	.read		= seq_read,
+	.write		= uafinfo_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+#endif
+
 static int kstat_read_proc(char *page, char **start, off_t off,
 				 int count, int *eof, void *data)
 {
@@ -640,6 +656,9 @@
 	create_seq_entry("iomem", 0, &proc_iomem_operations);
 	create_seq_entry("partitions", 0, &proc_partitions_operations);
 	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#ifdef CONFIG_DEBUG_UAF
+	create_seq_entry("uafinfo",S_IWUSR|S_IRUGO,&proc_uafinfo_operations);
+#endif
 #ifdef CONFIG_MODULES
 	create_seq_entry("ksyms", 0, &proc_ksyms_operations);
 #endif
Index: linux-2.4.24/include/linux/slab.h
===================================================================
--- linux-2.4.24.orig/include/linux/slab.h	2004-01-29 15:01:10.000000000 +0300
+++ linux-2.4.24/include/linux/slab.h	2004-02-06 11:18:26.000000000 +0300
@@ -40,6 +40,7 @@
 #define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
+#define SLAB_USE_UAF		0x00040000UL	/* use UAF allocator */
 
 /* flags passed to a constructor func */
 #define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
Index: linux-2.4.24/include/asm-i386/io.h
===================================================================
--- linux-2.4.24.orig/include/asm-i386/io.h	2004-01-29 15:01:10.000000000 +0300
+++ linux-2.4.24/include/asm-i386/io.h	2004-02-06 11:18:26.000000000 +0300
@@ -75,6 +75,16 @@
 
 static inline unsigned long virt_to_phys(volatile void * address)
 {
+#ifdef CONFIG_DEBUG_UAF
+	unsigned long addr = (unsigned long) address;
+	if (vmlist && addr >= VMALLOC_START && addr < VMALLOC_END) {
+		struct page *page = vmalloc_to_page((void *) address);
+		if (page) {
+			unsigned long offset = addr & ~PAGE_MASK;
+			address = page_address(page) + offset;
+		}
+	}
+#endif
 	return __pa(address);
 }
 
Index: linux-2.4.24/include/asm-i386/page.h
===================================================================
--- linux-2.4.24.orig/include/asm-i386/page.h	2004-01-14 02:58:46.000000000 +0300
+++ linux-2.4.24/include/asm-i386/page.h	2004-02-06 11:17:09.000000000 +0300
@@ -131,9 +131,49 @@
 #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
 #define __MAXMEM		(-__PAGE_OFFSET-__VMALLOC_RESERVE)
 #define MAXMEM			((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
+
+#ifndef CONFIG_DEBUG_UAF
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#else
+#define __pa(x)	({ \
+	unsigned long __pn, __fr; \
+	__pn = (unsigned long)(x)-PAGE_OFFSET; \
+	__fr = __pn >> PAGE_SHIFT; \
+	if (jiffies > HZ*3 && __fr >= max_mapnr) { \
+		printk("invalid arg __pa(0x%x)" \
+		       " at %s:%d\n", (unsigned) (x), \
+		       __FILE__, __LINE__); \
+		dump_stack(); \
+	} \
+	__pn; \
+})
+
+#define __va(x)	({ \
+	unsigned long __pn; \
+	__pn = (unsigned long) (x) >> PAGE_SHIFT; \
+	if (jiffies > HZ*3 && __pn >= max_mapnr) { \
+		printk("invalid arg __va(0x%x)" \
+		       " at %s:%d\n", (unsigned) (x), \
+		       __FILE__, __LINE__); \
+		dump_stack(); \
+	} \
+	((void *)((unsigned long)(x) + PAGE_OFFSET)); \
+})
+
+#define virt_to_page(ka) ({ \
+	struct page *_p; \
+	if ((unsigned long)(ka) >= VMALLOC_START) { \
+		_p = vmalloc_to_page((void *)(ka)); \
+		BUG_ON(!_p); \
+	} else \
+		_p = mem_map + (__pa(ka) >> PAGE_SHIFT); \
+	(_p); \
+})
+#endif
+
+
 #define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
Index: linux-2.4.24/arch/i386/config.in
===================================================================
--- linux-2.4.24.orig/arch/i386/config.in	2004-01-14 02:58:46.000000000 +0300
+++ linux-2.4.24/arch/i386/config.in	2004-02-06 11:17:09.000000000 +0300
@@ -508,6 +508,9 @@
    bool '  Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
    bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
    bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
+   if [ "$CONFIG_DEBUG_SLAB" != "n" ]; then
+      bool '    Debug memory allocations (use-after-free via vmalloced space)' CONFIG_DEBUG_UAF
+   fi
    bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
    bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
    bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK

%diffstat
 arch/i386/config.in     |    3 
 fs/proc/proc_misc.c     |   19 +
 include/asm-i386/io.h   |   10 
 include/asm-i386/page.h |   40 +++
 include/linux/slab.h    |    1 
 init/main.c             |    3 
 mm/slab.c               |  506 +++++++++++++++++++++++++++++++++++++++++++++++-
 mm/vmalloc.c            |    4 
 8 files changed, 582 insertions(+), 4 deletions(-)
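
Usage note (editor's illustration, not part of the patch): UAF checking is switched on per cache at runtime through the new /proc file, e.g.

	echo "on dentry_cache" > /proc/uafinfo
	echo "off dentry_cache" > /proc/uafinfo

while reading /proc/uafinfo reports the live/failed/total allocation counters per object size in pages. The reserved window defaults to 32768 pages and can be resized with the uaf= boot parameter. The sketch below flips the same switch programmatically; it is hypothetical code, and since kmem_cache_t is opaque outside mm/slab.c in 2.4 it would have to sit in slab.c itself ("foo_cache", foo_cachep and uaf_selftest() are made-up names):

	/* Hypothetical self-test, assuming CONFIG_DEBUG_UAF=y and
	 * placement in mm/slab.c next to the allocator. */
	static kmem_cache_t *foo_cachep;

	static void uaf_selftest(void)
	{
		void *obj;

		foo_cachep = kmem_cache_create("foo_cache", 128, 0, 0,
					       NULL, NULL);
		if (!foo_cachep)
			return;

		/* what "echo on foo_cache > /proc/uafinfo" does; note that
		 * SLAB_USE_UAF must not be passed to kmem_cache_create(),
		 * which BUG()s on flags outside CREATE_MASK */
		foo_cachep->flags |= SLAB_USE_UAF;

		/* served by uaf_alloc(): one page of the reserved vmalloc
		 * window, with the unused tail poisoned with 0xa7 */
		obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
		if (obj && !uaf_is_allocated(obj))
			printk("UAF window full, fell back to regular slab\n");

		/* uaf_cache_free() unmaps the page, so a later dereference
		 * such as "*(int *) obj = 0;" oopses right away instead of
		 * silently corrupting a reused object */
		if (obj)
			kmem_cache_free(foo_cachep, obj);

		kmem_cache_destroy(foo_cachep);
	}

Objects larger than one page still take the regular slab path (multipage allocation is a FIXME in uaf_find()/uaf_alloc()), as does everything else once the window fills up, so a dereference that does not oops proves nothing by itself.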