Whamcloud - gitweb
land v0.9.1 on HEAD, in preparation for a 1.0.x branch
[fs/lustre-release.git] / lustre / kernel_patches / patches / slab-use-after-free-debug-2.4.22.patch
1 %patch
2 Index: linux-2.4.22-vanilla/mm/slab.c
3 ===================================================================
4 --- linux-2.4.22-vanilla.orig/mm/slab.c 2003-11-17 15:42:13.000000000 +0300
5 +++ linux-2.4.22-vanilla/mm/slab.c      2003-11-18 01:15:35.000000000 +0300
6 @@ -97,6 +97,8 @@
7  #define        FORCED_DEBUG    0
8  #endif
9  
10 +#include       <linux/vmalloc.h>
11 +
12  /*
13   * Parameters for kmem_cache_reap
14   */
15 @@ -825,6 +827,12 @@
16         return cachep;
17  }
18  
19 +#ifdef CONFIG_DEBUG_UAF
20 +void * uaf_alloc(kmem_cache_t *, int gfp_mask);
21 +int uaf_cache_free(kmem_cache_t *, void *addr);
22 +int uaf_free(void *addr);
23 +struct page *uaf_vaddr_to_page(void *obj);
24 +#endif
25  
26  #if DEBUG
27  /*
28 @@ -1340,6 +1348,20 @@
29         unsigned long save_flags;
30         void* objp;
31  
32 +#ifdef CONFIG_DEBUG_UAF
33 +       /* try to use uaf-allocator first */
34 +       objp = uaf_alloc(cachep, flags);
35 +       if (objp) {
36 +               if (cachep->ctor) {
37 +                       unsigned long ctor_flags;
38 +                       ctor_flags = SLAB_CTOR_CONSTRUCTOR;
39 +                       if (!(flags & __GFP_WAIT))
40 +                               ctor_flags |= SLAB_CTOR_ATOMIC;
41 +                       cachep->ctor(objp, cachep, ctor_flags);
42 +               }
43 +               return objp;
44 +       }
45 +#endif
46         kmem_cache_alloc_head(cachep, flags);
47  try_again:
48         local_irq_save(save_flags);
49 @@ -1576,6 +1598,10 @@
50  void kmem_cache_free (kmem_cache_t *cachep, void *objp)
51  {
52         unsigned long flags;
53 +#ifdef CONFIG_DEBUG_UAF
54 +       if (uaf_cache_free(cachep, objp))
55 +               return;
56 +#endif
57  #if DEBUG
58         CHECK_PAGE(virt_to_page(objp));
59         if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
60 @@ -1601,6 +1627,10 @@
61  
62         if (!objp)
63                 return;
64 +#ifdef CONFIG_DEBUG_UAF
65 +       if (uaf_free((void *) objp))
66 +               return;
67 +#endif
68         local_irq_save(flags);
69         CHECK_PAGE(virt_to_page(objp));
70         c = GET_PAGE_CACHE(virt_to_page(objp));
71 @@ -2075,3 +2105,460 @@
72  #endif
73  }
74  #endif
75 +
76 +
77 +
78 +#ifdef CONFIG_DEBUG_UAF
79 +
80 +#define MAX_UAF_OBJ_SIZE       8       /* in pages */
81 +#define UAF_ASSERT(xxx)                if (!(xxx)) BUG();
82 +#define UAF_DEBUG__
83 +#ifdef UAF_DEBUG
84 +#define uaf_printk(fmt,a...)   printk(fmt, ##a)
85 +#else
86 +#define uaf_printk(a,...)      
87 +#endif
88 +
89 +struct uaf_stats {
90 +       atomic_t uaf_allocated;
91 +       atomic_t uaf_allocations;
92 +       atomic_t uaf_failed;
93 +};
94 +
95 +static int uaf_max = 32768;
96 +static void *uaf_bitmap = NULL;
97 +static spinlock_t uaf_lock;
98 +static int uaf_last_found = 0;
99 +static int uaf_used = 0;
100 +static struct vm_struct *uaf_area = NULL;
101 +static struct uaf_stats uaf_stats[MAX_UAF_OBJ_SIZE + 1];
102 +
103 +static int __init uaf_setup(char *str)
104 +{
105 +        uaf_max = simple_strtoul(str, NULL, 0);
106 +        return 1;
107 +}
108 +
109 +__setup("uaf=", uaf_setup);
110 +
111 +
112 +void uaf_init(void)
113 +{
114 +       int size;
115 +
116 +       printk("UAF: total vmalloc-space - %lu\n",
117 +                       VMALLOC_END - VMALLOC_START);
118 +
119 +       uaf_area = get_vm_area(PAGE_SIZE * uaf_max, VM_ALLOC);
120 +       if (!uaf_area) {
121 +               printk(KERN_ALERT "UAF: can't reserve %lu bytes in KVA\n",
122 +                               PAGE_SIZE * uaf_max);
123 +               return;
124 +       }
125 +       
126 +       printk("UAF: reserved %lu bytes in KVA at 0x%p\n",
127 +                       PAGE_SIZE * uaf_max, uaf_area->addr);
128 +
129 +       /* how many bytes we need to track space usage? */
130 +       size = uaf_max / 8 + 8;
131 +
132 +       uaf_bitmap = vmalloc(size);
133 +       if (!uaf_bitmap) {
134 +               printk(KERN_ALERT
135 +                       "UAF: can't allocate %d bytes for bitmap\n", size);
136 +               return;
137 +       }
138 +       memset(uaf_bitmap, 0, size);
139 +       spin_lock_init(&uaf_lock);
140 +       memset(uaf_stats, 0, sizeof(uaf_stats));
141 +
142 +       printk("UAF: allocated %d for bitmap\n", size);
143 +}
144 +
145 +static int uaf_find(int len)
146 +{
147 +       int new_last_found = -1;
148 +       int loop = 0;
149 +       int i, j;
150 +
151 +       j = uaf_last_found;
152 +
153 +       do {
154 +               i = find_next_zero_bit(uaf_bitmap, uaf_max, j);
155 +               if (i >= uaf_max) {
156 +                       /* repeat from 0 */
157 +                       if (++loop > 1) {
158 +                               /* this is 2nd loop and it's useless */
159 +                               return -1;
160 +                       }
161 +
162 +                       i = find_next_zero_bit(uaf_bitmap, uaf_max, 0);
163 +                       if (i >= uaf_max)
164 +                               return -1;
165 +
166 +                       /* save found num for subsequent searches */
167 +                       if (new_last_found == -1)
168 +                               new_last_found = uaf_last_found = i;
169 +                       UAF_ASSERT(new_last_found < uaf_max);
170 +               }
171 +
172 +               /*
173 +                * OK. found first zero bit.
174 +                * now, try to find the requested contiguous zero-space
175 +                */
176 +
177 +               /* FIXME: implement multipage allocation! */
178 +               break;
179 +
180 +               /*
181 +               j = find_next_bit(uaf_bitmap, uaf_max, i);
182 +               if (++loop2 > 10000) {
183 +                       printk("ALERT: loop2=%d\n", loop2);
184 +                       return -1;
185 +               }
186 +               */
187 +       } while (j - i < len);
188 +
189 +       /* found! */
190 +       if (new_last_found == -1)
191 +               uaf_last_found = i + 1;
192 +       if (uaf_last_found >= uaf_max)
193 +               uaf_last_found = 0;
194 +       return i;
195 +}
196 +
197 +extern int __vmalloc_area_pages (unsigned long address, unsigned long size,
198 +                                       int gfp_mask, pgprot_t prot,
199 +                                       struct page ***pages);
200 +void * uaf_alloc(kmem_cache_t *cachep, int gfp_mask)
201 +{
202 +       struct page *ptrs[MAX_UAF_OBJ_SIZE];
203 +       int size = cachep->objsize;
204 +       struct page **pages;
205 +       unsigned long flags;
206 +       unsigned long addr;
207 +       int i, j, err = -2000;
208 +
209 +       if (uaf_bitmap == NULL)
210 +               return NULL;
211 +
212 +       if (!(cachep->flags & SLAB_USE_UAF))
213 +               return NULL;
214 +
215 +       pages = (struct page **) ptrs;
216 +       size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
217 +       /* FIXME: implement multipage allocation! */
218 +       if (size > 1)
219 +               return NULL;
220 +       if (size > MAX_UAF_OBJ_SIZE) {
221 +               printk(KERN_ALERT "size is too big: %d\n", size);
222 +               return NULL;
223 +       }
224 +
225 +       if (uaf_used == uaf_max) {
226 +               uaf_printk("UAF: space exhausted!\n");
227 +               atomic_inc(&uaf_stats[size].uaf_failed);
228 +               return NULL;
229 +       }
230 +
231 +
232 +       spin_lock_irqsave(&uaf_lock, flags);
233 +       i = uaf_find(size);
234 +       if (i < 0) {
235 +               spin_unlock_irqrestore(&uaf_lock, flags);
236 +               atomic_inc(&uaf_stats[size].uaf_failed);
237 +               return NULL;
238 +       }
239 +       for (j = 0; j < size; j++) {
240 +               UAF_ASSERT(!test_bit(i + j, uaf_bitmap));
241 +               set_bit(i + j, uaf_bitmap);
242 +               uaf_used++;
243 +       }
244 +       spin_unlock_irqrestore(&uaf_lock, flags);
245 +
246 +       addr = ((unsigned long) uaf_area->addr) + (PAGE_SIZE * i);
247 +       uaf_printk("UAF: found %d/%d, base 0x%p, map at 0x%lx: ", i,
248 +                       size, uaf_area->addr, addr);
249 +
250 +       /* OK. we've found free space, let's allocate pages */
251 +       memset(pages, 0, sizeof(struct page *) * MAX_UAF_OBJ_SIZE);
252 +       for (j = 0; j < size; j++) {
253 +               pages[j] = alloc_page(gfp_mask);
254 +               if (pages[j] == NULL)
255 +                       goto nomem;
256 +               uaf_printk("0x%p ", pages[j]);
257 +       }
258 +
259 +       /* time to map just allocated pages */
260 +       err = __vmalloc_area_pages(addr, PAGE_SIZE * size, gfp_mask,
261 +                                       PAGE_KERNEL, &pages);
262 +       pages = (struct page **) ptrs;
263 +       if (err == 0) {
264 +               /* put slab cache pointer in first page */
265 +               ptrs[0]->list.next = (void *) cachep;
266 +               uaf_printk(" -> 0x%lx\n", addr);
267 +               atomic_inc(&uaf_stats[size].uaf_allocated);
268 +               atomic_inc(&uaf_stats[size].uaf_allocations);
269 +               if (!in_interrupt() && !in_softirq())
270 +                       flush_tlb_all();
271 +               else
272 +                       local_flush_tlb();
273 +               //printk("UAF: found %d/%d, base 0x%p, map at 0x%lx\n",
274 +               //              i, cachep->objsize, uaf_area->addr, addr);
275 +               return (void *) addr;
276 +       }
277 +
278 +nomem:
279 +       printk(KERN_ALERT "can't map pages: %d\n", err);
280 +       for (j = 0; j < size; j++)
281 +               if (pages[j])
282 +                       __free_page(pages[j]);
283 +
284 +       /* can't find free pages */
285 +       spin_lock_irqsave(&uaf_lock, flags);
286 +       for (j = 0; j < size; j++) {
287 +               clear_bit(i + j, uaf_bitmap);
288 +               uaf_used--;
289 +       }
290 +       spin_unlock_irqrestore(&uaf_lock, flags);
291 +       atomic_inc(&uaf_stats[size].uaf_failed);
292 +
293 +       return NULL;
294 +}
295 +
296 +extern void free_area_pmd(pgd_t *dir, unsigned long address,
297 +                                 unsigned long size);
298 +static void uaf_unmap(unsigned long address, unsigned long size)
299 +{
300 +       unsigned long end = (address + size);
301 +       pgd_t *dir;
302 +
303 +       dir = pgd_offset_k(address);
304 +       flush_cache_all();
305 +       do {
306 +               free_area_pmd(dir, address, end - address);
307 +               address = (address + PGDIR_SIZE) & PGDIR_MASK;
308 +               dir++;
309 +       } while (address && (address < end));
310 +
311 +       /*
312 +        * we must not call smp_call_function() with interrupts disabled
313 +        * otherwise we can get into deadlock
314 +        */
315 +       if (!in_interrupt() && !in_softirq())
316 +               flush_tlb_all();
317 +       else
318 +               local_flush_tlb();
319 +}
320 +
321 +/*
322 + * returns 1 if the free was successful
323 + */
324 +int uaf_cache_free(kmem_cache_t *cachep, void *addr)
325 +{
326 +       struct page *pages[MAX_UAF_OBJ_SIZE];
327 +       int size = cachep->objsize;
328 +       unsigned long flags;
329 +       int i, j;
330 +
331 +       uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
332 +
333 +       size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
334 +       if (size > MAX_UAF_OBJ_SIZE)
335 +               return 0;
336 +
337 +       if (uaf_bitmap == NULL)
338 +               return 0;
339 +
340 +       /* first, check if the address is in UAF space */
341 +       if ((unsigned) addr < (unsigned) uaf_area->addr ||
342 +               (unsigned) addr >= (unsigned) uaf_area->addr + uaf_area->size)
343 +               return 0;
344 +
345 +       UAF_ASSERT(((unsigned long) addr & ~PAGE_MASK) == 0UL);
346 +       
347 +       /* calculate placement in bitmap */
348 +       i = (unsigned) addr - (unsigned) uaf_area->addr;
349 +       UAF_ASSERT(i >= 0);
350 +       i = i / PAGE_SIZE;
351 +
352 +       /* collect all the pages */
353 +       uaf_printk("free/unmap %d pages: ", size);
354 +       /* NOTE: we don't need page_table_lock here; the bits in the
355 +        * bitmap protect those pte's from being reused */
356 +       for (j = 0; j < size; j++) {
357 +               unsigned long address;
358 +               address = ((unsigned long) addr) + (PAGE_SIZE * j);
359 +               pages[j] = vmalloc_to_page((void *) address);
360 +               uaf_printk("0x%lx->0x%p ", address, pages[j]);
361 +       }
362 +       uaf_printk("\n");
363 +
364 +       uaf_unmap((unsigned long) addr, PAGE_SIZE * size);
365 +       /* free all the pages */
366 +       for (j = 0; j < size; j++)
367 +               __free_page(pages[j]);
368 +
369 +       spin_lock_irqsave(&uaf_lock, flags);
370 +       for (j = 0; j < size; j++) {
371 +               /* now check that the corresponding bit is set */
372 +               UAF_ASSERT(i+j >= 0 && i+j < uaf_max);
373 +               UAF_ASSERT(test_bit(i+j, uaf_bitmap));
374 +               
375 +               /* now free space in UAF */
376 +               clear_bit(i+j, uaf_bitmap);
377 +               uaf_used--;
378 +       }
379 +       spin_unlock_irqrestore(&uaf_lock, flags);
380 +
381 +       atomic_dec(&uaf_stats[size].uaf_allocated);
382 +       
383 +       uaf_printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
384 +       //printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
385 +
386 +       return 1;
387 +}
388 +
389 +struct page *uaf_vaddr_to_page(void *obj)
390 +{
391 +       if (uaf_bitmap == NULL)
392 +               return NULL;
393 +
394 +       /* first, check if the address is in UAF space */
395 +       if ((unsigned) obj < (unsigned) uaf_area->addr ||
396 +               (unsigned) obj >= (unsigned) uaf_area->addr + uaf_area->size)
397 +               return NULL;
398 +       
399 +       return vmalloc_to_page(obj);
400 +}
401 +
402 +int uaf_free(void *obj)
403 +{
404 +       struct page *page = uaf_vaddr_to_page((void *) obj);
405 +       kmem_cache_t *c;
406 +
407 +       if (!page)
408 +               return 0;
409 +
410 +       c = GET_PAGE_CACHE(page);
411 +       return uaf_cache_free(c, (void *) obj);
412 +}
413 +
414 +int uaf_is_allocated(void *obj)
415 +{
416 +       unsigned long addr = (unsigned long) obj;
417 +       int i;
418 +
419 +       if (uaf_bitmap == NULL)
420 +               return 0;
421 +
422 +       addr &= PAGE_MASK;
423 +       /* first, check if the address is in UAF space */
424 +       if (addr < (unsigned long) uaf_area->addr ||
425 +                       addr >= (unsigned long) uaf_area->addr + uaf_area->size)
426 +               return 0;
427 +
428 +       /* calculate placement in bitmap */
429 +       i = (unsigned) addr - (unsigned) uaf_area->addr;
430 +       i = i / PAGE_SIZE;
431 +       return test_bit(i, uaf_bitmap);
432 +}
433 +
434 +static void *uaf_s_start(struct seq_file *m, loff_t *pos)
435 +{
436 +       loff_t n = *pos;
437 +
438 +       if (!n)
439 +               seq_printf(m, "size(pgs) allocated failed allocations. "
440 +                               "%d reserved, %d in use, %d last\n",
441 +                               uaf_max, uaf_used, uaf_last_found);
442 +       else if (n > MAX_UAF_OBJ_SIZE)
443 +               return NULL;
444 +
445 +       *pos = 1;
446 +       return (void *) 1;
447 +}
448 +
449 +static void *uaf_s_next(struct seq_file *m, void *p, loff_t *pos)
450 +{
451 +       unsigned long n = *pos;
452 +       ++*pos;
453 +       if (n + 1 > MAX_UAF_OBJ_SIZE)
454 +               return NULL;
455 +       return (void *) (n + 1);
456 +}
457 +
458 +static void uaf_s_stop(struct seq_file *m, void *p)
459 +{
460 +}
461 +
462 +static int uaf_s_show(struct seq_file *m, void *p)
463 +{
464 +       int n = (int) p;
465 +
466 +       if (n > MAX_UAF_OBJ_SIZE)
467 +               return 0;
468 +       seq_printf(m, "%d  %d  %d %d\n", n, 
469 +                       atomic_read(&uaf_stats[n].uaf_allocated),
470 +                       atomic_read(&uaf_stats[n].uaf_failed),
471 +                       atomic_read(&uaf_stats[n].uaf_allocations));
472 +       return 0;
473 +}
474 +
475 +struct seq_operations uafinfo_op = {
476 +       .start  = uaf_s_start,
477 +       .next   = uaf_s_next,
478 +       .stop   = uaf_s_stop,
479 +       .show   = uaf_s_show,
480 +};
481 +
482 +ssize_t uafinfo_write(struct file *file, const char *buffer,
483 +                               size_t count, loff_t *ppos)
484 +{
485 +       char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
486 +       char *key, *name;
487 +       int res;
488 +       struct list_head *p;
489 +       
490 +       if (count > MAX_SLABINFO_WRITE)
491 +               return -EINVAL;
492 +       if (copy_from_user(&kbuf, buffer, count))
493 +               return -EFAULT;
494 +       kbuf[MAX_SLABINFO_WRITE] = '\0'; 
495 +
496 +       tmp = kbuf;
497 +       key = strsep(&tmp, " \t\n");
498 +       if (!key)
499 +               return -EINVAL;
500 +       if (!strcmp(key, "on"))
501 +               res = 1;
502 +       else if (!strcmp(key, "off"))
503 +               res = 0;
504 +       else
505 +               return -EINVAL;
506 +
507 +       name = strsep(&tmp, " \t\n");
508 +       if (!name)
509 +               return -EINVAL;
510 +
511 +       /* Find the cache in the chain of caches. */
512 +       down(&cache_chain_sem);
513 +       list_for_each(p,&cache_chain) {
514 +               kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
515 +
516 +               if (!strcmp(cachep->name, name)) {
517 +                       if (res) {
518 +                               printk("UAF: use on %s\n", cachep->name);
519 +                               cachep->flags |= SLAB_USE_UAF;
520 +                       } else {
521 +                               printk("UAF: dont use on %s\n", cachep->name);
522 +                               cachep->flags &= ~SLAB_USE_UAF;
523 +                       }
524 +                       break;
525 +               }
526 +       }
527 +       up(&cache_chain_sem);
528 +       return count;
529 +}
530 +#endif
531 +
532 Index: linux-2.4.22-vanilla/init/main.c
533 ===================================================================
534 --- linux-2.4.22-vanilla.orig/init/main.c       2003-11-03 23:22:13.000000000 +0300
535 +++ linux-2.4.22-vanilla/init/main.c    2003-11-18 01:06:45.000000000 +0300
536 @@ -436,6 +436,9 @@
537          *      make syscalls (and thus be locked).
538          */
539         smp_init();
540 +#ifdef CONFIG_DEBUG_UAF
541 +       uaf_init();
542 +#endif
543         rest_init();
544  }
545  
546 Index: linux-2.4.22-vanilla/fs/proc/proc_misc.c
547 ===================================================================
548 --- linux-2.4.22-vanilla.orig/fs/proc/proc_misc.c       2003-11-03 23:22:11.000000000 +0300
549 +++ linux-2.4.22-vanilla/fs/proc/proc_misc.c    2003-11-18 01:06:45.000000000 +0300
550 @@ -301,6 +301,22 @@
551         release:        seq_release,
552  };
553  
554 +#ifdef CONFIG_DEBUG_UAF
555 +extern struct seq_operations uafinfo_op;
556 +extern ssize_t uafinfo_write(struct file *, const char *, size_t, loff_t *);
557 +static int uafinfo_open(struct inode *inode, struct file *file)
558 +{
559 +       return seq_open(file, &uafinfo_op);
560 +}
561 +static struct file_operations proc_uafinfo_operations = {
562 +       .open           = uafinfo_open,
563 +       .read           = seq_read,
564 +       .write          = uafinfo_write,
565 +       .llseek         = seq_lseek,
566 +       .release        = seq_release,
567 +};
568 +#endif
569 +
570  static int kstat_read_proc(char *page, char **start, off_t off,
571                                  int count, int *eof, void *data)
572  {
573 @@ -616,6 +632,9 @@
574         create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
575         create_seq_entry("partitions", 0, &proc_partitions_operations);
576         create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
577 +#ifdef CONFIG_DEBUG_UAF
578 +       create_seq_entry("uafinfo",S_IWUSR|S_IRUGO,&proc_uafinfo_operations);
579 +#endif
580  #ifdef CONFIG_MODULES
581         create_seq_entry("ksyms", 0, &proc_ksyms_operations);
582  #endif
583 Index: linux-2.4.22-vanilla/include/linux/slab.h
584 ===================================================================
585 --- linux-2.4.22-vanilla.orig/include/linux/slab.h      2003-11-17 15:42:13.000000000 +0300
586 +++ linux-2.4.22-vanilla/include/linux/slab.h   2003-11-18 02:14:40.000000000 +0300
587 @@ -40,6 +40,7 @@
588  #define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
589  #define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
590  #define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* force alignment */
591 +#define SLAB_USE_UAF           0x00040000UL    /* use UAF allocator */
592  
593  /* flags passed to a constructor func */
594  #define        SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
595 Index: linux-2.4.22-vanilla/arch/i386/config.in
596 ===================================================================
597 --- linux-2.4.22-vanilla.orig/arch/i386/config.in       2003-11-03 23:22:06.000000000 +0300
598 +++ linux-2.4.22-vanilla/arch/i386/config.in    2003-11-18 01:06:45.000000000 +0300
599 @@ -470,6 +470,9 @@
600     bool '  Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
601     bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
602     bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
603 +   if [ "$CONFIG_DEBUG_SLAB" != "n" ]; then
604 +      bool '  Debug memory allocations (use-after-free via vmalloced space)' CONFIG_DEBUG_UAF
605 +   fi
606     bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
607     bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
608     bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
609 Index: linux-2.4.22-vanilla/mm/vmalloc.c
610 ===================================================================
611 --- linux-2.4.22-vanilla.orig/mm/vmalloc.c      2003-11-03 23:22:13.000000000 +0300
612 +++ linux-2.4.22-vanilla/mm/vmalloc.c   2003-11-18 01:06:45.000000000 +0300
613 @@ -53,7 +53,7 @@
614         } while (address < end);
615  }
616  
617 -static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
618 +void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
619  {
620         pmd_t * pmd;
621         unsigned long end;
622 @@ -152,7 +152,7 @@
623         return 0;
624  }
625  
626 -static inline int __vmalloc_area_pages (unsigned long address,
627 +int __vmalloc_area_pages (unsigned long address,
628                                         unsigned long size,
629                                         int gfp_mask,
630                                         pgprot_t prot,
631 Index: linux-2.4.22-vanilla/include/asm-i386/io.h
632 ===================================================================
633 --- linux-2.4.22-vanilla.orig/include/asm-i386/io.h     2003-11-17 14:58:37.000000000 +0300
634 +++ linux-2.4.22-vanilla/include/asm-i386/io.h  2003-11-18 02:26:42.000000000 +0300
635 @@ -75,6 +75,16 @@
636   
637  static inline unsigned long virt_to_phys(volatile void * address)
638  {
639 +#ifdef CONFIG_DEBUG_UAF
640 +       unsigned long addr = (unsigned long) address;
641 +       if (vmlist && addr >= VMALLOC_START && addr < VMALLOC_END) {
642 +               struct page *page = vmalloc_to_page((void *) address);
643 +               if (page) {
644 +                       unsigned long offset = addr & ~PAGE_MASK;
645 +                       address = page_address(page) + offset;
646 +               }
647 +       }
648 +#endif
649         return __pa(address);
650  }
651  
652 Index: linux-2.4.22-vanilla/include/asm-i386/page.h
653 ===================================================================
654 --- linux-2.4.22-vanilla.orig/include/asm-i386/page.h   2003-11-03 23:51:46.000000000 +0300
655 +++ linux-2.4.22-vanilla/include/asm-i386/page.h        2003-11-18 02:14:38.000000000 +0300
656 @@ -129,9 +129,49 @@
657  #define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
658  #define __MAXMEM               (-__PAGE_OFFSET-__VMALLOC_RESERVE)
659  #define MAXMEM                 ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
660 +
661 +#ifndef CONFIG_DEBUG_UAF
662  #define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
663  #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
664  #define virt_to_page(kaddr)    (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
665 +#else
666 +#define __pa(x)                ({                                                      \
667 +                               unsigned long __pn, __fr;                       \
668 +                               __pn = (unsigned long)(x)-PAGE_OFFSET;          \
669 +                               __fr = __pn >> PAGE_SHIFT;                      \
670 +                               if (jiffies > HZ*3 && __fr >= max_mapnr) {      \
671 +                                       printk("invalid arg __pa(0x%x)"         \
672 +                                               " at %s:%d\n", (unsigned) (x),  \
673 +                                               __FILE__, __LINE__);            \
674 +                                       dump_stack();                           \
675 +                               }                                               \
676 +                               __pn;                                           \
677 +                       })
678 +
679 +#define __va(x)                ({                                                      \
680 +                               unsigned long __pn;                             \
681 +                               __pn = (unsigned long) (x) >> PAGE_SHIFT;       \
682 +                               if (jiffies > HZ*3 && __pn >= max_mapnr) {      \
683 +                                       printk("invalid arg __va(0x%x)"         \
684 +                                               " at %s:%d\n", (unsigned) (x),  \
685 +                                               __FILE__, __LINE__);            \
686 +                                       dump_stack();                           \
687 +                               }                                               \
688 +                               ((void *)((unsigned long)(x) + PAGE_OFFSET));   \
689 +                       })
690 +
691 +#define virt_to_page(ka) ({                                                    \
692 +                               struct page *_p;                                \
693 +                               if ((unsigned long)(ka) >= VMALLOC_START) {     \
694 +                                       _p = vmalloc_to_page((void *)(ka));     \
695 +                                       BUG_ON(!_p);                            \
696 +                               } else                                          \
697 +                                       _p = mem_map+(__pa(ka) >> PAGE_SHIFT);  \
698 +                               (_p);                                           \
699 +                       })
700 +#endif
701 +
702 +
703  #define VALID_PAGE(page)       ((page - mem_map) < max_mapnr)
704  
705  #define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
706
707 %diffstat
708  arch/i386/config.in     |    3 
709  fs/proc/proc_misc.c     |   19 +
710  include/asm-i386/io.h   |   10 
711  include/asm-i386/page.h |   40 +++
712  include/linux/slab.h    |    1 
713  init/main.c             |    3 
714  mm/slab.c               |  487 ++++++++++++++++++++++++++++++++++++++++++++++++
715  mm/vmalloc.c            |    4 
716  8 files changed, 565 insertions(+), 2 deletions(-)
717