Whamcloud - gitweb
Branch HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / slab-use-after-free-debug-2.4.24.patch
1 %patch
2 Index: linux-2.4.24/mm/slab.c
3 ===================================================================
4 --- linux-2.4.24.orig/mm/slab.c 2004-02-06 11:15:22.000000000 +0300
5 +++ linux-2.4.24/mm/slab.c      2004-02-07 00:42:38.000000000 +0300
6 @@ -97,6 +97,8 @@
7  #define        FORCED_DEBUG    0
8  #endif
9  
10 +#include       <linux/vmalloc.h>
11 +
12  /*
13   * Parameters for kmem_cache_reap
14   */
15 @@ -825,6 +827,12 @@
16         return cachep;
17  }
18  
19 +#ifdef CONFIG_DEBUG_UAF
20 +void * uaf_alloc(kmem_cache_t *, int gfp_mask);
21 +int uaf_cache_free(kmem_cache_t *, void *addr);
22 +int uaf_free(void *addr);
23 +struct page *uaf_vaddr_to_page(void *obj);
24 +#endif
25  
26  #if DEBUG
27  /*
28 @@ -1342,6 +1350,20 @@
29         unsigned long save_flags;
30         void* objp;
31  
32 +#ifdef CONFIG_DEBUG_UAF
33 +       /* try to use uaf-allocator first */
34 +       objp = uaf_alloc(cachep, flags);
35 +       if (objp) {
36 +               if (cachep->ctor) {
37 +                       unsigned long ctor_flags;
38 +                       ctor_flags = SLAB_CTOR_CONSTRUCTOR;
39 +                       if (!(flags & __GFP_WAIT))
40 +                               ctor_flags |= SLAB_CTOR_ATOMIC;
41 +                       cachep->ctor(objp, cachep, ctor_flags);
42 +               }
43 +               return objp;
44 +       }
45 +#endif
46         kmem_cache_alloc_head(cachep, flags);
47  try_again:
48         local_irq_save(save_flags);
49 @@ -1436,13 +1458,17 @@
50  
51         if (cachep->flags & SLAB_RED_ZONE) {
52                 objp -= BYTES_PER_WORD;
53 -               if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
54 +               if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2) {
55                         /* Either write before start, or a double free. */
56 +                       printk("inconsistency at start of %s\n", cachep->name);
57                         BUG();
58 +               }
59                 if (xchg((unsigned long *)(objp+cachep->objsize -
60 -                               BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
61 +                               BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2) {
62                         /* Either write past end, or a double free. */
63 +                       printk("inconsistency at end of %s\n", cachep->name);
64                         BUG();
65 +               }
66         }
67         if (cachep->flags & SLAB_POISON)
68                 kmem_poison_obj(cachep, objp);
69 @@ -1578,6 +1604,10 @@
70  void kmem_cache_free (kmem_cache_t *cachep, void *objp)
71  {
72         unsigned long flags;
73 +#ifdef CONFIG_DEBUG_UAF
74 +       if (uaf_cache_free(cachep, objp))
75 +               return;
76 +#endif
77  #if DEBUG
78         CHECK_PAGE(virt_to_page(objp));
79         if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
80 @@ -1603,6 +1633,10 @@
81  
82         if (!objp)
83                 return;
84 +#ifdef CONFIG_DEBUG_UAF
85 +       if (uaf_free((void *) objp))
86 +               return;
87 +#endif
88         local_irq_save(flags);
89         CHECK_PAGE(virt_to_page(objp));
90         c = GET_PAGE_CACHE(virt_to_page(objp));
91 @@ -2078,3 +2112,471 @@
92  #endif
93  }
94  #endif
95 +
96 +
97 +
98 +#ifdef CONFIG_DEBUG_UAF
99 +
100 +#define MAX_UAF_OBJ_SIZE       8       /* in pages */
101 +#define UAF_ASSERT(xxx)                if (!(xxx)) BUG();
102 +#define UAF_DEBUG__
103 +#ifdef UAF_DEBUG
104 +#define uaf_printk(fmt,a...)   printk(fmt, ##a)
105 +#else
106 +#define uaf_printk(a,...)      
107 +#endif
108 +
109 +struct uaf_stats {
110 +       atomic_t uaf_allocated;
111 +       atomic_t uaf_allocations;
112 +       atomic_t uaf_failed;
113 +};
114 +
115 +static int uaf_max = 32768;
116 +static void *uaf_bitmap = NULL;
117 +static spinlock_t uaf_lock;
118 +static int uaf_last_found = 0;
119 +static int uaf_used = 0;
120 +static struct vm_struct *uaf_area = NULL;
121 +static struct uaf_stats uaf_stats[MAX_UAF_OBJ_SIZE + 1];
122 +
123 +static int __init uaf_setup(char *str)
124 +{
125 +        uaf_max = simple_strtoul(str, NULL, 0);
126 +        return 1;
127 +}
128 +
129 +__setup("uaf=", uaf_setup);
130 +
131 +
132 +void uaf_init(void)
133 +{
134 +       int size;
135 +
136 +       printk("UAF: total vmalloc-space - %lu\n",
137 +                       VMALLOC_END - VMALLOC_START);
138 +
139 +       uaf_area = get_vm_area(PAGE_SIZE * uaf_max, VM_ALLOC);
140 +       if (!uaf_area) {
141 +               printk(KERN_ALERT "UAF: can't reserve %lu bytes in KVA\n",
142 +                               PAGE_SIZE * uaf_max);
143 +               return;
144 +       }
145 +       
146 +       printk("UAF: reserved %lu bytes in KVA at 0x%p\n",
147 +                       PAGE_SIZE * uaf_max, uaf_area->addr);
148 +
149 +       /* how many bytes we need to track space usage? */
150 +       size = uaf_max / 8 + 8;
151 +
152 +       uaf_bitmap = vmalloc(size);
153 +       if (!uaf_bitmap) {
154 +               printk(KERN_ALERT
155 +                       "UAF: can't allocate %d bytes for bitmap\n", size);
156 +               return;
157 +       }
158 +       memset(uaf_bitmap, 0, size);
159 +       spin_lock_init(&uaf_lock);
160 +       memset(uaf_stats, 0, sizeof(uaf_stats));
161 +
162 +       printk("UAF: allocated %d for bitmap\n", size);
163 +}
164 +
165 +static int uaf_find(int len)
166 +{
167 +       int new_last_found = -1;
168 +       int loop = 0;
169 +       int i, j;
170 +
171 +       j = uaf_last_found;
172 +
173 +       do {
174 +               i = find_next_zero_bit(uaf_bitmap, uaf_max, j);
175 +               if (i >= uaf_max) {
176 +                       /* repeat from 0 */
177 +                       if (++loop > 1) {
178 +                               /* this is 2nd loop and it's useless */
179 +                               return -1;
180 +                       }
181 +
182 +                       i = find_next_zero_bit(uaf_bitmap, uaf_max, 0);
183 +                       if (i >= uaf_max)
184 +                               return -1;
185 +
186 +                       /* save found num for subsequent searches */
187 +                       if (new_last_found == -1)
188 +                               new_last_found = uaf_last_found = i;
189 +                       UAF_ASSERT(new_last_found < uaf_max);
190 +               }
191 +
192 +               /*
193 +                * OK. found first zero bit.
194 +                * now, try to find requested cont. zero-space
195 +                */
196 +
197 +               /* FIXME: implement multipage allocation! */
198 +               break;
199 +
200 +               /*
201 +               j = find_next_bit(uaf_bitmap, uaf_max, i);
202 +               if (++loop2 > 10000) {
203 +                       printk("ALERT: loop2=%d\n", loop2);
204 +                       return -1;
205 +               }
206 +               */
207 +       } while (j - i < len);
208 +
209 +       /* found! */
210 +       if (new_last_found == -1)
211 +               uaf_last_found = i + 1;
212 +       if (uaf_last_found >= uaf_max)
213 +               uaf_last_found = 0;
214 +       return i;
215 +}
216 +
217 +extern int __vmalloc_area_pages (unsigned long address, unsigned long size,
218 +                                       int gfp_mask, pgprot_t prot,
219 +                                       struct page ***pages);
220 +void * uaf_alloc(kmem_cache_t *cachep, int gfp_mask)
221 +{
222 +       struct page *ptrs[MAX_UAF_OBJ_SIZE];
223 +       int size = cachep->objsize;
224 +       struct page **pages;
225 +       unsigned long flags;
226 +       unsigned long addr;
227 +       int i, j, err = -2000;
228 +
229 +       if (uaf_bitmap == NULL)
230 +               return NULL;
231 +
232 +       if (!(cachep->flags & SLAB_USE_UAF))
233 +               return NULL;
234 +
235 +       pages = (struct page **) ptrs;
236 +       size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
237 +       /* FIXME: implement multipage allocation! */
238 +       if (size > 1)
239 +               return NULL;
240 +       if (size > MAX_UAF_OBJ_SIZE) {
241 +               printk(KERN_ALERT "size is too big: %d\n", size);
242 +               return NULL;
243 +       }
244 +
245 +       if (uaf_used == uaf_max) {
246 +               uaf_printk("UAF: space exhausted!\n");
247 +               atomic_inc(&uaf_stats[size].uaf_failed);
248 +               return NULL;
249 +       }
250 +
251 +
252 +       spin_lock_irqsave(&uaf_lock, flags);
253 +       i = uaf_find(size);
254 +       if (i < 0) {
255 +               spin_unlock_irqrestore(&uaf_lock, flags);
256 +               atomic_inc(&uaf_stats[size].uaf_failed);
257 +               return NULL;
258 +       }
259 +       for (j = 0; j < size; j++) {
260 +               UAF_ASSERT(!test_bit(i + j, uaf_bitmap));
261 +               set_bit(i + j, uaf_bitmap);
262 +               uaf_used++;
263 +       }
264 +       spin_unlock_irqrestore(&uaf_lock, flags);
265 +
266 +       addr = ((unsigned long) uaf_area->addr) + (PAGE_SIZE * i);
267 +       uaf_printk("UAF: found %d/%d, base 0x%p, map at 0x%lx: ", i,
268 +                       size, uaf_area->addr, addr);
269 +
270 +       /* OK. we've found free space, let's allocate pages */
271 +       memset(pages, 0, sizeof(struct page *) * MAX_UAF_OBJ_SIZE);
272 +       for (j = 0; j < size; j++) {
273 +               pages[j] = alloc_page(gfp_mask);
274 +               if (pages[j] == NULL)
275 +                       goto nomem;
276 +               uaf_printk("0x%p ", pages[j]);
277 +       }
278 +
279 +       /* time to map just allocated pages */
280 +       err = __vmalloc_area_pages(addr, PAGE_SIZE * size, gfp_mask,
281 +                                       PAGE_KERNEL, &pages);
282 +       pages = (struct page **) ptrs;
283 +       if (err == 0) {
284 +               /* put slab cache pointer in first page */
285 +               ptrs[0]->list.next = (void *) cachep;
286 +               uaf_printk(" -> 0x%lx\n", addr);
287 +               atomic_inc(&uaf_stats[size].uaf_allocated);
288 +               atomic_inc(&uaf_stats[size].uaf_allocations);
289 +               if (!in_interrupt() && !in_softirq())
290 +                       flush_tlb_all();
291 +               else
292 +                       local_flush_tlb();
293 +               size = cachep->objsize;
294 +               if (size < PAGE_SIZE)
295 +                       memset((char *) addr + size, 0xa7, PAGE_SIZE - size);
296 +               return (void *) addr;
297 +       }
298 +
299 +nomem:
300 +       printk(KERN_ALERT "can't map pages: %d\n", err);
301 +       for (j = 0; j < size; j++)
302 +               if (pages[j])
303 +                       __free_page(pages[j]);
304 +
305 +       /* can't find free pages */
306 +       spin_lock_irqsave(&uaf_lock, flags);
307 +       for (j = 0; j < size; j++) {
308 +               clear_bit(i + j, uaf_bitmap);
309 +               uaf_used--;
310 +       }
311 +       spin_unlock_irqrestore(&uaf_lock, flags);
312 +       atomic_inc(&uaf_stats[size].uaf_failed);
313 +
314 +       return NULL;
315 +}
316 +
317 +extern void free_area_pmd(pgd_t *dir, unsigned long address,
318 +                                 unsigned long size);
319 +static void uaf_unmap(unsigned long address, unsigned long size)
320 +{
321 +       unsigned long end = (address + size);
322 +       pgd_t *dir;
323 +
324 +       dir = pgd_offset_k(address);
325 +       flush_cache_all();
326 +       do {
327 +               free_area_pmd(dir, address, end - address);
328 +               address = (address + PGDIR_SIZE) & PGDIR_MASK;
329 +               dir++;
330 +       } while (address && (address < end));
331 +
332 +       /*
333 +        * we must not call smp_call_function() with interrupts disabled
334 +        * otherwise we can get into deadlock
335 +        */
336 +       if (!in_interrupt() && !in_softirq())
337 +               flush_tlb_all();
338 +       else
339 +               local_flush_tlb();
340 +}
341 +
342 +/*
343 + * returns 1 if free was successful
344 + */
345 +int uaf_cache_free(kmem_cache_t *cachep, void *addr)
346 +{
347 +       struct page *pages[MAX_UAF_OBJ_SIZE];
348 +       int size = cachep->objsize;
349 +       unsigned long flags;
350 +       int i, j;
351 +
352 +       uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
353 +
354 +       size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
355 +       if (size > MAX_UAF_OBJ_SIZE)
356 +               return 0;
357 +
358 +       if (uaf_bitmap == NULL)
359 +               return 0;
360 +
361 +       /* first, check if address is in UAF space */
362 +       if ((unsigned) addr < (unsigned) uaf_area->addr ||
363 +               (unsigned) addr >= (unsigned) uaf_area->addr + uaf_area->size)
364 +               return 0;
365 +
366 +       if (cachep->objsize < PAGE_SIZE) {
367 +               unsigned char *a = (void *) addr;
368 +               for (i = 0; i < PAGE_SIZE - cachep->objsize; i++)
369 +                       if (a[cachep->objsize + i] != 0xa7) {
370 +                               printk("corruption(0x%x) at %u in %s/0x%p\n",
371 +                                       (unsigned) a[cachep->objsize + i],
372 +                                       cachep->objsize + i, cachep->name, addr);
373 +                               BUG();
374 +                       }
375 +       }
376 +       UAF_ASSERT(((unsigned long) addr & ~PAGE_MASK) == 0UL);
377 +       
378 +       /* calculate placement in bitmap */
379 +       i = (unsigned) addr - (unsigned) uaf_area->addr;
380 +       UAF_ASSERT(i >= 0);
381 +       i = i / PAGE_SIZE;
382 +
383 +       /* collect all the pages */
384 +       uaf_printk("free/unmap %d pages: ", size);
385 +       /* NOTE: we do not need page_table_lock here. bits in the bitmap
386 +        * protect those pte's from being reused */
387 +       for (j = 0; j < size; j++) {
388 +               unsigned long address;
389 +               address = ((unsigned long) addr) + (PAGE_SIZE * j);
390 +               pages[j] = vmalloc_to_page((void *) address);
391 +               uaf_printk("0x%lx->0x%p ", address, pages[j]);
392 +       }
393 +       uaf_printk("\n");
394 +
395 +       uaf_unmap((unsigned long) addr, PAGE_SIZE * size);
396 +       /* free all the pages */
397 +       for (j = 0; j < size; j++)
398 +               __free_page(pages[j]);
399 +
400 +       spin_lock_irqsave(&uaf_lock, flags);
401 +       for (j = 0; j < size; j++) {
402 +               /* now check that the corresponding bit is set */
403 +               UAF_ASSERT(i+j >= 0 && i+j < uaf_max);
404 +               UAF_ASSERT(test_bit(i+j, uaf_bitmap));
405 +               
406 +               /* now free space in UAF */
407 +               clear_bit(i+j, uaf_bitmap);
408 +               uaf_used--;
409 +       }
410 +       spin_unlock_irqrestore(&uaf_lock, flags);
411 +
412 +       atomic_dec(&uaf_stats[size].uaf_allocated);
413 +       
414 +       uaf_printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
415 +       //printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
416 +
417 +       return 1;
418 +}
419 +
420 +struct page *uaf_vaddr_to_page(void *obj)
421 +{
422 +       if (uaf_bitmap == NULL)
423 +               return NULL;
424 +
425 +       /* first, check if address is in UAF space */
426 +       if ((unsigned) obj < (unsigned) uaf_area->addr ||
427 +               (unsigned) obj >= (unsigned) uaf_area->addr + uaf_area->size)
428 +               return NULL;
429 +       
430 +       return vmalloc_to_page(obj);
431 +}
432 +
433 +int uaf_free(void *obj)
434 +{
435 +       struct page *page = uaf_vaddr_to_page((void *) obj);
436 +       kmem_cache_t *c;
437 +
438 +       if (!page)
439 +               return 0;
440 +
441 +       c = GET_PAGE_CACHE(page);
442 +       return uaf_cache_free(c, (void *) obj);
443 +}
444 +
445 +int uaf_is_allocated(void *obj)
446 +{
447 +       unsigned long addr = (unsigned long) obj;
448 +       int i;
449 +
450 +       if (uaf_bitmap == NULL)
451 +               return 0;
452 +
453 +       addr &= PAGE_MASK;
454 +       /* first, check if address is in UAF space */
455 +       if (addr < (unsigned long) uaf_area->addr ||
456 +                       addr >= (unsigned long) uaf_area->addr + uaf_area->size)
457 +               return 0;
458 +
459 +       /* calculate placement in bitmap */
460 +       i = (unsigned) addr - (unsigned) uaf_area->addr;
461 +       i = i / PAGE_SIZE;
462 +       return test_bit(i, uaf_bitmap);
463 +}
464 +
465 +static void *uaf_s_start(struct seq_file *m, loff_t *pos)
466 +{
467 +       loff_t n = *pos;
468 +
469 +       if (!n)
470 +               seq_printf(m, "size(pgs) allocated failed allocations. "
471 +                               "%d reserved, %d in use, %d last\n",
472 +                               uaf_max, uaf_used, uaf_last_found);
473 +       else if (n > MAX_UAF_OBJ_SIZE)
474 +               return NULL;
475 +
476 +       *pos = 1;
477 +       return (void *) 1;
478 +}
479 +
480 +static void *uaf_s_next(struct seq_file *m, void *p, loff_t *pos)
481 +{
482 +       unsigned long n = *pos;
483 +       ++*pos;
484 +       if (n + 1 > MAX_UAF_OBJ_SIZE)
485 +               return NULL;
486 +       return (void *) (n + 1);
487 +}
488 +
489 +static void uaf_s_stop(struct seq_file *m, void *p)
490 +{
491 +}
492 +
493 +static int uaf_s_show(struct seq_file *m, void *p)
494 +{
495 +       int n = (int) p;
496 +
497 +       if (n > MAX_UAF_OBJ_SIZE)
498 +               return 0;
499 +       seq_printf(m, "%d  %d  %d %d\n", n, 
500 +                       atomic_read(&uaf_stats[n].uaf_allocated),
501 +                       atomic_read(&uaf_stats[n].uaf_failed),
502 +                       atomic_read(&uaf_stats[n].uaf_allocations));
503 +       return 0;
504 +}
505 +
506 +struct seq_operations uafinfo_op = {
507 +       .start  = uaf_s_start,
508 +       .next   = uaf_s_next,
509 +       .stop   = uaf_s_stop,
510 +       .show   = uaf_s_show,
511 +};
512 +
513 +ssize_t uafinfo_write(struct file *file, const char *buffer,
514 +                               size_t count, loff_t *ppos)
515 +{
516 +       char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
517 +       char *key, *name;
518 +       int res;
519 +       struct list_head *p;
520 +       
521 +       if (count > MAX_SLABINFO_WRITE)
522 +               return -EINVAL;
523 +       if (copy_from_user(&kbuf, buffer, count))
524 +               return -EFAULT;
525 +       kbuf[MAX_SLABINFO_WRITE] = '\0'; 
526 +
527 +       tmp = kbuf;
528 +       key = strsep(&tmp, " \t\n");
529 +       if (!key)
530 +               return -EINVAL;
531 +       if (!strcmp(key, "on"))
532 +               res = 1;
533 +       else if (!strcmp(key, "off"))
534 +               res = 0;
535 +       else
536 +               return -EINVAL;
537 +
538 +       name = strsep(&tmp, " \t\n");
539 +       if (!name)
540 +               return -EINVAL;
541 +
542 +       /* Find the cache in the chain of caches. */
543 +       down(&cache_chain_sem);
544 +       list_for_each(p,&cache_chain) {
545 +               kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
546 +
547 +               if (!strcmp(cachep->name, name)) {
548 +                       if (res) {
549 +                               printk("UAF: use on %s\n", cachep->name);
550 +                               cachep->flags |= SLAB_USE_UAF;
551 +                       } else {
552 +                               printk("UAF: dont use on %s\n", cachep->name);
553 +                               cachep->flags &= ~SLAB_USE_UAF;
554 +                       }
555 +                       break;
556 +               }
557 +       }
558 +       up(&cache_chain_sem);
559 +       return count;
560 +}
561 +#endif
562 +
563 Index: linux-2.4.24/mm/vmalloc.c
564 ===================================================================
565 --- linux-2.4.24.orig/mm/vmalloc.c      2004-01-10 17:05:20.000000000 +0300
566 +++ linux-2.4.24/mm/vmalloc.c   2004-02-06 11:17:09.000000000 +0300
567 @@ -53,7 +53,7 @@
568         } while (address < end);
569  }
570  
571 -static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
572 +void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
573  {
574         pmd_t * pmd;
575         unsigned long end;
576 @@ -152,7 +152,7 @@
577         return 0;
578  }
579  
580 -static inline int __vmalloc_area_pages (unsigned long address,
581 +int __vmalloc_area_pages (unsigned long address,
582                                         unsigned long size,
583                                         int gfp_mask,
584                                         pgprot_t prot,
585 Index: linux-2.4.24/init/main.c
586 ===================================================================
587 --- linux-2.4.24.orig/init/main.c       2004-01-10 17:05:59.000000000 +0300
588 +++ linux-2.4.24/init/main.c    2004-02-06 11:17:43.000000000 +0300
589 @@ -437,6 +437,9 @@
590  #if defined(CONFIG_SYSVIPC)
591         ipc_init();
592  #endif
593 +#ifdef CONFIG_DEBUG_UAF
594 +       uaf_init();
595 +#endif
596         rest_init();
597  }
598  
599 Index: linux-2.4.24/fs/proc/proc_misc.c
600 ===================================================================
601 --- linux-2.4.24.orig/fs/proc/proc_misc.c       2004-01-10 17:05:55.000000000 +0300
602 +++ linux-2.4.24/fs/proc/proc_misc.c    2004-02-06 11:35:27.000000000 +0300
603 @@ -303,6 +303,22 @@
604         release:        seq_release,
605  };
606  
607 +#ifdef CONFIG_DEBUG_UAF
608 +extern struct seq_operations uafinfo_op;
609 +extern ssize_t uafinfo_write(struct file *, const char *, size_t, loff_t *);
610 +static int uafinfo_open(struct inode *inode, struct file *file)
611 +{
612 +       return seq_open(file, &uafinfo_op);
613 +}
614 +static struct file_operations proc_uafinfo_operations = {
615 +       .open           = uafinfo_open,
616 +       .read           = seq_read,
617 +       .write          = uafinfo_write,
618 +       .llseek         = seq_lseek,
619 +       .release        = seq_release,
620 +};
621 +#endif
622 +
623  static int kstat_read_proc(char *page, char **start, off_t off,
624                                  int count, int *eof, void *data)
625  {
626 @@ -640,6 +656,9 @@
627         create_seq_entry("iomem", 0, &proc_iomem_operations);
628         create_seq_entry("partitions", 0, &proc_partitions_operations);
629         create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
630 +#ifdef CONFIG_DEBUG_UAF
631 +       create_seq_entry("uafinfo",S_IWUSR|S_IRUGO,&proc_uafinfo_operations);
632 +#endif
633  #ifdef CONFIG_MODULES
634         create_seq_entry("ksyms", 0, &proc_ksyms_operations);
635  #endif
636 Index: linux-2.4.24/include/linux/slab.h
637 ===================================================================
638 --- linux-2.4.24.orig/include/linux/slab.h      2004-01-29 15:01:10.000000000 +0300
639 +++ linux-2.4.24/include/linux/slab.h   2004-02-06 11:18:26.000000000 +0300
640 @@ -40,6 +40,7 @@
641  #define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
642  #define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
643  #define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* force alignment */
644 +#define SLAB_USE_UAF           0x00040000UL    /* use UAF allocator */
645  
646  /* flags passed to a constructor func */
647  #define        SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
648 Index: linux-2.4.24/include/asm-i386/io.h
649 ===================================================================
650 --- linux-2.4.24.orig/include/asm-i386/io.h     2004-01-29 15:01:10.000000000 +0300
651 +++ linux-2.4.24/include/asm-i386/io.h  2004-02-06 11:18:26.000000000 +0300
652 @@ -75,6 +75,16 @@
653   
654  static inline unsigned long virt_to_phys(volatile void * address)
655  {
656 +#ifdef CONFIG_DEBUG_UAF
657 +       unsigned long addr = (unsigned long) address;
658 +       if (vmlist && addr >= VMALLOC_START && addr < VMALLOC_END) {
659 +               struct page *page = vmalloc_to_page((void *) address);
660 +               if (page) {
661 +                       unsigned long offset = addr & ~PAGE_MASK;
662 +                       address = page_address(page) + offset;
663 +               }
664 +       }
665 +#endif
666         return __pa(address);
667  }
668  
669 Index: linux-2.4.24/include/asm-i386/page.h
670 ===================================================================
671 --- linux-2.4.24.orig/include/asm-i386/page.h   2004-01-14 02:58:46.000000000 +0300
672 +++ linux-2.4.24/include/asm-i386/page.h        2004-02-06 11:17:09.000000000 +0300
673 @@ -131,9 +131,49 @@
674  #define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
675  #define __MAXMEM               (-__PAGE_OFFSET-__VMALLOC_RESERVE)
676  #define MAXMEM                 ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
677 +
678 +#ifndef CONFIG_DEBUG_UAF
679  #define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
680  #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
681  #define virt_to_page(kaddr)    (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
682 +#else
683 +#define __pa(x)                ({                                                      \
684 +                               unsigned long __pn, __fr;                       \
685 +                               __pn = (unsigned long)(x)-PAGE_OFFSET;          \
686 +                               __fr = __pn >> PAGE_SHIFT;                      \
687 +                               if (jiffies > HZ*3 && __fr >= max_mapnr) {      \
688 +                                       printk("invalid arg __pa(0x%x)"         \
689 +                                               " at %s:%d\n", (unsigned) (x),  \
690 +                                               __FILE__, __LINE__);            \
691 +                                       dump_stack();                           \
692 +                               }                                               \
693 +                               __pn;                                           \
694 +                       })
695 +
696 +#define __va(x)                ({                                                      \
697 +                               unsigned long __pn;                             \
698 +                               __pn = (unsigned long) (x) >> PAGE_SHIFT;       \
699 +                               if (jiffies > HZ*3 && __pn >= max_mapnr) {      \
700 +                                       printk("invalid arg __va(0x%x)"         \
701 +                                               " at %s:%d\n", (unsigned) (x),  \
702 +                                               __FILE__, __LINE__);            \
703 +                                       dump_stack();                           \
704 +                               }                                               \
705 +                               ((void *)((unsigned long)(x) + PAGE_OFFSET));   \
706 +                       })
707 +
708 +#define virt_to_page(ka) ({                                                    \
709 +                               struct page *_p;                                \
710 +                               if ((unsigned long)(ka) >= VMALLOC_START) {     \
711 +                                       _p = vmalloc_to_page((void *)(ka));     \
712 +                                       BUG_ON(!_p);                            \
713 +                               } else                                          \
714 +                                       _p = mem_map+(__pa(ka) >> PAGE_SHIFT);  \
715 +                               (_p);                                           \
716 +                       })
717 +#endif
718 +
719 +
720  #define VALID_PAGE(page)       ((page - mem_map) < max_mapnr)
721  
722  #define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
723 Index: linux-2.4.24/arch/i386/config.in
724 ===================================================================
725 --- linux-2.4.24.orig/arch/i386/config.in       2004-01-14 02:58:46.000000000 +0300
726 +++ linux-2.4.24/arch/i386/config.in    2004-02-06 11:17:09.000000000 +0300
727 @@ -508,6 +508,9 @@
728     bool '  Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
729     bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
730     bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
731 +   if [ "$CONFIG_DEBUG_SLAB" != "n" ]; then
732 +      bool '  Debug memory allocations (use-after-free via vmalloced space)' CONFIG_DEBUG_UAF
733 +   fi
734     bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
735     bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
736     bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
737
738 %diffstat
739  arch/i386/config.in     |    3 
740  fs/proc/proc_misc.c     |   19 +
741  include/asm-i386/io.h   |   10 
742  include/asm-i386/page.h |   40 +++
743  include/linux/slab.h    |    1 
744  init/main.c             |    3 
745  mm/slab.c               |  506 +++++++++++++++++++++++++++++++++++++++++++++++-
746  mm/vmalloc.c            |    4 
747  8 files changed, 582 insertions(+), 4 deletions(-)
748