lustre/kernel_patches/patches/slab-use-after-free-debug-2.4.24.patch
Added along with the missed patches from the suse-2.4.21-2 series.
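
Catch use-after-free and overrun bugs in slab objects by backing selected
caches with pages from a reserved vmalloc region.

With CONFIG_DEBUG_UAF enabled (under CONFIG_DEBUG_SLAB), kmem_cache_alloc()
first tries uaf_alloc(), which gives every object of a cache flagged
SLAB_USE_UAF its own freshly mapped page in the reserved region and fills
the unused tail of the page with the poison byte 0xa7. On free, the page is
unmapped and released, so any later access through a stale pointer faults
immediately instead of silently corrupting recycled memory; the poison tail
is checked on free to catch writes past the end of the object. Caches are
opted in and out at runtime through /proc/uafinfo, which also reports
per-size allocation statistics. For example (the cache name below is only
illustrative):

        # cat /proc/uafinfo
        # echo "on inode_cache" > /proc/uafinfo
        # echo "off inode_cache" > /proc/uafinfo

The reserved region defaults to 8192 pages and can be resized with the
"uaf=" boot parameter. Multipage objects are not handled yet (see the
FIXMEs below); objects larger than one page fall back to the normal slab
path.
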
%patch
Index: linux-2.4.24/mm/slab.c
===================================================================
--- linux-2.4.24.orig/mm/slab.c 2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/mm/slab.c      2004-07-17 08:02:02.000000000 -0400
@@ -97,6 +97,8 @@
 #define        FORCED_DEBUG    0
 #endif
 
+#include       <linux/vmalloc.h>
+
 /*
  * Parameters for kmem_cache_reap
  */
@@ -825,6 +827,12 @@
        return cachep;
 }
 
+#ifdef CONFIG_DEBUG_UAF
+void * uaf_alloc(kmem_cache_t *, int gfp_mask);
+int uaf_cache_free(kmem_cache_t *, void *addr);
+int uaf_free(void *addr);
+struct page *uaf_vaddr_to_page(void *obj);
+#endif
 
 #if DEBUG
 /*
@@ -1340,6 +1348,20 @@
        unsigned long save_flags;
        void* objp;
 
+#ifdef CONFIG_DEBUG_UAF
+       /* try to use uaf-allocator first */
+       objp = uaf_alloc(cachep, flags);
+       if (objp) {
+               if (cachep->ctor) {
+                       unsigned long ctor_flags;
+                       ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+                       if (!(flags & __GFP_WAIT))
+                               ctor_flags |= SLAB_CTOR_ATOMIC;
+                       cachep->ctor(objp, cachep, ctor_flags);
+               }
+               return objp;
+       }
+#endif
        kmem_cache_alloc_head(cachep, flags);
 try_again:
        local_irq_save(save_flags);
@@ -1434,13 +1456,17 @@
 
        if (cachep->flags & SLAB_RED_ZONE) {
                objp -= BYTES_PER_WORD;
-               if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
+               if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2) {
                        /* Either write before start, or a double free. */
+                       printk("inconsistency in %s\n", cachep->name);
                        BUG();
+               }
                if (xchg((unsigned long *)(objp+cachep->objsize -
-                               BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
+                               BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2) {
                        /* Either write past end, or a double free. */
+                       printk("inconsistency in %s\n", cachep->name);
                        BUG();
+               }
        }
        if (cachep->flags & SLAB_POISON)
                kmem_poison_obj(cachep, objp);
@@ -1576,6 +1602,10 @@
 void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 {
        unsigned long flags;
+#ifdef CONFIG_DEBUG_UAF
+       if (uaf_cache_free(cachep, objp))
+               return;
+#endif
 #if DEBUG
        CHECK_PAGE(virt_to_page(objp));
        if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
@@ -1601,6 +1631,10 @@
 
        if (!objp)
                return;
+#ifdef CONFIG_DEBUG_UAF
+       if (uaf_free((void *) objp))
+               return;
+#endif
        local_irq_save(flags);
        CHECK_PAGE(virt_to_page(objp));
        c = GET_PAGE_CACHE(virt_to_page(objp));
@@ -2076,3 +2110,478 @@
 #endif
 }
 #endif
+
+
+
+#ifdef CONFIG_DEBUG_UAF
+
+#define MAX_UAF_OBJ_SIZE       8       /* in pages */
+#define UAF_ASSERT(xxx)                if (!(xxx)) BUG();
+#define UAF_DEBUG__
+#ifdef UAF_DEBUG
+#define uaf_printk(fmt,a...)   printk(fmt, ##a)
+#else
+#define uaf_printk(fmt,a...)
+#endif
+
+struct uaf_stats {
+       atomic_t uaf_allocated;
+       atomic_t uaf_allocations;
+       atomic_t uaf_failed;
+};
+
+static int uaf_max = 8192;
+static void *uaf_bitmap = NULL;
+static spinlock_t uaf_lock;
+static int uaf_last_found = 0;
+static int uaf_used = 0;
+static struct vm_struct *uaf_area = NULL;
+static struct uaf_stats uaf_stats[MAX_UAF_OBJ_SIZE + 1];
+
+static int __init uaf_setup(char *str)
+{
+        uaf_max = simple_strtoul(str, NULL, 0);
+        return 1;
+}
+
+__setup("uaf=", uaf_setup);
+
+
+void uaf_init(void)
+{
+       int size;
+
+       printk("UAF: total vmalloc-space - %lu\n",
+                       VMALLOC_END - VMALLOC_START);
+
+       uaf_area = get_vm_area(PAGE_SIZE * uaf_max, VM_ALLOC);
+       if (!uaf_area) {
+               printk(KERN_ALERT "UAF: can't reserve %lu bytes in KVA\n",
+                               PAGE_SIZE * uaf_max);
+               return;
+       }
+
+       printk("UAF: reserved %lu bytes in KVA at 0x%p\n",
+                       PAGE_SIZE * uaf_max, uaf_area->addr);
+       /* how many bytes do we need to track space usage? */
+       size = uaf_max / 8 + 8;
+
+       uaf_bitmap = vmalloc(size);
+       if (!uaf_bitmap) {
+               printk(KERN_ALERT
+                       "UAF: can't allocate %d bytes for bitmap\n", size);
+               return;
+       }
+       memset(uaf_bitmap, 0, size);
+       spin_lock_init(&uaf_lock);
+       memset(uaf_stats, 0, sizeof(uaf_stats));
+
+       printk("UAF: allocated %d for bitmap\n", size);
+}
+
+static int uaf_find(int len)
+{
+       int new_last_found = -1;
+       int loop = 0;
+       int i, j;
+
+       j = uaf_last_found;
+
+       do {
+               i = find_next_zero_bit(uaf_bitmap, uaf_max, j);
+               if (i >= uaf_max) {
+                       /* repeat from 0 */
+                       if (++loop > 1) {
+                               /* this is 2nd loop and it's useless */
+                               return -1;
+                       }
+
+                       i = find_next_zero_bit(uaf_bitmap, uaf_max, 0);
+                       if (i >= uaf_max)
+                               return -1;
+
+                       /* save found num for subsequent searches */
+                       if (new_last_found == -1)
+                               new_last_found = uaf_last_found = i;
+                       UAF_ASSERT(new_last_found < uaf_max);
+               }
+
+               /*
+                * OK, found the first zero bit;
+                * now try to find the requested contiguous zero space
+                */
+
+               /* FIXME: implement multipage allocation! */
+               break;
+
+               /*
+               j = find_next_bit(uaf_bitmap, uaf_max, i);
+               if (++loop2 > 10000) {
+                       printk("ALERT: loop2=%d\n", loop2);
+                       return -1;
+               }
+               */
+       } while (j - i < len);
+
+       /* found! */
+       if (new_last_found == -1)
+               uaf_last_found = i + 1;
+       if (uaf_last_found >= uaf_max)
+               uaf_last_found = 0;
+       return i;
+}
+
+extern int __vmalloc_area_pages (unsigned long address, unsigned long size,
+                                       int gfp_mask, pgprot_t prot,
+                                       struct page ***pages);
+void * uaf_alloc(kmem_cache_t *cachep, int gfp_mask)
+{
+       struct page *ptrs[MAX_UAF_OBJ_SIZE];
+       int size = cachep->objsize;
+       struct page **pages;
+       unsigned long flags;
+       unsigned long addr;
+       int i, j, err = -2000;
+
+       if (uaf_bitmap == NULL)
+               return NULL;
+
+       if (!(cachep->flags & SLAB_USE_UAF))
+               return NULL;
+
+       pages = (struct page **) ptrs;
+       size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+       /* FIXME: implement multipage allocation! */
+       if (size > 1)
+               return NULL;
+       if (size > MAX_UAF_OBJ_SIZE) {
+               printk(KERN_ALERT "size is too big: %d\n", size);
+               return NULL;
+       }
+
+       if (uaf_used == uaf_max) {
+               uaf_printk("UAF: space exhausted!\n");
+               atomic_inc(&uaf_stats[size].uaf_failed);
+               return NULL;
+       }
+
+
+       spin_lock_irqsave(&uaf_lock, flags);
+       i = uaf_find(size);
+       if (i < 0) {
+               spin_unlock_irqrestore(&uaf_lock, flags);
+               atomic_inc(&uaf_stats[size].uaf_failed);
+               return NULL;
+       }
+       for (j = 0; j < size; j++) {
+               UAF_ASSERT(!test_bit(i + j, uaf_bitmap));
+               set_bit(i + j, uaf_bitmap);
+               uaf_used++;
+       }
+       spin_unlock_irqrestore(&uaf_lock, flags);
+
+       addr = ((unsigned long) uaf_area->addr) + (PAGE_SIZE * i);
+       uaf_printk("UAF: found %d/%d, base 0x%p, map at 0x%lx: ", i,
+                       size, uaf_area->addr, addr);
+
+       /* OK. we've found free space, let's allocate pages */
+       memset(pages, 0, sizeof(struct page *) * MAX_UAF_OBJ_SIZE);
+       for (j = 0; j < size; j++) {
+               pages[j] = alloc_page(gfp_mask);
+               if (pages[j] == NULL)
+                       goto nomem;
+               uaf_printk("0x%p ", pages[j]);
+       }
+
+       /* time to map just allocated pages */
+       err = __vmalloc_area_pages(addr, PAGE_SIZE * size, gfp_mask,
+                                       PAGE_KERNEL, &pages);
+       pages = (struct page **) ptrs;
+       if (err == 0) {
+               /* put slab cache pointer in first page */
+               ptrs[0]->list.next = (void *) cachep;
+               uaf_printk(" -> 0x%lx\n", addr);
+               atomic_inc(&uaf_stats[size].uaf_allocated);
+               atomic_inc(&uaf_stats[size].uaf_allocations);
+               if (!in_interrupt() && !in_softirq())
+                       flush_tlb_all();
+               else
+                       local_flush_tlb();
+               size = cachep->objsize;
+               if (size < PAGE_SIZE)
+                       memset((char *) addr + size, 0xa7, PAGE_SIZE - size);
+               return (void *) addr;
+       }
+
+nomem:
+       printk(KERN_ALERT "can't map pages: %d\n", err);
+       for (j = 0; j < size; j++)
+               if (pages[j])
+                       __free_page(pages[j]);
+
+       /* can't find free pages */
+       spin_lock_irqsave(&uaf_lock, flags);
+       for (j = 0; j < size; j++) {
+               clear_bit(i + j, uaf_bitmap);
+               uaf_used--;
+       }
+       spin_unlock_irqrestore(&uaf_lock, flags);
+       atomic_inc(&uaf_stats[size].uaf_failed);
+
+       return NULL;
+}
+
+extern void free_area_pmd(pgd_t *dir, unsigned long address,
+                                 unsigned long size);
+static void uaf_unmap(unsigned long address, unsigned long size)
+{
+       unsigned long end = (address + size);
+       pgd_t *dir;
+
+       dir = pgd_offset_k(address);
+       flush_cache_all();
+       do {
+               free_area_pmd(dir, address, end - address);
+               address = (address + PGDIR_SIZE) & PGDIR_MASK;
+               dir++;
+       } while (address && (address < end));
+
+       /*
+        * we must not call smp_call_function() with interrupts disabled,
+        * otherwise we can get into a deadlock
+        */
+       if (!in_interrupt() && !in_softirq())
+               flush_tlb_all();
+       else
+               local_flush_tlb();
+}
+
+/*
+ * returns 1 if the free was successful
+ */
+int uaf_cache_free(kmem_cache_t *cachep, void *addr)
+{
+       struct page *pages[MAX_UAF_OBJ_SIZE];
+       int size = cachep->objsize;
+       unsigned long flags;
+       int i, j;
+
+       if (cachep->flags & SLAB_USE_UAF)
+               uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
+
+       size = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
+       if (size > MAX_UAF_OBJ_SIZE)
+               return 0;
+
+       if (uaf_bitmap == NULL)
+               return 0;
+
+       /* first, check whether the address is in UAF space */
+       if ((unsigned) addr < (unsigned) uaf_area->addr ||
+               (unsigned) addr >= (unsigned) uaf_area->addr + uaf_area->size)
+               return 0;
+
+       uaf_printk("UAF: to free 0x%p/%d\n", addr, size);
+
+       /* calculate placement in bitmap */
+       i = (unsigned) addr - (unsigned) uaf_area->addr;
+       UAF_ASSERT(i >= 0);
+       i = i / PAGE_SIZE;
+
+       /* check against double-free */
+       spin_lock_irqsave(&uaf_lock, flags);
+       for (j = 0; j < size; j++) {
+               /* now check that the corresponding bit is set */
+               unsigned long address;
+               UAF_ASSERT(i+j >= 0 && i+j < uaf_max);
+               BUG_ON(!test_bit(i+j, uaf_bitmap));
+
+               address = ((unsigned long) addr) + (PAGE_SIZE * j);
+               pages[j] = vmalloc_to_page((void *) address);
+               BUG_ON(pages[j] == NULL);
+
+               /* now free space in UAF */
+               clear_bit(i+j, uaf_bitmap);
+               uaf_used--;
+       }
+       spin_unlock_irqrestore(&uaf_lock, flags);
+
+       /* check poison bytes */
+       if (cachep->objsize < PAGE_SIZE) {
+               unsigned char *a = (void *) addr;
+               for (i = 0; i < PAGE_SIZE - cachep->objsize; i++)
+                       if (a[cachep->objsize + i] != 0xa7) {
+                               printk("corruption(0x%x) at %u in %s/0x%p\n",
+                                       (unsigned) a[cachep->objsize + i],
+                                       cachep->objsize + i, cachep->name, addr);
+                               BUG();
+                       }
+       }
+       UAF_ASSERT(((unsigned long) addr & ~PAGE_MASK) == 0UL);
+
+       /* calculate placement in bitmap */
+       i = (unsigned) addr - (unsigned) uaf_area->addr;
+       UAF_ASSERT(i >= 0);
+       i = i / PAGE_SIZE;
+
+       uaf_unmap((unsigned long) addr, PAGE_SIZE * size);
+       /* free all the pages */
+       for (j = 0; j < size; j++)
+               __free_page(pages[j]);
+
+       atomic_dec(&uaf_stats[size].uaf_allocated);
+
+       uaf_printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
+       //printk("UAF: freed %d/%d at 0x%p\n", i, size, addr);
+
+       return 1;
+}
+
+struct page *uaf_vaddr_to_page(void *obj)
+{
+       if (uaf_bitmap == NULL)
+               return NULL;
+
+       /* first, check whether the address is in UAF space */
+       if ((unsigned) obj < (unsigned) uaf_area->addr ||
+               (unsigned) obj >= (unsigned) uaf_area->addr + uaf_area->size)
+               return NULL;
+
+       return vmalloc_to_page(obj);
+}
+
+int uaf_free(void *obj)
+{
+       struct page *page = uaf_vaddr_to_page((void *) obj);
+       kmem_cache_t *c;
+
+       if (!page)
+               return 0;
+
+       c = GET_PAGE_CACHE(page);
+       return uaf_cache_free(c, (void *) obj);
+}
+
+int uaf_is_allocated(void *obj)
+{
+       unsigned long addr = (unsigned long) obj;
+       int i;
+
+       if (uaf_bitmap == NULL)
+               return 0;
+
+       addr &= PAGE_MASK;
+       /* first, check whether the address is in UAF space */
+       if (addr < (unsigned long) uaf_area->addr ||
+                       addr >= (unsigned long) uaf_area->addr + uaf_area->size)
+               return 0;
+
+       /* calculate placement in bitmap */
+       i = (unsigned) addr - (unsigned) uaf_area->addr;
+       i = i / PAGE_SIZE;
+       return test_bit(i, uaf_bitmap);
+}
+
+static void *uaf_s_start(struct seq_file *m, loff_t *pos)
+{
+       loff_t n = *pos;
+
+       if (!n)
+               seq_printf(m, "size(pgs) allocated failed allocations. "
+                               "%d reserved, %d in use, %d last\n"
+                               "start 0x%p, size %lu, bitmap 0x%p\n"
+                               "VMALLOC_START 0x%x, VMALLOC_END 0x%x\n",
+                               uaf_max, uaf_used, uaf_last_found,
+                               uaf_area->addr, uaf_area->size,
+                               uaf_bitmap, VMALLOC_START, VMALLOC_END);
+       else if (n > MAX_UAF_OBJ_SIZE)
+               return NULL;
+
+       *pos = 1;
+       return (void *) 1;
+}
+
+static void *uaf_s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+       unsigned long n = *pos;
+       ++*pos;
+       if (n + 1 > MAX_UAF_OBJ_SIZE)
+               return NULL;
+       return (void *) (n + 1);
+}
+
+static void uaf_s_stop(struct seq_file *m, void *p)
+{
+}
+
+static int uaf_s_show(struct seq_file *m, void *p)
+{
+       int n = (int) p;
+
+       if (n > MAX_UAF_OBJ_SIZE)
+               return 0;
+       seq_printf(m, "%d  %d  %d %d\n", n,
+                       atomic_read(&uaf_stats[n].uaf_allocated),
+                       atomic_read(&uaf_stats[n].uaf_failed),
+                       atomic_read(&uaf_stats[n].uaf_allocations));
+       return 0;
+}
+
+struct seq_operations uafinfo_op = {
+       .start  = uaf_s_start,
+       .next   = uaf_s_next,
+       .stop   = uaf_s_stop,
+       .show   = uaf_s_show,
+};
+
+ssize_t uafinfo_write(struct file *file, const char *buffer,
+                               size_t count, loff_t *ppos)
+{
+       char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+       char *key, *name;
+       int res;
+       struct list_head *p;
+
+       if (count > MAX_SLABINFO_WRITE)
+               return -EINVAL;
+       if (copy_from_user(&kbuf, buffer, count))
+               return -EFAULT;
+       kbuf[MAX_SLABINFO_WRITE] = '\0';
+
+       tmp = kbuf;
+       key = strsep(&tmp, " \t\n");
+       if (!key)
+               return -EINVAL;
+       if (!strcmp(key, "on"))
+               res = 1;
+       else if (!strcmp(key, "off"))
+               res = 0;
+       else
+               return -EINVAL;
+
+       name = strsep(&tmp, " \t\n");
+       if (!name)
+               return -EINVAL;
+
+       /* Find the cache in the chain of caches. */
+       down(&cache_chain_sem);
+       list_for_each(p,&cache_chain) {
+               kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+
+               if (!strcmp(cachep->name, name)) {
+                       if (res) {
+                               printk("UAF: use on %s\n", cachep->name);
+                               cachep->flags |= SLAB_USE_UAF;
+                       } else {
+                               printk("UAF: don't use on %s\n", cachep->name);
+                               cachep->flags &= ~SLAB_USE_UAF;
+                       }
+                       break;
+               }
+       }
+       up(&cache_chain_sem);
+       return count;
+}
+#endif
+
Index: linux-2.4.24/mm/vmalloc.c
===================================================================
--- linux-2.4.24.orig/mm/vmalloc.c      2004-07-16 09:24:01.000000000 -0400
+++ linux-2.4.24/mm/vmalloc.c   2004-07-16 13:55:05.000000000 -0400
@@ -53,7 +53,7 @@
        } while (address < end);
 }
 
-static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
+void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
 {
        pmd_t * pmd;
        unsigned long end;
@@ -152,7 +152,7 @@
        return 0;
 }
 
-static inline int __vmalloc_area_pages (unsigned long address,
+int __vmalloc_area_pages (unsigned long address,
                                        unsigned long size,
                                        int gfp_mask,
                                        pgprot_t prot,
Index: linux-2.4.24/mm/page_alloc.c
===================================================================
--- linux-2.4.24.orig/mm/page_alloc.c   2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/mm/page_alloc.c        2004-07-16 13:55:05.000000000 -0400
@@ -91,6 +91,12 @@
        zone_t *zone;
 
        arch_free_page(page, order);
+
+       for (index = 0; index < (1 << order); index++) {
+               BUG_ON(atomic_read(&page[index].count) > 0);
+               BUG_ON(PageSlab(page + index));
+       }
+
        /*
         * Yes, think what happens when other parts of the kernel take 
         * a reference to a page in order to pin it for io. -ben
Index: linux-2.4.24/init/main.c
===================================================================
--- linux-2.4.24.orig/init/main.c       2004-07-16 09:24:01.000000000 -0400
+++ linux-2.4.24/init/main.c    2004-07-16 13:55:05.000000000 -0400
@@ -437,6 +437,9 @@
 #if defined(CONFIG_SYSVIPC)
        ipc_init();
 #endif
+#ifdef CONFIG_DEBUG_UAF
+       uaf_init();
+#endif
        rest_init();
 }
 
Index: linux-2.4.24/fs/proc/proc_misc.c
===================================================================
--- linux-2.4.24.orig/fs/proc/proc_misc.c       2004-07-16 09:23:51.000000000 -0400
+++ linux-2.4.24/fs/proc/proc_misc.c    2004-07-16 13:55:05.000000000 -0400
@@ -303,6 +303,22 @@
        release:        seq_release,
 };
 
+#ifdef CONFIG_DEBUG_UAF
+extern struct seq_operations uafinfo_op;
+extern ssize_t uafinfo_write(struct file *, const char *, size_t, loff_t *);
+static int uafinfo_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &uafinfo_op);
+}
+static struct file_operations proc_uafinfo_operations = {
+       .open           = uafinfo_open,
+       .read           = seq_read,
+       .write          = uafinfo_write,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+#endif
+
 static int kstat_read_proc(char *page, char **start, off_t off,
                                 int count, int *eof, void *data)
 {
@@ -640,6 +656,9 @@
        create_seq_entry("iomem", 0, &proc_iomem_operations);
        create_seq_entry("partitions", 0, &proc_partitions_operations);
        create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#ifdef CONFIG_DEBUG_UAF
+       create_seq_entry("uafinfo",S_IWUSR|S_IRUGO,&proc_uafinfo_operations);
+#endif
 #ifdef CONFIG_MODULES
        create_seq_entry("ksyms", 0, &proc_ksyms_operations);
 #endif
Index: linux-2.4.24/include/linux/slab.h
===================================================================
--- linux-2.4.24.orig/include/linux/slab.h      2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/include/linux/slab.h   2004-07-17 05:26:51.000000000 -0400
@@ -40,6 +40,7 @@
 #define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* force alignment */
+#define SLAB_USE_UAF           0x00040000UL    /* use UAF allocator */
 
 /* flags passed to a constructor func */
 #define        SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
Index: linux-2.4.24/include/asm-i386/io.h
===================================================================
--- linux-2.4.24.orig/include/asm-i386/io.h     2004-07-16 09:23:54.000000000 -0400
+++ linux-2.4.24/include/asm-i386/io.h  2004-07-17 05:27:02.000000000 -0400
@@ -75,6 +75,16 @@
 
 static inline unsigned long virt_to_phys(volatile void * address)
 {
+#ifdef CONFIG_DEBUG_UAF
+       unsigned long addr = (unsigned long) address;
+       if (vmlist && addr >= VMALLOC_START && addr < VMALLOC_END) {
+               struct page *page = vmalloc_to_page((void *) address);
+               if (page) {
+                       unsigned long offset = addr & ~PAGE_MASK;
+                       address = page_address(page) + offset;
+               }
+       }
+#endif
        return __pa(address);
 }
 
Index: linux-2.4.24/include/asm-i386/page.h
===================================================================
--- linux-2.4.24.orig/include/asm-i386/page.h   2004-07-16 09:33:00.000000000 -0400
+++ linux-2.4.24/include/asm-i386/page.h        2004-07-17 05:26:19.000000000 -0400
@@ -131,9 +131,60 @@
 #define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
 #define __MAXMEM               (-__PAGE_OFFSET-__VMALLOC_RESERVE)
 #define MAXMEM                 ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
+
+#ifndef CONFIG_DEBUG_UAF
 #define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define virt_to_page(kaddr)    (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
+#else
+#define __pa(x)                ({                                                      \
+                               unsigned long __pn, __fr;                       \
+                               __pn = (unsigned long)(x)-PAGE_OFFSET;          \
+                               __fr = __pn >> PAGE_SHIFT;                      \
+                               if (jiffies > HZ*3 && __fr >= max_mapnr) {      \
+                                       printk("invalid arg __pa(0x%x)"         \
+                                               " at %s:%d\n", (unsigned) (x),  \
+                                               __FILE__, __LINE__);            \
+                                       dump_stack();                           \
+                               }                                               \
+                               __pn;                                           \
+                       })
+
+#define __va(x)                ({                                                      \
+                               unsigned long __pn;                             \
+                               __pn = (unsigned long) (x) >> PAGE_SHIFT;       \
+                               if (jiffies > HZ*3 && __pn >= max_mapnr) {      \
+                                       printk("invalid arg __va(0x%x)"         \
+                                               " at %s:%d\n", (unsigned) (x),  \
+                                               __FILE__, __LINE__);            \
+                                       dump_stack();                           \
+                               }                                               \
+                               ((void *)((unsigned long)(x) + PAGE_OFFSET));   \
+                       })
+#ifndef PKMAP_BASE
+#define PKMAP_BASE (0xfe000000UL)
+#endif
+#define virt_to_page(ka) ({                                                    \
+                               struct page *_p;                                \
+                               if ((unsigned)(ka) >= VMALLOC_START &&          \
+                                               (unsigned)(ka) < VMALLOC_END) { \
+                                       _p = vmalloc_to_page((void *)(ka));     \
+                                       if (!_p) {                              \
+                                               printk(KERN_ALERT               \
+                                                       "wrong address 0x%x, "  \
+                                                       "VMALLOC_START 0x%x\n", \
+                                                       (unsigned) (ka),        \
+                                                       (unsigned)VMALLOC_START); \
+                                               _p = mem_map+(__pa(ka) >> PAGE_SHIFT); \
+                                               dump_stack();                   \
+                                       }                                       \
+                               } else                                          \
+                                       _p = mem_map+(__pa(ka) >> PAGE_SHIFT);  \
+                               (_p);                                           \
+                       })
+#endif
+
+
 #define VALID_PAGE(page)       ((page - mem_map) < max_mapnr)
 
 #define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
Index: linux-2.4.24/arch/i386/config.in
===================================================================
--- linux-2.4.24.orig/arch/i386/config.in       2004-07-16 09:33:02.000000000 -0400
+++ linux-2.4.24/arch/i386/config.in    2004-07-16 13:55:05.000000000 -0400
@@ -509,6 +509,9 @@
    bool '  Check for stack overflows' CONFIG_DEBUG_STACKOVERFLOW
    bool '  Debug high memory support' CONFIG_DEBUG_HIGHMEM
    bool '  Debug memory allocations' CONFIG_DEBUG_SLAB
+   if [ "$CONFIG_DEBUG_SLAB" != "n" ]; then
+      bool '  Debug memory allocations (use-after-free via vmalloced space)' CONFIG_DEBUG_UAF
+   fi
    bool '  Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
    bool '  Magic SysRq key' CONFIG_MAGIC_SYSRQ
    bool '  Spinlock debugging' CONFIG_DEBUG_SPINLOCK
Index: linux-2.4.24/kernel/ksyms.c
===================================================================
--- linux-2.4.24.orig/kernel/ksyms.c    2004-07-16 09:36:49.000000000 -0400
+++ linux-2.4.24/kernel/ksyms.c 2004-07-16 13:55:05.000000000 -0400
@@ -123,6 +123,8 @@
 EXPORT_SYMBOL(kfree);
 EXPORT_SYMBOL(vfree);
 EXPORT_SYMBOL(__vmalloc);
+extern struct vm_struct * vmlist;
+EXPORT_SYMBOL(vmlist);
 EXPORT_SYMBOL(vmap);
 EXPORT_SYMBOL(vmalloc_to_page);
 EXPORT_SYMBOL(mem_map);

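Supporting changes outside mm/slab.c: mm/vmalloc.c un-statics
free_area_pmd() and __vmalloc_area_pages() so the UAF allocator can map and
unmap its pages; mm/page_alloc.c BUGs when a freed page still holds a
reference or is still marked PageSlab; init/main.c calls uaf_init() at boot;
fs/proc/proc_misc.c wires up /proc/uafinfo; include/asm-i386/io.h and
page.h teach virt_to_phys(), __pa(), __va() and virt_to_page() to cope with
vmalloc'ed addresses; kernel/ksyms.c exports vmlist for the io.h check.
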
%diffstat
 arch/i386/config.in     |    3 
 fs/proc/proc_misc.c     |   19 +
 include/asm-i386/io.h   |   10 
 include/asm-i386/page.h |   51 ++++
 include/linux/slab.h    |    1 
 init/main.c             |    3 
 kernel/ksyms.c          |    2 
 mm/page_alloc.c         |    6 
 mm/slab.c               |  513 +++++++++++++++++++++++++++++++++++++++++++++++-
 mm/vmalloc.c            |    4 
 10 files changed, 608 insertions(+), 4 deletions(-)

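The principle behind the patch can be reproduced in a few lines of userspace
C: give each object its own page(s) and unmap them on free, so a stale
pointer faults on first use. The sketch below is an illustrative analogue
using mmap()/munmap(), not code from the patch; all names in it are made up.

        /* uaf_demo.c - userspace sketch of the map-on-alloc/unmap-on-free idea */
        #define _GNU_SOURCE
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        /* round a request up to whole pages, as uaf_alloc() does */
        static size_t round_pages(size_t size)
        {
                size_t pg = (size_t) sysconf(_SC_PAGESIZE);
                return (size + pg - 1) & ~(pg - 1);
        }

        static void *demo_alloc(size_t size)
        {
                /* fresh anonymous mapping: the object owns its page(s) */
                void *p = mmap(NULL, round_pages(size),
                               PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                return p == MAP_FAILED ? NULL : p;
        }

        static void demo_free(void *p, size_t size)
        {
                /* unmap instead of recycling: the range becomes invalid */
                munmap(p, round_pages(size));
        }

        int main(void)
        {
                char *obj = demo_alloc(128);

                if (!obj)
                        return 1;
                strcpy(obj, "live object");
                printf("%s\n", obj);
                demo_free(obj, 128);
                /* obj[0] = 'X';  <- would SIGSEGV here, just as touching a
                 * freed SLAB_USE_UAF object faults once its pages are
                 * unmapped from the reserved vmalloc range */
                return 0;
        }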