1 arch/ia64/mm/init.c | 6 ++++++
2 include/linux/slab.h | 1 +
3 kernel/ksyms.c | 1 +
4 mm/slab.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++
5 4 files changed, 61 insertions(+)
7 --- linux-2.4.19-hp2_pnnl2/arch/ia64/mm/init.c~kmem_cache_validate_hp Sun Jan 19 18:59:23 2003
8 +++ linux-2.4.19-hp2_pnnl2-root/arch/ia64/mm/init.c Sun Jan 19 18:59:24 2003
9 @@ -44,6 +44,12 @@ unsigned long vmalloc_end = VMALLOC_END_
10 static struct page *vmem_map;
11 static unsigned long num_dma_physpages;
13 +struct page *check_get_page(unsigned long kaddr)
14 +{
15 +#warning FIXME: Lustre team, is this solid?
16 +	return virt_to_page(kaddr);
17 +}
18 +
20 do_check_pgt_cache (int low, int high)
22 --- linux-2.4.19-hp2_pnnl2/include/linux/slab.h~kmem_cache_validate_hp Sun Jan 19 18:59:23 2003
23 +++ linux-2.4.19-hp2_pnnl2-root/include/linux/slab.h Sun Jan 19 19:01:07 2003
24 @@ -56,6 +56,7 @@ extern kmem_cache_t *kmem_cache_create(c
25 extern int kmem_cache_destroy(kmem_cache_t *);
26 extern int kmem_cache_shrink(kmem_cache_t *);
27 extern void *kmem_cache_alloc(kmem_cache_t *, int);
28 +extern int kmem_cache_validate(kmem_cache_t *cachep, void *objp);
29 extern void kmem_cache_free(kmem_cache_t *, void *);
30 extern unsigned int kmem_cache_size(kmem_cache_t *);
32 --- linux-2.4.19-hp2_pnnl2/kernel/ksyms.c~kmem_cache_validate_hp Sun Jan 19 18:59:23 2003
33 +++ linux-2.4.19-hp2_pnnl2-root/kernel/ksyms.c Sun Jan 19 19:00:32 2003
34 @@ -118,6 +118,7 @@ EXPORT_SYMBOL(kmem_find_general_cachep);
35 EXPORT_SYMBOL(kmem_cache_create);
36 EXPORT_SYMBOL(kmem_cache_destroy);
37 EXPORT_SYMBOL(kmem_cache_shrink);
38 +EXPORT_SYMBOL(kmem_cache_validate);
39 EXPORT_SYMBOL(kmem_cache_alloc);
40 EXPORT_SYMBOL(kmem_cache_free);
41 EXPORT_SYMBOL(kmem_cache_size);
42 --- linux-2.4.19-hp2_pnnl2/mm/slab.c~kmem_cache_validate_hp Sun Jan 19 18:59:23 2003
43 +++ linux-2.4.19-hp2_pnnl2-root/mm/slab.c Sun Jan 19 18:59:24 2003
44 @@ -1207,6 +1207,59 @@ failed:
45 * Called with the cache-lock held.
48 +extern struct page *check_get_page(unsigned long kaddr);
49 +struct page *page_mem_map(struct page *page);
50 +static int kmem_check_cache_obj (kmem_cache_t * cachep,
51 +				 slab_t *slabp, void * objp)
52 +{
53 +	int i;
54 +	unsigned int objnr;
55 +
56 +#if DEBUG
57 +	if (cachep->flags & SLAB_RED_ZONE) {
58 +		objp -= BYTES_PER_WORD;
59 +		if ( *(unsigned long *)objp != RED_MAGIC2)
60 +			/* Either write before start, or a double free. */
61 +			return 0;
62 +		if (*(unsigned long *)(objp+cachep->objsize -
63 +				BYTES_PER_WORD) != RED_MAGIC2)
64 +			/* Either write past end, or a double free. */
65 +			return 0;
66 +	}
67 +#endif
68 +
69 +	objnr = (objp-slabp->s_mem)/cachep->objsize;
70 +	if (objnr >= cachep->num)
71 +		return 0;
72 +	if (objp != slabp->s_mem + objnr*cachep->objsize)
73 +		return 0;
74 +
75 +	/* Check slab's freelist to see if this obj is there. */
76 +	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
77 +		if (i == objnr)
78 +			return 0;
79 +	}
80 +
81 +	return 1;
82 +}
83 +
84 +int kmem_cache_validate(kmem_cache_t *cachep, void *objp)
85 +{
86 +	struct page *page = check_get_page((unsigned long)objp);
87 +
88 +	if (!VALID_PAGE(page))
89 +		return 0;
90 +
91 +	if (!PageSlab(page))
92 +		return 0;
93 +
94 +	/* XXX check for freed slab objects ? */
95 +	if (!kmem_check_cache_obj(cachep, GET_PAGE_SLAB(page), objp))
96 +		return 0;
97 +
98 +	return (cachep == GET_PAGE_CACHE(page));
99 +}
100 +
102 static int kmem_extra_free_checks (kmem_cache_t * cachep,
103 slab_t *slabp, void * objp)