Skip to content

Commit b1eeab6

Browse files
committed
kmemcheck: add hooks for the page allocator
This adds support for tracking the initializedness of memory that was allocated with the page allocator. Highmem requests are not tracked.

Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>

[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar <mingo@elte.hu>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
1 parent 9b5cab3 commit b1eeab6

File tree

8 files changed

+122
-31
lines changed

8 files changed

+122
-31
lines changed

arch/x86/include/asm/thread_info.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -154,9 +154,9 @@ struct thread_info {
154154

155155
/* thread information allocation */
156156
#ifdef CONFIG_DEBUG_STACK_USAGE
157-
#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
157+
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
158158
#else
159-
#define THREAD_FLAGS GFP_KERNEL
159+
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
160160
#endif
161161

162162
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

arch/x86/mm/kmemcheck/shadow.c

+8
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,14 @@ void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
116116
kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
117117
}
118118

119+
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
120+
{
121+
unsigned int i;
122+
123+
for (i = 0; i < n; ++i)
124+
kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
125+
}
126+
119127
enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
120128
{
121129
uint8_t *x;

include/linux/gfp.h

+5
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,12 @@ struct vm_area_struct;
5151
#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
5252
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
5353
#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */
54+
55+
#ifdef CONFIG_KMEMCHECK
5456
#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
57+
#else
58+
#define __GFP_NOTRACK ((__force gfp_t)0)
59+
#endif
5560

5661
/*
5762
* This may seem redundant, but it's a way of annotating false positives vs.

include/linux/kmemcheck.h

+29-6
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,15 @@
88
extern int kmemcheck_enabled;
99

1010
/* The slab-related functions. */
11-
void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
12-
struct page *page, int order);
13-
void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
11+
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
12+
void kmemcheck_free_shadow(struct page *page, int order);
1413
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
1514
size_t size);
1615
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
1716

17+
void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
18+
gfp_t gfpflags);
19+
1820
void kmemcheck_show_pages(struct page *p, unsigned int n);
1921
void kmemcheck_hide_pages(struct page *p, unsigned int n);
2022

@@ -27,20 +29,20 @@ void kmemcheck_mark_freed(void *address, unsigned int n);
2729

2830
void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
2931
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
32+
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
3033

3134
int kmemcheck_show_addr(unsigned long address);
3235
int kmemcheck_hide_addr(unsigned long address);
3336
#else
3437
#define kmemcheck_enabled 0
3538

3639
static inline void
37-
kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
38-
struct page *page, int order)
40+
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
3941
{
4042
}
4143

4244
static inline void
43-
kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
45+
kmemcheck_free_shadow(struct page *page, int order)
4446
{
4547
}
4648

@@ -55,6 +57,11 @@ static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
5557
{
5658
}
5759

60+
static inline void kmemcheck_pagealloc_alloc(struct page *p,
61+
unsigned int order, gfp_t gfpflags)
62+
{
63+
}
64+
5865
static inline bool kmemcheck_page_is_tracked(struct page *p)
5966
{
6067
return false;
@@ -75,6 +82,22 @@ static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
7582
static inline void kmemcheck_mark_freed(void *address, unsigned int n)
7683
{
7784
}
85+
86+
static inline void kmemcheck_mark_unallocated_pages(struct page *p,
87+
unsigned int n)
88+
{
89+
}
90+
91+
static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
92+
unsigned int n)
93+
{
94+
}
95+
96+
static inline void kmemcheck_mark_initialized_pages(struct page *p,
97+
unsigned int n)
98+
{
99+
}
100+
78101
#endif /* CONFIG_KMEMCHECK */
79102

80103
#endif /* LINUX_KMEMCHECK_H */

mm/kmemcheck.c

+32-13
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1+
#include <linux/gfp.h>
12
#include <linux/mm_types.h>
23
#include <linux/mm.h>
34
#include <linux/slab.h>
45
#include <linux/kmemcheck.h>
56

6-
void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
7-
struct page *page, int order)
7+
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
88
{
99
struct page *shadow;
1010
int pages;
@@ -16,7 +16,7 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
1616
* With kmemcheck enabled, we need to allocate a memory area for the
1717
* shadow bits as well.
1818
*/
19-
shadow = alloc_pages_node(node, flags, order);
19+
shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
2020
if (!shadow) {
2121
if (printk_ratelimit())
2222
printk(KERN_ERR "kmemcheck: failed to allocate "
@@ -33,23 +33,17 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
3333
* the memory accesses.
3434
*/
3535
kmemcheck_hide_pages(page, pages);
36-
37-
/*
38-
* Objects from caches that have a constructor don't get
39-
* cleared when they're allocated, so we need to do it here.
40-
*/
41-
if (s->ctor)
42-
kmemcheck_mark_uninitialized_pages(page, pages);
43-
else
44-
kmemcheck_mark_unallocated_pages(page, pages);
4536
}
4637

47-
void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
38+
void kmemcheck_free_shadow(struct page *page, int order)
4839
{
4940
struct page *shadow;
5041
int pages;
5142
int i;
5243

44+
if (!kmemcheck_page_is_tracked(page))
45+
return;
46+
5347
pages = 1 << order;
5448

5549
kmemcheck_show_pages(page, pages);
@@ -101,3 +95,28 @@ void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
10195
if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
10296
kmemcheck_mark_freed(object, size);
10397
}
98+
99+
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
100+
gfp_t gfpflags)
101+
{
102+
int pages;
103+
104+
if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
105+
return;
106+
107+
pages = 1 << order;
108+
109+
/*
110+
* NOTE: We choose to track GFP_ZERO pages too; in fact, they
111+
* can become uninitialized by copying uninitialized memory
112+
* into them.
113+
*/
114+
115+
/* XXX: Can use zone->node for node? */
116+
kmemcheck_alloc_shadow(page, order, gfpflags, -1);
117+
118+
if (gfpflags & __GFP_ZERO)
119+
kmemcheck_mark_initialized_pages(page, pages);
120+
else
121+
kmemcheck_mark_uninitialized_pages(page, pages);
122+
}

mm/page_alloc.c

+18
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <linux/bootmem.h>
2424
#include <linux/compiler.h>
2525
#include <linux/kernel.h>
26+
#include <linux/kmemcheck.h>
2627
#include <linux/module.h>
2728
#include <linux/suspend.h>
2829
#include <linux/pagevec.h>
@@ -546,6 +547,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
546547
int i;
547548
int bad = 0;
548549

550+
kmemcheck_free_shadow(page, order);
551+
549552
for (i = 0 ; i < (1 << order) ; ++i)
550553
bad += free_pages_check(page + i);
551554
if (bad)
@@ -994,6 +997,8 @@ static void free_hot_cold_page(struct page *page, int cold)
994997
struct per_cpu_pages *pcp;
995998
unsigned long flags;
996999

1000+
kmemcheck_free_shadow(page, 0);
1001+
9971002
if (PageAnon(page))
9981003
page->mapping = NULL;
9991004
if (free_pages_check(page))
@@ -1047,6 +1052,16 @@ void split_page(struct page *page, unsigned int order)
10471052

10481053
VM_BUG_ON(PageCompound(page));
10491054
VM_BUG_ON(!page_count(page));
1055+
1056+
#ifdef CONFIG_KMEMCHECK
1057+
/*
1058+
* Split shadow pages too, because free(page[0]) would
1059+
* otherwise free the whole shadow.
1060+
*/
1061+
if (kmemcheck_page_is_tracked(page))
1062+
split_page(virt_to_page(page[0].shadow), order);
1063+
#endif
1064+
10501065
for (i = 1; i < (1 << order); i++)
10511066
set_page_refcounted(page + i);
10521067
}
@@ -1667,7 +1682,10 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
16671682
dump_stack();
16681683
show_mem();
16691684
}
1685+
return page;
16701686
got_pg:
1687+
if (kmemcheck_enabled)
1688+
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
16711689
return page;
16721690
}
16731691
EXPORT_SYMBOL(__alloc_pages_internal);

mm/slab.c

+10-5
Original file line numberDiff line numberDiff line change
@@ -1612,7 +1612,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
16121612
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
16131613
flags |= __GFP_RECLAIMABLE;
16141614

1615-
page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1615+
page = alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
16161616
if (!page)
16171617
return NULL;
16181618

@@ -1626,8 +1626,14 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
16261626
for (i = 0; i < nr_pages; i++)
16271627
__SetPageSlab(page + i);
16281628

1629-
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
1630-
kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
1629+
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1630+
kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1631+
1632+
if (cachep->ctor)
1633+
kmemcheck_mark_uninitialized_pages(page, nr_pages);
1634+
else
1635+
kmemcheck_mark_unallocated_pages(page, nr_pages);
1636+
}
16311637

16321638
return page_address(page);
16331639
}
@@ -1641,8 +1647,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
16411647
struct page *page = virt_to_page(addr);
16421648
const unsigned long nr_freed = i;
16431649

1644-
if (kmemcheck_page_is_tracked(page))
1645-
kmemcheck_free_shadow(cachep, page, cachep->gfporder);
1650+
kmemcheck_free_shadow(page, cachep->gfporder);
16461651

16471652
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
16481653
sub_zone_page_state(page_zone(page),

mm/slub.c

+18-5
Original file line numberDiff line numberDiff line change
@@ -1066,6 +1066,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
10661066
{
10671067
int order = oo_order(oo);
10681068

1069+
flags |= __GFP_NOTRACK;
1070+
10691071
if (node == -1)
10701072
return alloc_pages(flags, order);
10711073
else
@@ -1097,7 +1099,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
10971099
if (kmemcheck_enabled
10981100
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
10991101
{
1100-
kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
1102+
int pages = 1 << oo_order(oo);
1103+
1104+
kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1105+
1106+
/*
1107+
* Objects from caches that have a constructor don't get
1108+
* cleared when they're allocated, so we need to do it here.
1109+
*/
1110+
if (s->ctor)
1111+
kmemcheck_mark_uninitialized_pages(page, pages);
1112+
else
1113+
kmemcheck_mark_unallocated_pages(page, pages);
11011114
}
11021115

11031116
page->objects = oo_objects(oo);
@@ -1173,8 +1186,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
11731186
__ClearPageSlubDebug(page);
11741187
}
11751188

1176-
if (kmemcheck_page_is_tracked(page))
1177-
kmemcheck_free_shadow(s, page, compound_order(page));
1189+
kmemcheck_free_shadow(page, compound_order(page));
11781190

11791191
mod_zone_page_state(page_zone(page),
11801192
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -2734,9 +2746,10 @@ EXPORT_SYMBOL(__kmalloc);
27342746

27352747
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
27362748
{
2737-
struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2738-
get_order(size));
2749+
struct page *page;
27392750

2751+
flags |= __GFP_COMP | __GFP_NOTRACK;
2752+
page = alloc_pages_node(node, flags, get_order(size));
27402753
if (page)
27412754
return page_address(page);
27422755
else

0 commit comments

Comments
 (0)