#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

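/*
 * Marking helpers: set the kmemcheck shadow state of the n bytes starting
 * at address (or of the n pages starting at p, for the _pages variants).
 */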
void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *     struct a {
 *             int x:8, y:8;
 *     };
 *
 * then this should be rewritten as
 *
 *     struct a {
 *             kmemcheck_bitfield_begin(flags);
 *             int x:8, y:8;
 *             kmemcheck_bitfield_end(flags);
 *     };
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *     kmemcheck_annotate_bitfield(a, flags);
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

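/*
 * kmemcheck_annotate_variable() marks all sizeof(var) bytes of "var" as
 * initialized, so that reads of bytes which are never explicitly written
 * (padding, unused bitfield space) are not reported. A minimal sketch,
 * reusing the illustrative struct from the comment above:
 *
 *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *     kmemcheck_annotate_variable(*a);
 */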
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)							\

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
	unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)

#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */