// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them one after another
 * in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
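
/*
 * Illustrative sketch (editor's note, not code used by the depot itself):
 * a KASAN-like user keeps only the 4-byte handles per tracked object and
 * re-expands them on demand, e.g. with a hypothetical caller-side struct:
 *
 *        struct track_info {
 *                depot_stack_handle_t alloc_handle;
 *                depot_stack_handle_t free_handle;
 *        };
 *
 * The handles are produced by stack_depot_save() below and turned back
 * into full traces with stack_depot_fetch() or stack_depot_print().
 */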

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
                STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
         (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
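
/*
 * Worked example of the handle bit budget (editor's sketch): with the usual
 * PAGE_SHIFT of 12, STACK_ALLOC_OFFSET_BITS is 2 + 12 - 4 = 10 (offsets are
 * stored in 16-byte units within a 16 KiB slab), one bit is reserved so a
 * valid handle is never 0, and the remaining 32 - 1 - 10 = 21 index bits
 * cover far more than the STACK_ALLOC_SLABS_CAP of 8192 slabs actually
 * allowed.
 */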

/* The compact structure to store the reference to stacks. */
union handle_parts {
        depot_stack_handle_t handle;
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
                u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
};

struct stack_record {
        struct stack_record *next;      /* Link in the hashtable */
        u32 hash;                       /* Hash in the hashtable */
        u32 size;                       /* Number of frames in the stack */
        union handle_parts handle;
        unsigned long entries[];        /* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
        if (!*prealloc)
                return false;
        /*
         * This smp_load_acquire() pairs with smp_store_release() to
         * |next_slab_inited| below and in depot_alloc_stack().
         */
        if (smp_load_acquire(&next_slab_inited))
                return true;
        if (stack_slabs[depot_index] == NULL) {
                stack_slabs[depot_index] = *prealloc;
                *prealloc = NULL;
        } else {
                /* If this is the last depot slab, do not touch the next one. */
                if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
                        stack_slabs[depot_index + 1] = *prealloc;
                        *prealloc = NULL;
                }
                /*
                 * This smp_store_release pairs with smp_load_acquire() from
                 * |next_slab_inited| above and in stack_depot_save().
                 */
                smp_store_release(&next_slab_inited, 1);
        }
        return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
        struct stack_record *stack;
        size_t required_size = struct_size(stack, entries, size);

        required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

        if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
                if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
                        WARN_ONCE(1, "Stack depot reached limit capacity");
                        return NULL;
                }
                depot_index++;
                depot_offset = 0;
                /*
                 * smp_store_release() here pairs with smp_load_acquire() from
                 * |next_slab_inited| in stack_depot_save() and
                 * init_stack_slab().
                 */
                if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
                        smp_store_release(&next_slab_inited, 0);
        }
        init_stack_slab(prealloc);
        if (stack_slabs[depot_index] == NULL)
                return NULL;

        stack = stack_slabs[depot_index] + depot_offset;

        stack->hash = hash;
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
        stack->handle.valid = 1;
        memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
        depot_offset += required_size;

        return stack;
}

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c
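
/*
 * Sizing sketch (editor's note): with CONFIG_STACK_HASH_ORDER set to 20,
 * for example, STACK_HASH_SIZE is 1 << 20 = 1048576 buckets, so the table
 * of stack_record pointers allocated in stack_depot_init() takes 8 MiB on
 * a 64-bit kernel.  The actual value of the option is configuration
 * dependent.
 */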

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
        int ret;

        ret = kstrtobool(str, &stack_depot_disable);
        if (!ret && stack_depot_disable) {
                pr_info("Stack Depot is disabled\n");
                stack_table = NULL;
        }
        return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);
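
/*
 * Usage sketch (editor's note): booting with "stack_depot_disable=on" (or
 * any other value kstrtobool() treats as true, such as "1" or "y") skips
 * the hashtable allocation below and makes every later stack_depot_save()
 * call return 0.
 */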

int __init stack_depot_init(void)
{
        if (!stack_depot_disable) {
                size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
                int i;

                stack_table = memblock_alloc(size, size);
                for (i = 0; i < STACK_HASH_SIZE; i++)
                        stack_table[i] = NULL;
        }
        return 0;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
        return jhash2((u32 *)entries,
                      array_size(size, sizeof(*entries)) / sizeof(u32),
                      STACK_HASH_SEED);
}
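
/*
 * Editor's note: jhash2() hashes an array of u32 words, so the length
 * passed above is the byte size of the entries converted to 32-bit words;
 * on a 64-bit kernel, for instance, each unsigned long frame contributes
 * two words.
 */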

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
                        unsigned int n)
{
        for ( ; n-- ; u1++, u2++) {
                if (*u1 != *u2)
                        return 1;
        }
        return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
                                              unsigned long *entries, int size,
                                              u32 hash)
{
        struct stack_record *found;

        for (found = bucket; found; found = found->next) {
                if (found->hash == hash &&
                    found->size == size &&
                    !stackdepot_memcmp(entries, found->entries, size))
                        return found;
        }
        return NULL;
}

/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:              Stack depot handle which was returned from
 *                      stack_depot_save().
 *
 */
void stack_depot_print(depot_stack_handle_t stack)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        if (nr_entries > 0)
                stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:             Stack depot handle which was returned from
 *                      stack_depot_save().
 * @entries:            Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
{
        union handle_parts parts = { .handle = handle };
        void *slab;
        size_t offset = parts.offset << STACK_ALLOC_ALIGN;
        struct stack_record *stack;

        *entries = NULL;
        if (!handle)
                return 0;

        if (parts.slabindex > depot_index) {
                WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
                        parts.slabindex, depot_index, handle);
                return 0;
        }
        slab = stack_slabs[parts.slabindex];
        if (!slab)
                return 0;
        stack = slab + offset;

        *entries = stack->entries;
        return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:            Pointer to storage array
 * @nr_entries:         Size of the storage array
 * @alloc_flags:        Allocation gfp flags
 * @can_alloc:          Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
 * %true, is allowed to replenish the stack slab pool in case no space is left
 * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
 * any allocations and will fail if no space is left to store the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
                                        unsigned int nr_entries,
                                        gfp_t alloc_flags, bool can_alloc)
{
        struct stack_record *found = NULL, **bucket;
        depot_stack_handle_t retval = 0;
        struct page *page = NULL;
        void *prealloc = NULL;
        unsigned long flags;
        u32 hash;

        /*
         * If this stack trace is from an interrupt, including anything before
         * interrupt entry usually leads to unbounded stackdepot growth.
         *
         * Because use of filter_irq_stacks() is a requirement to ensure
         * stackdepot can efficiently deduplicate interrupt stacks, always
         * filter_irq_stacks() to simplify all callers' use of stackdepot.
         */
        nr_entries = filter_irq_stacks(entries, nr_entries);

        if (unlikely(nr_entries == 0) || stack_depot_disable)
                goto fast_exit;

        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & STACK_HASH_MASK];

        /*
         * Fast path: look the stack trace up without locking.
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
        found = find_stack(smp_load_acquire(bucket), entries,
                           nr_entries, hash);
        if (found)
                goto exit;

        /*
         * Check if the current or the next stack slab needs to be initialized.
         * If so, allocate the memory - we won't be able to do that under the
         * lock.
         *
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
         */
        if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
                 * contexts and I/O.
                 */
                alloc_flags &= ~GFP_ZONEMASK;
                alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
                alloc_flags |= __GFP_NOWARN;
                page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
                if (page)
                        prealloc = page_address(page);
        }

        raw_spin_lock_irqsave(&depot_lock, flags);

        found = find_stack(*bucket, entries, nr_entries, hash);
        if (!found) {
                struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

                if (new) {
                        new->next = *bucket;
                        /*
                         * This smp_store_release() pairs with
                         * smp_load_acquire() from |bucket| above.
                         */
                        smp_store_release(bucket, new);
                        found = new;
                }
        } else if (prealloc) {
                /*
                 * We didn't need to store this stack trace, but let's keep
                 * the preallocated memory for the future.
                 */
                WARN_ON(!init_stack_slab(&prealloc));
        }

        raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
        if (prealloc) {
                /* Nobody used this memory, ok to free it. */
                free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
        }
        if (found)
                retval = found->handle.handle;
fast_exit:
        return retval;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
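
/*
 * Usage sketch (editor's note, no such caller exists in this file): from a
 * context where even GFP_NOWAIT allocations are forbidden, e.g. under a
 * raw spinlock, a caller passes can_alloc = false and must tolerate a
 * return value of 0 when the current slab has no room left:
 *
 *        handle = __stack_depot_save(entries, nr_entries, 0, false);
 *        if (!handle)
 *                pr_debug("trace not stored, continuing without it\n");
 */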

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:            Pointer to storage array
 * @nr_entries:         Size of the storage array
 * @alloc_flags:        Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
                                      unsigned int nr_entries,
                                      gfp_t alloc_flags)
{
        return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
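
/*
 * Usage sketch (editor's note): a typical caller collects the trace with
 * stack_trace_save() and then deduplicates it here; the array size of 64
 * below is an arbitrary example value.
 *
 *        unsigned long entries[64];
 *        unsigned int nr_entries;
 *        depot_stack_handle_t handle;
 *
 *        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *        handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */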