// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so they are stored one after
 * another in contiguous memory allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

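/*
 * Worked example of the handle bit budget (a sketch assuming the common
 * PAGE_SHIFT == 12, i.e. 4 KiB pages; other page sizes shift the split):
 *
 *	STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4 = 10
 *		(each slab is 16 KiB; offsets are stored in 16-byte units,
 *		 so 2^10 units cover a whole slab)
 *	STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10 = 21
 *		(2^21 slabs would fit in the handle, but the slab array is
 *		 capped at STACK_ALLOC_SLABS_CAP = 8192 entries)
 */
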
/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

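/*
 * Hand the preallocated slab memory in *prealloc to the depot: it becomes
 * either the current slab (if none is set yet) or the next one. When the
 * memory is taken, *prealloc is set to NULL; otherwise the caller keeps
 * ownership and must free it.
 */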
static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

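/*
 * Size sketch (assuming CONFIG_STACK_HASH_ORDER is left at its default of
 * 20; the value is configurable): the table then has 2^20 buckets, i.e.
 * 8 MiB of stack_record pointers on a 64-bit kernel.
 */
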
static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);

int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		if (!stack_table) {
			pr_err("Stack Depot hash table allocation failed, disabling\n");
			stack_depot_disable = true;
			return -ENOMEM;
		}
		for (i = 0; i < STACK_HASH_SIZE; i++)
			stack_table[i] = NULL;
	}
	return 0;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp(), so that comparing
 * stacks on behalf of an instrumentation tool (e.g. KASAN) does not recurse
 * into instrumented code.
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash table */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle: Stack depot handle which was returned from
 *	stack_depot_save().
 * @entries: Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (!handle)
		return 0;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

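/*
 * Illustrative usage sketch (not part of this file's API): a typical
 * consumer turns a saved handle back into a printable trace like so,
 * using stack_trace_print() from <linux/stacktrace.h>:
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries;
 *
 *	nr_entries = stack_depot_fetch(handle, &entries);
 *	if (nr_entries)
 *		stack_trace_print(entries, nr_entries, 0);
 */
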
/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc
 * is %true, the function is allowed to replenish the stack slab pool in case
 * no space is left (allocating with GFP flags of @alloc_flags). If @can_alloc
 * is %false, it avoids any allocations and will fail if no space is left to
 * store the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stackdepot growth.
	 *
	 * Because use of filter_irq_stacks() is a requirement to ensure
	 * stackdepot can efficiently deduplicate interrupt stacks, always
	 * filter_irq_stacks() to simplify all callers' use of stackdepot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory now - we won't be able to
	 * do that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);

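/*
 * Illustrative sketch (hypothetical caller, my_lock is not a real lock in
 * this file): in contexts where even %GFP_ATOMIC/%GFP_NOWAIT allocations
 * are forbidden, e.g. under a raw spinlock, pass @can_alloc == false and
 * accept that the save may fail once the slab pool is exhausted:
 *
 *	depot_stack_handle_t handle;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	handle = __stack_depot_save(entries, nr_entries, 0, false);
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 *	if (!handle)
 *		pr_debug("stack depot full, trace dropped\n");
 */
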
/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);

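/*
 * End-to-end usage sketch (illustrative only; this helper and its name are
 * not part of the depot API): capture the current stack, deduplicate it in
 * the depot, and print it back. stack_trace_save() and stack_trace_print()
 * come from <linux/stacktrace.h>, included above. GFP_KERNEL assumes a
 * sleepable context.
 */
static __maybe_unused depot_stack_handle_t stack_depot_save_current(void)
{
	unsigned long entries[64];
	unsigned long *saved_entries;
	unsigned int nr_entries, nr_saved;
	depot_stack_handle_t handle;

	/* Capture up to 64 frames, skipping this helper itself. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Deduplicate: identical stacks yield the same 32-bit handle. */
	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
	if (!handle)
		return 0;

	/* Fetch returns a pointer into the depot's storage; no copy is made. */
	nr_saved = stack_depot_fetch(handle, &saved_entries);
	stack_trace_print(saved_entries, nr_saved, 0);

	return handle;
}
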
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);

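/*
 * Example (illustrative, my_handler is hypothetical): for a trace captured
 * inside a softirq handler, such as
 *
 *	[my_handler, tasklet_action, __do_softirq, irq_exit, ..., syscall path]
 *
 * __do_softirq lives in the softirqentry text section, so the trace is cut
 * right after it and only the first three frames are kept. Everything below
 * the interrupt entry point would otherwise make near-identical interrupt
 * stacks hash to different depot entries.
 */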