Excerpts from the Linux kernel's stack depot (lib/stackdepot.c), collected from a search for "stack".
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 */
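In practice that means saving the same trace twice returns the same compact handle. A minimal usage sketch (stack_entries and nr_entries are assumed caller-provided; the save/fetch API itself is the one this file exports):

	depot_stack_handle_t h1, h2;
	unsigned long *entries;
	unsigned int nr;

	h1 = stack_depot_save(stack_entries, nr_entries, GFP_KERNEL);
	h2 = stack_depot_save(stack_entries, nr_entries, GFP_KERNEL);
	WARN_ON(h1 && h1 != h2);	/* identical traces deduplicate to one handle */

	nr = stack_depot_fetch(h1, &entries);	/* read the stored trace back */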
/* Compact structure that stores a reference to a stack. */
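The union behind this comment, in the kernel version these excerpts appear to come from, packs the handle's bitfields like so (the same field names show up in depot_alloc_stack() below):

union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 pool_index	: DEPOT_POOL_INDEX_BITS;
		u32 offset	: DEPOT_OFFSET_BITS;
		u32 valid	: DEPOT_VALID_BITS;
		u32 extra	: STACK_DEPOT_EXTRA_BITS;
	};
};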
/* Hash table of pointers to stored stack traces. */
/* Array of memory regions that store stack traces. */
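The declarations these two comments annotate, plus the cursor into the current pool, are roughly the following in that version (a sketch, not verbatim):

static struct stack_record **stack_table;

static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;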
/*
 * Stack depot tries to keep an extra pool allocated even before it runs out
 * of space in the currently used pool.
 */
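The preallocation is tracked by a flag; assuming the same version, simply:

/* Whether the next extra pool still needs to be allocated and initialized. */
static int next_pool_required = 1;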
From stack_depot_early_init():

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
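The check this comment explains is a one-line bump of the hash table order (macro and variable names assumed from that version):

	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;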
/* Uses preallocated memory to initialize a new stack depot pool. */
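A compressed sketch of what depot_init_pool() does with the preallocated page; the real function documents its smp_load_acquire()/smp_store_release() pairing at length:

static void depot_init_pool(void **prealloc)
{
	/* Nothing to do if the next pool is already set up or the cap is hit. */
	if (!smp_load_acquire(&next_pool_required))
		return;

	if (stack_pools[pool_index] == NULL) {
		/* Use the preallocated memory for the current pool. */
		stack_pools[pool_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* Otherwise stash it as the next pool, if the limit allows. */
		if (pool_index + 1 < DEPOT_MAX_POOLS) {
			stack_pools[pool_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/* Either way, no further preallocation is needed for now. */
		smp_store_release(&next_pool_required, 0);
	}
}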
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	/* Move on to the next pool if the current one has no space left. */
	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
		/* Bail out if we reached the pool limit. */
		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		pool_index++;
		pool_offset = 0;
	}

	/* Check if we have a pool to save the stack trace. */
	if (stack_pools[pool_index] == NULL)
		return NULL;

	/* Save the stack trace. */
	stack = stack_pools[pool_index] + pool_offset;
	stack->hash = hash;
	stack->size = size;
	stack->handle.pool_index = pool_index;
	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
	stack->handle.valid = 1;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	pool_offset += required_size;

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, required_size);

	return stack;
}
/* Calculates the hash for a stack. */
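In that version the hash is jhash2() over the entry array; a sketch, assuming the file-local STACK_HASH_SEED:

static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}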
/* Finds a stack in a bucket of the hash table. */
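Buckets are singly linked lists of stack_record; lookup compares hash, then size, then the entries themselves. A sketch, assuming stackdepot_memcmp() is the file's word-sized comparison helper:

static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}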
From __stack_depot_save():

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always do
	 * filter_irq_stacks() to simplify all callers' use of stack depot.
	 */
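The call the comment justifies is a single line:

	nr_entries = filter_irq_stacks(entries, nr_entries);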
	/* Fast path: look the stack trace up without locking. */
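What that fast path looks like, assuming the bucket and mask names of that version (the acquire load pairs with the release store that publishes new records):

	bucket = &stack_table[hash & stack_hash_mask];
	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
	if (found)
		goto exit;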
	/*
	 * Check if another stack pool needs to be initialized. If so, allocate
	 * the memory now - we won't be able to do that under the lock.
	 */
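A sketch of the preallocation that follows, with the gfp mask trimmed so the page can also be allocated from atomic context:

	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
		/* Zero out zone modifiers; keep only atomic/kernel flags. */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}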
	/*
	 * Stack depot already contains this stack trace, but let's
	 * keep the preallocated memory for the future.
	 */
	/* Stack depot didn't use this memory, free it. */
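The cleanup under that comment, on the common exit path:

	if (prealloc)
		free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);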
From stack_depot_fetch():

	struct stack_record *stack;
	...
	if (parts.pool_index > pool_index_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     parts.pool_index, pool_index_cached, handle);
		return 0;
	}
	...
	stack = pool + offset;

	*entries = stack->entries;
	return stack->size;
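For reference, the locals used above are decoded from the handle at the top of the function; roughly, in that version:

	union handle_parts parts = { .handle = handle };
	/* READ_ONCE pairs with the write that advances pool_index. */
	int pool_index_cached = READ_ONCE(pool_index);
	void *pool;	/* set to stack_pools[parts.pool_index] after the bounds check */
	size_t offset = parts.offset << DEPOT_STACK_ALIGN;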
From stack_depot_print():

void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
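Putting it together, a hypothetical caller (record_current_stack() and report_origin() are illustrative names, not from the file) captures, deduplicates, and later prints a trace like this:

static depot_stack_handle_t record_current_stack(gfp_t flags)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, flags);
}

/* Later, e.g. while printing an error report: */
static void report_origin(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_print(handle);
}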