/*
 * Provide a default dump_stack() function for architectures
 * which don't implement their own.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/atomic.h>

static void __dump_stack(void)
{
	dump_stack_print_info(KERN_DEFAULT);
	show_stack(NULL, NULL);
}

/**
 * dump_stack - dump the current task information and its stack trace
 *
 * Architectures can override this implementation by implementing their own.
 */
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);

asmlinkage __visible void dump_stack(void)
{
	unsigned long flags;
	int was_locked;
	int old;
	int cpu;

	/*
	 * Permit this cpu to perform nested stack dumps while serialising
	 * against other CPUs.
	 */
retry:
	local_irq_save(flags);
	cpu = smp_processor_id();
	old = atomic_cmpxchg(&dump_lock, -1, cpu);
	if (old == -1) {
		was_locked = 0;
	} else if (old == cpu) {
		was_locked = 1;
	} else {
		local_irq_restore(flags);
		/*
		 * Wait for the lock to release before jumping to
		 * atomic_cmpxchg() in order to mitigate the thundering herd
		 * problem.
		 */
		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
		goto retry;
	}

	__dump_stack();

	if (!was_locked)
		atomic_set(&dump_lock, -1);

	local_irq_restore(flags);
}
#else
asmlinkage __visible void dump_stack(void)
{
	__dump_stack();
}
#endif
EXPORT_SYMBOL(dump_stack);
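
/*
 * The block below is not part of the kernel file above. It is a minimal
 * userspace sketch of the same serialisation idea, assuming C11 atomics
 * and POSIX threads: a single atomic "owner" slot starts at -1, a caller
 * claims it with a compare-and-swap, may re-enter while it already owns
 * it, and otherwise spins until the slot returns to -1 before retrying.
 * All names here (dump_owner, dump_once, worker) are hypothetical and
 * chosen only for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <sched.h>

static atomic_int dump_owner = ATOMIC_VAR_INIT(-1);

/* Serialise callers against each other, but allow re-entry by the owner. */
static void dump_once(int self)
{
	int expected;
	int was_locked;

retry:
	expected = -1;
	if (atomic_compare_exchange_strong(&dump_owner, &expected, self)) {
		was_locked = 0;		/* we claimed the slot */
	} else if (expected == self) {
		was_locked = 1;		/* nested call: we already own it */
	} else {
		/* Another caller owns the slot: wait for -1, then retry the CAS. */
		while (atomic_load(&dump_owner) != -1)
			sched_yield();
		goto retry;
	}

	printf("caller %d: dumping\n", self);

	if (!was_locked)
		atomic_store(&dump_owner, -1);
}

static void *worker(void *arg)
{
	dump_once((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}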