// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * Copyright (C) 2022 Google LLC
 */
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/memory.h>
#include <asm/percpu.h>

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);

/*
 * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Save the information needed by the host to unwind the non-protected
 * nVHE hypervisor stack in EL1.
 */
static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
	stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
	stacktrace_info->fp = fp;
	stacktrace_info->pc = pc;
}
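
#if 0
/*
 * Illustrative, hedged sketch (kept out of the build): how a host-side
 * consumer might use the per-CPU kvm_nvhe_stacktrace_info saved above. The
 * real host unwinder lives outside this file; the helper name
 * stacktrace_info_contains_fp() is hypothetical, and the hyp-to-kernel VA
 * translation the host would actually need is ignored here. The point is
 * only that the saved fp/pc seed the unwind, while the two
 * [base, base + size) ranges bound which frame pointers are valid.
 */
static bool stacktrace_info_contains_fp(struct kvm_nvhe_stacktrace_info *info,
					unsigned long fp)
{
	bool in_hyp_range = (fp >= info->stack_base &&
			     fp < info->stack_base + NVHE_STACK_SIZE);
	bool in_overflow_range = (fp >= info->overflow_stack_base &&
				  fp < info->overflow_stack_base + OVERFLOW_STACK_SIZE);

	return in_hyp_range || in_overflow_range;
}
#endif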

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#include <asm/stacktrace/nvhe.h>

DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);

/*
 * Check whether the [sp, sp + size) range lies on this CPU's hyp overflow
 * stack.
 */
static bool on_overflow_stack(unsigned long sp, unsigned long size,
			      struct stack_info *info)
{
	unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

/*
 * Check whether the [sp, sp + size) range lies on this CPU's hyp stack.
 */
static bool on_hyp_stack(unsigned long sp, unsigned long size,
			 struct stack_info *info)
{
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
	unsigned long high = params->stack_hyp_va;
	unsigned long low = high - NVHE_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

/*
 * A stack range is accessible to the unwinder if it lies on either the hyp
 * stack or the hyp overflow stack.
 */
static bool on_accessible_stack(const struct task_struct *tsk,
				unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	return (on_overflow_stack(sp, size, info) ||
		on_hyp_stack(sp, size, info));
}

/* Advance the unwind state to the previous (caller's) frame. */
static int unwind_next(struct unwind_state *state)
{
	struct stack_info info;

	return unwind_next_common(state, &info, on_accessible_stack, NULL);
}

/*
 * Walk the stack from the given unwind state, passing each PC to
 * consume_entry() until it returns false or no further frames can be
 * unwound.
 */
static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry,
			   void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
 *
 * @arg : index of the entry in the stacktrace buffer
 * @where : the program counter corresponding to the stack frame
 *
 * Save the return address of a stack frame to the shared stacktrace buffer.
 * The host can access this shared buffer from EL1 to dump the backtrace.
 */
static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long *stacktrace = this_cpu_ptr(pkvm_stacktrace);
	int *idx = (int *)arg;

	/*
	 * Need 2 free slots: 1 for the current entry and 1 for the
	 * delimiter.
	 */
	if (*idx > ARRAY_SIZE(pkvm_stacktrace) - 2)
		return false;

	stacktrace[*idx] = where;
	stacktrace[++*idx] = 0UL;

	return true;
}
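
#if 0
/*
 * Illustrative, hedged sketch (kept out of the build): the buffer filled
 * above is a sequence of return addresses terminated by a 0 delimiter; each
 * call overwrites the previous delimiter and writes a new one, so the
 * terminator is always present. A host-side reader could therefore walk the
 * shared buffer roughly as below. The function name
 * pkvm_dump_saved_backtrace() is hypothetical; the real consumer runs in the
 * host at EL1, not in this file.
 */
static void pkvm_dump_saved_backtrace(const unsigned long *stacktrace)
{
	while (*stacktrace)
		kvm_err(" [<%016lx>]\n", *stacktrace++);
}
#endif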

/*
 * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Save the unwound stack addresses to the shared stacktrace buffer.
 * The host can access this shared buffer from EL1 to dump the backtrace.
 */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
	struct unwind_state state;
	int idx = 0;

	kvm_nvhe_unwind_init(&state, fp, pc);

	unwind(&state, pkvm_save_backtrace_entry, &idx);
}
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Saves the information needed by the host to dump the nVHE hypervisor
 * backtrace.
 */
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
{
	if (is_protected_kvm_enabled())
		pkvm_save_backtrace(fp, pc);
	else
		hyp_prepare_backtrace(fp, pc);
}
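
#if 0
/*
 * Illustrative, hedged sketch (kept out of the build):
 * kvm_nvhe_prepare_backtrace() is intended to be called from the hypervisor
 * panic path with the current frame pointer and program counter, roughly as
 * below. The actual call site lives in the nVHE panic handler, not in this
 * file, so treat this as an example of intent rather than the real caller.
 */
static void example_hyp_panic_path(void)
{
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);
}
#endif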