1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * AMD Encrypted Register State Support
4 *
5 * Author: Joerg Roedel <jroedel@suse.de>
6 */
7
8 /*
9 * misc.h needs to be first because it knows how to include the other kernel
10 * headers in the pre-decompression code in a way that does not break
11 * compilation.
12 */
13 #include "misc.h"
14
15 #include <asm/pgtable_types.h>
16 #include <asm/sev-es.h>
17 #include <asm/trapnr.h>
18 #include <asm/trap_pf.h>
19 #include <asm/msr-index.h>
20 #include <asm/fpu/xcr.h>
21 #include <asm/ptrace.h>
22 #include <asm/svm.h>
23
24 #include "error.h"
25
/*
 * GHCB used during the pre-decompression boot stage. Must be page-aligned
 * and page-sized so the whole page can be remapped decrypted (shared with
 * the hypervisor) by early_setup_sev_es().
 */
struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
/* Points at boot_ghcb_page once early_setup_sev_es() succeeded, else NULL */
struct ghcb *boot_ghcb;
28
29 /*
30 * Copy a version of this function here - insn-eval.c can't be used in
31 * pre-decompression code.
32 */
insn_has_rep_prefix(struct insn * insn)33 static bool insn_has_rep_prefix(struct insn *insn)
34 {
35 insn_byte_t p;
36 int i;
37
38 insn_get_prefixes(insn);
39
40 for_each_insn_prefix(insn, i, p) {
41 if (p == 0xf2 || p == 0xf3)
42 return true;
43 }
44
45 return false;
46 }
47
48 /*
49 * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
50 * doesn't use segments.
51 */
static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
	/* Segment bases are always zero in 64-bit early boot code */
	return 0UL;
}
56
sev_es_rd_ghcb_msr(void)57 static inline u64 sev_es_rd_ghcb_msr(void)
58 {
59 unsigned long low, high;
60
61 asm volatile("rdmsr" : "=a" (low), "=d" (high) :
62 "c" (MSR_AMD64_SEV_ES_GHCB));
63
64 return ((high << 32) | low);
65 }
66
static inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 lo = val & 0xffffffffUL;
	u32 hi = val >> 32;

	/* WRMSR takes the value split across EDX:EAX */
	asm volatile("wrmsr"
		     : /* no outputs */
		     : "c" (MSR_AMD64_SEV_ES_GHCB), "a" (lo), "d" (hi)
		     : "memory");
}
77
vc_decode_insn(struct es_em_ctxt * ctxt)78 static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
79 {
80 char buffer[MAX_INSN_SIZE];
81 enum es_result ret;
82
83 memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
84
85 insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1);
86 insn_get_length(&ctxt->insn);
87
88 ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
89
90 return ret;
91 }
92
vc_write_mem(struct es_em_ctxt * ctxt,void * dst,char * buf,size_t size)93 static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
94 void *dst, char *buf, size_t size)
95 {
96 memcpy(dst, buf, size);
97
98 return ES_OK;
99 }
100
vc_read_mem(struct es_em_ctxt * ctxt,void * src,char * buf,size_t size)101 static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
102 void *src, char *buf, size_t size)
103 {
104 memcpy(buf, src, size);
105
106 return ES_OK;
107 }
108
109 #undef __init
110 #undef __pa
111 #define __init
112 #define __pa(x) ((unsigned long)(x))
113
114 #define __BOOT_COMPRESSED
115
116 /* Basic instruction decoding support needed */
117 #include "../../lib/inat.c"
118 #include "../../lib/insn.c"
119
120 /* Include code for early handlers */
121 #include "../../kernel/sev-es-shared.c"
122
early_setup_sev_es(void)123 static bool early_setup_sev_es(void)
124 {
125 if (!sev_es_negotiate_protocol())
126 sev_es_terminate(GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED);
127
128 if (set_page_decrypted((unsigned long)&boot_ghcb_page))
129 return false;
130
131 /* Page is now mapped decrypted, clear it */
132 memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));
133
134 boot_ghcb = &boot_ghcb_page;
135
136 /* Initialize lookup tables for the instruction decoder */
137 inat_init_tables();
138
139 return true;
140 }
141
sev_es_shutdown_ghcb(void)142 void sev_es_shutdown_ghcb(void)
143 {
144 if (!boot_ghcb)
145 return;
146
147 if (!sev_es_check_cpu_features())
148 error("SEV-ES CPU Features missing.");
149
150 /*
151 * GHCB Page must be flushed from the cache and mapped encrypted again.
152 * Otherwise the running kernel will see strange cache effects when
153 * trying to use that page.
154 */
155 if (set_page_encrypted((unsigned long)&boot_ghcb_page))
156 error("Can't map GHCB page encrypted");
157
158 /*
159 * GHCB page is mapped encrypted again and flushed from the cache.
160 * Mark it non-present now to catch bugs when #VC exceptions trigger
161 * after this point.
162 */
163 if (set_page_non_present((unsigned long)&boot_ghcb_page))
164 error("Can't unmap GHCB page");
165 }
166
sev_es_check_ghcb_fault(unsigned long address)167 bool sev_es_check_ghcb_fault(unsigned long address)
168 {
169 /* Check whether the fault was on the GHCB page */
170 return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
171 }
172
do_boot_stage2_vc(struct pt_regs * regs,unsigned long exit_code)173 void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
174 {
175 struct es_em_ctxt ctxt;
176 enum es_result result;
177
178 if (!boot_ghcb && !early_setup_sev_es())
179 sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
180
181 vc_ghcb_invalidate(boot_ghcb);
182 result = vc_init_em_ctxt(&ctxt, regs, exit_code);
183 if (result != ES_OK)
184 goto finish;
185
186 switch (exit_code) {
187 case SVM_EXIT_RDTSC:
188 case SVM_EXIT_RDTSCP:
189 result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
190 break;
191 case SVM_EXIT_IOIO:
192 result = vc_handle_ioio(boot_ghcb, &ctxt);
193 break;
194 case SVM_EXIT_CPUID:
195 result = vc_handle_cpuid(boot_ghcb, &ctxt);
196 break;
197 default:
198 result = ES_UNSUPPORTED;
199 break;
200 }
201
202 finish:
203 if (result == ES_OK) {
204 vc_finish_insn(&ctxt);
205 } else if (result != ES_RETRY) {
206 /*
207 * For now, just halt the machine. That makes debugging easier,
208 * later we just call sev_es_terminate() here.
209 */
210 while (true)
211 asm volatile("hlt\n");
212 }
213 }
214