// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#undef pr_fmt
#define pr_fmt(fmt)	"tdx: " fmt

#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>

/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1

/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))

#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100

#define TDREPORT_SUBTYPE_0	0

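/* Pages currently shared with the VMM; cross-checked against the direct map at kexec */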
static atomic_long_t nr_shared;

/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}

#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_module_args args = {
		.r10 = nr,
		.r11 = p1,
		.r12 = p2,
		.r13 = p3,
		.r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif

/*
 * Used for TDX guests to make calls directly to the TD module. This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel cannot survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}

/* Read TD-scoped metadata */
static inline u64 tdg_vm_rd(u64 field, u64 *value)
{
	struct tdx_module_args args = {
		.rdx = field,
	};
	u64 ret;

	ret = __tdcall_ret(TDG_VM_RD, &args);
	*value = args.r8;

	return ret;
}

/* Write TD-scoped metadata */
static inline u64 tdg_vm_wr(u64 field, u64 value, u64 mask)
{
	struct tdx_module_args args = {
		.rdx = field,
		.r8 = value,
		.r9 = mask,
	};

	return __tdcall(TDG_VM_WR, &args);
}

/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);

/**
 * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
 *                         hypercall.
 * @buf: Address of the directly mapped shared kernel buffer which
 *       contains TDREPORT. The same buffer will be used by VMM to
 *       store the generated TD Quote output.
 * @size: size of the tdquote buffer (4KB-aligned).
 *
 * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
 * v1.0 specification for more information on GetQuote hypercall.
 * It is used in the TDX guest driver module to get the TD Quote.
 *
 * Return 0 on success or error code on failure.
 */
u64 tdx_hcall_get_quote(u8 *buf, size_t size)
{
	/* Since buf is a shared memory, set the shared (decrypted) bits */
	return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
}
EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);

static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strtomem_pad(message.str, msg, '\0');

	args.r8 = message.r8;
	args.r9 = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}

/*
 * The kernel cannot handle #VEs when accessing normal kernel memory. Ensure
 * that no #VE will be delivered for accesses to TD-private memory.
 *
 * TDX 1.0 does not allow the guest to disable SEPT #VE on its own. The VMM
 * controls whether the guest will receive such #VE with the TD attribute
 * ATTR_SEPT_VE_DISABLE.
 *
 * Newer TDX modules allow the guest to control if it wants to receive SEPT
 * violation #VEs.
 *
 * Check if the feature is available and disable SEPT #VE if possible.
 *
 * If the TD is allowed to disable/enable SEPT #VEs, the ATTR_SEPT_VE_DISABLE
 * attribute is no longer reliable. It reflects the initial state of the
 * control for the TD, but it will not be updated if someone (e.g. bootloader)
 * changes it before the kernel starts. The kernel must check the TDCS_TD_CTLS
 * bit to determine if SEPT #VEs are enabled or disabled.
 */
static void disable_sept_ve(u64 td_attr)
{
	const char *msg = "TD misconfiguration: SEPT #VE has to be disabled";
	bool debug = td_attr & ATTR_DEBUG;
	u64 config, controls;

	/* Is this TD allowed to disable SEPT #VE? */
	tdg_vm_rd(TDCS_CONFIG_FLAGS, &config);
	if (!(config & TDCS_CONFIG_FLEXIBLE_PENDING_VE)) {
		/* No SEPT #VE controls for the guest: check the attribute */
		if (td_attr & ATTR_SEPT_VE_DISABLE)
			return;

		/* Relax SEPT_VE_DISABLE check for debug TD for backtraces */
		if (debug)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
		return;
	}

	/* Check if SEPT #VE has been disabled before us */
	tdg_vm_rd(TDCS_TD_CTLS, &controls);
	if (controls & TD_CTLS_PENDING_VE_DISABLE)
		return;

	/* Keep #VEs enabled for splats in debugging environments */
	if (debug)
		return;

	/* Disable SEPT #VEs */
	tdg_vm_wr(TDCS_TD_CTLS, TD_CTLS_PENDING_VE_DISABLE,
		  TD_CTLS_PENDING_VE_DISABLE);
}

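/*
 * Configure the TD: read the TD execution environment via TDG.VP.INFO,
 * derive the shared-bit mask from the reported GPA width and apply the
 * #VE related configuration (NOTIFY_ENABLES, SEPT #VE).
 */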
static void tdx_setup(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * cannot meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);
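	/* e.g. a 52-bit GPA width puts the shared bit at bit 51: cc_mask == BIT_ULL(51) */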

	td_attr = args.rdx;

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdg_vm_wr(TDCS_NOTIFY_ENABLES, 0, -1ULL);

	disable_sept_ve(td_attr);
}

/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes the cases in which the
 * instruction length field can be used, in the section "Information for VM
 * Exits Due to Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VEs due to instruction execution */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->insn_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}

static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static void __cpuidle tdx_safe_halt(void)
{
	tdx_halt();
	/*
	 * "__cpuidle" section doesn't support instrumentation, so stick
	 * with raw_* variant that avoids tracing hooks.
	 */
	raw_local_irq_enable();
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}

static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}

static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. This matches CPU
	 * behaviour for unsupported leaves.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}

static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall(&args))
		return false;

	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}

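/*
 * Emulate an MMIO access that triggered an EPT-violation #VE: decode the
 * faulting kernel instruction and forward the access to the VMM via
 * hypercall. Returns the instruction length on success or -errno on failure.
 */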
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	if (!fault_in_kernel_space(ve->gla)) {
		WARN_ONCE(1, "Access to userspace address is not supported");
		return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}

static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}

/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the instruction length on success or -errno on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}

/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}

void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual = args.rdx;
	ve->gla = args.r8;
	ve->gpa = args.r9;
	ve->instr_len = lower_32_bits(args.r10);
	ve->instr_info = upper_32_bits(args.r10);
}

/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}

/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}

static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}

/*
 * Notify the VMM about page mapping conversion. More info about ABI
 * can be found in TDX Guest-Host-Communication Interface (GHCI),
 * section "TDG.VP.VMCALL<MapGPA>".
 */
static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
{
	/* Retrying the hypercall a second time should succeed; use 3 just in case */
	const int max_retries_per_page = 3;
	int retry_count = 0;

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end |= cc_mkdec(0);
	}

	while (retry_count < max_retries_per_page) {
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,
			.r11 = TDVMCALL_MAP_GPA,
			.r12 = start,
			.r13 = end - start };

		u64 map_fail_paddr;
		u64 ret = __tdx_hypercall(&args);

		if (ret != TDVMCALL_STATUS_RETRY)
			return !ret;
		/*
		 * The guest must retry the operation for the pages in the
		 * region starting at the GPA specified in R11. R11 comes
		 * from the untrusted VMM. Sanity check it.
		 */
		map_fail_paddr = args.r11;
		if (map_fail_paddr < start || map_fail_paddr >= end)
			return false;

		/* "Consume" a retry without forward progress */
		if (map_fail_paddr == start) {
			retry_count++;
			continue;
		}

		start = map_fail_paddr;
		retry_count = 0;
	}

	return false;
}

/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}

static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	return 0;
}

static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

	if (enc)
		atomic_long_sub(numpages, &nr_shared);
	else
		atomic_long_add(numpages, &nr_shared);

	return 0;
}

/* Stop new private<->shared conversions */
static void tdx_kexec_begin(void)
{
	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/*
	 * Crash kernel reaches here with interrupts disabled: can't wait for
	 * conversions to finish.
	 *
	 * If a race happened, just report it and proceed.
	 */
	if (!set_memory_enc_stop_conversion())
		pr_warn("Failed to stop shared<->private conversions\n");
}

/* Walk direct mapping and convert all shared memory back to private */
static void tdx_kexec_finish(void)
{
	unsigned long addr, end;
	long found = 0, shared;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	lockdep_assert_irqs_disabled();

	addr = PAGE_OFFSET;
	end = PAGE_OFFSET + get_max_mapped();

	while (addr < end) {
		unsigned long size;
		unsigned int level;
		pte_t *pte;

		pte = lookup_address(addr, &level);
		size = page_level_size(level);

		if (pte && pte_decrypted(*pte)) {
			int pages = size / PAGE_SIZE;

			/*
			 * Touching memory with shared bit set triggers implicit
			 * conversion to shared.
			 *
			 * Make sure nobody touches the shared range from
			 * now on.
			 */
			set_pte(pte, __pte(0));

			/*
			 * Memory encryption state persists across kexec.
			 * If tdx_enc_status_changed() fails in the first
			 * kernel, it leaves memory in an unknown state.
			 *
			 * If that memory remains shared, accessing it in the
			 * *next* kernel through a private mapping will result
			 * in an unrecoverable guest shutdown.
			 *
			 * The kdump kernel boot is not impacted as it uses
			 * a pre-reserved memory range that is always private.
			 * However, gathering crash information could lead to
			 * a crash if it accesses unconverted memory through
			 * a private mapping which is possible when accessing
			 * that memory through /proc/vmcore, for example.
			 *
			 * In all cases, print error info in order to leave
			 * enough bread crumbs for debugging.
			 */
			if (!tdx_enc_status_changed(addr, pages, true)) {
				pr_err("Failed to unshare range %#lx-%#lx\n",
				       addr, addr + size);
			}

			found += pages;
		}

		addr += size;
	}

	__flush_tlb_all();

	shared = atomic_long_read(&nr_shared);
	if (shared != found) {
		pr_err("shared page accounting is off\n");
		pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found);
	}
}

void __init tdx_early_init(void)
{
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	/* TSC is the only reliable clock in TDX guest */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	cc_vendor = CC_VENDOR_INTEL;

	/* Configure the TD */
	tdx_setup(&cc_mask);

	cc_set_mask(cc_mask);

	/*
	 * All bits above the GPA width are reserved and the kernel treats the
	 * shared bit as a flag, not as part of the physical address.
	 *
	 * Adjust physical_mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

	x86_platform.guest.enc_kexec_begin = tdx_kexec_begin;
	x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;

	/*
	 * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
	 * will enable interrupts before HLT TDCALL invocation if executed
	 * in STI-shadow, possibly resulting in missed wakeup events.
	 *
	 * Modify all possible HLT execution paths to use TDX specific routines
	 * that directly execute TDCALL and toggle the interrupt state as
	 * needed after TDCALL completion. This also reduces HLT related #VEs
	 * in addition to having a reliable halt logic execution.
	 */
	pv_ops.irq.safe_halt = tdx_safe_halt;
	pv_ops.irq.halt = tdx_halt;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}