/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>
#include <asm/insn.h>

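/*
 * GHCB MSR protocol: request/response codes exchanged through the GHCB MSR
 * before a full GHCB page is available. The code lives in the low 12 bits,
 * the remaining bits carry request-specific data.
 */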
#define GHCB_SEV_INFO		0x001UL
#define GHCB_SEV_INFO_REQ	0x002UL
#define		GHCB_INFO(v)		((v) & 0xfffUL)
#define		GHCB_PROTO_MAX(v)	(((v) >> 48) & 0xffffUL)
#define		GHCB_PROTO_MIN(v)	(((v) >> 32) & 0xffffUL)
#define		GHCB_PROTO_OUR		0x0001UL
#define GHCB_SEV_CPUID_REQ	0x004UL
#define		GHCB_CPUID_REQ_EAX	0
#define		GHCB_CPUID_REQ_EBX	1
#define		GHCB_CPUID_REQ_ECX	2
#define		GHCB_CPUID_REQ_EDX	3
#define		GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
					(((unsigned long)reg & 3) << 30) | \
					(((unsigned long)fn) << 32))

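/* Protocol version and usage values written into the GHCB page before a VMGEXIT */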
#define GHCB_PROTOCOL_MAX	0x0001UL
#define GHCB_DEFAULT_USAGE	0x0000UL

#define GHCB_SEV_CPUID_RESP	0x005UL
#define GHCB_SEV_TERMINATE	0x100UL
#define		GHCB_SEV_TERMINATE_REASON(reason_set, reason_val)	\
			(((((u64)reason_set) &  0x7) << 12) |		\
			 ((((u64)reason_val) & 0xff) << 16))
#define		GHCB_SEV_ES_REASON_GENERAL_REQUEST	0
#define		GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED	1

#define GHCB_SEV_GHCB_RESP_CODE(v)	((v) & 0xfff)
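/* VMGEXIT is encoded as a REP-prefixed VMMCALL; it hands control to the hypervisor */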
#define VMGEXIT()		{ asm volatile("rep; vmmcall\n\r"); }

enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};

struct es_fault_info {
	unsigned long vector;
	unsigned long error_code;
	unsigned long cr2;
};

struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;
	struct insn insn;
	struct es_fault_info fi;
};

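/* Early #VC handler; only the GHCB MSR protocol is available at this point */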
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

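/* Return the lowest @bits bits of @val */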
static inline u64 lower_bits(u64 val, unsigned int bits)
{
	u64 mask = (1ULL << bits) - 1;

	return (val & mask);
}

struct real_mode_header;
enum stack_type;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

#ifdef CONFIG_AMD_MEM_ENCRYPT
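/* True when the kernel runs as an SEV-ES guest; gates the helpers below */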
extern struct static_key_false sev_es_enable_key;
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
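/*
 * Adjust the #VC IST stack on NMI entry/exit so that a #VC exception raised
 * from NMI context cannot overwrite the stack of an interrupted #VC handler.
 */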
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_enter(regs);
}
static __always_inline void sev_es_ist_exit(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_exit();
}
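/* Write the real-mode trampoline address into the SEV-ES AP jump table for AP bringup */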
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
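/* Tell the hypervisor that NMI handling is complete so the next NMI can be injected */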
extern void __sev_es_nmi_complete(void);
static __always_inline void sev_es_nmi_complete(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_nmi_complete();
}
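/* Map the GHCB pages into the EFI page table so #VC can be handled during runtime services */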
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
#endif

#endif