// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS,	/* Intercept vectors, TSC offset,
				   pause filter count */
	VMCB_PERM_MAP,		/* IOPM Base and MSRPM Base */
	VMCB_ASID,		/* ASID */
	VMCB_INTR,		/* int_ctl, int_vector */
	VMCB_NPT,		/* npt_en, nCR3, gPAT */
	VMCB_CR,		/* CR0, CR3, CR4, EFER */
	VMCB_DR,		/* DR6, DR7 */
	VMCB_DT,		/* GDT, IDT */
	VMCB_SEG,		/* CS, DS, SS, ES, CPL */
	VMCB_CR2,		/* CR2 only */
	VMCB_LBR,		/* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,		/* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
				 * AVIC PHYSICAL_TABLE pointer,
				 * AVIC LOGICAL_TABLE pointer
				 */
	VMCB_SW = 31,		/* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

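/*
 * Per-VMCB bookkeeping: the mapped VMCB, its physical address (passed to
 * VMRUN), the last physical CPU it ran on, and the ASID generation that
 * was current at that time (used to decide when a fresh ASID is needed).
 */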
struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

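/*
 * Nested SVM naming convention: vmcb01 is the VMCB used to run L1,
 * vmcb12 is the VMCB that L1 builds in guest memory to describe L2,
 * and vmcb02 is the real VMCB that KVM constructs from vmcb12 and
 * actually hands to hardware to run L2.
 */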
struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

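/*
 * VMCB clean-bit helpers.  A set bit in control.clean tells the CPU that
 * the corresponding VMCB field group is unchanged since the last VMRUN
 * and may be used from its internal cache; clearing a bit forces the
 * field group to be re-read from the VMCB.
 */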
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

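/*
 * Usage sketch (illustrative, not from this file): code that modifies a
 * cached VMCB field must clear the matching clean bit so hardware
 * reloads it on the next VMRUN, e.g.:
 *
 *	svm->vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */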
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

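/*
 * Debug-register intercepts are set on vmcb01 and then folded into the
 * active VMCB by recalc_intercepts().  For an SEV-ES guest the hardware
 * swaps the debug registers itself, so only DR7 accesses are (and must
 * remain) intercepted.
 */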
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

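/*
 * Exception intercepts occupy a single 32-bit word of the intercept
 * vector, hence the "bit < 32" sanity check; "bit" is the exception
 * vector number, offset by INTERCEPT_EXCEPTION_OFFSET.
 */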
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

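/*
 * GIF (Global Interrupt Flag) handling: with the virtual GIF feature
 * enabled in int_ctl, hardware tracks GIF in the VMCB's V_GIF bit;
 * otherwise KVM emulates it in software via HF_GIF_MASK in
 * vcpu->arch.hflags.
 */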
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
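/* Sentinel returned by svm_msrpm_offset() for MSRs with no MSRPM slot. */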
#define MSR_INVALID	0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

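/*
 * True when L2 is active and L1 has set V_INTR_MASKING in its VMCB,
 * i.e. L1 is virtualizing L2's TPR itself rather than letting KVM
 * reflect it into the hardware CR8.
 */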
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

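/* Synthesize a nested #VMEXIT with the given exit code and no exit info. */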
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK				0xFFFFFFFFFF000ULL

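/*
 * Reads the IS_RUNNING bit of this vCPU's AVIC physical ID table entry;
 * READ_ONCE() because the entry is updated from other CPUs on vCPU
 * load/put and is also consumed by hardware.
 */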
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

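/* Only version 1 of the GHCB protocol is currently supported. */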
#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif