/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_VMCS_H
#define __KVM_X86_VMX_VMCS_H

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/nospec.h>

#include <asm/kvm.h>
#include <asm/vmx.h>

#include "capabilities.h"

struct vmcs_hdr {
	u32 revision_id:31;	/* VMCS revision identifier */
	u32 shadow_vmcs:1;	/* set if this is a shadow VMCS */
};

struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort;		/* VMX-abort indicator */
	char data[];		/* implementation-specific VMCS data */
};

/* The VMCS made current (via VMPTRLD) on this CPU, if any. */
DECLARE_PER_CPU(struct vmcs *, current_vmcs);

/*
 * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
 * and whose values change infrequently, but are not constant.  I.e. this is
 * used as a write-through cache of the corresponding VMCS fields.
 */
struct vmcs_host_state {
	unsigned long cr3;	/* May not match real cr3 */
	unsigned long cr4;	/* May not match real cr4 */
	unsigned long gs_base;
	unsigned long fs_base;
	unsigned long rsp;

	u16           fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
	u16           ds_sel, es_sel;
#endif
};
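
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * KVM): callers keep vmcs_host_state coherent by writing through it.  The
 * cached value is checked first so the VMWRITE is skipped when nothing has
 * changed; on a miss both the VMCS field and the cache are updated together.
 * vmcs_writel() is assumed to be the VMWRITE accessor from vmx_ops.h and
 * HOST_CR3 the field encoding from <asm/vmx.h>.
 */
static inline void example_update_host_cr3(struct vmcs_host_state *host_state,
					   unsigned long cr3)
{
	if (cr3 != host_state->cr3) {
		vmcs_writel(HOST_CR3, cr3);	/* write through to the VMCS... */
		host_state->cr3 = cr3;		/* ...and refresh the cache */
	}
}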

/*
 * Shadow copies of the VMCS control fields.  Reads are served from the
 * shadow instead of a costly VMREAD, and a VMWRITE is issued only when the
 * shadowed value actually changes.
 */
struct vmcs_controls_shadow {
	u32 vm_entry;
	u32 vm_exit;
	u32 pin;
	u32 exec;
	u32 secondary_exec;
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	struct vmcs *shadow_vmcs;
	int cpu;
	bool launched;
	bool nmi_known_unmasked;
	bool hv_timer_soft_disabled;
	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	unsigned long *msr_bitmap;
	struct list_head loaded_vmcss_on_cpu_link;
	struct vmcs_host_state host_state;
	struct vmcs_controls_shadow controls_shadow;
};
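
/*
 * Illustrative sketch only (hypothetical helper; the real work is done in
 * vmx.c): when a CPU goes down, the per-CPU list that
 * loaded_vmcss_on_cpu_link chains into is walked and every VMCS still loaded
 * there is cleared so no stale state remains on that CPU.  vmcs_clear() is
 * assumed to be a wrapper around the VMCLEAR instruction.
 */
static inline void example_clear_vmcss_on_cpu(struct list_head *vmcss_on_cpu)
{
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, vmcss_on_cpu, loaded_vmcss_on_cpu_link) {
		list_del(&v->loaded_vmcss_on_cpu_link);
		vmcs_clear(v->vmcs);	/* assumed VMCLEAR wrapper */
		v->cpu = -1;		/* no longer loaded on any CPU */
		v->launched = false;
	}
}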

static inline bool is_intr_type(u32 intr_info, u32 type)
{
	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;

	return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
}

static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
{
	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
			 INTR_INFO_VECTOR_MASK;

	return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
}

static inline bool is_exception_n(u32 intr_info, u8 vector)
{
	return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
}

static inline bool is_debug(u32 intr_info)
{
	return is_exception_n(intr_info, DB_VECTOR);
}

static inline bool is_breakpoint(u32 intr_info)
{
	return is_exception_n(intr_info, BP_VECTOR);
}

static inline bool is_double_fault(u32 intr_info)
{
	return is_exception_n(intr_info, DF_VECTOR);
}

static inline bool is_page_fault(u32 intr_info)
{
	return is_exception_n(intr_info, PF_VECTOR);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return is_exception_n(intr_info, UD_VECTOR);
}

static inline bool is_gp_fault(u32 intr_info)
{
	return is_exception_n(intr_info, GP_VECTOR);
}

static inline bool is_alignment_check(u32 intr_info)
{
	return is_exception_n(intr_info, AC_VECTOR);
}

static inline bool is_machine_check(u32 intr_info)
{
	return is_exception_n(intr_info, MC_VECTOR);
}

/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
}

static inline bool is_nmi(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
}

static inline bool is_external_intr(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
}

static inline bool is_exception_with_error_code(u32 intr_info)
{
	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK;

	return (intr_info & mask) == mask;
}
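
/*
 * Illustrative sketch only (hypothetical helper, not part of KVM): how the
 * predicates above are typically consumed.  intr_info is assumed to be the
 * VM_EXIT_INTR_INFO value captured on an exception/NMI VM-exit.
 */
static inline const char *example_classify_exit_event(u32 intr_info)
{
	if (is_nmi(intr_info))
		return "nmi";
	if (is_machine_check(intr_info))
		return "machine check";
	if (is_page_fault(intr_info))
		return "page fault";
	if (is_exception_with_error_code(intr_info))
		return "other exception with an error code";
	return "other event";
}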

enum vmcs_field_width {
	VMCS_FIELD_WIDTH_U16 = 0,
	VMCS_FIELD_WIDTH_U64 = 1,
	VMCS_FIELD_WIDTH_U32 = 2,
	VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};

static inline int vmcs_field_width(unsigned long field)
{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_WIDTH_U32;
	return (field >> 13) & 0x3;
}

static inline int vmcs_field_readonly(unsigned long field)
{
	return (((field >> 10) & 0x3) == 1);
}
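
/*
 * Illustrative sketch only (hypothetical helper): in the architectural VMCS
 * field encoding that the two helpers above decode, bit 0 selects the high
 * half of a 64-bit field, bits 11:10 give the field type (1 == read-only
 * VM-exit information) and bits 14:13 give the width.  For example,
 * VM_EXIT_REASON (0x4402 in <asm/vmx.h>) decodes as a read-only 32-bit field.
 */
static inline bool example_is_readonly_u32_field(unsigned long field)
{
	return vmcs_field_readonly(field) &&
	       vmcs_field_width(field) == VMCS_FIELD_WIDTH_U32;
}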

#endif /* __KVM_X86_VMX_VMCS_H */