• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2015 - ARM Ltd
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include <linux/compiler.h>
19 #include <linux/kvm_host.h>
20 
21 #include <asm/debug-monitors.h>
22 #include <asm/kvm_asm.h>
23 #include <asm/kvm_hyp.h>
24 
/* Build the dbg{b,w}{cr,vr}<n>_el1 register name by token pasting */
#define read_debug(r,n)		read_sysreg(r##n##_el1)
#define write_debug(v,r,n)	write_sysreg(v, r##n##_el1)
27 
/*
 * Save debug registers reg0..reg<nr> into ptr[].  The switch relies on
 * deliberate case fallthrough: entering at case 'nr' saves every
 * register from index nr down to 0.
 */
#define save_debug(ptr,reg,nr)						\
	switch (nr) {							\
	case 15:	ptr[15] = read_debug(reg, 15);			\
	case 14:	ptr[14] = read_debug(reg, 14);			\
	case 13:	ptr[13] = read_debug(reg, 13);			\
	case 12:	ptr[12] = read_debug(reg, 12);			\
	case 11:	ptr[11] = read_debug(reg, 11);			\
	case 10:	ptr[10] = read_debug(reg, 10);			\
	case 9:		ptr[9] = read_debug(reg, 9);			\
	case 8:		ptr[8] = read_debug(reg, 8);			\
	case 7:		ptr[7] = read_debug(reg, 7);			\
	case 6:		ptr[6] = read_debug(reg, 6);			\
	case 5:		ptr[5] = read_debug(reg, 5);			\
	case 4:		ptr[4] = read_debug(reg, 4);			\
	case 3:		ptr[3] = read_debug(reg, 3);			\
	case 2:		ptr[2] = read_debug(reg, 2);			\
	case 1:		ptr[1] = read_debug(reg, 1);			\
	default:	ptr[0] = read_debug(reg, 0);			\
	}
47 
/*
 * Restore debug registers reg0..reg<nr> from ptr[].  Like save_debug(),
 * this relies on deliberate case fallthrough to cover indices nr down
 * to 0.
 */
#define restore_debug(ptr,reg,nr)					\
	switch (nr) {							\
	case 15:	write_debug(ptr[15], reg, 15);			\
	case 14:	write_debug(ptr[14], reg, 14);			\
	case 13:	write_debug(ptr[13], reg, 13);			\
	case 12:	write_debug(ptr[12], reg, 12);			\
	case 11:	write_debug(ptr[11], reg, 11);			\
	case 10:	write_debug(ptr[10], reg, 10);			\
	case 9:		write_debug(ptr[9], reg, 9);			\
	case 8:		write_debug(ptr[8], reg, 8);			\
	case 7:		write_debug(ptr[7], reg, 7);			\
	case 6:		write_debug(ptr[6], reg, 6);			\
	case 5:		write_debug(ptr[5], reg, 5);			\
	case 4:		write_debug(ptr[4], reg, 4);			\
	case 3:		write_debug(ptr[3], reg, 3);			\
	case 2:		write_debug(ptr[2], reg, 2);			\
	case 1:		write_debug(ptr[1], reg, 1);			\
	default:	write_debug(ptr[0], reg, 0);			\
	}
67 
/*
 * Statistical Profiling Extension (SPE) registers, accessed by raw
 * encoding so the file builds with assemblers that predate SPE.
 */
#define PMSCR_EL1		sys_reg(3, 0, 9, 9, 0)

#define PMBLIMITR_EL1		sys_reg(3, 0, 9, 10, 0)
#define PMBLIMITR_EL1_E		BIT(0)	/* Profiling buffer enable */

#define PMBIDR_EL1		sys_reg(3, 0, 9, 10, 7)
#define PMBIDR_EL1_P		BIT(4)	/* Buffer owned by a higher EL */

/* PSB CSYNC, encoded as a hint so older toolchains accept it */
#define psb_csync()		asm volatile("hint #17")
77 
/*
 * VHE: nothing to do on guest entry — the host's SPE configuration is
 * left alone (see the nVHE variant below for the full dance).
 */
static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
{
	/* The vcpu can run, but it can't hide. */
}
82 
/*
 * nVHE: stop host SPE data generation before entering the guest.
 *
 * Stores the host's PMSCR_EL1 into *pmscr_el1 so it can be restored on
 * exit, or 0 when there is nothing to restore (SPE absent, owned by
 * EL3, or the profiling buffer disabled).  After disabling profiling,
 * any buffered records are drained to memory.
 */
static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
{
	u64 reg;

	/* Clear pmscr in case of early return */
	*pmscr_el1 = 0;

	/* SPE present on this CPU? */
	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
						  ID_AA64DFR0_PMSVER_SHIFT))
		return;

	/* Yes; is it owned by EL3? */
	reg = read_sysreg_s(PMBIDR_EL1);
	if (reg & PMBIDR_EL1_P)
		return;

	/* No; is the host actually using the thing? */
	reg = read_sysreg_s(PMBLIMITR_EL1);
	if (!(reg & PMBLIMITR_EL1_E))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_s(PMSCR_EL1);
	write_sysreg_s(0, PMSCR_EL1);
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
	dsb(nsh);
}
114 
/*
 * Select the SPE save routine at boot via runtime code patching:
 * the no-op VHE stub when the CPU has the Virtualization Host
 * Extensions, the full nVHE version otherwise.
 */
static hyp_alternate_select(__debug_save_spe,
			    __debug_save_spe_nvhe, __debug_save_spe_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
118 
/*
 * Re-enable host SPE data generation on guest exit by restoring the
 * PMSCR_EL1 value saved by __debug_save_spe_nvhe().  A zero value
 * means SPE was not in use, so there is nothing to restore.
 */
static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_s(pmscr_el1, PMSCR_EL1);
}
130 
__debug_save_state(struct kvm_vcpu * vcpu,struct kvm_guest_debug_arch * dbg,struct kvm_cpu_context * ctxt)131 void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
132 				   struct kvm_guest_debug_arch *dbg,
133 				   struct kvm_cpu_context *ctxt)
134 {
135 	u64 aa64dfr0;
136 	int brps, wrps;
137 
138 	if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
139 		return;
140 
141 	aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
142 	brps = (aa64dfr0 >> 12) & 0xf;
143 	wrps = (aa64dfr0 >> 20) & 0xf;
144 
145 	save_debug(dbg->dbg_bcr, dbgbcr, brps);
146 	save_debug(dbg->dbg_bvr, dbgbvr, brps);
147 	save_debug(dbg->dbg_wcr, dbgwcr, wrps);
148 	save_debug(dbg->dbg_wvr, dbgwvr, wrps);
149 
150 	ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
151 }
152 
__debug_restore_state(struct kvm_vcpu * vcpu,struct kvm_guest_debug_arch * dbg,struct kvm_cpu_context * ctxt)153 void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
154 				      struct kvm_guest_debug_arch *dbg,
155 				      struct kvm_cpu_context *ctxt)
156 {
157 	u64 aa64dfr0;
158 	int brps, wrps;
159 
160 	if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
161 		return;
162 
163 	aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
164 
165 	brps = (aa64dfr0 >> 12) & 0xf;
166 	wrps = (aa64dfr0 >> 20) & 0xf;
167 
168 	restore_debug(dbg->dbg_bcr, dbgbcr, brps);
169 	restore_debug(dbg->dbg_bvr, dbgbvr, brps);
170 	restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
171 	restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
172 
173 	write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
174 }
175 
__debug_cond_save_host_state(struct kvm_vcpu * vcpu)176 void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
177 {
178 	/* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
179 	 * a full save/restore cycle. */
180 	if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
181 	    (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
182 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
183 
184 	__debug_save_state(vcpu, &vcpu->arch.host_debug_state.regs,
185 			   kern_hyp_va(vcpu->arch.host_cpu_context));
186 	__debug_save_spe()(&vcpu->arch.host_debug_state.pmscr_el1);
187 }
188 
__debug_cond_restore_host_state(struct kvm_vcpu * vcpu)189 void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
190 {
191 	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
192 	__debug_restore_state(vcpu, &vcpu->arch.host_debug_state.regs,
193 			      kern_hyp_va(vcpu->arch.host_cpu_context));
194 
195 	if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
196 		vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
197 }
198 
/* Return the current value of MDCR_EL2 (EL2 debug configuration). */
u32 __hyp_text __kvm_get_mdcr_el2(void)
{
	return read_sysreg(mdcr_el2);
}
203