// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

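/**
 * kvm_mmio_write_buf -- Store a value into an MMIO data buffer
 *
 * @buf:  The destination buffer
 * @len:  The access width in bytes (1, 2, 4 or 8)
 * @data: The value to store, in host byte order
 *
 * The union below narrows @data to the access width first, so the
 * final memcpy() copies exactly the @len significant bytes.
 */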
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte	= data;
		datap		= &tmp.byte;
		break;
	case 2:
		tmp.hword	= data;
		datap		= &tmp.hword;
		break;
	case 4:
		tmp.word	= data;
		datap		= &tmp.word;
		break;
	case 8:
		tmp.dword	= data;
		datap		= &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}

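/**
 * kvm_mmio_read_buf -- Load a value from an MMIO data buffer
 *
 * @buf: The source buffer
 * @len: The access width in bytes (1, 2, 4 or 8)
 *
 * Returns the value zero-extended to unsigned long. Going through the
 * union with memcpy() keeps the multi-byte cases free of unaligned
 * pointer dereferences.
 */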
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/* Detect an already handled MMIO return */
	if (unlikely(!vcpu->mmio_needed))
		return 0;

	vcpu->mmio_needed = 0;

	if (!run->mmio.is_write) {
		len = run->mmio.len;
		if (len > sizeof(unsigned long))
			return -EINVAL;

		data = kvm_mmio_read_buf(run->mmio.data, len);

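		/*
		 * Classic branch-free sign extension: XOR with the
		 * sign-bit mask flips the sign bit, and subtracting the
		 * mask then propagates it upwards. E.g. for len == 1,
		 * mask == 0x80: data == 0x80 gives (0x80 ^ 0x80) - 0x80
		 * == 0xffffffffffffff80, while data == 0x7f gives
		 * (0x7f ^ 0x80) - 0x80 == 0xff - 0x80 == 0x7f.
		 */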
		if (vcpu->arch.mmio_decode.sign_extend &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

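		/*
		 * A 32-bit access targets a Wn register, so the upper
		 * 32 bits written back must be zero even after sign
		 * extension.
		 */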
		if (!vcpu->arch.mmio_decode.sixty_four)
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
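		/*
		 * vcpu_data_host_to_guest() byte-swaps the value if the
		 * guest's current data endianness differs from the
		 * host's, so the guest register receives the bytes it
		 * expects.
		 */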
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 0;
}
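
/*
 * For context, a minimal sketch of the user space side that this
 * return path pairs with. The vcpu fd setup and the mmap of kvm_run
 * are elided; device_read() and device_write() are hypothetical
 * emulation helpers, not part of the KVM API:
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr,
 *				     run->mmio.data, run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr,
 *				    run->mmio.data, run->mmio.len);
 *	}
 *
 * The next KVM_RUN then reaches kvm_handle_mmio_return() above, which
 * for reads loads the run->mmio.data bytes into the guest register.
 */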

static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
	unsigned long rt;
	int access_size;
	bool sign_extend;
	bool sixty_four;

	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

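	/*
	 * The remaining syndrome fields describe the access: its size,
	 * direction, sign extension and target register (the SAS, WnR,
	 * SSE, SF and SRT fields of the data abort ISS encoding).
	 */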
	access_size = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(access_size < 0))
		return access_size;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	sixty_four = kvm_vcpu_dabt_issf(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	*len = access_size;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;
	vcpu->arch.mmio_decode.sixty_four = sixty_four;

	return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];

	/*
	 * Prepare the MMIO operation. First decode the syndrome data we
	 * get from the CPU. Then see whether some in-kernel emulation
	 * feels responsible; otherwise let user space do its magic.
	 */
	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, &is_write, &len);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;

	if (is_write) {
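		/*
		 * Mirror image of the read path: byte-swap the register
		 * value first if guest and host data endianness differ.
		 */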
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Now prepare kvm_run for the potential return to userland. */
	run->mmio.is_write	= is_write;
	run->mmio.phys_addr	= fault_ipa;
	run->mmio.len		= len;
	vcpu->mmio_needed	= 1;

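	/*
	 * kvm_io_bus_write()/kvm_io_bus_read() return 0 when an
	 * in-kernel device claimed the access. In that case complete
	 * the MMIO operation right away and return 1 to resume the
	 * guest; otherwise return 0 so that KVM exits to user space
	 * with KVM_EXIT_MMIO.
	 */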
	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu, run);
		return 1;
	}

	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason	= KVM_EXIT_MMIO;
	return 0;
}