// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

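/**
 * kvm_mmio_write_buf -- store the low @len bytes of @data into @buf
 * @buf:  destination buffer, e.g. the data_buf staged for kvm_io_bus_write()
 * @len:  access width in bytes, as decoded from the fault syndrome
 * @data: register value to store, in host byte order
 *
 * Only @len values of 1, 2, 4 and 8 are meaningful; any other width
 * leaves datap NULL below, so callers must only pass widths produced
 * by the decode path.
 */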
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte	= data;
		datap		= &tmp.byte;
		break;
	case 2:
		tmp.hword	= data;
		datap		= &tmp.hword;
		break;
	case 4:
		tmp.word	= data;
		datap		= &tmp.word;
		break;
	case 8:
		tmp.dword	= data;
		datap		= &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}

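/**
 * kvm_mmio_read_buf -- load a @len byte wide value from @buf
 * @buf: source buffer, e.g. run->mmio.data filled in by user space
 * @len: access width in bytes (1, 2, 4 or 8)
 *
 * The result is zero-extended to unsigned long; sign extension, if
 * any, is applied later in kvm_handle_mmio_return().  Copying through
 * the union keeps the loads aligned even when @buf is not.
 */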
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}

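/*
 * Illustrative round trip (a sketch, not kernel code): both helpers
 * use host byte order, so a narrow write truncates and a matching
 * read restores the truncated value on any host endianness:
 *
 *	u8 buf[8];
 *
 *	kvm_mmio_write_buf(buf, 2, 0x11223344UL);
 *	kvm_mmio_read_buf(buf, 2);	// == 0x3344
 */
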
/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 *
 * Returns 0 on success (including the case where no MMIO return was
 * pending), or -EINVAL if the recorded access is wider than an
 * unsigned long.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/* Detect an already handled MMIO return */
	if (unlikely(!vcpu->mmio_needed))
		return 0;

	vcpu->mmio_needed = 0;

	if (!run->mmio.is_write) {
		len = run->mmio.len;
		if (len > sizeof(unsigned long))
			return -EINVAL;

		data = kvm_mmio_read_buf(run->mmio.data, len);

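		/*
		 * Branchless sign extension: XOR with the sign bit of
		 * the narrow value, then subtract it, which propagates
		 * the bit through the upper bits.  E.g. for len == 1
		 * and data == 0xff: mask == 0x80 and
		 * (0xff ^ 0x80) - 0x80 == 0x7f - 0x80 == -1.
		 */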
		if (vcpu->arch.mmio_decode.sign_extend &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 0;
}

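/*
 * Decode the ISS of the data abort syndrome: access width, load vs.
 * store, sign extension and the target register, caching the result
 * in vcpu->arch.mmio_decode for kvm_handle_mmio_return().
 *
 * Returns 0 on success, 1 if the fault was a stage 1 page table walk
 * touching MMIO (resolved by injecting an abort into the guest), or a
 * negative error if the access size cannot be decoded.
 */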
static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
	unsigned long rt;
	int access_size;
	bool sign_extend;

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	access_size = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(access_size < 0))
		return access_size;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	*len = access_size;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	return 0;
}

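/*
 * Handle a guest data abort taken on an MMIO address.  Returns 1 when
 * the guest can be resumed immediately (the access was emulated in
 * the kernel, or an abort was injected back into the guest), 0 when
 * the run loop must exit to user space with KVM_EXIT_MMIO, or a
 * negative error on decode failure.
 */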
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];

	/*
	 * Prepare MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then check whether some in-kernel emulation is
	 * responsible, otherwise let user space do its magic.
	 */
	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, &is_write, &len);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;

	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/*
	 * Now prepare kvm_run for the potential return to userland.
	 * The in-kernel path below relies on it too, via
	 * kvm_handle_mmio_return().
	 */
	run->mmio.is_write	= is_write;
	run->mmio.phys_addr	= fault_ipa;
	run->mmio.len		= len;
	vcpu->mmio_needed	= 1;

	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu, run);
		return 1;
	}

	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason	= KVM_EXIT_MMIO;
	return 0;
}