1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2023 MediaTek Inc.
4 */
5
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/soc/mediatek/gzvm_drv.h>
#include <trace/events/geniezone.h>
#include <trace/hooks/gzvm.h>
15
16 /* maximum size needed for holding an integer */
17 #define ITOA_MAX_LEN 12
18
19 /**
20 * gzvm_vcpu_wakeup_all - wakes up all vCPUs associated with the specified
21 * gzvm.
22 * @gzvm: Pointer to gzvm structure.
23 */
gzvm_vcpu_wakeup_all(struct gzvm * gzvm)24 void gzvm_vcpu_wakeup_all(struct gzvm *gzvm)
25 {
26 for (int i = 0; i < GZVM_MAX_VCPUS; i++) {
27 if (gzvm->vcpus[i]) {
28 gzvm->vcpus[i]->idle_events.virtio_irq += 1;
29 rcuwait_wake_up(&gzvm->vcpus[i]->wait);
30 }
31 }
32 }
33
gzvm_vtimer_expire(struct hrtimer * hrt)34 static enum hrtimer_restart gzvm_vtimer_expire(struct hrtimer *hrt)
35 {
36 struct gzvm_vcpu *vcpu;
37
38 vcpu = container_of(hrt, struct gzvm_vcpu, gzvm_vtimer);
39
40 gzvm_vcpu_wakeup_all(vcpu->gzvm);
41
42 return HRTIMER_NORESTART;
43 }
44
/* gzvm_vtimer init based on hrtimer */
static void gzvm_vtimer_init(struct gzvm_vcpu *vcpu)
{
	/*
	 * Absolute-time, hard-irq-context hrtimer; the timer is armed later
	 * via gzvm_vtimer_set() and fires gzvm_vtimer_expire().
	 */
	hrtimer_init(&vcpu->gzvm_vtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	vcpu->gzvm_vtimer.function = gzvm_vtimer_expire;
}
51
/* Arm the vCPU's virtual timer to fire @ns nanoseconds from now. */
void gzvm_vtimer_set(struct gzvm_vcpu *vcpu, u64 ns)
{
	ktime_t expire = ktime_add_ns(ktime_get(), ns);

	hrtimer_start(&vcpu->gzvm_vtimer, expire, HRTIMER_MODE_ABS_HARD);
}
56
/* Cancel the vCPU's virtual timer (waits for a running callback to finish). */
void gzvm_vtimer_release(struct gzvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->gzvm_vtimer);
}
61
/**
 * gzvm_vcpu_update_one_reg() - Back GZVM_{GET,SET}_ONE_REG for one vcpu reg.
 * @vcpu: Pointer to the target vcpu.
 * @argp: Userspace pointer to a struct gzvm_one_reg descriptor.
 * @is_write: True to write the guest register, false to read it.
 *
 * Only writes are supported so far; reads return -EOPNOTSUPP.
 *
 * Return: 0 on success, negative errno on failure.
 */
static long gzvm_vcpu_update_one_reg(struct gzvm_vcpu *vcpu,
				     void __user *argp,
				     bool is_write)
{
	struct gzvm_one_reg reg;
	void __user *reg_addr;
	u64 data = 0;
	u64 reg_size;

	if (copy_from_user(&reg, argp, sizeof(reg)))
		return -EFAULT;

	reg_addr = u64_to_user_ptr(reg.addr);
	/* The access size is encoded in the register id as log2(bytes). */
	reg_size = (reg.id & GZVM_REG_SIZE_MASK) >> GZVM_REG_SIZE_SHIFT;
	reg_size = BIT(reg_size);

	/* Only accesses that fit the u64 staging buffer are valid. */
	if (reg_size != 1 && reg_size != 2 && reg_size != 4 && reg_size != 8)
		return -EINVAL;

	if (is_write) {
		/* GZ hypervisor would filter out invalid vcpu register access */
		if (copy_from_user(&data, reg_addr, reg_size))
			return -EFAULT;
	} else {
		return -EOPNOTSUPP;
	}

	return gzvm_arch_vcpu_update_one_reg(vcpu, reg.id, is_write, &data);
}
97
98 /**
99 * gzvm_vcpu_handle_mmio() - Handle mmio in kernel space.
100 * @vcpu: Pointer to vcpu.
101 *
102 * Return:
103 * * true - This mmio exit has been processed.
104 * * false - This mmio exit has not been processed, require userspace.
105 */
gzvm_vcpu_handle_mmio(struct gzvm_vcpu * vcpu)106 static bool gzvm_vcpu_handle_mmio(struct gzvm_vcpu *vcpu)
107 {
108 __u64 addr;
109 __u32 len;
110 const void *val_ptr;
111
112 /* So far, we don't have in-kernel mmio read handler */
113 if (!vcpu->run->mmio.is_write)
114 return false;
115 addr = vcpu->run->mmio.phys_addr;
116 len = vcpu->run->mmio.size;
117 val_ptr = &vcpu->run->mmio.data;
118
119 return gzvm_ioevent_write(vcpu, addr, len, val_ptr);
120 }
121
122 /**
123 * gzvm_vcpu_run() - Handle vcpu run ioctl, entry point to guest and exit
124 * point from guest
125 * @vcpu: Pointer to struct gzvm_vcpu
126 * @argp: Pointer to struct gzvm_vcpu_run in userspace
127 *
128 * Return:
129 * * 0 - Success.
130 * * Negative - Failure.
131 */
gzvm_vcpu_run(struct gzvm_vcpu * vcpu,void __user * argp)132 static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
133 {
134 bool need_userspace = false;
135 u64 exit_reason;
136
137 if (copy_from_user(vcpu->run, argp, sizeof(struct gzvm_vcpu_run)))
138 return -EFAULT;
139
140 for (int i = 0; i < ARRAY_SIZE(vcpu->run->padding1); i++) {
141 if (vcpu->run->padding1[i])
142 return -EINVAL;
143 }
144
145 if (vcpu->run->immediate_exit == 1)
146 return -EINTR;
147
148 while (!need_userspace && !signal_pending(current)) {
149 gzvm_arch_vcpu_run(vcpu, &exit_reason);
150 trace_mtk_vcpu_exit(exit_reason);
151
152 switch (exit_reason) {
153 case GZVM_EXIT_MMIO:
154 if (!gzvm_vcpu_handle_mmio(vcpu))
155 need_userspace = true;
156 break;
157 /**
158 * it's geniezone's responsibility to fill corresponding data
159 * structure
160 */
161 case GZVM_EXIT_HYPERCALL:
162 if (!gzvm_handle_guest_hvc(vcpu))
163 need_userspace = true;
164 break;
165 case GZVM_EXIT_EXCEPTION:
166 if (!gzvm_handle_guest_exception(vcpu))
167 need_userspace = true;
168 break;
169 case GZVM_EXIT_DEBUG:
170 fallthrough;
171 case GZVM_EXIT_FAIL_ENTRY:
172 fallthrough;
173 case GZVM_EXIT_INTERNAL_ERROR:
174 fallthrough;
175 case GZVM_EXIT_SYSTEM_EVENT:
176 fallthrough;
177 case GZVM_EXIT_SHUTDOWN:
178 need_userspace = true;
179 break;
180 case GZVM_EXIT_IRQ:
181 fallthrough;
182 case GZVM_EXIT_GZ:
183 break;
184 case GZVM_EXIT_IDLE:
185 gzvm_handle_guest_idle(vcpu);
186 break;
187 case GZVM_EXIT_IPI:
188 gzvm_handle_guest_ipi(vcpu);
189 break;
190 case GZVM_EXIT_UNKNOWN:
191 fallthrough;
192 default:
193 pr_err("vcpu unknown exit\n");
194 need_userspace = true;
195 }
196 trace_android_vh_gzvm_vcpu_exit_reason(vcpu, &need_userspace);
197 }
198
199 if (copy_to_user(argp, vcpu->run, sizeof(struct gzvm_vcpu_run)))
200 return -EFAULT;
201 if (signal_pending(current)) {
202 // invoke hvc to inform gz to map memory
203 gzvm_arch_inform_exit(vcpu->gzvm->vm_id);
204 return -ERESTARTSYS;
205 }
206 return 0;
207 }
208
gzvm_vcpu_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)209 static long gzvm_vcpu_ioctl(struct file *filp, unsigned int ioctl,
210 unsigned long arg)
211 {
212 int ret = -ENOTTY;
213 void __user *argp = (void __user *)arg;
214 struct gzvm_vcpu *vcpu = filp->private_data;
215
216 switch (ioctl) {
217 case GZVM_RUN:
218 ret = gzvm_vcpu_run(vcpu, argp);
219 break;
220 case GZVM_GET_ONE_REG:
221 /* !is_write */
222 ret = -EOPNOTSUPP;
223 break;
224 case GZVM_SET_ONE_REG:
225 /* is_write */
226 ret = gzvm_vcpu_update_one_reg(vcpu, argp, true);
227 break;
228 default:
229 break;
230 }
231
232 return ret;
233 }
234
/* Drops the VM reference taken in gzvm_vm_ioctl_create_vcpu() when the
 * vcpu fd is closed.
 */
static int gzvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct gzvm_vcpu *vcpu = filp->private_data;

	gzvm_vm_put(vcpu->gzvm);
	return 0;
}
242
/* File operations backing the anonymous vcpu fd (see create_vcpu_fd()). */
static const struct file_operations gzvm_vcpu_fops = {
	.release        = gzvm_vcpu_release,
	.unlocked_ioctl = gzvm_vcpu_ioctl,
	.llseek		= noop_llseek,
};
248
249 /* caller must hold the vm lock */
gzvm_destroy_vcpu(struct gzvm_vcpu * vcpu)250 static void gzvm_destroy_vcpu(struct gzvm_vcpu *vcpu)
251 {
252 if (!vcpu)
253 return;
254
255 hrtimer_cancel(&vcpu->gzvm_vtimer);
256 gzvm_arch_destroy_vcpu(vcpu->gzvm->vm_id, vcpu->vcpuid);
257 /* clean guest's data */
258 memset(vcpu->run, 0, GZVM_VCPU_RUN_MAP_SIZE);
259 free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
260 kfree(vcpu);
261 }
262
263 /**
264 * gzvm_destroy_vcpus() - Destroy all vcpus, caller has to hold the vm lock
265 *
266 * @gzvm: vm struct that owns the vcpus
267 */
gzvm_destroy_vcpus(struct gzvm * gzvm)268 void gzvm_destroy_vcpus(struct gzvm *gzvm)
269 {
270 int i;
271
272 for (i = 0; i < GZVM_MAX_VCPUS; i++) {
273 gzvm_destroy_vcpu(gzvm->vcpus[i]);
274 gzvm->vcpus[i] = NULL;
275 }
276 }
277
278 /* create_vcpu_fd() - Allocates an inode for the vcpu. */
create_vcpu_fd(struct gzvm_vcpu * vcpu)279 static int create_vcpu_fd(struct gzvm_vcpu *vcpu)
280 {
281 /* sizeof("gzvm-vcpu:") + max(strlen(itoa(vcpuid))) + null */
282 char name[10 + ITOA_MAX_LEN + 1];
283
284 snprintf(name, sizeof(name), "gzvm-vcpu:%d", vcpu->vcpuid);
285 return anon_inode_getfd(name, &gzvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
286 }
287
288 /**
289 * gzvm_vm_ioctl_create_vcpu() - for GZVM_CREATE_VCPU
290 * @gzvm: Pointer to struct gzvm
291 * @cpuid: equals arg
292 *
293 * Return: Fd of vcpu, negative errno if error occurs
294 */
gzvm_vm_ioctl_create_vcpu(struct gzvm * gzvm,u32 cpuid)295 int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid)
296 {
297 struct gzvm_vcpu *vcpu;
298 int ret;
299
300 gzvm_vm_get(gzvm);
301
302 if (cpuid >= GZVM_MAX_VCPUS)
303 return -EINVAL;
304
305 vcpu = kzalloc(sizeof(*vcpu), GFP_KERNEL);
306 if (!vcpu)
307 return -ENOMEM;
308
309 /**
310 * Allocate 2 pages for data sharing between driver and gz hypervisor
311 *
312 * |- page 0 -|- page 1 -|
313 * |gzvm_vcpu_run|......|hwstate|.......|
314 *
315 */
316 vcpu->run = alloc_pages_exact(GZVM_VCPU_RUN_MAP_SIZE,
317 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
318 if (!vcpu->run) {
319 ret = -ENOMEM;
320 goto free_vcpu;
321 }
322 vcpu->hwstate = (void *)vcpu->run + PAGE_SIZE;
323 vcpu->vcpuid = cpuid;
324 vcpu->gzvm = gzvm;
325 mutex_init(&vcpu->lock);
326
327 ret = gzvm_arch_create_vcpu(gzvm->vm_id, vcpu->vcpuid, vcpu->run);
328 if (ret < 0)
329 goto free_vcpu_run;
330
331 ret = create_vcpu_fd(vcpu);
332 if (ret < 0)
333 goto free_vcpu_run;
334 gzvm->vcpus[cpuid] = vcpu;
335
336 gzvm_vtimer_init(vcpu);
337 return ret;
338
339 free_vcpu_run:
340 free_pages_exact(vcpu->run, GZVM_VCPU_RUN_MAP_SIZE);
341 free_vcpu:
342 kfree(vcpu);
343 return ret;
344 }
345