// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "gunyah_vm_mgr: " fmt

#include <linux/anon_inodes.h>
#include <linux/compat.h>
#include <linux/file.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/xarray.h>

#include <uapi/linux/gunyah.h>
#include <trace/hooks/gunyah.h>

#include "rsc_mgr.h"
#include "vm_mgr.h"

#define GUNYAH_VM_ADDRSPACE_LABEL 0
// "To" extent for memory private to guest
#define GUNYAH_VM_MEM_EXTENT_GUEST_PRIVATE_LABEL 0
// "From" extent for memory shared with guest
#define GUNYAH_VM_MEM_EXTENT_HOST_SHARED_LABEL 1
// "To" extent for memory shared with the guest
#define GUNYAH_VM_MEM_EXTENT_GUEST_SHARED_LABEL 3
// "From" extent for memory private to guest
#define GUNYAH_VM_MEM_EXTENT_HOST_PRIVATE_LABEL 2

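/*
 * Boot context registers are keyed by the GUNYAH_VM_BOOT_CONTEXT_REG(set, index)
 * encoding from the UAPI header: the register set sits above
 * GUNYAH_VM_BOOT_CONTEXT_REG_SHIFT and the register index occupies the low
 * byte. For example, general-purpose register x2 would be selected with
 * GUNYAH_VM_BOOT_CONTEXT_REG(REG_SET_X, 2); see gunyah_vm_set_boot_context()
 * below for the validation and decoding of both fields.
 */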
#define BOOT_CONTEXT_REG_MASK GUNYAH_VM_BOOT_CONTEXT_REG(0xff, 0xff)

#define GUNYAH_EVENT_CREATE_VM 0
#define GUNYAH_EVENT_DESTROY_VM 1

static DEFINE_XARRAY(gunyah_vm_functions);
static DEFINE_XARRAY(gunyah_auth_vm_mgr);

static inline int gunyah_vm_fill_boot_context(struct gunyah_vm *ghvm)
{
        unsigned long reg_set, reg_index, id;
        void *entry;
        int ret;

        xa_for_each(&ghvm->boot_context, id, entry) {
                reg_set = (id >> GUNYAH_VM_BOOT_CONTEXT_REG_SHIFT) & 0xff;
                reg_index = id & 0xff;
                ret = gunyah_rm_vm_set_boot_context(ghvm->rm, ghvm->vmid,
                                                    reg_set, reg_index,
                                                    *(u64 *)entry);
                if (ret)
                        return ret;
        }

        return 0;
}

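/**
 * gunyah_auth_vm_mgr_register() - Register an authentication VM manager backend
 * @auth_vm: Backend description; must provide vm_attach() and vm_detach()
 *
 * Stores the backend in the gunyah_auth_vm_mgr xarray keyed by its auth type
 * so that gunyah_vm_set_auth_type() can look it up and attach it later.
 *
 * Return: 0 on success or a negative errno from the xarray store.
 */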
int gunyah_auth_vm_mgr_register(struct gunyah_auth_vm_mgr *auth_vm)
{
        if (!auth_vm->vm_attach || !auth_vm->vm_detach)
                return -EINVAL;

        return xa_err(xa_store(&gunyah_auth_vm_mgr, auth_vm->type, auth_vm, GFP_KERNEL));
}
EXPORT_SYMBOL_GPL(gunyah_auth_vm_mgr_register);

void gunyah_auth_vm_mgr_unregister(struct gunyah_auth_vm_mgr *auth_vm)
{
        /* Expecting unregister to only come when unloading a module */
        WARN_ON(auth_vm->mod && module_refcount(auth_vm->mod));
        xa_erase(&gunyah_auth_vm_mgr, auth_vm->type);
}
EXPORT_SYMBOL_GPL(gunyah_auth_vm_mgr_unregister);

static struct gunyah_auth_vm_mgr *gunyah_get_auth_vm_mgr(u32 auth_type)
{
        struct gunyah_auth_vm_mgr *auth_vm;

        auth_vm = xa_load(&gunyah_auth_vm_mgr, auth_type);
        if (!auth_vm || !try_module_get(auth_vm->mod))
                auth_vm = ERR_PTR(-ENOENT);

        return auth_vm;
}

static void gunyah_put_auth_vm_mgr(struct gunyah_vm *ghvm)
{
        struct gunyah_auth_vm_mgr *auth_vm;

        auth_vm = xa_load(&gunyah_auth_vm_mgr, ghvm->auth);
        if (!auth_vm)
                return;

        auth_vm->vm_detach(ghvm);
        module_put(auth_vm->mod);
}

static long gunyah_vm_set_auth_type(struct gunyah_vm *ghvm,
                                    struct gunyah_auth_desc *auth_desc)
{
        struct gunyah_auth_vm_mgr *auth_vm;

        auth_vm = gunyah_get_auth_vm_mgr(auth_desc->type);
        if (IS_ERR(auth_vm))
                return PTR_ERR(auth_vm);

        /* The auth manager is expected to populate auth_vm_mgr_ops on attach */
        return auth_vm->vm_attach(ghvm, auth_desc);
}

static int gunyah_generic_pre_vm_configure(struct gunyah_vm *ghvm)
{
        /*
         * VMs use dtb mem parcel as the config image parcel as they
         * don't have any explicit auth image/metadata
         */

        ghvm->config_image.parcel.start = gunyah_gpa_to_gfn(ghvm->dtb.config.guest_phys_addr);
        ghvm->config_image.parcel.pages = gunyah_gpa_to_gfn(ghvm->dtb.config.size);

        ghvm->config_image.image_offset = 0;
        ghvm->config_image.image_size = 0;
        ghvm->config_image.dtb_offset = ghvm->dtb.config.guest_phys_addr -
                                        gunyah_gfn_to_gpa(ghvm->config_image.parcel.start);
        ghvm->config_image.dtb_size = ghvm->dtb.config.size;
        return 0;
}

static int gunyah_generic_pre_vm_init(struct gunyah_vm *ghvm)
{
        int ret;

        ret = gunyah_setup_demand_paging(ghvm, 0, ULONG_MAX);
        if (ret) {
                dev_warn(ghvm->parent,
                         "Failed to set up demand paging: %d\n", ret);
                return ret;
        }

        ret = gunyah_rm_vm_set_address_layout(
                ghvm->rm, ghvm->vmid, GUNYAH_RM_RANGE_ID_IMAGE,
                gunyah_gfn_to_gpa(ghvm->config_image.parcel.start),
                gunyah_gfn_to_gpa(ghvm->config_image.parcel.pages));
        if (ret) {
                dev_warn(ghvm->parent,
                         "Failed to set location of the config image mem parcel: %d\n", ret);
                return ret;
        }

        return ret;
}

static int gunyah_generic_pre_vm_start(struct gunyah_vm *ghvm)
{
        int ret;

        ret = gunyah_vm_parcel_to_paged(ghvm, &ghvm->config_image.parcel.parcel,
                                        ghvm->config_image.parcel.start,
                                        ghvm->config_image.parcel.pages);
        if (ret)
                return ret;

        if (ghvm->auth != GUNYAH_RM_VM_AUTH_NONE)
                return ret;

        ret = gunyah_vm_fill_boot_context(ghvm);
        if (ret) {
                dev_warn(ghvm->parent, "Failed to setup boot context: %d\n",
                         ret);
                return ret;
        }
        return ret;
}

void gunyah_generic_vm_start_fail(struct gunyah_vm *ghvm)
{
        /*
         * Need to roll back parcel_to_paged because RM is still
         * tracking the parcel.
         */
        gunyah_vm_mm_erase_range(ghvm,
                                 ghvm->config_image.parcel.start,
                                 ghvm->config_image.parcel.pages);
}

static struct gunyah_auth_vm_mgr_ops generic_vm_ops = {
        .pre_vm_configure = gunyah_generic_pre_vm_configure,
        .pre_vm_init = gunyah_generic_pre_vm_init,
        .pre_vm_start = gunyah_generic_pre_vm_start,
        .vm_start_fail = gunyah_generic_vm_start_fail,
};

static void gunyah_vm_put_function(struct gunyah_vm_function *fn)
{
        module_put(fn->mod);
}

static struct gunyah_vm_function *gunyah_vm_get_function(u32 type)
{
        struct gunyah_vm_function *fn;

        fn = xa_load(&gunyah_vm_functions, type);
        if (!fn) {
                request_module("ghfunc:%d", type);

                fn = xa_load(&gunyah_vm_functions, type);
        }

        if (!fn || !try_module_get(fn->mod))
                fn = ERR_PTR(-ENOENT);

        return fn;
}

static void
gunyah_vm_remove_function_instance(struct gunyah_vm_function_instance *inst)
        __must_hold(&inst->ghvm->fn_lock)
{
        inst->fn->unbind(inst);
        list_del(&inst->vm_list);
        gunyah_vm_put_function(inst->fn);
        kfree(inst->argp);
        kfree(inst);
}

static void gunyah_vm_remove_functions(struct gunyah_vm *ghvm)
{
        struct gunyah_vm_function_instance *inst, *iiter;

        mutex_lock(&ghvm->fn_lock);
        list_for_each_entry_safe(inst, iiter, &ghvm->functions, vm_list) {
                gunyah_vm_remove_function_instance(inst);
        }
        mutex_unlock(&ghvm->fn_lock);
}

static long gunyah_vm_add_function_instance(struct gunyah_vm *ghvm,
                                            struct gunyah_fn_desc *f)
{
        struct gunyah_vm_function_instance *inst;
        void __user *argp;
        long r = 0;

        if (f->arg_size > GUNYAH_FN_MAX_ARG_SIZE) {
                dev_err_ratelimited(ghvm->parent, "%s: arg_size > %d\n",
                                    __func__, GUNYAH_FN_MAX_ARG_SIZE);
                return -EINVAL;
        }

        inst = kzalloc(sizeof(*inst), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        inst->arg_size = f->arg_size;
        if (inst->arg_size) {
                inst->argp = kzalloc(inst->arg_size, GFP_KERNEL);
                if (!inst->argp) {
                        r = -ENOMEM;
                        goto free;
                }

                argp = u64_to_user_ptr(f->arg);
                if (copy_from_user(inst->argp, argp, f->arg_size)) {
                        r = -EFAULT;
                        goto free_arg;
                }
        }

        inst->fn = gunyah_vm_get_function(f->type);
        if (IS_ERR(inst->fn)) {
                r = PTR_ERR(inst->fn);
                goto free_arg;
        }

        inst->ghvm = ghvm;
        inst->rm = ghvm->rm;

        mutex_lock(&ghvm->fn_lock);
        r = inst->fn->bind(inst);
        if (r < 0) {
                mutex_unlock(&ghvm->fn_lock);
                gunyah_vm_put_function(inst->fn);
                goto free_arg;
        }

        list_add(&inst->vm_list, &ghvm->functions);
        mutex_unlock(&ghvm->fn_lock);

        return r;
free_arg:
        kfree(inst->argp);
free:
        kfree(inst);
        return r;
}

static long gunyah_vm_rm_function_instance(struct gunyah_vm *ghvm,
                                           struct gunyah_fn_desc *f)
{
        struct gunyah_vm_function_instance *inst, *iter;
        void __user *user_argp;
        void *argp __free(kfree) = NULL;
        long r = 0;

        if (f->arg_size) {
                argp = kzalloc(f->arg_size, GFP_KERNEL);
                if (!argp)
                        return -ENOMEM;

                user_argp = u64_to_user_ptr(f->arg);
                if (copy_from_user(argp, user_argp, f->arg_size))
                        return -EFAULT;
        }

        r = mutex_lock_interruptible(&ghvm->fn_lock);
        if (r)
                return r;

        r = -ENOENT;
        list_for_each_entry_safe(inst, iter, &ghvm->functions, vm_list) {
                if (inst->fn->type == f->type &&
                    inst->fn->compare(inst, argp, f->arg_size)) {
                        gunyah_vm_remove_function_instance(inst);
                        r = 0;
                }
        }

        mutex_unlock(&ghvm->fn_lock);
        return r;
}

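/**
 * gunyah_vm_function_register() - Register a VM function driver
 * @fn: Function description; must provide bind() and unbind() callbacks
 *
 * Makes the function type available to GUNYAH_VM_ADD_FUNCTION; instances are
 * looked up (and the backing module auto-loaded via the "ghfunc:<type>"
 * alias) in gunyah_vm_get_function().
 *
 * Return: 0 on success or a negative errno from the xarray store.
 */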
int gunyah_vm_function_register(struct gunyah_vm_function *fn)
{
        if (!fn->bind || !fn->unbind)
                return -EINVAL;

        return xa_err(xa_store(&gunyah_vm_functions, fn->type, fn, GFP_KERNEL));
}
EXPORT_SYMBOL_GPL(gunyah_vm_function_register);

void gunyah_vm_function_unregister(struct gunyah_vm_function *fn)
{
        /* Expecting unregister to only come when unloading a module */
        WARN_ON(fn->mod && module_refcount(fn->mod));
        xa_erase(&gunyah_vm_functions, fn->type);
}
EXPORT_SYMBOL_GPL(gunyah_vm_function_unregister);

static bool gunyah_vm_resource_ticket_populate_noop(
        struct gunyah_vm_resource_ticket *ticket, struct gunyah_resource *ghrsc)
{
        return true;
}
static void gunyah_vm_resource_ticket_unpopulate_noop(
        struct gunyah_vm_resource_ticket *ticket, struct gunyah_resource *ghrsc)
{
}

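/**
 * gunyah_vm_add_resource_ticket() - Register interest in a hypervisor resource
 * @ghvm: The VM
 * @ticket: Ticket naming the resource type and label to claim
 *
 * Only one ticket per (resource_type, label) pair is allowed. Resources the
 * VM already owns that match the ticket are handed to its populate() callback
 * immediately; later arrivals are delivered from gunyah_vm_add_resource().
 *
 * Return: 0 on success, -EEXIST if an identical ticket exists, -ENODEV if the
 * owner module reference could not be taken.
 */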
int gunyah_vm_add_resource_ticket(struct gunyah_vm *ghvm,
                                  struct gunyah_vm_resource_ticket *ticket)
{
        struct gunyah_vm_resource_ticket *iter;
        struct gunyah_resource *ghrsc, *rsc_iter;
        int ret = 0;

        mutex_lock(&ghvm->resources_lock);
        list_for_each_entry(iter, &ghvm->resource_tickets, vm_list) {
                if (iter->resource_type == ticket->resource_type &&
                    iter->label == ticket->label) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        if (!try_module_get(ticket->owner)) {
                ret = -ENODEV;
                goto out;
        }

        list_add(&ticket->vm_list, &ghvm->resource_tickets);
        INIT_LIST_HEAD(&ticket->resources);

        list_for_each_entry_safe(ghrsc, rsc_iter, &ghvm->resources, list) {
                if (ghrsc->type == ticket->resource_type &&
                    ghrsc->rm_label == ticket->label) {
                        if (ticket->populate(ticket, ghrsc))
                                list_move(&ghrsc->list, &ticket->resources);
                }
        }
out:
        mutex_unlock(&ghvm->resources_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(gunyah_vm_add_resource_ticket);

static void
__gunyah_vm_remove_resource_ticket(struct gunyah_vm *ghvm,
                                   struct gunyah_vm_resource_ticket *ticket)
{
        struct gunyah_resource *ghrsc, *iter;

        list_for_each_entry_safe(ghrsc, iter, &ticket->resources, list) {
                ticket->unpopulate(ticket, ghrsc);
                list_move(&ghrsc->list, &ghvm->resources);
        }

        module_put(ticket->owner);
        list_del(&ticket->vm_list);
}

void gunyah_vm_remove_resource_ticket(struct gunyah_vm *ghvm,
                                      struct gunyah_vm_resource_ticket *ticket)
{
        mutex_lock(&ghvm->resources_lock);
        __gunyah_vm_remove_resource_ticket(ghvm, ticket);
        mutex_unlock(&ghvm->resources_lock);
}
EXPORT_SYMBOL_GPL(gunyah_vm_remove_resource_ticket);

static void gunyah_vm_add_resource(struct gunyah_vm *ghvm,
                                   struct gunyah_resource *ghrsc)
{
        struct gunyah_vm_resource_ticket *ticket;

        mutex_lock(&ghvm->resources_lock);
        list_for_each_entry(ticket, &ghvm->resource_tickets, vm_list) {
                if (ghrsc->type == ticket->resource_type &&
                    ghrsc->rm_label == ticket->label) {
                        if (ticket->populate(ticket, ghrsc))
                                list_add(&ghrsc->list, &ticket->resources);
                        else
                                list_add(&ghrsc->list, &ghvm->resources);
                        /* unconditional -- we prevent multiple identical
                         * resource tickets so there will not be some other
                         * ticket elsewhere in the list if populate() failed.
                         */
                        goto found;
                }
        }
        list_add(&ghrsc->list, &ghvm->resources);
found:
        mutex_unlock(&ghvm->resources_lock);
}

static void gunyah_vm_clean_resources(struct gunyah_vm *ghvm)
{
        struct gunyah_vm_resource_ticket *ticket, *titer;
        struct gunyah_resource *ghrsc, *riter;

        mutex_lock(&ghvm->resources_lock);
        if (!list_empty(&ghvm->resource_tickets)) {
                dev_warn(ghvm->parent, "Dangling resource tickets:\n");
                list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets,
                                         vm_list) {
                        dev_warn(ghvm->parent, " %pS\n", ticket->populate);
                        __gunyah_vm_remove_resource_ticket(ghvm, ticket);
                }
        }

        list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
                list_del(&ghrsc->list);
                gunyah_rm_free_resource(ghrsc);
        }
        mutex_unlock(&ghvm->resources_lock);
}

static int _gunyah_vm_io_handler_compare(const struct rb_node *node,
                                         const struct rb_node *parent)
{
        struct gunyah_vm_io_handler *n =
                container_of(node, struct gunyah_vm_io_handler, node);
        struct gunyah_vm_io_handler *p =
                container_of(parent, struct gunyah_vm_io_handler, node);

        if (n->addr < p->addr)
                return -1;
        if (n->addr > p->addr)
                return 1;
        if ((n->len && !p->len) || (!n->len && p->len))
                return 0;
        if (n->len < p->len)
                return -1;
        if (n->len > p->len)
                return 1;
        /* one of the io handlers doesn't have datamatch and the other does.
         * For purposes of comparison, that makes them identical since the
         * one that doesn't have datamatch will cover the same handler that
         * does.
         */
        if (n->datamatch != p->datamatch)
                return 0;
        if (n->data < p->data)
                return -1;
        if (n->data > p->data)
                return 1;
        return 0;
}

static int gunyah_vm_io_handler_compare(struct rb_node *node,
                                        const struct rb_node *parent)
{
        return _gunyah_vm_io_handler_compare(node, parent);
}

static int gunyah_vm_io_handler_find(const void *key,
                                     const struct rb_node *node)
{
        const struct gunyah_vm_io_handler *k = key;

        return _gunyah_vm_io_handler_compare(&k->node, node);
}

static struct gunyah_vm_io_handler *
gunyah_vm_mgr_find_io_hdlr(struct gunyah_vm *ghvm, u64 addr, u64 len, u64 data)
{
        struct gunyah_vm_io_handler key = {
                .addr = addr,
                .len = len,
                .datamatch = true,
                .data = data,
        };
        struct rb_node *node;

        node = rb_find(&key, &ghvm->mmio_handler_root,
                       gunyah_vm_io_handler_find);
        if (!node)
                return NULL;

        return container_of(node, struct gunyah_vm_io_handler, node);
}

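/**
 * gunyah_vm_mmio_write() - Dispatch a guest MMIO write to a registered handler
 * @ghvm: The VM that faulted
 * @addr: Guest physical address of the access
 * @len: Access width in bytes
 * @data: Value written by the guest
 *
 * Looks up an io handler registered for @addr/@len (with or without a
 * datamatch on @data) and invokes its write op.
 *
 * Return: the handler's result, or -ENOENT if no suitable handler is bound.
 */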
int gunyah_vm_mmio_write(struct gunyah_vm *ghvm, u64 addr, u32 len, u64 data)
{
        struct gunyah_vm_io_handler *io_hdlr = NULL;
        int ret;

        down_read(&ghvm->mmio_handler_lock);
        io_hdlr = gunyah_vm_mgr_find_io_hdlr(ghvm, addr, len, data);
        if (!io_hdlr || !io_hdlr->ops || !io_hdlr->ops->write) {
                ret = -ENOENT;
                goto out;
        }

        ret = io_hdlr->ops->write(io_hdlr, addr, len, data);

out:
        up_read(&ghvm->mmio_handler_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(gunyah_vm_mmio_write);

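/**
 * gunyah_vm_add_io_handler() - Register an MMIO write handler for a VM
 * @ghvm: The VM
 * @io_hdlr: Handler with addr/len (and optional datamatch) already filled in
 *
 * A datamatch handler must have a non-zero length no larger than
 * sizeof(io_hdlr->data).
 *
 * Return: 0 on success, -EINVAL for a bad datamatch length, -EEXIST if an
 * equivalent handler is already registered.
 */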
int gunyah_vm_add_io_handler(struct gunyah_vm *ghvm,
                             struct gunyah_vm_io_handler *io_hdlr)
{
        struct rb_node *found;

        if (io_hdlr->datamatch &&
            (!io_hdlr->len || io_hdlr->len > sizeof(io_hdlr->data)))
                return -EINVAL;

        down_write(&ghvm->mmio_handler_lock);
        found = rb_find_add(&io_hdlr->node, &ghvm->mmio_handler_root,
                            gunyah_vm_io_handler_compare);
        up_write(&ghvm->mmio_handler_lock);

        return found ? -EEXIST : 0;
}
EXPORT_SYMBOL_GPL(gunyah_vm_add_io_handler);

void gunyah_vm_remove_io_handler(struct gunyah_vm *ghvm,
                                 struct gunyah_vm_io_handler *io_hdlr)
{
        down_write(&ghvm->mmio_handler_lock);
        rb_erase(&io_hdlr->node, &ghvm->mmio_handler_root);
        up_write(&ghvm->mmio_handler_lock);
}
EXPORT_SYMBOL_GPL(gunyah_vm_remove_io_handler);

static int gunyah_vm_rm_notification_status(struct gunyah_vm *ghvm, void *data)
{
        struct gunyah_rm_vm_status_payload *payload = data;

        if (le16_to_cpu(payload->vmid) != ghvm->vmid)
                return NOTIFY_OK;

        /* All other state transitions are synchronous to a corresponding RM call */
        switch (payload->vm_status) {
        case GUNYAH_RM_VM_STATUS_RESET_FAILED:
                dev_warn(ghvm->parent, "VM: %u RESET failed with status %u\n",
                         ghvm->vmid, payload->vm_status);
                fallthrough;
        case GUNYAH_RM_VM_STATUS_RESET:
                down_write(&ghvm->status_lock);
                ghvm->vm_status = payload->vm_status;
                up_write(&ghvm->status_lock);
                wake_up(&ghvm->vm_status_wait);
                break;
        default:
                break;
        }

        return NOTIFY_DONE;
}

static int gunyah_vm_rm_notification_exited(struct gunyah_vm *ghvm, void *data)
{
        struct gunyah_rm_vm_exited_payload *payload = data;

        if (le16_to_cpu(payload->vmid) != ghvm->vmid)
                return NOTIFY_OK;

        down_write(&ghvm->status_lock);
        ghvm->vm_status = GUNYAH_RM_VM_STATUS_EXITED;
        ghvm->exit_info.type = le16_to_cpu(payload->exit_type);
        ghvm->exit_info.reason_size = le32_to_cpu(payload->exit_reason_size);
        memcpy(&ghvm->exit_info.reason, payload->exit_reason,
               min(GUNYAH_VM_MAX_EXIT_REASON_SIZE,
                   ghvm->exit_info.reason_size));
        up_write(&ghvm->status_lock);
        wake_up(&ghvm->vm_status_wait);

        return NOTIFY_DONE;
}

static int gunyah_vm_rm_notification(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct gunyah_vm *ghvm = container_of(nb, struct gunyah_vm, nb);

        switch (action) {
        case GUNYAH_RM_NOTIFICATION_VM_STATUS:
                return gunyah_vm_rm_notification_status(ghvm, data);
        case GUNYAH_RM_NOTIFICATION_VM_EXITED:
                return gunyah_vm_rm_notification_exited(ghvm, data);
        default:
                return NOTIFY_OK;
        }
}

static void gunyah_uevent_notify_change(unsigned int type, struct gunyah_vm *ghvm)
{
        struct kobj_uevent_env *env;

        env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
        if (!env)
                return;

        if (type == GUNYAH_EVENT_CREATE_VM)
                add_uevent_var(env, "EVENT=create");
        else if (type == GUNYAH_EVENT_DESTROY_VM) {
                add_uevent_var(env, "EVENT=destroy");
                add_uevent_var(env, "vm_exit=%d", ghvm->exit_info.type);
        }

        add_uevent_var(env, "vm_id=%hu", ghvm->vmid);
        env->envp[env->envp_idx++] = NULL;
        kobject_uevent_env(&ghvm->parent->kobj, KOBJ_CHANGE, env->envp);
        kfree(env);
}

static void gunyah_vm_stop(struct gunyah_vm *ghvm)
{
        int ret;

        if (ghvm->vm_status == GUNYAH_RM_VM_STATUS_RUNNING) {
                ret = gunyah_rm_vm_stop(ghvm->rm, ghvm->vmid);
                if (ret)
                        dev_warn(ghvm->parent, "Failed to stop VM: %d\n", ret);
        }

        wait_event(ghvm->vm_status_wait,
                   ghvm->vm_status != GUNYAH_RM_VM_STATUS_RUNNING);
}

static inline void setup_extent_ticket(struct gunyah_vm *ghvm,
                                       struct gunyah_vm_resource_ticket *ticket,
                                       u32 label)
{
        ticket->resource_type = GUNYAH_RESOURCE_TYPE_MEM_EXTENT;
        ticket->label = label;
        ticket->populate = gunyah_vm_resource_ticket_populate_noop;
        ticket->unpopulate = gunyah_vm_resource_ticket_unpopulate_noop;
        gunyah_vm_add_resource_ticket(ghvm, ticket);
}

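/*
 * Allocate and initialize a VM under the caller's mm: take references on the
 * RM and current->mm, set up the function/resource lists, MMIO handler tree,
 * binding/mm maple trees and boot-context xarray, register the address-space
 * and memory-extent resource tickets, and default to unauthenticated boot via
 * generic_vm_ops.
 */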
static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gunyah_rm *rm)
{
        struct gunyah_vm *ghvm;

        ghvm = kzalloc(sizeof(*ghvm), GFP_KERNEL);
        if (!ghvm)
                return ERR_PTR(-ENOMEM);

        ghvm->parent = gunyah_rm_get(rm);
        ghvm->vmid = GUNYAH_VMID_INVAL;
        ghvm->rm = rm;

        mmgrab(current->mm);
        ghvm->mm_s = current->mm;
        init_rwsem(&ghvm->status_lock);
        init_waitqueue_head(&ghvm->vm_status_wait);
        kref_init(&ghvm->kref);
        ghvm->vm_status = GUNYAH_RM_VM_STATUS_NO_STATE;

        INIT_LIST_HEAD(&ghvm->functions);
        mutex_init(&ghvm->fn_lock);
        mutex_init(&ghvm->resources_lock);
        INIT_LIST_HEAD(&ghvm->resources);
        INIT_LIST_HEAD(&ghvm->resource_tickets);
        xa_init(&ghvm->boot_context);

        init_rwsem(&ghvm->mmio_handler_lock);
        ghvm->mmio_handler_root = RB_ROOT;

        mt_init(&ghvm->mm);
        mt_init(&ghvm->bindings);
        init_rwsem(&ghvm->bindings_lock);

        ghvm->addrspace_ticket.resource_type = GUNYAH_RESOURCE_TYPE_ADDR_SPACE;
        ghvm->addrspace_ticket.label = GUNYAH_VM_ADDRSPACE_LABEL;
        ghvm->addrspace_ticket.populate =
                gunyah_vm_resource_ticket_populate_noop;
        ghvm->addrspace_ticket.unpopulate =
                gunyah_vm_resource_ticket_unpopulate_noop;
        gunyah_vm_add_resource_ticket(ghvm, &ghvm->addrspace_ticket);

        setup_extent_ticket(ghvm, &ghvm->host_private_extent_ticket,
                            GUNYAH_VM_MEM_EXTENT_HOST_PRIVATE_LABEL);
        setup_extent_ticket(ghvm, &ghvm->host_shared_extent_ticket,
                            GUNYAH_VM_MEM_EXTENT_HOST_SHARED_LABEL);
        setup_extent_ticket(ghvm, &ghvm->guest_private_extent_ticket,
                            GUNYAH_VM_MEM_EXTENT_GUEST_PRIVATE_LABEL);
        setup_extent_ticket(ghvm, &ghvm->guest_shared_extent_ticket,
                            GUNYAH_VM_MEM_EXTENT_GUEST_SHARED_LABEL);

        ghvm->auth = GUNYAH_RM_VM_AUTH_NONE;
        ghvm->auth_vm_mgr_ops = &generic_vm_ops;

        return ghvm;
}

static long gunyah_vm_set_boot_context(struct gunyah_vm *ghvm,
                                       struct gunyah_vm_boot_context *boot_ctx)
{
        u8 reg_set, reg_index; /* to check values are reasonable */
        u64 *value;
        int ret;

        if (boot_ctx->reg & ~BOOT_CONTEXT_REG_MASK)
                return -EINVAL;

        reg_set = (boot_ctx->reg >> GUNYAH_VM_BOOT_CONTEXT_REG_SHIFT) & 0xff;
        reg_index = boot_ctx->reg & 0xff;

        switch (reg_set) {
        case REG_SET_X:
                if (reg_index > 31)
                        return -EINVAL;
                break;
        case REG_SET_PC:
                if (reg_index)
                        return -EINVAL;
                break;
        case REG_SET_SP:
                if (reg_index > 2)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        ret = down_read_interruptible(&ghvm->status_lock);
        if (ret)
                return ret;

        if (ghvm->vm_status != GUNYAH_RM_VM_STATUS_NO_STATE) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * allocate memory for the value because xarray supports [0, LONG_MAX]
         * for values and we want [0, ULONG_MAX]
         */
        value = kmalloc(sizeof(*value), GFP_KERNEL);
        if (!value) {
                ret = -ENOMEM;
                goto out;
        }
        *value = boot_ctx->value;

        ret = xa_err(xa_store(&ghvm->boot_context, boot_ctx->reg,
                              value, GFP_KERNEL));
        if (ret)
                kfree(value);
out:
        up_read(&ghvm->status_lock);
        return ret;
}

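/*
 * Transition the VM from NO_STATE to RUNNING: register for RM notifications,
 * allocate a VMID, build and share the config image (and optional firmware)
 * parcels, issue VM_CONFIGURE/VM_INIT, claim the hypervisor resources RM
 * reports, then ask RM to start the VM. Takes status_lock for writing itself.
 */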
static int gunyah_vm_start(struct gunyah_vm *ghvm)
{
        struct gunyah_rm_hyp_resources *resources;
        struct gunyah_resource *ghrsc;
        int ret, i, n;
        u16 vmid = 0;

        down_write(&ghvm->status_lock);
        if (ghvm->vm_status != GUNYAH_RM_VM_STATUS_NO_STATE) {
                up_write(&ghvm->status_lock);
                return 0;
        }

        ghvm->nb.notifier_call = gunyah_vm_rm_notification;
        ret = gunyah_rm_notifier_register(ghvm->rm, &ghvm->nb);
        if (ret)
                goto err;

        ret = gunyah_vm_pre_alloc_vmid(ghvm);
        if (ret)
                vmid = ret;

        ret = gunyah_rm_alloc_vmid(ghvm->rm, vmid);
        if (ret < 0)
                goto err_rm_notifier_unregister;

        ghvm->vmid = vmid ? vmid : ret;
        gunyah_uevent_notify_change(GUNYAH_EVENT_CREATE_VM, ghvm);

        ret = gunyah_vm_pre_vm_configure(ghvm);
        if (ret)
                goto err_dealloc_vmid;

        if (ghvm->fw.config.size > 0) {
                ghvm->fw.parcel.start = gunyah_gpa_to_gfn(ghvm->fw.config.guest_phys_addr);
                ghvm->fw.parcel.pages = gunyah_gpa_to_gfn(ghvm->fw.config.size);
                ret = gunyah_share_parcel(ghvm, &ghvm->fw.parcel,
                                          &ghvm->fw.parcel.start,
                                          &ghvm->fw.parcel.pages);
                if (ret) {
                        dev_warn(ghvm->parent,
                                 "Failed to share parcel for the fw: %d\n", ret);
                        goto err_dealloc_vmid;
                }
        }

        ghvm->vm_status = GUNYAH_RM_VM_STATUS_LOAD;

        ret = gunyah_share_parcel(ghvm, &ghvm->config_image.parcel,
                                  &ghvm->config_image.parcel.start,
                                  &ghvm->config_image.parcel.pages);
        if (ret) {
                dev_warn(ghvm->parent,
                         "Failed to allocate parcel for the config image: %d\n", ret);
                goto err;
        }

        ret = gunyah_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth,
                                     ghvm->config_image.parcel.parcel.mem_handle,
                                     ghvm->config_image.image_offset,
                                     ghvm->config_image.image_size,
                                     ghvm->config_image.dtb_offset,
                                     ghvm->config_image.dtb_size);
        if (ret) {
                dev_warn(ghvm->parent, "Failed to configure VM: %d\n", ret);
                goto err;
        }

        ret = gunyah_vm_authenticate(ghvm);
        if (ret)
                goto err;

        if (ghvm->fw.config.size > 0) {
                ret = gunyah_rm_vm_set_firmware_mem(ghvm->rm, ghvm->vmid, &ghvm->fw.parcel.parcel,
                                                    ghvm->fw.config.guest_phys_addr - (ghvm->fw.parcel.start << PAGE_SHIFT),
                                                    ghvm->fw.config.size);
                if (ret) {
                        pr_warn("Failed to configure firmware\n");
                        goto err;
                }
        }

        ret = gunyah_vm_pre_vm_init(ghvm);
        if (ret)
                goto err;

        ret = gunyah_rm_vm_init(ghvm->rm, ghvm->vmid);
        if (ret) {
                ghvm->vm_status = GUNYAH_RM_VM_STATUS_INIT_FAILED;
                dev_warn(ghvm->parent, "Failed to initialize VM: %d\n", ret);
                goto err;
        }
        ghvm->vm_status = GUNYAH_RM_VM_STATUS_READY;

        ret = gunyah_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
        if (ret) {
                dev_warn(ghvm->parent,
                         "Failed to get hypervisor resources for VM: %d\n",
                         ret);
                goto err;
        }

        for (i = 0, n = le32_to_cpu(resources->n_entries); i < n; i++) {
                ghrsc = gunyah_rm_alloc_resource(ghvm->rm,
                                                 &resources->entries[i]);
                if (!ghrsc) {
                        ret = -ENOMEM;
                        goto err;
                }

                gunyah_vm_add_resource(ghvm, ghrsc);
        }

        ret = gunyah_vm_pre_vm_start(ghvm);
        if (ret)
                goto err;

        ret = gunyah_rm_vm_start(ghvm->rm, ghvm->vmid);
        if (ret) {
                dev_warn(ghvm->parent, "Failed to start VM: %d\n", ret);
                goto err;
        }

        ghvm->vm_status = GUNYAH_RM_VM_STATUS_RUNNING;
        up_write(&ghvm->status_lock);
        return ret;
err_dealloc_vmid:
        ret = gunyah_rm_dealloc_vmid(ghvm->rm, ghvm->vmid);
        if (ret)
                dev_warn(ghvm->parent,
                         "Failed to deallocate vmid: %d\n", ret);
err_rm_notifier_unregister:
        gunyah_rm_notifier_unregister(ghvm->rm, &ghvm->nb);
err:
        /* gunyah_vm_free will handle releasing resources and reclaiming memory */
        gunyah_vm_start_fail(ghvm);
        up_write(&ghvm->status_lock);
        return ret;
}

static int gunyah_vm_ensure_started(struct gunyah_vm *ghvm)
{
        int ret;

        ret = down_read_interruptible(&ghvm->status_lock);
        if (ret)
                return ret;

        /* Unlikely because VM is typically started */
        if (unlikely(ghvm->vm_status == GUNYAH_RM_VM_STATUS_NO_STATE)) {
                up_read(&ghvm->status_lock);
                ret = gunyah_vm_start(ghvm);
                if (ret)
                        return ret;
                ret = down_read_interruptible(&ghvm->status_lock);
                if (ret)
                        return ret;
        }

        /* Unlikely because VM is typically running */
        if (unlikely(ghvm->vm_status != GUNYAH_RM_VM_STATUS_RUNNING))
                ret = -ENODEV;

        up_read(&ghvm->status_lock);
        return ret;
}

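/*
 * Illustrative userspace flow (a sketch, not part of this driver): the VM fd
 * comes from GUNYAH_CREATE_VM on the Gunyah device node, guest memory is
 * registered first, then the DTB location is set and the VM started. Field
 * names other than those used below should be taken from
 * <uapi/linux/gunyah.h>; the guest address is a made-up example value.
 *
 *   struct gunyah_userspace_memory_region region = {
 *           .flags = GUNYAH_MEM_ALLOW_READ | GUNYAH_MEM_ALLOW_WRITE,
 *           ...
 *   };
 *   struct gunyah_vm_dtb_config dtb = {
 *           .guest_phys_addr = 0x81000000,
 *           .size = dtb_size,
 *   };
 *
 *   ioctl(vm_fd, GH_VM_SET_USER_MEM_REGION, &region);
 *   ioctl(vm_fd, GUNYAH_VM_SET_DTB_CONFIG, &dtb);
 *   ioctl(vm_fd, GUNYAH_VM_START, 0);
 */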
static long gunyah_vm_ioctl(struct file *filp, unsigned int cmd,
                            unsigned long arg)
{
        struct gunyah_vm *ghvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;
        bool lend = false;

        switch (cmd) {
        case GUNYAH_VM_SET_DTB_CONFIG: {
                struct gunyah_vm_dtb_config dtb_config;

                if (copy_from_user(&dtb_config, argp, sizeof(dtb_config)))
                        return -EFAULT;

                if (overflows_type(dtb_config.guest_phys_addr + dtb_config.size,
                                   u64))
                        return -EOVERFLOW;

                ghvm->dtb.config = dtb_config;

                r = 0;
                break;
        }
        case GH_VM_ANDROID_SET_FW_CONFIG: {
                struct gunyah_vm_firmware_config fw_config;

                if (copy_from_user(&fw_config, argp, sizeof(fw_config)))
                        return -EFAULT;

                if (overflows_type(fw_config.guest_phys_addr + fw_config.size,
                                   u64))
                        return -EOVERFLOW;

                ghvm->fw.config = fw_config;
                /* Set new auth type only if type was not set until now */
                if (ghvm->auth == GUNYAH_RM_VM_AUTH_NONE)
                        ghvm->auth = GUNYAH_RM_VM_AUTH_QCOM_ANDROID_PVM;

                r = 0;
                break;
        }
        case GUNYAH_VM_START: {
                r = gunyah_vm_ensure_started(ghvm);
                break;
        }
        case GUNYAH_VM_ADD_FUNCTION: {
                struct gunyah_fn_desc f;

                if (copy_from_user(&f, argp, sizeof(f)))
                        return -EFAULT;

                r = gunyah_vm_add_function_instance(ghvm, &f);
                break;
        }
        case GUNYAH_VM_REMOVE_FUNCTION: {
                struct gunyah_fn_desc f;

                if (copy_from_user(&f, argp, sizeof(f)))
                        return -EFAULT;

                r = gunyah_vm_rm_function_instance(ghvm, &f);
                break;
        }
        case GH_VM_ANDROID_LEND_USER_MEM:
                lend = true;
                fallthrough;
        case GH_VM_SET_USER_MEM_REGION: {
                struct gunyah_userspace_memory_region region;

                /* only allow owner task to add memory */
                if (ghvm->mm_s != current->mm)
                        return -EPERM;
                if (copy_from_user(&region, argp, sizeof(region)))
                        return -EFAULT;

                if (region.flags & ~(GUNYAH_MEM_ALLOW_READ |
                                     GUNYAH_MEM_ALLOW_WRITE |
                                     GUNYAH_MEM_ALLOW_EXEC))
                        return -EINVAL;

                r = gunyah_vm_binding_alloc(ghvm, &region, lend);
                break;
        }
        case GH_VM_RECLAIM_REGION: {
                struct gunyah_address_range range;

                /* only allow owner task to remove memory */
                if (ghvm->mm_s != current->mm)
                        return -EPERM;
                if (copy_from_user(&range, argp, sizeof(range)))
                        return -EFAULT;
                if (!PAGE_ALIGNED(range.size) || !PAGE_ALIGNED(range.guest_phys_addr))
                        return -EINVAL;

                r = gunyah_vm_reclaim_range(ghvm,
                                            gunyah_gpa_to_gfn(range.guest_phys_addr),
                                            gunyah_gpa_to_gfn(range.size) - 1);
                break;
        }
        case GH_VM_ANDROID_MAP_CMA_MEM: {
                struct gunyah_map_cma_mem_args cma_mem;

                /* only allow owner task to add memory */
                if (ghvm->mm_s != current->mm)
                        return -EPERM;

                if (copy_from_user(&cma_mem, argp, sizeof(cma_mem)))
                        return -EFAULT;

                r = gunyah_vm_binding_cma_alloc(ghvm, &cma_mem);
                break;
        }
        case GUNYAH_VM_SET_BOOT_CONTEXT: {
                struct gunyah_vm_boot_context boot_ctx;

                if (copy_from_user(&boot_ctx, argp, sizeof(boot_ctx)))
                        return -EFAULT;

                return gunyah_vm_set_boot_context(ghvm, &boot_ctx);
        }
        case GH_VM_ANDROID_SET_AUTH_TYPE: {
                struct gunyah_auth_desc auth_desc;

                if (copy_from_user(&auth_desc, argp, sizeof(auth_desc)))
                        return -EFAULT;

                return gunyah_vm_set_auth_type(ghvm, &auth_desc);
        }
        default:
                r = -ENOTTY;
                break;
        }

        return r;
}

int __must_check gunyah_vm_get(struct gunyah_vm *ghvm)
{
        return kref_get_unless_zero(&ghvm->kref);
}
EXPORT_SYMBOL_GPL(gunyah_vm_get);

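/*
 * Final teardown, called when the last reference is dropped: stop the VM if
 * it is still running, remove function instances, reclaim guest memory, drop
 * the built-in resource tickets, reset the VM through RM, free remaining
 * resources and bindings, send the destroy uevent, and release the VMID, RM
 * and mm references.
 */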
static void _gunyah_vm_put(struct kref *kref)
{
        struct gunyah_vm *ghvm = container_of(kref, struct gunyah_vm, kref);
        struct gunyah_vm_binding *b;
        unsigned long index = 0;
        void *entry;
        int ret;

        /*
         * We might race with a VM exit notification, but that's ok:
         * gunyah_rm_vm_stop() will just return right away.
         */
        if (ghvm->vm_status == GUNYAH_RM_VM_STATUS_RUNNING)
                gunyah_vm_stop(ghvm);

        gunyah_vm_remove_functions(ghvm);

        /*
         * If this fails, we're going to lose the memory for good, which is
         * BUG_ON-worthy but not unrecoverable (we just lose memory).
         * This call should always succeed, though, because the VM is not
         * running and RM will let us reclaim all the memory.
         */
        WARN_ON(gunyah_vm_reclaim_range(ghvm, 0, U64_MAX));
        WARN_ON(!mtree_empty(&ghvm->mm));
        mtree_destroy(&ghvm->mm);

        /* clang-format off */
        gunyah_vm_remove_resource_ticket(ghvm, &ghvm->addrspace_ticket);
        gunyah_vm_remove_resource_ticket(ghvm, &ghvm->host_shared_extent_ticket);
        gunyah_vm_remove_resource_ticket(ghvm, &ghvm->host_private_extent_ticket);
        gunyah_vm_remove_resource_ticket(ghvm, &ghvm->guest_shared_extent_ticket);
        gunyah_vm_remove_resource_ticket(ghvm, &ghvm->guest_private_extent_ticket);
        /* clang-format on */

        ret = gunyah_vm_pre_vm_reset(ghvm);
        if (ret)
                dev_err(ghvm->parent, "Failed pre reset the vm: %d\n", ret);

        gunyah_vm_clean_resources(ghvm);

        if (ghvm->vm_status == GUNYAH_RM_VM_STATUS_EXITED ||
            ghvm->vm_status == GUNYAH_RM_VM_STATUS_READY ||
            ghvm->vm_status == GUNYAH_RM_VM_STATUS_INIT_FAILED) {
                ret = gunyah_rm_vm_reset(ghvm->rm, ghvm->vmid);
                /* clang-format off */
                if (!ret)
                        wait_event(ghvm->vm_status_wait,
                                   (ghvm->vm_status == GUNYAH_RM_VM_STATUS_RESET) ||
                                   (ghvm->vm_status == GUNYAH_RM_VM_STATUS_RESET_FAILED));
                else
                        dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret);

                ret = gunyah_vm_post_vm_reset(ghvm);
                if (ret)
                        dev_err(ghvm->parent, "Failed post reset the vm: %d\n", ret);
                /* clang-format on */
        }

        WARN_ON(gunyah_reclaim_parcels(ghvm, 0, ULONG_MAX));
        down_write(&ghvm->bindings_lock);
        mt_for_each(&ghvm->bindings, b, index, ULONG_MAX) {
                mtree_erase(&ghvm->bindings, gunyah_gpa_to_gfn(b->guest_phys_addr));
                kfree(b);
        }
        up_write(&ghvm->bindings_lock);
        WARN_ON(!mtree_empty(&ghvm->bindings));
        mtree_destroy(&ghvm->bindings);
        gunyah_uevent_notify_change(GUNYAH_EVENT_DESTROY_VM, ghvm);

        if (ghvm->vm_status > GUNYAH_RM_VM_STATUS_NO_STATE) {
                gunyah_rm_notifier_unregister(ghvm->rm, &ghvm->nb);

                ret = gunyah_rm_dealloc_vmid(ghvm->rm, ghvm->vmid);
                if (ret)
                        dev_warn(ghvm->parent,
                                 "Failed to deallocate vmid: %d\n", ret);
        }

        xa_for_each(&ghvm->boot_context, index, entry)
                kfree(entry);

        gunyah_put_auth_vm_mgr(ghvm);
        xa_destroy(&ghvm->boot_context);
        gunyah_rm_put(ghvm->rm);
        mmdrop(ghvm->mm_s);
        kfree(ghvm);
}

void gunyah_vm_put(struct gunyah_vm *ghvm)
{
        kref_put(&ghvm->kref, _gunyah_vm_put);
}
EXPORT_SYMBOL_GPL(gunyah_vm_put);

static int gunyah_vm_release(struct inode *inode, struct file *filp)
{
        struct gunyah_vm *ghvm = filp->private_data;

        trace_android_rvh_gh_vm_release(ghvm->vmid, ghvm);
        gunyah_vm_put(ghvm);
        return 0;
}

static const struct file_operations gunyah_vm_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = gunyah_vm_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .release = gunyah_vm_release,
        .llseek = noop_llseek,
};

static long gunyah_dev_ioctl_create_vm(struct gunyah_rm *rm, unsigned long arg)
{
        struct gunyah_vm *ghvm;
        struct file *file;
        int fd, err;

        /* arg reserved for future use. */
        if (arg)
                return -EINVAL;

        ghvm = gunyah_vm_alloc(rm);
        if (IS_ERR(ghvm))
                return PTR_ERR(ghvm);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                err = fd;
                goto err_destroy_vm;
        }

        file = anon_inode_getfile("gunyah-vm", &gunyah_vm_fops, ghvm, O_RDWR);
        if (IS_ERR(file)) {
                err = PTR_ERR(file);
                goto err_put_fd;
        }

        fd_install(fd, file);

        return fd;

err_put_fd:
        put_unused_fd(fd);
err_destroy_vm:
        gunyah_rm_put(ghvm->rm);
        kfree(ghvm);
        return err;
}

long gunyah_dev_vm_mgr_ioctl(struct gunyah_rm *rm, unsigned int cmd,
                             unsigned long arg)
{
        switch (cmd) {
        case GUNYAH_CREATE_VM:
                return gunyah_dev_ioctl_create_vm(rm, arg);
        default:
                return -ENOTTY;
        }
}