1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 */
5
6 #include <linux/error-injection.h>
7
8 #include "rsc_mgr.h"
9
/* Message IDs: Memory Management */
/* 32-bit RPC message identifiers defined by the Gunyah Resource Manager protocol */
#define GUNYAH_RM_RPC_MEM_LEND 0x51000012
#define GUNYAH_RM_RPC_MEM_SHARE 0x51000013
#define GUNYAH_RM_RPC_MEM_RECLAIM 0x51000015
#define GUNYAH_RM_RPC_MEM_APPEND 0x51000018

/* Message IDs: VM Management */
/* clang-format off */
#define GUNYAH_RM_RPC_VM_ALLOC_VMID 0x56000001
#define GUNYAH_RM_RPC_VM_DEALLOC_VMID 0x56000002
#define GUNYAH_RM_RPC_VM_START 0x56000004
#define GUNYAH_RM_RPC_VM_STOP 0x56000005
#define GUNYAH_RM_RPC_VM_RESET 0x56000006
#define GUNYAH_RM_RPC_VM_CONFIG_IMAGE 0x56000009
#define GUNYAH_RM_RPC_VM_AUTH_IMAGE 0x5600000A
#define GUNYAH_RM_RPC_VM_INIT 0x5600000B
#define GUNYAH_RM_RPC_VM_GET_HYP_RESOURCES 0x56000020
#define GUNYAH_RM_RPC_VM_GET_VMID 0x56000024
#define GUNYAH_RM_RPC_VM_SET_BOOT_CONTEXT 0x56000031
#define GUNYAH_RM_RPC_VM_SET_FIRMWARE_MEM 0x56000032
#define GUNYAH_RM_RPC_VM_SET_DEMAND_PAGING 0x56000033
#define GUNYAH_RM_RPC_VM_SET_ADDRESS_LAYOUT 0x56000034
/* clang-format on */
33
/* Request payload shared by every RM call that takes only a VMID. */
struct gunyah_rm_vm_common_vmid_req {
	__le16 vmid;		/* target VM identifier, little-endian */
	__le16 _padding;	/* reserved */
} __packed;
38
/* Call: MEM_LEND, MEM_SHARE */
/* Maximum number of memory entries in one LEND/SHARE/APPEND message */
#define GUNYAH_RM_MAX_MEM_ENTRIES 512

/* Set when additional entries will follow via MEM_APPEND */
#define GUNYAH_MEM_SHARE_REQ_FLAGS_APPEND BIT(1)

/* Fixed header preceding the ACL/memory/attribute sections of the request */
struct gunyah_rm_mem_share_req_header {
	u8 mem_type;	/* type of parcel memory (normal/IO) */
	u8 _padding0;
	u8 flags;	/* GUNYAH_MEM_SHARE_REQ_FLAGS_* */
	u8 _padding1;
	__le32 label;	/* caller-chosen label identifying the parcel */
} __packed;
51
/* ACL section: which VMs get what access to the parcel; follows the header */
struct gunyah_rm_mem_share_req_acl_section {
	__le16 n_entries;	/* count of elements in entries[] */
	__le16 _padding;
	struct gunyah_rm_mem_acl_entry entries[];
} __packed;
57
/* Memory section: list of physical regions making up the parcel; follows ACL */
struct gunyah_rm_mem_share_req_mem_section {
	__le16 n_entries;	/* count of elements in entries[] */
	__le16 _padding;
	struct gunyah_rm_mem_entry entries[];
} __packed;
63
/* Call: MEM_RELEASE */
struct gunyah_rm_mem_release_req {
	__le32 mem_handle;	/* handle returned by MEM_LEND/MEM_SHARE */
	u8 flags; /* currently not used */
	u8 _padding0;
	__le16 _padding1;
} __packed;
71
/* Call: MEM_APPEND */
/* Set on the final MEM_APPEND message of a sequence */
#define GUNYAH_MEM_APPEND_REQ_FLAGS_END BIT(0)

/* Header preceding a mem section of additional entries for an open parcel */
struct gunyah_rm_mem_append_req_header {
	__le32 mem_handle;	/* handle of the parcel being appended to */
	u8 flags;		/* GUNYAH_MEM_APPEND_REQ_FLAGS_* */
	u8 _padding0;
	__le16 _padding1;
} __packed;
81
/* Call: VM_ALLOC */
/* Response payload when RM dynamically assigns a VMID */
struct gunyah_rm_vm_alloc_vmid_resp {
	__le16 vmid;	/* VMID allocated by RM */
	__le16 _padding;
} __packed;
87
/* Call: VM_STOP */
#define GUNYAH_RM_VM_STOP_FLAG_FORCE_STOP BIT(0)

#define GUNYAH_RM_VM_STOP_REASON_FORCE_STOP 3

struct gunyah_rm_vm_stop_req {
	__le16 vmid;		/* VM to stop */
	u8 flags;		/* GUNYAH_RM_VM_STOP_FLAG_* */
	u8 _padding;
	__le32 stop_reason;	/* GUNYAH_RM_VM_STOP_REASON_* */
} __packed;
99
/* Call: VM_AUTH_IMAGE */
/* Header followed by n_params authentication parameter entries */
struct gunyah_rm_vm_authenticate_req_header {
	__le16 vmid;		/* VM whose image is being authenticated */
	__le16 n_params;	/* number of parameter entries that follow */
} __packed;
105
/* Call: VM_CONFIG_IMAGE */
struct gunyah_rm_vm_config_image_req {
	__le16 vmid;		/* VM being configured */
	__le16 auth_mech;	/* enum gunyah_rm_vm_auth_mechanism */
	__le32 mem_handle;	/* parcel containing the VM image */
	__le64 image_offset;	/* image start, relative to parcel start */
	__le64 image_size;	/* size of the VM image */
	__le64 dtb_offset;	/* devicetree start, relative to parcel start */
	__le64 dtb_size;	/* maximum devicetree size */
} __packed;
116
/* Call: VM_SET_BOOT_CONTEXT */
struct gunyah_rm_vm_set_boot_context_req {
	__le16 vmid;		/* VM whose boot context is set */
	u8 reg_set;		/* register set selector */
	u8 reg_index;		/* index within the register set */
	__le32 _padding;
	__le64 value;		/* value to load into the register */
} __packed;
125
/* Call: VM_SET_DEMAND_PAGING */
struct gunyah_rm_vm_set_demand_paging_req {
	__le16 vmid;		/* VM to enable demand paging for */
	__le16 _padding;
	__le32 range_count;	/* number of elements in ranges[] */
	DECLARE_FLEX_ARRAY(struct gunyah_rm_mem_entry, ranges);
} __packed;
133
/* Call: VM_SET_ADDRESS_LAYOUT */
struct gunyah_rm_vm_set_address_layout_req {
	__le16 vmid;		/* VM whose address layout is set */
	__le16 _padding;
	__le32 range_id;	/* enum gunyah_rm_range_id: which image/range */
	__le64 range_base;	/* base address of the range */
	__le64 range_size;	/* size of the range */
} __packed;
142
/* Call: VM_SET_FIRMWARE_MEM */
struct gunyah_vm_set_firmware_mem_req {
	__le16 vmid;		/* VM the firmware belongs to */
	__le16 reserved;
	__le32 mem_handle;	/* parcel to load the firmware into */
	__le64 fw_offset;	/* firmware offset within the parcel */
	__le64 fw_size;		/* maximum firmware size */
} __packed;
151
152 /*
153 * Several RM calls take only a VMID as a parameter and give only standard
154 * response back. Deduplicate boilerplate code by using this common call.
155 */
gunyah_rm_common_vmid_call(struct gunyah_rm * rm,u32 message_id,u16 vmid)156 static int gunyah_rm_common_vmid_call(struct gunyah_rm *rm, u32 message_id,
157 u16 vmid)
158 {
159 struct gunyah_rm_vm_common_vmid_req req_payload = {
160 .vmid = cpu_to_le16(vmid),
161 };
162
163 return gunyah_rm_call(rm, message_id, &req_payload, sizeof(req_payload),
164 NULL, NULL);
165 }
166
/*
 * Append additional memory entries to an in-progress memory parcel.
 * Entries are sent in chunks of at most GUNYAH_RM_MAX_MEM_ENTRIES; the
 * final chunk carries the END flag so RM closes the parcel.
 */
static int gunyah_rm_mem_append(struct gunyah_rm *rm, u32 mem_handle,
				struct gunyah_rm_mem_entry *entries,
				size_t n_entries)
{
	struct gunyah_rm_mem_append_req_header *req __free(kfree) = NULL;
	struct gunyah_rm_mem_share_req_mem_section *mem;
	int ret = 0;

	/* One buffer sized for the largest chunk, reused for every message */
	req = kzalloc(sizeof(*req) + struct_size(mem, entries, GUNYAH_RM_MAX_MEM_ENTRIES),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->mem_handle = cpu_to_le32(mem_handle);
	mem = (void *)(req + 1);

	while (n_entries) {
		size_t chunk = n_entries > GUNYAH_RM_MAX_MEM_ENTRIES ?
				       GUNYAH_RM_MAX_MEM_ENTRIES :
				       n_entries;

		/* END flag goes on the message that carries the tail */
		req->flags = (chunk == n_entries) ?
				     GUNYAH_MEM_APPEND_REQ_FLAGS_END :
				     0;

		mem->n_entries = cpu_to_le16(chunk);
		memcpy(mem->entries, entries, sizeof(*entries) * chunk);

		ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_MEM_APPEND, req,
				     sizeof(*req) + struct_size(mem, entries, chunk),
				     NULL, NULL);
		if (ret)
			break;

		entries += chunk;
		n_entries -= chunk;
	}

	return ret;
}
208
209 /**
210 * gunyah_rm_mem_share() - Share memory with other virtual machines.
211 * @rm: Handle to a Gunyah resource manager
212 * @p: Information about the memory to be shared.
213 *
214 * Sharing keeps Linux's access to the memory while the memory parcel is shared.
215 */
gunyah_rm_mem_share(struct gunyah_rm * rm,struct gunyah_rm_mem_parcel * p)216 int gunyah_rm_mem_share(struct gunyah_rm *rm, struct gunyah_rm_mem_parcel *p)
217 {
218 u32 message_id = p->n_acl_entries == 1 ? GUNYAH_RM_RPC_MEM_LEND :
219 GUNYAH_RM_RPC_MEM_SHARE;
220 size_t msg_size, initial_mem_entries = p->n_mem_entries, resp_size;
221 struct gunyah_rm_mem_share_req_acl_section *acl;
222 struct gunyah_rm_mem_share_req_mem_section *mem;
223 struct gunyah_rm_mem_share_req_header *req_header;
224 size_t acl_size, mem_size;
225 u32 *attr_section;
226 bool need_append = false;
227 __le32 *resp;
228 void *msg;
229 int ret;
230
231 if (!p->acl_entries || !p->n_acl_entries || !p->mem_entries ||
232 !p->n_mem_entries || p->n_acl_entries > U8_MAX ||
233 p->mem_handle != GUNYAH_MEM_HANDLE_INVAL)
234 return -EINVAL;
235
236 if (initial_mem_entries > GUNYAH_RM_MAX_MEM_ENTRIES) {
237 initial_mem_entries = GUNYAH_RM_MAX_MEM_ENTRIES;
238 need_append = true;
239 }
240
241 acl_size = struct_size(acl, entries, p->n_acl_entries);
242 mem_size = struct_size(mem, entries, initial_mem_entries);
243
244 /* The format of the message goes:
245 * request header
246 * ACL entries (which VMs get what kind of access to this memory parcel)
247 * Memory entries (list of memory regions to share)
248 * Memory attributes (currently unused, we'll hard-code the size to 0)
249 */
250 msg_size = sizeof(struct gunyah_rm_mem_share_req_header) + acl_size +
251 mem_size +
252 sizeof(u32); /* for memory attributes, currently unused */
253
254 msg = kzalloc(msg_size, GFP_KERNEL);
255 if (!msg)
256 return -ENOMEM;
257
258 ret = gunyah_rm_platform_pre_mem_share(rm, p);
259 if (ret) {
260 kfree(msg);
261 return ret;
262 }
263
264 req_header = msg;
265 acl = (void *)req_header + sizeof(*req_header);
266 mem = (void *)acl + acl_size;
267 attr_section = (void *)mem + mem_size;
268
269 req_header->mem_type = p->mem_type;
270 if (need_append)
271 req_header->flags |= GUNYAH_MEM_SHARE_REQ_FLAGS_APPEND;
272 req_header->label = cpu_to_le32(p->label);
273
274 acl->n_entries = cpu_to_le32(p->n_acl_entries);
275 memcpy(acl->entries, p->acl_entries,
276 flex_array_size(acl, entries, p->n_acl_entries));
277
278 mem->n_entries = cpu_to_le16(initial_mem_entries);
279 memcpy(mem->entries, p->mem_entries,
280 flex_array_size(mem, entries, initial_mem_entries));
281
282 /* Set n_entries for memory attribute section to 0 */
283 *attr_section = 0;
284
285 ret = gunyah_rm_call(rm, message_id, msg, msg_size, (void **)&resp,
286 &resp_size);
287 kfree(msg);
288
289 if (ret) {
290 gunyah_rm_platform_post_mem_reclaim(rm, p);
291 return ret;
292 }
293
294 p->mem_handle = le32_to_cpu(*resp);
295 kfree(resp);
296
297 if (need_append) {
298 ret = gunyah_rm_mem_append(
299 rm, p->mem_handle, &p->mem_entries[initial_mem_entries],
300 p->n_mem_entries - initial_mem_entries);
301 if (ret) {
302 gunyah_rm_mem_reclaim(rm, p);
303 p->mem_handle = GUNYAH_MEM_HANDLE_INVAL;
304 }
305 }
306
307 return ret;
308 }
309 ALLOW_ERROR_INJECTION(gunyah_rm_mem_share, ERRNO);
310
311 /**
312 * gunyah_rm_mem_reclaim() - Reclaim a memory parcel
313 * @rm: Handle to a Gunyah resource manager
314 * @parcel: Information about the memory to be reclaimed.
315 *
316 * RM maps the associated memory back into the stage-2 page tables of the owner VM.
317 */
gunyah_rm_mem_reclaim(struct gunyah_rm * rm,struct gunyah_rm_mem_parcel * parcel)318 int gunyah_rm_mem_reclaim(struct gunyah_rm *rm,
319 struct gunyah_rm_mem_parcel *parcel)
320 {
321 struct gunyah_rm_mem_release_req req = {
322 .mem_handle = cpu_to_le32(parcel->mem_handle),
323 };
324 int ret;
325
326 ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_MEM_RECLAIM, &req, sizeof(req),
327 NULL, NULL);
328 /* Only call platform mem reclaim hooks if we reclaimed the memory */
329 if (ret)
330 return ret;
331
332 return gunyah_rm_platform_post_mem_reclaim(rm, parcel);
333 }
334 ALLOW_ERROR_INJECTION(gunyah_rm_mem_reclaim, ERRNO);
335
336 /**
337 * gunyah_rm_alloc_vmid() - Allocate a new VM in Gunyah. Returns the VM identifier.
338 * @rm: Handle to a Gunyah resource manager
339 * @vmid: Use 0 to dynamically allocate a VM. A reserved VMID can be supplied
340 * to request allocation of a platform-defined VM.
341 *
342 * Return: the allocated VMID or negative value on error
343 */
gunyah_rm_alloc_vmid(struct gunyah_rm * rm,u16 vmid)344 int gunyah_rm_alloc_vmid(struct gunyah_rm *rm, u16 vmid)
345 {
346 struct gunyah_rm_vm_common_vmid_req req_payload = {
347 .vmid = cpu_to_le16(vmid),
348 };
349 struct gunyah_rm_vm_alloc_vmid_resp *resp_payload;
350 size_t resp_size;
351 void *resp;
352 int ret;
353
354 ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_ALLOC_VMID, &req_payload,
355 sizeof(req_payload), &resp, &resp_size);
356 if (ret)
357 return ret;
358
359 if (!vmid) {
360 resp_payload = resp;
361 ret = le16_to_cpu(resp_payload->vmid);
362 kfree(resp);
363 }
364
365 return ret;
366 }
367 ALLOW_ERROR_INJECTION(gunyah_rm_alloc_vmid, ERRNO);
368
/**
 * gunyah_rm_dealloc_vmid() - Dispose of a VMID
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gunyah_rm_alloc_vmid
 *
 * Return: 0 on success, negative errno from the RM call otherwise.
 */
int gunyah_rm_dealloc_vmid(struct gunyah_rm *rm, u16 vmid)
{
	return gunyah_rm_common_vmid_call(rm, GUNYAH_RM_RPC_VM_DEALLOC_VMID,
					  vmid);
}
ALLOW_ERROR_INJECTION(gunyah_rm_dealloc_vmid, ERRNO);
380
/**
 * gunyah_rm_vm_reset() - Reset a VM's resources
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gunyah_rm_alloc_vmid
 *
 * As part of tearing down the VM, request RM to clean up all the VM resources
 * associated with the VM. Only after this, Linux can clean up all the
 * references it maintains to resources.
 *
 * Return: 0 on success, negative errno from the RM call otherwise.
 */
int gunyah_rm_vm_reset(struct gunyah_rm *rm, u16 vmid)
{
	return gunyah_rm_common_vmid_call(rm, GUNYAH_RM_RPC_VM_RESET, vmid);
}
ALLOW_ERROR_INJECTION(gunyah_rm_vm_reset, ERRNO);
395
/**
 * gunyah_rm_vm_start() - Move a VM into "ready to run" state
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier allocated with gunyah_rm_alloc_vmid
 *
 * On VMs which use proxy scheduling, vcpu_run is needed to actually run the VM.
 * On VMs which use Gunyah's scheduling, the vCPUs start executing in accordance with Gunyah
 * scheduling policies.
 *
 * Return: 0 on success, negative errno from the RM call otherwise.
 */
int gunyah_rm_vm_start(struct gunyah_rm *rm, u16 vmid)
{
	return gunyah_rm_common_vmid_call(rm, GUNYAH_RM_RPC_VM_START, vmid);
}
ALLOW_ERROR_INJECTION(gunyah_rm_vm_start, ERRNO);
410
411 /**
412 * gunyah_rm_vm_stop() - Send a request to Resource Manager VM to forcibly stop a VM.
413 * @rm: Handle to a Gunyah resource manager
414 * @vmid: VM identifier allocated with gunyah_rm_alloc_vmid
415 */
gunyah_rm_vm_stop(struct gunyah_rm * rm,u16 vmid)416 int gunyah_rm_vm_stop(struct gunyah_rm *rm, u16 vmid)
417 {
418 struct gunyah_rm_vm_stop_req req_payload = {
419 .vmid = cpu_to_le16(vmid),
420 .flags = GUNYAH_RM_VM_STOP_FLAG_FORCE_STOP,
421 .stop_reason = cpu_to_le32(GUNYAH_RM_VM_STOP_REASON_FORCE_STOP),
422 };
423
424 return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_STOP, &req_payload,
425 sizeof(req_payload), NULL, NULL);
426 }
427 ALLOW_ERROR_INJECTION(gunyah_rm_vm_stop, ERRNO);
428
429 /**
430 * gunyah_rm_vm_authenticate() - Send a request to Resource Manager VM to authenticate a VM.
431 * @rm: Handle to a Gunyah resource manager
432 * @vmid: VM identifier allocated with gunyah_rm_alloc_vmid
433 * @n_entries: Number of entries of type gunyah_rm_vm_auth_param_entry
434 * @entry: Type of authentication parameters
435 */
gunyah_rm_vm_authenticate(struct gunyah_rm * rm,u16 vmid,ssize_t n_entries,struct gunyah_rm_vm_authenticate_param_entry * entry)436 int gunyah_rm_vm_authenticate(struct gunyah_rm *rm, u16 vmid,
437 ssize_t n_entries,
438 struct gunyah_rm_vm_authenticate_param_entry *entry)
439 {
440 struct gunyah_rm_vm_authenticate_req_header *req_header;
441 struct gunyah_rm_vm_authenticate_param_entry *dest_entry;
442 size_t resp_payload_size;
443 size_t req_size;
444 int err;
445 void *req_buf;
446 __le32 *resp;
447
448 req_size = sizeof(*req_header) + n_entries * sizeof(*entry);
449
450 req_buf = kzalloc(req_size, GFP_KERNEL);
451 if (!req_buf)
452 return -ENOMEM;
453
454 req_header = req_buf;
455 req_header->vmid = vmid;
456 req_header->n_params = n_entries;
457
458 dest_entry = req_buf + sizeof(*req_header);
459 memcpy(dest_entry, entry, sizeof(*entry) * n_entries);
460
461 err = gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_AUTH_IMAGE,
462 req_buf, req_size, (void **)&resp,
463 &resp_payload_size);
464 if (err) {
465 pr_err("%s: Unable to send VM_AUTH_IMAGE to RM: %d\n", __func__, err);
466 kfree(req_buf);
467 return err;
468 }
469
470 if (resp_payload_size) {
471 pr_err("%s: Invalid size received for VM_AUTH_IMAGE: %zu\n",
472 __func__, resp_payload_size);
473 kfree(resp);
474 kfree(req_buf);
475 return -EINVAL;
476 }
477
478 /* no need to free the resp as no payload is expected */
479 kfree(req_buf);
480 return 0;
481 }
482 ALLOW_ERROR_INJECTION(gunyah_rm_vm_authenticate, ERRNO);
483 EXPORT_SYMBOL_GPL(gunyah_rm_vm_authenticate);
484
485 /**
486 * gunyah_rm_vm_configure() - Prepare a VM to start and provide the common
487 * configuration needed by RM to configure a VM
488 * @rm: Handle to a Gunyah resource manager
489 * @vmid: VM identifier allocated with gunyah_rm_alloc_vmid
490 * @auth_mechanism: Authentication mechanism used by resource manager to verify
491 * the virtual machine
492 * @mem_handle: Handle to a previously shared memparcel that contains all parts
493 * of the VM image subject to authentication.
494 * @image_offset: Start address of VM image, relative to the start of memparcel
495 * @image_size: Size of the VM image
496 * @dtb_offset: Start address of the devicetree binary with VM configuration,
497 * relative to start of memparcel.
498 * @dtb_size: Maximum size of devicetree binary.
499 */
gunyah_rm_vm_configure(struct gunyah_rm * rm,u16 vmid,enum gunyah_rm_vm_auth_mechanism auth_mechanism,u32 mem_handle,u64 image_offset,u64 image_size,u64 dtb_offset,u64 dtb_size)500 int gunyah_rm_vm_configure(struct gunyah_rm *rm, u16 vmid,
501 enum gunyah_rm_vm_auth_mechanism auth_mechanism,
502 u32 mem_handle, u64 image_offset, u64 image_size,
503 u64 dtb_offset, u64 dtb_size)
504 {
505 struct gunyah_rm_vm_config_image_req req_payload = {
506 .vmid = cpu_to_le16(vmid),
507 .auth_mech = cpu_to_le16(auth_mechanism),
508 .mem_handle = cpu_to_le32(mem_handle),
509 .image_offset = cpu_to_le64(image_offset),
510 .image_size = cpu_to_le64(image_size),
511 .dtb_offset = cpu_to_le64(dtb_offset),
512 .dtb_size = cpu_to_le64(dtb_size),
513 };
514
515 return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_CONFIG_IMAGE, &req_payload,
516 sizeof(req_payload), NULL, NULL);
517 }
518 ALLOW_ERROR_INJECTION(gunyah_rm_vm_configure, ERRNO);
519
/**
 * gunyah_rm_vm_init() - Move the VM to initialized state.
 * @rm: Handle to a Gunyah resource manager
 * @vmid: VM identifier
 *
 * RM will allocate needed resources for the VM.
 *
 * Return: 0 on success, negative errno from the RM call otherwise.
 */
int gunyah_rm_vm_init(struct gunyah_rm *rm, u16 vmid)
{
	return gunyah_rm_common_vmid_call(rm, GUNYAH_RM_RPC_VM_INIT, vmid);
}
ALLOW_ERROR_INJECTION(gunyah_rm_vm_init, ERRNO);
532
533 /**
534 * gunyah_rm_vm_set_boot_context() - set the initial boot context of the primary vCPU
535 * @rm: Handle to a Gunyah resource manager
536 * @vmid: VM identifier
537 * @reg_set: See &enum gunyah_vm_boot_context_reg
538 * @reg_index: Which register to set; must be 0 for REG_SET_PC
539 * @value: Value to set in the register
540 */
gunyah_rm_vm_set_boot_context(struct gunyah_rm * rm,u16 vmid,u8 reg_set,u8 reg_index,u64 value)541 int gunyah_rm_vm_set_boot_context(struct gunyah_rm *rm, u16 vmid, u8 reg_set,
542 u8 reg_index, u64 value)
543 {
544 struct gunyah_rm_vm_set_boot_context_req req_payload = {
545 .vmid = cpu_to_le16(vmid),
546 .reg_set = reg_set,
547 .reg_index = reg_index,
548 .value = cpu_to_le64(value),
549 };
550
551 return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_SET_BOOT_CONTEXT,
552 &req_payload, sizeof(req_payload), NULL, NULL);
553 }
554 ALLOW_ERROR_INJECTION(gunyah_rm_vm_set_boot_context, ERRNO);
555
556 /**
557 * gunyah_rm_get_hyp_resources() - Retrieve hypervisor resources (capabilities) associated with a VM
558 * @rm: Handle to a Gunyah resource manager
559 * @vmid: VMID of the other VM to get the resources of
560 * @resources: Set by gunyah_rm_get_hyp_resources and contains the returned hypervisor resources.
561 * Caller must free the resources pointer if successful.
562 */
gunyah_rm_get_hyp_resources(struct gunyah_rm * rm,u16 vmid,struct gunyah_rm_hyp_resources ** resources)563 int gunyah_rm_get_hyp_resources(struct gunyah_rm *rm, u16 vmid,
564 struct gunyah_rm_hyp_resources **resources)
565 {
566 struct gunyah_rm_vm_common_vmid_req req_payload = {
567 .vmid = cpu_to_le16(vmid),
568 };
569 struct gunyah_rm_hyp_resources *resp;
570 size_t resp_size;
571 int ret;
572
573 ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_GET_HYP_RESOURCES,
574 &req_payload, sizeof(req_payload), (void **)&resp,
575 &resp_size);
576 if (ret)
577 return ret;
578
579 if (!resp_size)
580 return -EBADMSG;
581
582 if (resp_size < struct_size(resp, entries, 0) ||
583 resp_size !=
584 struct_size(resp, entries, le32_to_cpu(resp->n_entries))) {
585 kfree(resp);
586 return -EBADMSG;
587 }
588
589 *resources = resp;
590 return 0;
591 }
592 ALLOW_ERROR_INJECTION(gunyah_rm_get_hyp_resources, ERRNO);
593
594 /**
595 * gunyah_rm_get_vmid() - Retrieve VMID of this virtual machine
596 * @rm: Handle to a Gunyah resource manager
597 * @vmid: Filled with the VMID of this VM
598 */
gunyah_rm_get_vmid(struct gunyah_rm * rm,u16 * vmid)599 int gunyah_rm_get_vmid(struct gunyah_rm *rm, u16 *vmid)
600 {
601 static u16 cached_vmid = GUNYAH_VMID_INVAL;
602 size_t resp_size;
603 __le32 *resp;
604 int ret;
605
606 if (cached_vmid != GUNYAH_VMID_INVAL) {
607 *vmid = cached_vmid;
608 return 0;
609 }
610
611 ret = gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_GET_VMID, NULL, 0,
612 (void **)&resp, &resp_size);
613 if (ret)
614 return ret;
615
616 *vmid = cached_vmid = lower_16_bits(le32_to_cpu(*resp));
617 kfree(resp);
618
619 return ret;
620 }
621 EXPORT_SYMBOL_GPL(gunyah_rm_get_vmid);
622
623 /**
624 * gunyah_rm_vm_set_demand_paging() - Enable demand paging of memory regions
625 * @rm: Handle to a Gunyah resource manager
626 * @vmid: VMID of the other VM
627 * @count: Number of demand paged memory regions
628 * @entries: Array of the regions
629 */
gunyah_rm_vm_set_demand_paging(struct gunyah_rm * rm,u16 vmid,u32 count,struct gunyah_rm_mem_entry * entries)630 int gunyah_rm_vm_set_demand_paging(struct gunyah_rm *rm, u16 vmid, u32 count,
631 struct gunyah_rm_mem_entry *entries)
632 {
633 struct gunyah_rm_vm_set_demand_paging_req *req __free(kfree) = NULL;
634 size_t req_size;
635
636 req_size = struct_size(req, ranges, count);
637 if (req_size == SIZE_MAX)
638 return -EINVAL;
639
640 req = kzalloc(req_size, GFP_KERNEL);
641 if (!req)
642 return -ENOMEM;
643
644 req->vmid = cpu_to_le16(vmid);
645 req->range_count = cpu_to_le32(count);
646 memcpy(req->ranges, entries, sizeof(*entries) * count);
647
648 return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_SET_DEMAND_PAGING, req,
649 req_size, NULL, NULL);
650 }
651 ALLOW_ERROR_INJECTION(gunyah_rm_vm_set_demand_paging, ERRNO);
652
653 /**
654 * gunyah_rm_vm_set_address_layout() - Set the start address of images
655 * @rm: Handle to a Gunyah resource manager
656 * @vmid: VMID of the other VM
657 * @range_id: Which image to set
658 * @base_address: Base address
659 * @size: Size
660 */
gunyah_rm_vm_set_address_layout(struct gunyah_rm * rm,u16 vmid,enum gunyah_rm_range_id range_id,u64 base_address,u64 size)661 int gunyah_rm_vm_set_address_layout(struct gunyah_rm *rm, u16 vmid,
662 enum gunyah_rm_range_id range_id,
663 u64 base_address, u64 size)
664 {
665 struct gunyah_rm_vm_set_address_layout_req req = {
666 .vmid = cpu_to_le16(vmid),
667 .range_id = cpu_to_le32(range_id),
668 .range_base = cpu_to_le64(base_address),
669 .range_size = cpu_to_le64(size),
670 };
671
672 return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_SET_ADDRESS_LAYOUT, &req,
673 sizeof(req), NULL, NULL);
674 }
675 ALLOW_ERROR_INJECTION(gunyah_rm_vm_set_address_layout, ERRNO);
676
677 /**
678 * gunyah_rm_vm_set_firmware_mem() - Set the location of firmware for GH_RM_VM_AUTH_QCOM_ANDROID_PVM VMs
679 * @rm: Handle to a Gunyah resource manager.
680 * @vmid: VM identifier allocated with gh_rm_alloc_vmid.
681 * @parcel: Memory parcel where the firmware should be loaded.
682 * @fw_offset: offset into the memory parcel where the firmware should be loaded.
683 * @fw_size: Maxmimum size of the fw that can be loaded.
684 */
gunyah_rm_vm_set_firmware_mem(struct gunyah_rm * rm,u16 vmid,struct gunyah_rm_mem_parcel * parcel,u64 fw_offset,u64 fw_size)685 int gunyah_rm_vm_set_firmware_mem(struct gunyah_rm *rm, u16 vmid, struct gunyah_rm_mem_parcel *parcel,
686 u64 fw_offset, u64 fw_size)
687 {
688 struct gunyah_vm_set_firmware_mem_req req = {
689 .vmid = cpu_to_le16(vmid),
690 .mem_handle = cpu_to_le32(parcel->mem_handle),
691 .fw_offset = cpu_to_le64(fw_offset),
692 .fw_size = cpu_to_le64(fw_size),
693 };
694
695 return gunyah_rm_call(rm, GUNYAH_RM_RPC_VM_SET_FIRMWARE_MEM, &req, sizeof(req), NULL, NULL);
696 }
697 ALLOW_ERROR_INJECTION(gunyah_rm_vm_set_firmware_mem, ERRNO);
698