/*
 * Copyright (c) 2019-2020 LK Trusty Authors. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <compiler.h>
#include <debug.h>
#include <err.h>
#include <interface/arm_ffa/arm_ffa.h>
#include <inttypes.h>
#include <kernel/mutex.h>
#include <kernel/vm.h>
#include <lib/arm_ffa/arm_ffa.h>
#include <lib/extmem/extmem.h>
#include <lib/page_alloc.h>
#include <lib/sm.h>
#include <lib/smc/smc.h>
#include <lk/init.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

#define LOCAL_TRACE 0

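/**
 * struct sm_mem_obj - Shared memory object retrieved over FF-A.
 * @vm:          VM that shared the memory; a reference is held for the
 *               lifetime of the object.
 * @vm_ref:      Reference to @vm, taken in sm_mem_alloc_obj() and released in
 *               sm_mem_obj_destroy().
 * @ext_mem_obj: Underlying external memory object; its page runs are
 *               allocated in the same heap block, directly after this
 *               structure.
 */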
struct sm_mem_obj {
    struct sm_vm* vm;
    struct obj_ref vm_ref;
    struct ext_mem_obj ext_mem_obj;
};

static void sm_mem_obj_compat_destroy(struct vmm_obj* vmm_obj) {
    struct ext_mem_obj* obj = containerof(vmm_obj, struct ext_mem_obj, vmm_obj);
    free(obj);
}

static struct vmm_obj_ops sm_mem_obj_compat_ops = {
    .check_flags = ext_mem_obj_check_flags,
    .get_page = ext_mem_obj_get_page,
    .destroy = sm_mem_obj_compat_destroy,
};

/**
 * sm_mem_compat_get_vmm_obj - Create vmm_obj from id.
 * @client_id:  Id of external entity where the memory originated.
 * @mem_obj_id: Object id containing a packed address and attributes.
 * @size:       Size of object.
 * @objp:       Pointer to return object in.
 * @obj_ref:    Reference to *@objp.
 *
 * The object paddr and attributes are encoded in the id for now. Convert it to
 * a paddr and mmu-flags using the existing helper function.
 *
 * Return: 0 on success, negative error code if the object could not be
 * created.
 */
static status_t sm_mem_compat_get_vmm_obj(ext_mem_client_id_t client_id,
                                          ext_mem_obj_id_t mem_obj_id,
                                          size_t size,
                                          struct vmm_obj** objp,
                                          struct obj_ref* obj_ref) {
    int ret;
    struct ext_mem_obj* obj;
    struct ns_page_info pinf = {mem_obj_id};
    ns_addr_t ns_paddr;
    paddr_t paddr;
    uint arch_mmu_flags;

    ret = sm_decode_ns_memory_attr(&pinf, &ns_paddr, &arch_mmu_flags);
    if (ret) {
        return ret;
    }

    paddr = (paddr_t)ns_paddr;
    if (paddr != ns_paddr) {
        /*
         * If ns_addr_t is larger than paddr_t and we get an address that does
         * not fit, return an error as we cannot map that address.
         */
        TRACEF("unsupported paddr, 0x%0" PRIxNS_ADDR "\n", ns_paddr);
        return ERR_INVALID_ARGS;
    }

    obj = malloc(sizeof(*obj) + ext_mem_obj_page_runs_size(1));
    if (!obj) {
        return ERR_NO_MEMORY;
    }

    arch_mmu_flags |= ARCH_MMU_FLAG_NS | ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    ext_mem_obj_initialize(obj, obj_ref, mem_obj_id, 0, &sm_mem_obj_compat_ops,
                           arch_mmu_flags, 1);
    obj->page_runs[0].paddr = paddr;
    obj->page_runs[0].size = size;
    *objp = &obj->vmm_obj;

    return 0;
}

/**
 * sm_mem_obj_destroy - Destroy memory object.
 * @vmm_obj: VMM object to destroy.
 *
 * Called after the last reference to @vmm_obj has been released. Relinquish
 * the shared memory object id with the SPM/Hypervisor and free the local
 * tracking object.
 */
static void sm_mem_obj_destroy(struct vmm_obj* vmm_obj) {
    int ret;
    struct sm_mem_obj* obj =
            containerof(vmm_obj, struct sm_mem_obj, ext_mem_obj.vmm_obj);

    DEBUG_ASSERT(obj);
    DEBUG_ASSERT(obj->vm);

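    /*
     * Tell the SPM/Hypervisor that we no longer access the memory, so that
     * the owner can reclaim it.
     */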
    ret = arm_ffa_mem_relinquish(obj->ext_mem_obj.id);
    if (ret != NO_ERROR) {
        TRACEF("Failed to relinquish the shared memory (%d)\n", ret);
    }

    /* Release the VM reference */
    sm_vm_del_ref(obj->vm, &obj->vm_ref);
    free(obj);
}

static struct vmm_obj_ops sm_mem_obj_ops = {
    .check_flags = ext_mem_obj_check_flags,
    .get_page = ext_mem_obj_get_page,
    .destroy = sm_mem_obj_destroy,
};

/**
 * sm_mem_alloc_obj - Allocate and initialize memory object.
 * @vm:             Pointer to VM object of sender.
 * @mem_id:         Id of object.
 * @tag:            Tag of the object.
 * @page_run_count: Number of page runs to allocate for object.
 * @arch_mmu_flags: Memory type and permissions.
 * @obj_ref:        Reference to returned object.
 *
 * Return: Pointer to &struct sm_mem_obj, or %NULL if allocation fails.
 */
static struct sm_mem_obj* sm_mem_alloc_obj(struct sm_vm* vm,
                                           ext_mem_obj_id_t mem_id,
                                           uint64_t tag,
                                           size_t page_run_count,
                                           uint arch_mmu_flags,
                                           struct obj_ref* obj_ref) {
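    /*
     * Allocate the tracking object and its trailing array of page runs in a
     * single block, sized via ext_mem_obj_page_runs_size().
     */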
    struct sm_mem_obj* obj =
            malloc(sizeof(*obj) + ext_mem_obj_page_runs_size(page_run_count));
    if (!obj) {
        return NULL;
    }
    ext_mem_obj_initialize(&obj->ext_mem_obj, obj_ref, mem_id, tag,
                           &sm_mem_obj_ops, arch_mmu_flags, page_run_count);

    obj->vm = vm;
    obj_ref_init(&obj->vm_ref);
    sm_vm_add_ref(vm, &obj->vm_ref);

    return obj;
}

/**
 * sm_mem_get_vmm_obj - Look up a shared memory object using FF-A.
 * @vm:         Pointer to VM object of sender.
 * @mem_obj_id: Id of shared memory object to lookup and return.
 * @tag:        Tag of the memory.
 * @size:       Size hint for object. Caller expects an object at least this
 *              big.
 * @objp:       Pointer to return object in.
 * @obj_ref:    Reference to *@objp.
 *
 * Return: 0 on success. ERR_NOT_FOUND if @mem_obj_id does not exist.
 */
static status_t sm_mem_get_vmm_obj(struct sm_vm* vm,
                                   ext_mem_obj_id_t mem_obj_id,
                                   uint64_t tag,
                                   size_t size,
                                   struct vmm_obj** objp,
                                   struct obj_ref* obj_ref) {
    int ret;
    ext_mem_client_id_t client_id;
    struct arm_ffa_mem_frag_info frag_info;
    uint32_t address_range_count;
    uint arch_mmu_flags;
    struct sm_mem_obj* obj;
    struct obj_ref tmp_obj_ref = OBJ_REF_INITIAL_VALUE(tmp_obj_ref);

    DEBUG_ASSERT(vm);
    DEBUG_ASSERT(objp);
    DEBUG_ASSERT(obj_ref);

    client_id = sm_vm_get_id(vm);
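    /* FF-A endpoint ids are 16 bits wide, so reject anything larger. */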
    if ((client_id & 0xffff) != client_id) {
        TRACEF("Invalid client ID\n");
        return ERR_INVALID_ARGS;
    }

    ret = arm_ffa_mem_retrieve_start((uint16_t)client_id, mem_obj_id, tag,
                                     &address_range_count, &arch_mmu_flags,
                                     &frag_info);

    if (ret != NO_ERROR) {
        TRACEF("Failed to get FF-A memory buffer, err=%d\n", ret);
        goto err_mem_get_access;
    }
    obj = sm_mem_alloc_obj(vm, mem_obj_id, tag, address_range_count,
                           arch_mmu_flags, &tmp_obj_ref);
    if (!obj) {
        TRACEF("Failed to allocate a shared memory object\n");
        ret = ERR_NO_MEMORY;
        goto err_mem_alloc_obj;
    }

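    /*
     * Copy each address range out of the retrieve response. Responses that do
     * not fit in the RX buffer arrive in fragments; whenever the current
     * fragment has been consumed, release the RX buffer and fetch the next
     * one.
     */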
    for (uint32_t i = 0; i < address_range_count; i++) {
        if (frag_info.start_index + frag_info.count <= i) {
            arm_ffa_rx_release();
            ret = arm_ffa_mem_retrieve_next_frag(mem_obj_id, &frag_info);
            if (ret != NO_ERROR) {
                TRACEF("Failed to get next fragment, err=%d\n", ret);
                goto err_mem_next_frag;
            }
        }
        ret = arm_ffa_mem_address_range_get(
                &frag_info, i, &obj->ext_mem_obj.page_runs[i].paddr,
                &obj->ext_mem_obj.page_runs[i].size);
        if (ret != NO_ERROR) {
            TRACEF("Failed to get address range, err=%d\n", ret);
            goto err_mem_address_range;
        }
    }

    /* No lock needed as the object is not yet visible to anyone else */
    obj_ref_transfer(obj_ref, &tmp_obj_ref);
    *objp = &obj->ext_mem_obj.vmm_obj;

    arm_ffa_rx_release();

    return 0;

err_mem_address_range:
err_mem_next_frag:
    DEBUG_ASSERT(obj_ref_active(&tmp_obj_ref));
    vmm_obj_del_ref(&obj->ext_mem_obj.vmm_obj, &tmp_obj_ref);

err_mem_alloc_obj:
err_mem_get_access:
    arm_ffa_rx_release();
    return ret;
}

/**
 * ext_mem_get_vm_vmm_obj - Look up or create shared memory object for a VM.
 * @vm:         VM where the memory originated.
 * @mem_obj_id: Id of shared memory object to lookup and return.
 * @tag:        Value to identify the transaction.
 * @size:       Size hint for object.
 * @objp:       Pointer to return object in.
 * @obj_ref:    Reference to *@objp.
 *
 * Call SPM/Hypervisor to retrieve memory region or extract address and
 * attributes from id for old clients.
 *
 * Return: 0 on success, negative error code on failure.
 */
status_t ext_mem_get_vm_vmm_obj(struct sm_vm* vm,
                                ext_mem_obj_id_t mem_obj_id,
                                uint64_t tag,
                                size_t size,
                                struct vmm_obj** objp,
                                struct obj_ref* obj_ref) {
    ext_mem_client_id_t client_id = sm_vm_get_id(vm);

    if (sm_get_api_version() >= TRUSTY_API_VERSION_MEM_OBJ) {
        return sm_mem_get_vmm_obj(vm, mem_obj_id, tag, size, objp, obj_ref);
    } else if (!client_id && !tag) {
        /*
         * If the client is not running under a hypervisor, allow it to use
         * the old API.
         */
        return sm_mem_compat_get_vmm_obj(client_id, mem_obj_id, size, objp,
                                         obj_ref);
    } else {
        return ERR_NOT_SUPPORTED;
    }
}

/**
 * ext_mem_get_vmm_obj - Look up or create shared memory object.
 * @client_id:  Id of external entity where the memory originated.
 * @mem_obj_id: Id of shared memory object to lookup and return.
 * @tag:        Value to identify the transaction.
 * @size:       Size hint for object.
 * @objp:       Pointer to return object in.
 * @obj_ref:    Reference to *@objp.
 *
 * Call SPM/Hypervisor to retrieve memory region or extract address and
 * attributes from id for old clients.
 *
 * Return: 0 on success, negative error code on failure.
 */
status_t ext_mem_get_vmm_obj(ext_mem_client_id_t client_id,
                             ext_mem_obj_id_t mem_obj_id,
                             uint64_t tag,
                             size_t size,
                             struct vmm_obj** objp,
                             struct obj_ref* obj_ref) {
    struct sm_vm* vm = NULL;
    struct obj_ref vm_ref = OBJ_REF_INITIAL_VALUE(vm_ref);
    status_t ret;

    /*
     * Get the VM for the given client ID. This also works for the
     * compatibility path, because sm_vm_get() returns a default
     * compatibility VM in that case.
     */
    ret = sm_vm_get(client_id, &vm_ref, &vm);
    if (ret != NO_ERROR) {
        TRACEF("Failed to get VM %" PRIu64 " reference (%d)\n", client_id, ret);
        return ret;
    }

    ret = ext_mem_get_vm_vmm_obj(vm, mem_obj_id, tag, size, objp, obj_ref);
    sm_vm_del_ref(vm, &vm_ref);
    return ret;
}
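
/*
 * Example usage (illustrative sketch only; `id` and the mapping step are
 * hypothetical, not part of this file):
 *
 *   struct vmm_obj* obj;
 *   struct obj_ref ref = OBJ_REF_INITIAL_VALUE(ref);
 *
 *   if (!ext_mem_get_vmm_obj(client_id, id, 0, PAGE_SIZE, &obj, &ref)) {
 *       ... map obj into an address space, e.g. with vmm_alloc_obj(),
 *       ... which takes its own reference, then drop ours:
 *       vmm_obj_del_ref(obj, &ref);
 *   }
 */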

/**
 * shared_mem_init - Connect to SPM/Hypervisor.
 * @level: Unused.
 *
 * Check that the arm_ffa module initialized successfully. Panic if it did not
 * but the mem_obj API version has already been selected.
 */
static void shared_mem_init(uint level) {
    /* Check that the FF-A module initialized successfully */
    if (!arm_ffa_is_init()) {
        TRACEF("arm_ffa module is not initialized\n");
        if (sm_check_and_lock_api_version(TRUSTY_API_VERSION_MEM_OBJ)) {
            panic("shared_mem_init failed after mem_obj version selected\n");
        }
    }
}

LK_INIT_HOOK(shared_mem, shared_mem_init, LK_INIT_LEVEL_APPS);