1 /*
2 * Copyright (C) 2010-2014, 2016-2017 ARM Limited. All rights reserved.
3 *
4 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
5 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
6 *
7 * A copy of the licence is included with the program, and can also be obtained from Free Software
8 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
9 */
10
11 #include "mali_osk.h"
12 #include "mali_osk_list.h"
13 #include "ump_osk.h"
14 #include "ump_uk_types.h"
15 #include "ump_kernel_interface.h"
16 #include "ump_kernel_common.h"
17 #include "ump_kernel_random_mapping.h"
18
19
20
21 /* ---------------- UMP kernel space API functions follows ---------------- */
22
23
24
ump_dd_secure_id_get(ump_dd_handle memh)25 UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
26 {
27 ump_dd_mem *mem = (ump_dd_mem *)memh;
28
29 DEBUG_ASSERT_POINTER(mem);
30
31 DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
32
33 return mem->secure_id;
34 }
35
36
37
ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)38 UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
39 {
40 ump_dd_mem *mem;
41
42 DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
43 mem = ump_random_mapping_get(device.secure_id_map, (int)secure_id);
44 if (NULL == mem) {
45 DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
46 return UMP_DD_HANDLE_INVALID;
47 }
48
49 /* Keep the reference taken in ump_random_mapping_get() */
50
51 return (ump_dd_handle)mem;
52 }
53
54
55
ump_dd_phys_block_count_get(ump_dd_handle memh)56 UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
57 {
58 ump_dd_mem *mem = (ump_dd_mem *) memh;
59
60 DEBUG_ASSERT_POINTER(mem);
61
62 return mem->nr_blocks;
63 }
64
65
66
ump_dd_phys_blocks_get(ump_dd_handle memh,ump_dd_physical_block * blocks,unsigned long num_blocks)67 UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block *blocks, unsigned long num_blocks)
68 {
69 ump_dd_mem *mem = (ump_dd_mem *)memh;
70
71 DEBUG_ASSERT_POINTER(mem);
72
73 if (blocks == NULL) {
74 DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
75 return UMP_DD_INVALID;
76 }
77
78 if (mem->nr_blocks != num_blocks) {
79 DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
80 return UMP_DD_INVALID;
81 }
82
83 DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
84
85 _mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
86
87 return UMP_DD_SUCCESS;
88 }
89
90
91
ump_dd_phys_block_get(ump_dd_handle memh,unsigned long index,ump_dd_physical_block * block)92 UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block *block)
93 {
94 ump_dd_mem *mem = (ump_dd_mem *)memh;
95
96 DEBUG_ASSERT_POINTER(mem);
97
98 if (block == NULL) {
99 DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
100 return UMP_DD_INVALID;
101 }
102
103 if (index >= mem->nr_blocks) {
104 DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
105 return UMP_DD_INVALID;
106 }
107
108 DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
109
110 *block = mem->block_array[index];
111
112 return UMP_DD_SUCCESS;
113 }
114
115
116
ump_dd_size_get(ump_dd_handle memh)117 UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
118 {
119 ump_dd_mem *mem = (ump_dd_mem *)memh;
120
121 DEBUG_ASSERT_POINTER(mem);
122
123 DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
124
125 return mem->size_bytes;
126 }
127
128
129
ump_dd_reference_add(ump_dd_handle memh)130 UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
131 {
132 ump_dd_mem *mem = (ump_dd_mem *)memh;
133 int new_ref;
134
135 DEBUG_ASSERT_POINTER(mem);
136
137 new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
138
139 DBG_MSG(5, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
140 }
141
142
143
ump_dd_reference_release(ump_dd_handle memh)144 UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
145 {
146 ump_dd_mem *mem = (ump_dd_mem *)memh;
147
148 DEBUG_ASSERT_POINTER(mem);
149
150 ump_random_mapping_put(mem);
151 }
152
153
154
155 /* --------------- Handling of user space requests follows --------------- */
156
157
_ump_uku_get_api_version(_ump_uk_api_version_s * args)158 _mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args)
159 {
160 ump_session_data *session_data;
161
162 DEBUG_ASSERT_POINTER(args);
163 DEBUG_ASSERT_POINTER(args->ctx);
164
165 session_data = (ump_session_data *)args->ctx;
166
167 /* check compatability */
168 if (args->version == UMP_IOCTL_API_VERSION) {
169 DBG_MSG(3, ("API version set to newest %d (compatible)\n",
170 GET_VERSION(args->version)));
171 args->compatible = 1;
172 session_data->api_version = args->version;
173 } else {
174 DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n",
175 GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
176 args->compatible = 0;
177 args->version = UMP_IOCTL_API_VERSION; /* report our version */
178 }
179
180 return _MALI_OSK_ERR_OK;
181 }
182
183
/**
 * Handle a user space release request (IOCTL) for one allocation.
 *
 * Walks this session's memory list under the session lock looking for the
 * requested secure ID; if found, the element is unlinked, the allocation's
 * reference is dropped and the list element freed.
 *
 * @param release_info request arguments; ->ctx carries the session data.
 * @return _MALI_OSK_ERR_OK if the ID belonged to this session,
 *         _MALI_OSK_ERR_INVALID_FUNC otherwise.
 */
_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info)
{
	ump_session_memory_list_element *session_memory_element;
	ump_session_memory_list_element *tmp;
	ump_session_data *session_data;
	/* Default to failure; only flipped to OK when the ID is found below. */
	_mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
	int secure_id;

	DEBUG_ASSERT_POINTER(release_info);
	DEBUG_ASSERT_POINTER(release_info->ctx);

	/* Retreive the session data */
	session_data = (ump_session_data *)release_info->ctx;

	/* If there are many items in the memory session list we
	 * could be de-referencing this pointer a lot so keep a local copy
	 */
	secure_id = release_info->secure_id;

	DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));

	/* Iterate through the memory list looking for the requested secure ID */
	_mali_osk_mutex_wait(session_data->lock);
	_MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
		if (session_memory_element->mem->secure_id == secure_id) {
			ump_dd_mem *release_mem;

			release_mem = session_memory_element->mem;
			/* Unlink first, then drop the reference and free the
			 * tracking element; only the first match is released. */
			_mali_osk_list_del(&session_memory_element->list);
			ump_dd_reference_release(release_mem);
			_mali_osk_free(session_memory_element);

			ret = _MALI_OSK_ERR_OK;
			break;
		}
	}

	_mali_osk_mutex_signal(session_data->lock);
	DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));

	DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
	return ret;
}
227
_ump_ukk_size_get(_ump_uk_size_get_s * user_interaction)228 _mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction)
229 {
230 ump_dd_mem *mem;
231 _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
232
233 DEBUG_ASSERT_POINTER(user_interaction);
234
235 /* We lock the mappings so things don't get removed while we are looking for the memory */
236 mem = ump_random_mapping_get(device.secure_id_map, user_interaction->secure_id);
237 if (NULL != mem) {
238 user_interaction->size = mem->size_bytes;
239 DBG_MSG(4, ("Returning size. ID: %u, size: %lu ",
240 (ump_secure_id)user_interaction->secure_id,
241 (unsigned long)user_interaction->size));
242 ump_random_mapping_put(mem);
243 ret = _MALI_OSK_ERR_OK;
244 } else {
245 user_interaction->size = 0;
246 DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n",
247 (ump_secure_id)user_interaction->secure_id));
248 }
249
250 return ret;
251 }
252
253
254
/**
 * Handle a user space cache maintenance (msync) request.
 *
 * Looks up the allocation, reports its cachedness back to user space, and
 * performs the requested cache operation on either a sub-range (when
 * args->address/args->size are given) or the whole mapping.  No-ops for
 * read-out-only requests and for uncached memory.
 */
void _ump_ukk_msync(_ump_uk_msync_s *args)
{
	ump_dd_mem *mem = NULL;
	void *virtual = NULL;
	u32 size = 0;
	u32 offset = 0;

	mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
	if (NULL == mem) {
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n",
			    (ump_secure_id)args->secure_id));
		return;
	}

	/* Returns the cache settings back to Userspace */
	args->is_cached = mem->is_cached;

	/* If this flag is the only one set, we should not do the actual flush, only the readout */
	if (_UMP_UK_MSYNC_READOUT_CACHE_ENABLED == args->op) {
		DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
		goto msync_release_and_return;
	}

	/* Nothing to do if the memory is not cached */
	if (0 == mem->is_cached) {
		DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
		goto msync_release_and_return;
	}
	DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync Flush OP: %d Address: 0x%08x Mapping: 0x%08x\n",
		    (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));

	if (args->address) {
		/* NOTE(review): truncates the address through u32 — presumably this
		 * driver only targets 32-bit user space; confirm for 64-bit kernels. */
		virtual = (void *)((u32)args->address);
		/* Offset of the requested address within the user space mapping. */
		offset = (u32)((args->address) - (args->mapping));
	} else {
		/* Flush entire mapping when no address is specified. */
		virtual = args->mapping;
	}
	if (args->size) {
		size = args->size;
	} else {
		/* Flush entire mapping when no size is specified. */
		size = mem->size_bytes - offset;
	}

	/* Reject ranges that would run past the end of the allocation. */
	if ((offset + size) > mem->size_bytes) {
		DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
		goto msync_release_and_return;
	}

	/* The actual cache flush - Implemented for each OS*/
	_ump_osk_msync(mem, virtual, offset, size, args->op, NULL);

msync_release_and_return:
	/* Drop the reference taken by ump_random_mapping_get() on every path. */
	ump_random_mapping_put(mem);
	return;
}
312
/**
 * Track start/finish of user space cache-operation batches for a session.
 *
 * START increments the session's in-progress counter; FINISH decrements it
 * and unconditionally flushes the L1 cache as a safety measure.  Any other
 * op value is logged and ignored.  The session lock protects the counter.
 */
void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args)
{
	ump_session_data *session_data;
	ump_uk_cache_op_control op;

	DEBUG_ASSERT_POINTER(args);
	DEBUG_ASSERT_POINTER(args->ctx);

	op = args->op;
	session_data = (ump_session_data *)args->ctx;

	_mali_osk_mutex_wait(session_data->lock);

	switch (op) {
	case _UMP_UK_CACHE_OP_START:
		session_data->cache_operations_ongoing++;
		DBG_MSG(4, ("Cache ops start\n"));
		if (1 != session_data->cache_operations_ongoing) {
			DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing));
		}
		break;

	case _UMP_UK_CACHE_OP_FINISH:
		DBG_MSG(4, ("Cache ops finish\n"));
		session_data->cache_operations_ongoing--;
		/* to be on the safe side: always flush l1 cache when cache operations are done */
		_ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
		DBG_MSG(4, ("Cache ops finish end\n"));
		break;

	default:
		DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
		break;
	}

	_mali_osk_mutex_signal(session_data->lock);
}
350
/**
 * Handle a user space request to switch which device (CPU or MALI) uses
 * an allocation, performing the cache maintenance the transition requires.
 *
 * Cache work is skipped when the memory is uncached, when the owner does
 * not actually change, or when neither the old nor the new owner is the
 * CPU.  A hardware-to-CPU hand-over needs only an invalidate; all other
 * CPU-involving transitions use clean-and-invalidate.
 */
void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
{
	ump_dd_mem *mem = NULL;
	ump_uk_user old_user;
	/* Default op; downgraded to invalidate-only for HW -> CPU below. */
	ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
	ump_session_data *session_data;

	DEBUG_ASSERT_POINTER(args);
	DEBUG_ASSERT_POINTER(args->ctx);

	session_data = (ump_session_data *)args->ctx;

	mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
	if (NULL == mem) {
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n",
			    (ump_secure_id)args->secure_id));
		return;
	}

	/* Record the previous owner and switch to the new one up front. */
	old_user = mem->hw_device;
	mem->hw_device = args->new_user;

	DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n",
		    (ump_secure_id)args->secure_id,
		    args->new_user ? "MALI" : "CPU",
		    old_user ? "MALI" : "CPU"));

	if (!mem->is_cached) {
		DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n",
			    (ump_secure_id)args->secure_id));
		goto out;
	}

	if (old_user == args->new_user) {
		DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n",
			    (ump_secure_id)args->secure_id));
		goto out;
	}
	if (
		/* Previous AND new is both different from CPU */
		(old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU)
	) {
		DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n",
			    (ump_secure_id)args->secure_id));
		goto out;
	}

	if ((old_user != _UMP_UK_USED_BY_CPU) && (args->new_user == _UMP_UK_USED_BY_CPU)) {
		/* Hardware hands the memory to the CPU: invalidate is sufficient. */
		cache_op = _UMP_UK_MSYNC_INVALIDATE;
		DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
		/* The #error below makes defining UMP_SKIP_INVALIDATION a hard
		 * build failure — this skip path is deliberately disabled. */
#ifdef UMP_SKIP_INVALIDATION
#error
		DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
		goto out;
#endif
	}

	/* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
	_mali_osk_mutex_wait(session_data->lock);
	/* Actual cache flush */
	_ump_osk_msync(mem, NULL, 0, mem->size_bytes, cache_op, session_data);
	_mali_osk_mutex_signal(session_data->lock);

out:
	/* Drop the reference taken by ump_random_mapping_get(). */
	ump_random_mapping_put(mem);
	DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
	return;
}
419
/**
 * Handle a user space lock request: record the requested lock usage on the
 * allocation identified by args->secure_id.
 *
 * Fixes two format-string/argument mismatches in the debug messages: the
 * lookup-failure message had two conversions but only one argument (reading
 * a garbage varargs value), and the lock message passed three arguments for
 * two conversions, so the old lock flag was never printed.
 */
void _ump_ukk_lock(_ump_uk_lock_s *args)
{
	ump_dd_mem *mem = NULL;

	mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
	if (NULL == mem) {
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_lock(). ID: %u\n",
			    (ump_secure_id)args->secure_id));
		return;
	}

	DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag: %d\n",
		    (u32)args->secure_id, (u32)args->lock_usage, (u32)mem->lock_usage));

	mem->lock_usage = (ump_lock_usage)args->lock_usage;

	/* Drop the reference taken by ump_random_mapping_get(). */
	ump_random_mapping_put(mem);
}
437
/**
 * Handle a user space unlock request: reset the allocation's lock usage
 * to UMP_NOT_LOCKED.
 *
 * Fixes a format-string/argument mismatch in the debug message: it passed
 * two arguments for a single conversion, so the old lock flag was never
 * printed.
 */
void _ump_ukk_unlock(_ump_uk_unlock_s *args)
{
	ump_dd_mem *mem = NULL;

	mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
	if (NULL == mem) {
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n",
			    (ump_secure_id)args->secure_id));
		return;
	}

	DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag: %d\n",
		    (u32)args->secure_id, (u32)mem->lock_usage));

	mem->lock_usage = (ump_lock_usage)UMP_NOT_LOCKED;

	/* Drop the reference taken by ump_random_mapping_get(). */
	ump_random_mapping_put(mem);
}
456