1 /*
2 * Copyright (C) 2016-2020 ARM Limited. All rights reserved.
3 *
4 * Copyright (C) 2008 The Android Open Source Project
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
20
21 #include <string.h>
22 #include <errno.h>
23 #include <inttypes.h>
24 #include <pthread.h>
25 #include <stdlib.h>
26 #include <limits.h>
27
28 #include <log/log.h>
29 #include <cutils/atomic.h>
30 #include <utils/Trace.h>
31
32 #include <linux/dma-buf.h>
33 #include <vector>
34 #include <sys/ioctl.h>
35
36 #include <hardware/hardware.h>
37 #include <hardware/gralloc1.h>
38
39 #include <BufferAllocator/BufferAllocator.h>
40 #include "mali_gralloc_buffer.h"
41 #include "gralloc_helper.h"
42 #include "mali_gralloc_formats.h"
43 #include "mali_gralloc_usages.h"
44 #include "core/format_info.h"
45 #include "core/mali_gralloc_bufferdescriptor.h"
46 #include "core/mali_gralloc_bufferallocation.h"
47
48 #include "mali_gralloc_ion.h"
49
50 #include <array>
51 #include <cassert>
52 #include <string>
53
/* Names of the dmabuf heaps this allocator can target. Which heaps actually
 * exist on a device is probed at runtime via BufferAllocator::GetDmabufHeapList()
 * (see find_first_available_heap() below); selection by usage bits is done in
 * select_dmabuf_heap(). */
static const char kDmabufSensorDirectHeapName[] = "sensor_direct_heap";
static const char kDmabufFaceauthTpuHeapName[] = "faceauth_tpu-secure";
static const char kDmabufFaceauthImgHeapName[] = "faimg-secure";
static const char kDmabufFaceauthRawImgHeapName[] = "farawimg-secure";
static const char kDmabufFaceauthPrevHeapName[] = "faprev-secure";
static const char kDmabufFaceauthModelHeapName[] = "famodel-secure";
static const char kDmabufVframeSecureHeapName[] = "vframe-secure";
static const char kDmabufVstreamSecureHeapName[] = "vstream-secure";
static const char kDmabufVscalerSecureHeapName[] = "vscaler-secure";
static const char kDmabufFramebufferSecureHeapName[] = "framebuffer-secure";
static const char kDmabufGcmaCameraHeapName[] = "gcma_camera";
static const char kDmabufGcmaCameraUncachedHeapName[] = "gcma_camera-uncached";
66
get_allocator()67 BufferAllocator& get_allocator() {
68 static BufferAllocator allocator;
69 return allocator;
70 }
71
find_first_available_heap(const std::initializer_list<std::string> && options)72 std::string find_first_available_heap(const std::initializer_list<std::string>&& options) {
73 static auto available_heaps = BufferAllocator::GetDmabufHeapList();
74
75 for (const auto& heap: options)
76 if (available_heaps.find(heap) != available_heaps.end())
77 return heap;
78
79 return "";
80 }
81
/*
 * Maps a gralloc usage mask to the name of the dmabuf heap to allocate from.
 *
 * Matching happens in two passes:
 *   1. exact_usage_heaps — the usage mask must equal the specifier exactly
 *      (specialised faceauth / secure-framebuffer cases);
 *   2. inexact_usage_heaps — the specifier's bits must merely be a subset of
 *      the usage mask; entries are ordered most-specific first, ending with a
 *      catch-all system heap, so ordering is significant.
 *
 * Both tables are function-local statics, so any find_first_available_heap()
 * probing happens only once per process.
 *
 * @param usage  Combined producer/consumer gralloc usage flags.
 *
 * @return Name of the heap to allocate from, or an empty string when no
 *         entry matches (callers treat that as an error).
 */
std::string select_dmabuf_heap(uint64_t usage)
{
    struct HeapSpecifier
    {
        uint64_t usage_bits;
        std::string name;
    };

    static const std::array<HeapSpecifier, 7> exact_usage_heaps =
    {{
        // Faceauth heaps
        { // isp_image_heap
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GS101_GRALLOC_USAGE_TPU_INPUT,
            kDmabufFaceauthImgHeapName
        },
        { // isp_internal_heap
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_CAMERA_READ,
            kDmabufFaceauthRawImgHeapName
        },
        { // isp_preview_heap
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_COMPOSER |
            GRALLOC_USAGE_HW_TEXTURE,
            kDmabufFaceauthPrevHeapName
        },
        { // ml_model_heap
            GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_INPUT,
            kDmabufFaceauthModelHeapName
        },
        { // tpu_heap
            GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_OUTPUT | GS101_GRALLOC_USAGE_TPU_INPUT,
            kDmabufFaceauthTpuHeapName
        },

        // Secure framebuffer; falls back to vframe-secure when the dedicated
        // framebuffer-secure heap is not present on this device.
        {
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_FB,
            find_first_available_heap({kDmabufFramebufferSecureHeapName, kDmabufVframeSecureHeapName})
        },

        {
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
            GRALLOC_USAGE_HW_COMPOSER,
            find_first_available_heap({kDmabufFramebufferSecureHeapName, kDmabufVframeSecureHeapName})
        },
    }};

    static const std::array<HeapSpecifier, 8> inexact_usage_heaps =
    {{
        // If GPU, use vframe-secure
        {
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_TEXTURE,
            kDmabufVframeSecureHeapName
        },
        {
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_RENDER,
            kDmabufVframeSecureHeapName
        },

        // If HWC but not GPU
        {
            GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_COMPOSER,
            kDmabufVscalerSecureHeapName
        },

        // Catchall for protected
        {
            GRALLOC_USAGE_PROTECTED,
            kDmabufVframeSecureHeapName
        },

        // Sensor heap
        {
            GRALLOC_USAGE_SENSOR_DIRECT_DATA,
            kDmabufSensorDirectHeapName
        },

        // Camera GCMA heap
        {
            GRALLOC_USAGE_HW_CAMERA_WRITE,
            find_first_available_heap({kDmabufGcmaCameraUncachedHeapName, kDmabufSystemUncachedHeapName})
        },

        // Camera GCMA heap
        {
            GRALLOC_USAGE_HW_CAMERA_READ,
            find_first_available_heap({kDmabufGcmaCameraUncachedHeapName, kDmabufSystemUncachedHeapName})
        },

        // Catchall to system
        {
            0,
            kDmabufSystemUncachedHeapName
        }
    }};

    for (const HeapSpecifier &heap : exact_usage_heaps)
    {
        if (usage == heap.usage_bits)
        {
            return heap.name;
        }
    }

    for (const HeapSpecifier &heap : inexact_usage_heaps)
    {
        if ((usage & heap.usage_bits) == heap.usage_bits)
        {
            // An uncached heap was selected, but the CPU reads the buffer
            // often: switch to the cached variant of the same heap.
            if (heap.name == kDmabufGcmaCameraUncachedHeapName &&
                ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN))
                return kDmabufGcmaCameraHeapName;
            else if (heap.name == kDmabufSystemUncachedHeapName &&
                     ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN))
                return kDmabufSystemHeapName;

            return heap.name;
        }
    }

    return "";
}
202
alloc_from_dmabuf_heap(uint64_t usage,size_t size,const std::string & buffer_name="")203 int alloc_from_dmabuf_heap(uint64_t usage, size_t size, const std::string& buffer_name = "")
204 {
205 ATRACE_CALL();
206 if (size == 0) { return -1; }
207
208 auto heap_name = select_dmabuf_heap(usage);
209 if (heap_name.empty()) {
210 MALI_GRALLOC_LOGW("No heap found for usage: %s (0x%" PRIx64 ")", describe_usage(usage).c_str(), usage);
211 return -EINVAL;
212 }
213
214 ATRACE_NAME(("alloc_from_dmabuf_heap " + heap_name).c_str());
215 int shared_fd = get_allocator().Alloc(heap_name, size, 0);
216 if (shared_fd < 0)
217 {
218 ALOGE("Allocation failed for heap %s error: %d\n", heap_name.c_str(), shared_fd);
219 }
220
221 if (!buffer_name.empty()) {
222 if (get_allocator().DmabufSetName(shared_fd, buffer_name)) {
223 ALOGW("Unable to set buffer name %s: %s", buffer_name.c_str(), strerror(errno));
224 }
225 }
226
227 return shared_fd;
228 }
229
sync_type_for_flags(const bool read,const bool write)230 SyncType sync_type_for_flags(const bool read, const bool write)
231 {
232 if (read && !write)
233 {
234 return SyncType::kSyncRead;
235 }
236 else if (write && !read)
237 {
238 return SyncType::kSyncWrite;
239 }
240 else
241 {
242 // Deliberately also allowing "not sure" to map to ReadWrite.
243 return SyncType::kSyncReadWrite;
244 }
245 }
246
sync(const int fd,const bool read,const bool write,const bool start)247 int sync(const int fd, const bool read, const bool write, const bool start)
248 {
249 if (start)
250 {
251 return get_allocator().CpuSyncStart(fd, sync_type_for_flags(read, write));
252 }
253 else
254 {
255 return get_allocator().CpuSyncEnd(fd, sync_type_for_flags(read, write));
256 }
257 }
258
mali_gralloc_ion_sync(const private_handle_t * const hnd,const bool read,const bool write,const bool start)259 int mali_gralloc_ion_sync(const private_handle_t * const hnd,
260 const bool read,
261 const bool write,
262 const bool start)
263 {
264 if (hnd == NULL)
265 {
266 return -EINVAL;
267 }
268
269 for (int i = 0; i < hnd->fd_count; i++)
270 {
271 const int fd = hnd->fds[i];
272 if (const int ret = sync(fd, read, write, start))
273 {
274 return ret;
275 }
276 }
277
278 return 0;
279 }
280
281
/* Signals the start of a CPU access window on all fds of the handle.
 * Thin wrapper over mali_gralloc_ion_sync() with start == true. */
int mali_gralloc_ion_sync_start(const private_handle_t * const hnd,
                                const bool read,
                                const bool write)
{
	return mali_gralloc_ion_sync(hnd, read, write, true);
}
288
289
/* Signals the end of a CPU access window on all fds of the handle.
 * Thin wrapper over mali_gralloc_ion_sync() with start == false. */
int mali_gralloc_ion_sync_end(const private_handle_t * const hnd,
                              const bool read,
                              const bool write)
{
	return mali_gralloc_ion_sync(hnd, read, write, false);
}
296
297
mali_gralloc_ion_free(private_handle_t * const hnd)298 void mali_gralloc_ion_free(private_handle_t * const hnd)
299 {
300 for (int i = 0; i < hnd->fd_count; i++)
301 {
302 close(hnd->fds[i]);
303 hnd->fds[i] = -1;
304 }
305 delete hnd;
306 }
307
mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,const uint32_t num_hnds)308 void mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,
309 const uint32_t num_hnds)
310 {
311 for (uint32_t i = 0; i < num_hnds; i++)
312 {
313 if (pHandle[i] != NULL)
314 {
315 private_handle_t * const hnd = (private_handle_t * const)pHandle[i];
316 mali_gralloc_ion_free(hnd);
317 }
318 }
319 }
320
/*
 * Allocates the shared-attribute region of a buffer (hnd->attr_size bytes,
 * CPU read/write) and stores its fd at the handle's attribute-fd index.
 *
 * @param hnd  Handle to attach the attribute buffer to.
 *
 * @return 0 on success, -1 if the dmabuf allocation failed.
 */
int mali_gralloc_ion_allocate_attr(private_handle_t *hnd)
{
	ATRACE_CALL();

	int idx = hnd->get_share_attr_fd_index();
	/* Attribute metadata is read and written by the CPU on both sides. */
	uint64_t usage = GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN;

	hnd->fds[idx] = alloc_from_dmabuf_heap(usage, hnd->attr_size);
	if (hnd->fds[idx] < 0)
	{
		MALI_GRALLOC_LOGE("ion_alloc failed");
		return -1;
	}

	/* Count the new fd so cleanup paths know to close it. */
	hnd->incr_numfds(1);

	return 0;
}
339
/*
 * Allocates ION buffers
 *
 * @param descriptors     [in]   Buffer request descriptors
 * @param numDescriptors  [in]   Number of descriptors
 * @param pHandle         [out]  Handle for each allocated buffer
 * @param shared_backend  [out]  Shared buffers flag (currently unused)
 *
 * @return 0 on success;
 *         -1, otherwise.
 */
mali_gralloc_ion_allocate(const gralloc_buffer_descriptor_t * descriptors,uint32_t numDescriptors,buffer_handle_t * pHandle,bool * shared_backend,int ion_fd)351 int mali_gralloc_ion_allocate(const gralloc_buffer_descriptor_t *descriptors,
352 uint32_t numDescriptors, buffer_handle_t *pHandle,
353 bool *shared_backend, int ion_fd)
354 {
355 ATRACE_CALL();
356 GRALLOC_UNUSED(shared_backend);
357
358 unsigned int priv_heap_flag = 0;
359 uint64_t usage;
360 uint32_t i;
361
362 for (i = 0; i < numDescriptors; i++)
363 {
364 buffer_descriptor_t *bufDescriptor = reinterpret_cast<buffer_descriptor_t *>(descriptors[i]);
365 assert(bufDescriptor);
366 assert(bufDescriptor->fd_count >= 0);
367 assert(bufDescriptor->fd_count <= MAX_FDS);
368
369 auto hnd = new private_handle_t(
370 priv_heap_flag,
371 bufDescriptor->alloc_sizes,
372 bufDescriptor->consumer_usage, bufDescriptor->producer_usage,
373 nullptr, bufDescriptor->fd_count,
374 bufDescriptor->hal_format, bufDescriptor->alloc_format,
375 bufDescriptor->width, bufDescriptor->height, bufDescriptor->pixel_stride,
376 bufDescriptor->layer_count, bufDescriptor->plane_info);
377
378 /* Reset the number of valid filedescriptors, we will increment
379 * it each time a valid fd is added, so we can rely on the
380 * cleanup functions to close open fds. */
381 hnd->set_numfds(0);
382
383 if (nullptr == hnd)
384 {
385 MALI_GRALLOC_LOGE("Private handle could not be created for descriptor:%d in non-shared usecase", i);
386 mali_gralloc_ion_free_internal(pHandle, i);
387 return -1;
388 }
389
390 pHandle[i] = hnd;
391 usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
392
393 for (uint32_t fidx = 0; fidx < bufDescriptor->fd_count; fidx++)
394 {
395 int& fd = hnd->fds[fidx];
396
397 if (ion_fd >= 0 && fidx == 0) {
398 fd = ion_fd;
399 } else {
400 fd = alloc_from_dmabuf_heap(usage, bufDescriptor->alloc_sizes[fidx], bufDescriptor->name);
401 }
402
403 if (fd < 0)
404 {
405 MALI_GRALLOC_LOGE("ion_alloc failed for fds[%u] = %d", fidx, fd);
406 mali_gralloc_ion_free_internal(pHandle, i + 1);
407 return -1;
408 }
409
410 hnd->incr_numfds(1);
411 }
412 }
413
414 #if defined(GRALLOC_INIT_AFBC) && (GRALLOC_INIT_AFBC == 1)
415 ATRACE_NAME("AFBC init block");
416 unsigned char *cpu_ptr = NULL;
417 for (i = 0; i < numDescriptors; i++)
418 {
419 buffer_descriptor_t *bufDescriptor = reinterpret_cast<buffer_descriptor_t *>(descriptors[i]);
420 const private_handle_t *hnd = static_cast<const private_handle_t *>(pHandle[i]);
421
422 usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
423
424 if ((bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK)
425 && !(usage & GRALLOC_USAGE_PROTECTED))
426 {
427 {
428 ATRACE_NAME("mmap");
429 /* TODO: only map for AFBC buffers */
430 cpu_ptr =
431 (unsigned char *)mmap(NULL, bufDescriptor->alloc_sizes[0], PROT_READ | PROT_WRITE, MAP_SHARED, hnd->fds[0], 0);
432
433 if (MAP_FAILED == cpu_ptr)
434 {
435 MALI_GRALLOC_LOGE("mmap failed for fd ( %d )", hnd->fds[0]);
436 mali_gralloc_ion_free_internal(pHandle, numDescriptors);
437 return -1;
438 }
439
440 mali_gralloc_ion_sync_start(hnd, true, true);
441 }
442
443 {
444 ATRACE_NAME("data init");
445 /* For separated plane YUV, there is a header to initialise per plane. */
446 const plane_info_t *plane_info = bufDescriptor->plane_info;
447 assert(plane_info);
448 const bool is_multi_plane = hnd->is_multi_plane();
449 for (int i = 0; i < MAX_PLANES && (i == 0 || plane_info[i].byte_stride != 0); i++)
450 {
451 init_afbc(cpu_ptr + plane_info[i].offset,
452 bufDescriptor->alloc_format,
453 is_multi_plane,
454 plane_info[i].alloc_width,
455 plane_info[i].alloc_height);
456 }
457 }
458
459 {
460 ATRACE_NAME("munmap");
461 mali_gralloc_ion_sync_end(hnd, true, true);
462 munmap(cpu_ptr, bufDescriptor->alloc_sizes[0]);
463 }
464 }
465 }
466 #endif
467
468 return 0;
469 }
470
mali_gralloc_ion_map(private_handle_t * hnd)471 std::array<void*, MAX_BUFFER_FDS> mali_gralloc_ion_map(private_handle_t *hnd)
472 {
473 std::array<void*, MAX_BUFFER_FDS> vaddrs;
474 vaddrs.fill(nullptr);
475
476 uint64_t usage = hnd->producer_usage | hnd->consumer_usage;
477 /* Do not allow cpu access to secure buffers */
478 if (usage & (GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_NOZEROED)
479 && !(usage & GRALLOC_USAGE_PRIVATE_NONSECURE))
480 {
481 return vaddrs;
482 }
483
484 for (int fidx = 0; fidx < hnd->fd_count; fidx++) {
485 unsigned char *mappedAddress =
486 (unsigned char *)mmap(NULL, hnd->alloc_sizes[fidx], PROT_READ | PROT_WRITE,
487 MAP_SHARED, hnd->fds[fidx], 0);
488
489 if (MAP_FAILED == mappedAddress)
490 {
491 int err = errno;
492 MALI_GRALLOC_LOGE("mmap( fds[%d]:%d size:%" PRIu64 " ) failed with %s",
493 fidx, hnd->fds[fidx], hnd->alloc_sizes[fidx], strerror(err));
494 hnd->dump("map fail");
495
496 for (int cidx = 0; cidx < fidx; fidx++)
497 {
498 munmap((void*)vaddrs[cidx], hnd->alloc_sizes[cidx]);
499 vaddrs[cidx] = 0;
500 }
501
502 return vaddrs;
503 }
504
505 vaddrs[fidx] = mappedAddress;
506 }
507
508 return vaddrs;
509 }
510
mali_gralloc_ion_unmap(private_handle_t * hnd,std::array<void *,MAX_BUFFER_FDS> & vaddrs)511 void mali_gralloc_ion_unmap(private_handle_t *hnd, std::array<void*, MAX_BUFFER_FDS>& vaddrs)
512 {
513 for (int i = 0; i < hnd->fd_count; i++)
514 {
515 int err = 0;
516
517 if (vaddrs[i])
518 {
519 err = munmap(vaddrs[i], hnd->alloc_sizes[i]);
520 }
521
522 if (err)
523 {
524 MALI_GRALLOC_LOGE("Could not munmap base:%p size:%" PRIu64 " '%s'",
525 (void*)vaddrs[i], hnd->alloc_sizes[i], strerror(errno));
526 }
527 else
528 {
529 vaddrs[i] = 0;
530 }
531 }
532
533 hnd->cpu_read = 0;
534 hnd->cpu_write = 0;
535 }
536