/*
 * Copyright (C) 2016-2020 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS

#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdlib.h>
#include <limits.h>

#include <log/log.h>
#include <cutils/atomic.h>
#include <utils/Trace.h>

#include <linux/dma-buf.h>
#include <vector>
#include <sys/ioctl.h>

#include <hardware/hardware.h>
#include <hardware/gralloc1.h>

#include <hardware/exynos/ion.h>
#include <hardware/exynos/dmabuf_container.h>

#include <BufferAllocator/BufferAllocator.h>
#include "mali_gralloc_buffer.h"
#include "gralloc_helper.h"
#include "mali_gralloc_formats.h"
#include "mali_gralloc_usages.h"
#include "core/format_info.h"
#include "core/mali_gralloc_bufferdescriptor.h"
#include "core/mali_gralloc_bufferallocation.h"

#include "mali_gralloc_ion.h"

#include <array>

#define INIT_ZERO(obj) (memset(&(obj), 0, sizeof((obj))))

#define HEAP_MASK_FROM_ID(id) (1 << (id))
#define HEAP_MASK_FROM_TYPE(type) (1 << (type))

#if defined(ION_HEAP_SECURE_MASK)
#if (HEAP_MASK_FROM_TYPE(ION_HEAP_TYPE_SECURE) != ION_HEAP_SECURE_MASK)
#error "ION_HEAP_TYPE_SECURE value is not compatible with ION_HEAP_SECURE_MASK"
#endif
#endif

static const char kDmabufSensorDirectHeapName[] = "sensor_direct_heap";
static const char kDmabufFaceauthTpuHeapName[] = "faceauth_tpu-secure";
static const char kDmabufFaceauthImgHeapName[] = "faimg-secure";
static const char kDmabufFaceauthRawImgHeapName[] = "farawimg-secure";
static const char kDmabufFaceauthPrevHeapName[] = "faprev-secure";
static const char kDmabufFaceauthModelHeapName[] = "famodel-secure";
static const char kDmabufVframeSecureHeapName[] = "vframe-secure";
static const char kDmabufVstreamSecureHeapName[] = "vstream-secure";

struct ion_device
{
	int client()
	{
		return ion_client;
	}

	static void close()
	{
		ion_device &dev = get_inst();
		if (dev.ion_client >= 0)
		{
			exynos_ion_close(dev.ion_client);
			dev.ion_client = -1;
		}

		dev.buffer_allocator.reset();
	}

	static ion_device *get()
	{
		ion_device &dev = get_inst();
		if (!dev.buffer_allocator)
		{
			dev.buffer_allocator = std::make_unique<BufferAllocator>();
			if (!dev.buffer_allocator)
				ALOGE("Unable to create BufferAllocator object");
		}

		if (dev.ion_client < 0)
		{
			if (dev.open_and_query_ion() != 0)
			{
				close();
			}
		}

		if (dev.ion_client < 0)
		{
			return nullptr;
		}
		return &dev;
	}

	/*
	 *  Identifies a heap and retrieves a file descriptor from ION for allocation.
	 *
	 * @param usage     [in]    Producer and consumer combined usage.
	 * @param size      [in]    Requested buffer size (in bytes).
	 * @param flags     [in]    ION allocation attributes defined by ION_FLAG_*.
	 * @param min_pgsz  [out]   Minimum page size (in bytes).
	 *
	 * @return fd of the allocated buffer, on success
	 *         -1, otherwise.
	 */
	int alloc_from_ion_heap(uint64_t usage, size_t size, unsigned int flags, int *min_pgsz);

	/*
	 *  Signals the start or end of a region where the CPU is accessing a
	 *  buffer, allowing appropriate cache synchronization.
	 *
	 * @param fd        [in]    fd for the buffer
	 * @param read      [in]    True if the CPU is reading from the buffer
	 * @param write     [in]    True if the CPU is writing to the buffer
	 * @param start     [in]    True if the CPU has not yet performed the
	 *                          operations; false if the operations are
	 *                          completed.
	 *
	 * @return 0 on success; an error code otherwise.
	 */
	int sync(int fd, bool read, bool write, bool start);

private:
	int ion_client;
	std::unique_ptr<BufferAllocator> buffer_allocator;

	ion_device()
	    : ion_client(-1)
	{
	}

	static ion_device& get_inst()
	{
		static ion_device dev;
		return dev;
	}

	/*
	 * Opens the ION module. Queries heap information and stores it for later use.
	 *
	 * @return              0 in case of success
	 *                      -1 for all error cases
	 */
	int open_and_query_ion();

	/*
	 *  Allocates in the DMA-BUF heap with name @heap_name. If allocation fails from
	 *  the DMA-BUF heap or if it does not exist, falls back to an ION heap of the
	 *  same name.
	 *
	 * @param heap_name [in]    DMA-BUF heap name for allocation
	 * @param size      [in]    Requested buffer size (in bytes).
	 * @param flags     [in]    ION allocation attributes defined by ION_FLAG_* to
	 *                          be used for ION allocations. Will not be used with
	 *                          DMA-BUF heaps since the framework does not support
	 *                          allocation flags.
	 *
	 * @return fd of the allocated buffer on success, -1 otherwise.
	 */
	int alloc_from_dmabuf_heap(const std::string& heap_name, size_t size, unsigned int flags);
};
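
/*
 * Illustrative sketch (added for clarity, not part of the upstream driver): how
 * code in this file is expected to obtain the lazily-initialised ion_device
 * singleton and allocate from it. The usage bits and size below are hypothetical
 * example values.
 *
 *     int min_pgsz = 0;
 *     ion_device *dev = ion_device::get();   // opens ION / BufferAllocator on first use
 *     if (dev != nullptr)
 *     {
 *         int fd = dev->alloc_from_ion_heap(GRALLOC_USAGE_SW_READ_OFTEN, 4096,
 *                                           ION_FLAG_CACHED, &min_pgsz);
 *         if (fd >= 0)
 *         {
 *             // ... wrap fd in a private_handle_t; close(fd) once it is no longer needed ...
 *         }
 *     }
 */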

static void set_ion_flags(uint64_t usage, unsigned int *ion_flags)
{
	if (ion_flags == nullptr)
		return;

	if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
	{
		*ion_flags |= ION_FLAG_CACHED;
	}

	// DRM or Secure Camera
	if (usage & (GRALLOC_USAGE_PROTECTED))
	{
		*ion_flags |= ION_FLAG_PROTECTED;
	}

	/* TODO: used for exynos3830. Add this as an option to Android.bp */
#if defined(GRALLOC_SCALER_WFD) && GRALLOC_SCALER_WFD == 1
	if (usage & GRALLOC_USAGE_PRIVATE_NONSECURE && usage & GRALLOC_USAGE_HW_COMPOSER)
	{
		*ion_flags |= ION_FLAG_PROTECTED;
	}
#endif
	/* Sensor direct channels require uncached allocations. */
	if (usage & GRALLOC_USAGE_SENSOR_DIRECT_DATA)
	{
		*ion_flags &= ~ION_FLAG_CACHED;
	}
}
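
/*
 * Illustrative examples (added for clarity, not part of the upstream driver),
 * assuming GRALLOC_SCALER_WFD is not enabled: a buffer read often by the CPU is
 * left cached, while a protected composer buffer gets ION_FLAG_PROTECTED.
 *
 *     unsigned int flags = 0;
 *     set_ion_flags(GRALLOC_USAGE_SW_READ_OFTEN, &flags);
 *     // flags == ION_FLAG_CACHED
 *
 *     flags = 0;
 *     set_ion_flags(GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_COMPOSER, &flags);
 *     // flags == ION_FLAG_PROTECTED
 */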

static unsigned int select_faceauth_heap_mask(uint64_t usage)
{
	struct HeapSpecifier
	{
		uint64_t      usage_bits; // exact match required
		unsigned int  mask;
	};

	static constexpr std::array<HeapSpecifier, 5> faceauth_heaps =
	{{
		{ // isp_image_heap
			GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GS101_GRALLOC_USAGE_TPU_INPUT,
			EXYNOS_ION_HEAP_FA_IMG_MASK
		},
		{ // isp_internal_heap
			GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_CAMERA_READ,
			EXYNOS_ION_HEAP_FA_RAWIMG_MASK
		},
		{ // isp_preview_heap
			GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_HW_COMPOSER |
			GRALLOC_USAGE_HW_TEXTURE,
			EXYNOS_ION_HEAP_FA_PREV_MASK
		},
		{ // ml_model_heap
			GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_INPUT,
			EXYNOS_ION_HEAP_FA_MODEL_MASK
		},
		{ // tpu_heap
			GRALLOC_USAGE_PROTECTED | GS101_GRALLOC_USAGE_TPU_OUTPUT | GS101_GRALLOC_USAGE_TPU_INPUT,
			EXYNOS_ION_HEAP_FA_TPU_MASK
		}
	}};

	for (const HeapSpecifier &heap : faceauth_heaps)
	{
		if (usage == heap.usage_bits)
		{
			ALOGV("Using FaceAuth heap mask 0x%x for usage 0x%" PRIx64 "\n",
			      heap.mask, usage);
			return heap.mask;
		}
	}

	return 0;
}

static unsigned int select_heap_mask(uint64_t usage)
{
	if (unsigned int faceauth_heap_mask = select_faceauth_heap_mask(usage);
	    faceauth_heap_mask != 0)
	{
		return faceauth_heap_mask;
	}

	unsigned int heap_mask;

	if (usage & GRALLOC_USAGE_PROTECTED)
	{
		if (usage & GRALLOC_USAGE_PRIVATE_NONSECURE)
		{
			heap_mask = EXYNOS_ION_HEAP_SYSTEM_MASK;
		}
		else if ((usage & GRALLOC_USAGE_HW_COMPOSER) &&
			!(usage & GRALLOC_USAGE_HW_TEXTURE) &&
			!(usage & GRALLOC_USAGE_HW_RENDER))
		{
			heap_mask = EXYNOS_ION_HEAP_VIDEO_SCALER_MASK;
		}
		else
		{
			heap_mask = EXYNOS_ION_HEAP_VIDEO_FRAME_MASK;
		}
	}
	/* TODO: used for exynos3830. Add this as an option to Android.bp */
#if defined(GRALLOC_SCALER_WFD) && GRALLOC_SCALER_WFD == 1
	else if (usage & GRALLOC_USAGE_PRIVATE_NONSECURE && usage & GRALLOC_USAGE_HW_COMPOSER)
	{
		heap_mask = EXYNOS_ION_HEAP_EXT_UI_MASK;
	}
#endif
	else if (usage & GRALLOC_USAGE_SENSOR_DIRECT_DATA)
	{
		heap_mask = EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK;
	}
	else
	{
		heap_mask = EXYNOS_ION_HEAP_SYSTEM_MASK;
	}

	return heap_mask;
}

/*
 * Selects a DMA-BUF heap name.
 *
 * @param heap_mask     [in]    heap_mask for which the equivalent DMA-BUF heap
 *                              name must be found.
 *
 * @return the name of the DMA-BUF heap equivalent to the ION heap of mask
 *         @heap_mask.
 *
 */
static std::string select_dmabuf_heap(unsigned int heap_mask)
{
	switch (heap_mask) {
		case EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK:
			return kDmabufSensorDirectHeapName;
		case EXYNOS_ION_HEAP_FA_TPU_MASK:
			return kDmabufFaceauthTpuHeapName;
		case EXYNOS_ION_HEAP_FA_IMG_MASK:
			return kDmabufFaceauthImgHeapName;
		case EXYNOS_ION_HEAP_FA_RAWIMG_MASK:
			return kDmabufFaceauthRawImgHeapName;
		case EXYNOS_ION_HEAP_FA_PREV_MASK:
			return kDmabufFaceauthPrevHeapName;
		case EXYNOS_ION_HEAP_FA_MODEL_MASK:
			return kDmabufFaceauthModelHeapName;
		case EXYNOS_ION_HEAP_VIDEO_FRAME_MASK:
			return kDmabufVframeSecureHeapName;
		case EXYNOS_ION_HEAP_VIDEO_STREAM_MASK:
			return kDmabufVstreamSecureHeapName;
		default:
			return {};
	}
}
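
/*
 * Illustrative example (added for clarity, not part of the upstream driver): how a
 * usage mask flows through heap selection. A sensor-direct allocation resolves to
 * EXYNOS_ION_HEAP_SENSOR_DIRECT_MASK, which select_dmabuf_heap() maps to the
 * "sensor_direct_heap" DMA-BUF heap; heap masks with no DMA-BUF equivalent return
 * an empty string, and the allocation falls back to exynos_ion_alloc() below.
 *
 *     unsigned int mask = select_heap_mask(GRALLOC_USAGE_SENSOR_DIRECT_DATA);
 *     std::string name = select_dmabuf_heap(mask);   // "sensor_direct_heap"
 */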

int ion_device::alloc_from_dmabuf_heap(const std::string& heap_name, size_t size,
				       unsigned int flags)
{
	ATRACE_NAME(("alloc_from_dmabuf_heap " + heap_name).c_str());
	if (!buffer_allocator)
	{
		return -1;
	}

	int shared_fd = buffer_allocator->Alloc(heap_name, size, flags);
	if (shared_fd < 0)
	{
		ALOGE("Allocation failed for heap %s error: %d\n", heap_name.c_str(), shared_fd);
	}

	return shared_fd;
}

int ion_device::alloc_from_ion_heap(uint64_t usage, size_t size, unsigned int flags, int *min_pgsz)
{
	ATRACE_CALL();
	/* TODO: remove min_pgsz? I don't think this is useful on Exynos */
	if (size == 0 || min_pgsz == NULL)
	{
		return -1;
	}

	unsigned int heap_mask = select_heap_mask(usage);

	int shared_fd;
	auto dmabuf_heap_name = select_dmabuf_heap(heap_mask);
	if (!dmabuf_heap_name.empty())
	{
		shared_fd = alloc_from_dmabuf_heap(dmabuf_heap_name, size, flags);
	}
	else
	{
		if (ion_client < 0)
		{
			return -1;
		}

		shared_fd = exynos_ion_alloc(ion_client, size, heap_mask, flags);
	}

	*min_pgsz = SZ_4K;

	return shared_fd;
}

int ion_device::open_and_query_ion()
{
	if (ion_client >= 0)
	{
		MALI_GRALLOC_LOGW("ION device already open");
		return 0;
	}

	ion_client = exynos_ion_open();
	if (ion_client < 0)
	{
		MALI_GRALLOC_LOGE("ion_open failed with %s", strerror(errno));
		return -1;
	}

	return 0;
}

static SyncType sync_type_for_flags(const bool read, const bool write)
{
	if (read && !write)
	{
		return SyncType::kSyncRead;
	}
	else if (write && !read)
	{
		return SyncType::kSyncWrite;
	}
	else
	{
		// Deliberately also allowing "not sure" to map to ReadWrite.
		return SyncType::kSyncReadWrite;
	}
}
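
/*
 * Illustrative mapping (added for clarity, not part of the upstream driver) of the
 * flag pairs accepted by sync_type_for_flags():
 *
 *     sync_type_for_flags(true,  false)  -> SyncType::kSyncRead
 *     sync_type_for_flags(false, true)   -> SyncType::kSyncWrite
 *     sync_type_for_flags(true,  true)   -> SyncType::kSyncReadWrite
 *     sync_type_for_flags(false, false)  -> SyncType::kSyncReadWrite ("not sure" case)
 */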

int ion_device::sync(const int fd, const bool read, const bool write, const bool start)
{
	if (!buffer_allocator)
	{
		return -1;
	}

	if (start)
	{
		return buffer_allocator->CpuSyncStart(fd, sync_type_for_flags(read, write));
	}
	else
	{
		return buffer_allocator->CpuSyncEnd(fd, sync_type_for_flags(read, write));
	}
}

static int mali_gralloc_ion_sync(const private_handle_t * const hnd,
                                       const bool read,
                                       const bool write,
                                       const bool start)
{
	if (hnd == NULL)
	{
		return -EINVAL;
	}

	ion_device *dev = ion_device::get();
	if (!dev)
	{
		return -1;
	}

	for (int i = 0; i < hnd->fd_count; i++)
	{
		const int fd = hnd->fds[i];
		if (const int ret = dev->sync(fd, read, write, start))
		{
			return ret;
		}
	}

	return 0;
}


/*
 * Signal start of CPU access to the DMABUF exported from ION.
 *
 * @param hnd   [in]    Buffer handle
 * @param read  [in]    Flag indicating CPU read access to memory
 * @param write [in]    Flag indicating CPU write access to memory
 *
 * @return              0 in case of success
 *                      errno for all error cases
 */
int mali_gralloc_ion_sync_start(const private_handle_t * const hnd,
                                const bool read,
                                const bool write)
{
	return mali_gralloc_ion_sync(hnd, read, write, true);
}


/*
 * Signal end of CPU access to the DMABUF exported from ION.
 *
 * @param hnd   [in]    Buffer handle
 * @param read  [in]    Flag indicating CPU read access to memory
 * @param write [in]    Flag indicating CPU write access to memory
 *
 * @return              0 in case of success
 *                      errno for all error cases
 */
int mali_gralloc_ion_sync_end(const private_handle_t * const hnd,
                              const bool read,
                              const bool write)
{
	return mali_gralloc_ion_sync(hnd, read, write, false);
}
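
/*
 * Illustrative sketch (added for clarity, not part of the upstream driver): CPU
 * access to a mapped buffer is expected to be bracketed by the two helpers above
 * so caches are synchronised around the access. The handle and the write-only
 * access pattern below are hypothetical.
 *
 *     if (mali_gralloc_ion_sync_start(hnd, false, true) == 0)
 *     {
 *         // ... CPU writes to the mapped planes of hnd ...
 *         mali_gralloc_ion_sync_end(hnd, false, true);
 *     }
 */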


void mali_gralloc_ion_free(private_handle_t * const hnd)
{
	for (int i = 0; i < hnd->fd_count; i++)
	{
		void* mapped_addr = reinterpret_cast<void*>(hnd->bases[i]);

		/* Buffer might be unregistered already, so we need to ensure we have a valid handle */
		if (mapped_addr != nullptr)
		{
			if (munmap(mapped_addr, hnd->alloc_sizes[i]) != 0)
			{
				/* TODO: more detailed error logs */
				MALI_GRALLOC_LOGE("Failed to munmap handle %p", hnd);
			}
		}
		close(hnd->fds[i]);
		hnd->fds[i] = -1;
		hnd->bases[i] = 0;
	}
	delete hnd;
}

static void mali_gralloc_ion_free_internal(buffer_handle_t * const pHandle,
                                           const uint32_t num_hnds)
{
	for (uint32_t i = 0; i < num_hnds; i++)
	{
		if (pHandle[i] != NULL)
		{
			private_handle_t * const hnd = (private_handle_t * const)pHandle[i];
			mali_gralloc_ion_free(hnd);
		}
	}
}

int mali_gralloc_ion_allocate_attr(private_handle_t *hnd)
{
	ATRACE_CALL();
	ion_device *dev = ion_device::get();
	if (!dev)
	{
		return -1;
	}

	int idx = hnd->get_share_attr_fd_index();
	int ion_flags = 0;
	int min_pgsz;
	uint64_t usage = GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN;

	ion_flags = ION_FLAG_CACHED;

	hnd->fds[idx] = dev->alloc_from_ion_heap(usage, hnd->attr_size, ion_flags, &min_pgsz);
	if (hnd->fds[idx] < 0)
	{
		MALI_GRALLOC_LOGE("ion_alloc failed from client ( %d )", dev->client());
		return -1;
	}

	hnd->incr_numfds(1);

	return 0;
}

/*
 *  Allocates ION buffers
 *
 * @param descriptors     [in]    Buffer request descriptors
 * @param numDescriptors  [in]    Number of descriptors
 * @param pHandle         [out]   Handle for each allocated buffer
 * @param shared_backend  [out]   Shared buffers flag
 *
 * @return 0, on success
 *         -1, otherwise.
 */
int mali_gralloc_ion_allocate(const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle,
                              bool *shared_backend, int ion_fd)
{
	GRALLOC_UNUSED(shared_backend);

	unsigned int priv_heap_flag = 0;
	uint64_t usage;
	uint32_t i;
	unsigned int ion_flags = 0;
	int min_pgsz = 0;
	int fds[MAX_FDS];
	std::fill(fds, fds + MAX_FDS, -1);

	ion_device *dev = ion_device::get();
	if (!dev)
	{
		return -1;
	}

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		ion_flags = 0;
		set_ion_flags(usage, &ion_flags);

		for (int fidx = 0; fidx < bufDescriptor->fd_count; fidx++)
		{
			if (ion_fd >= 0 && fidx == 0) {
				fds[fidx] = ion_fd;
			} else {
				fds[fidx] = dev->alloc_from_ion_heap(usage, bufDescriptor->alloc_sizes[fidx], ion_flags, &min_pgsz);
			}
			if (fds[fidx] < 0)
			{
				MALI_GRALLOC_LOGE("ion_alloc failed from client ( %d )", dev->client());

				for (int cidx = 0; cidx < fidx; cidx++)
				{
					close(fds[cidx]);
				}

				/* need to free already allocated memory. not just this one */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);

				return -1;
			}
		}

		private_handle_t *hnd = new private_handle_t(
		    priv_heap_flag,
		    bufDescriptor->alloc_sizes,
		    bufDescriptor->consumer_usage, bufDescriptor->producer_usage,
		    fds, bufDescriptor->fd_count,
		    bufDescriptor->hal_format, bufDescriptor->alloc_format,
		    bufDescriptor->width, bufDescriptor->height, bufDescriptor->pixel_stride,
		    bufDescriptor->layer_count, bufDescriptor->plane_info);

		if (NULL == hnd)
		{
			MALI_GRALLOC_LOGE("Private handle could not be created for descriptor:%d in non-shared usecase", i);

			/* Close the obtained shared file descriptor for the current handle */
			for (int j = 0; j < bufDescriptor->fd_count; j++)
			{
				close(fds[j]);
			}

			mali_gralloc_ion_free_internal(pHandle, numDescriptors);
			return -1;
		}

		pHandle[i] = hnd;
	}

#if defined(GRALLOC_INIT_AFBC) && (GRALLOC_INIT_AFBC == 1)
	unsigned char *cpu_ptr = NULL;
	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		if ((bufDescriptor->alloc_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK)
			&& !(usage & GRALLOC_USAGE_PROTECTED))
		{
			/* TODO: only map for AFBC buffers */
			cpu_ptr =
			    (unsigned char *)mmap(NULL, bufDescriptor->alloc_sizes[0], PROT_READ | PROT_WRITE, MAP_SHARED, hnd->fds[0], 0);

			if (MAP_FAILED == cpu_ptr)
			{
				MALI_GRALLOC_LOGE("mmap failed from client ( %d ), fd ( %d )", dev->client(), hnd->fds[0]);
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			mali_gralloc_ion_sync_start(hnd, true, true);

			/* For separated plane YUV, there is a header to initialise per plane. */
			const plane_info_t *plane_info = bufDescriptor->plane_info;
			const bool is_multi_plane = hnd->is_multi_plane();
			for (int i = 0; i < MAX_PLANES && (i == 0 || plane_info[i].byte_stride != 0); i++)
			{
				init_afbc(cpu_ptr + plane_info[i].offset,
				          bufDescriptor->alloc_format,
				          is_multi_plane,
				          plane_info[i].alloc_width,
				          plane_info[i].alloc_height);
			}

			mali_gralloc_ion_sync_end(hnd, true, true);

			munmap(cpu_ptr, bufDescriptor->alloc_sizes[0]);
		}
	}
#endif

	return 0;
}
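
/*
 * Illustrative sketch (added for clarity, not part of the upstream driver): a
 * caller holding a single prepared buffer_descriptor_t is expected to invoke the
 * allocator roughly as follows. The "descriptor" variable is hypothetical; passing
 * -1 as ion_fd makes the allocator obtain fresh fds rather than importing one.
 *
 *     gralloc_buffer_descriptor_t descriptors[1] = { descriptor };
 *     buffer_handle_t handles[1] = { nullptr };
 *     if (mali_gralloc_ion_allocate(descriptors, 1, handles, nullptr, -1) == 0)
 *     {
 *         // handles[0] now owns the dmabuf fds; release via mali_gralloc_ion_free()
 *     }
 */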


int mali_gralloc_ion_map(private_handle_t *hnd)
{
	uint64_t usage = hnd->producer_usage | hnd->consumer_usage;

	/* Do not allow cpu access to secure buffers */
	if (usage & (GRALLOC_USAGE_PROTECTED | GRALLOC_USAGE_NOZEROED)
			&& !(usage & GRALLOC_USAGE_PRIVATE_NONSECURE))
	{
		return 0;
	}

	for (int fidx = 0; fidx < hnd->fd_count; fidx++) {
		unsigned char *mappedAddress =
			(unsigned char *)mmap(NULL, hnd->alloc_sizes[fidx], PROT_READ | PROT_WRITE,
					MAP_SHARED, hnd->fds[fidx], 0);

		if (MAP_FAILED == mappedAddress)
		{
			int err = errno;
			MALI_GRALLOC_LOGE("mmap( fds[%d]:%d size:%" PRIu64 " ) failed with %s",
					fidx, hnd->fds[fidx], hnd->alloc_sizes[fidx], strerror(err));
			hnd->dump("map fail");

			for (int cidx = 0; cidx < fidx; cidx++)
			{
				munmap((void*)hnd->bases[cidx], hnd->alloc_sizes[cidx]);
				hnd->bases[cidx] = 0;
			}

			return -err;
		}

		hnd->bases[fidx] = uintptr_t(mappedAddress);
	}

	return 0;
}

int import_exynos_ion_handles(private_handle_t *hnd)
{
	int retval = -1;

	ion_device *dev = ion_device::get();

	for (int idx = 0; idx < hnd->fd_count; idx++)
	{
		if (hnd->fds[idx] >= 0)
		{
			retval = exynos_ion_import_handle(dev->client(), hnd->fds[idx], &hnd->ion_handles[idx]);
			if (retval)
			{
				MALI_GRALLOC_LOGE("error importing ion_handle. ion_client(%d), ion_handle[%d](%d) format(%s %#" PRIx64 ")",
				     dev->client(), idx, hnd->ion_handles[idx], format_name(hnd->alloc_format), hnd->alloc_format);
				goto error;
			}
		}
	}

	return retval;

error:
	for (int idx = 0; idx < hnd->fd_count; idx++)
	{
		if (hnd->ion_handles[idx])
		{
			exynos_ion_free_handle(dev->client(), hnd->ion_handles[idx]);
		}
	}

	return retval;
}

void free_exynos_ion_handles(private_handle_t *hnd)
{
	ion_device *dev = ion_device::get();

	for (int idx = 0; idx < hnd->fd_count; idx++)
	{
		if (hnd->ion_handles[idx])
		{
			if (exynos_ion_free_handle(dev->client(), hnd->ion_handles[idx]))
			{
				MALI_GRALLOC_LOGE("error freeing ion_handle. ion_client(%d), ion_handle[%d](%d) format(%s %#" PRIx64 ")",
					dev->client(), idx, hnd->ion_handles[idx], format_name(hnd->alloc_format), hnd->alloc_format);
			}
		}
	}
}


void mali_gralloc_ion_unmap(private_handle_t *hnd)
{
	for (int i = 0; i < hnd->fd_count; i++)
	{
		int err = 0;

		if (hnd->bases[i])
		{
			err = munmap((void*)hnd->bases[i], hnd->alloc_sizes[i]);
		}

		if (err)
		{
			MALI_GRALLOC_LOGE("Could not munmap base:%p size:%" PRIu64 " '%s'",
					(void*)hnd->bases[i], hnd->alloc_sizes[i], strerror(errno));
		}
		else
		{
			hnd->bases[i] = 0;
		}
	}

	hnd->cpu_read = 0;
	hnd->cpu_write = 0;
}

void mali_gralloc_ion_close(void)
{
	ion_device::close();
}