/*
 * Copyright (C) 2010 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include <sys/ioctl.h>
#include <sys/mman.h>   /* mmap()/munmap() and the PROT_/MAP_ flags used below */
#include <unistd.h>     /* close() */

#include "alloc_device.h"
#include "gralloc_priv.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"

#if GRALLOC_ARM_UMP_MODULE
#include <ump/ump.h>
#include <ump/ump_ref_drv.h>
#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
#include <ion/ion.h>
#include "ion_4.12.h"

/* heap names used to look up heap ids when running on non-legacy (4.12+) ION */
#define ION_SYSTEM	(char*)"ion_system_heap"
#define ION_CMA		(char*)"linux,cma"

#endif

#if GRALLOC_SIMULATE_FAILURES
#include <cutils/properties.h>

/* system property keys for controlling simulated UMP allocation failures */
#define PROP_MALI_TEST_GRALLOC_FAIL_FIRST     "mali.test.gralloc.fail_first"
#define PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL  "mali.test.gralloc.fail_interval"

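/*
 * Illustrative usage (assuming a debuggable build where these properties are
 * writable from the shell):
 *
 *   adb shell setprop mali.test.gralloc.fail_first 3
 *   adb shell setprop mali.test.gralloc.fail_interval 2
 *
 * With these values __ump_alloc_should_fail() below reports a failure on
 * allocation call 3 and then on every 2nd call after that (calls 3, 5, 7, ...).
 */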
static int __ump_alloc_should_fail()
{

	static unsigned int call_count  = 0;
	unsigned int        first_fail  = 0;
	int                 fail_period = 0;
	int                 fail        = 0;

	++call_count;

	/* read the system properties that control failure simulation */
	{
		char prop_value[PROPERTY_VALUE_MAX];

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &first_fail);
		}

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11d", &fail_period);
		}
	}

	/* failure simulation is enabled by setting the first_fail property to non-zero */
	if (first_fail > 0)
	{
		LOGI("iteration %u (fail=%u, period=%d)\n", call_count, first_fail, fail_period);

		fail = (call_count == first_fail) ||
		       (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);

		if (fail)
		{
			AERR("failed ump_ref_drv_allocate on iteration #%u\n", call_count);
		}
	}

	return fail;
}
#endif

#ifdef FBIOGET_DMABUF
static int fb_get_framebuffer_dmabuf(private_module_t *m, private_handle_t *hnd)
{
	struct fb_dmabuf_export fb_dma_buf;
	int res;
	res = ioctl(m->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf);

	if (res == 0)
	{
		hnd->share_fd = fb_dma_buf.fd;
		return 0;
	}
	else
	{
		AINF("FBIOGET_DMABUF ioctl failed(%d). See gralloc_priv.h and the integration manual for vendor framebuffer "
		     "integration",
		     res);
		return -1;
	}
}
#endif

static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		ion_user_handle_t ion_hnd;
		void *cpu_ptr = MAP_FAILED;
		int shared_fd;
		int ret;
		unsigned int heap_mask;
		int lock_state = 0;
		int map_mask = 0;

		if (usage & GRALLOC_USAGE_PROTECTED)
		{
#if defined(ION_HEAP_SECURE_MASK)
			heap_mask = ION_HEAP_SECURE_MASK;
#else
			AERR("The platform does NOT support protected ION memory.");
			return -1;
#endif
		}
		else
		{
			heap_mask = ION_HEAP_SYSTEM_MASK;
		}

		if (m->gralloc_legacy_ion)
		{
			if (usage & GRALLOC_USAGE_HW_FB)
				ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_TYPE_DMA_MASK, 0, &(ion_hnd));
			else
				ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &(ion_hnd));

			if (ret != 0)
			{
				AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
				return -1;
			}

			ret = ion_share(m->ion_client, ion_hnd, &shared_fd);

			if (ret != 0)
			{
				AERR("ion_share( %d ) failed", m->ion_client);

				if (0 != ion_free(m->ion_client, ion_hnd))
				{
					AERR("ion_free( %d ) failed", m->ion_client);
				}

				return -1;
			}

			// we do not need ion_hnd once we have shared_fd
			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AWAR("ion_free( %d ) failed", m->ion_client);
			}
			ion_hnd = ION_INVALID_HANDLE;
		}
		else
		{
			if (usage & GRALLOC_USAGE_HW_FB)
				ret = ion_alloc_fd(m->ion_client, size, 0, 1 << m->cma_heap_id, 0, &(shared_fd));
			else
				ret = ion_alloc_fd(m->ion_client, size, 0, 1 << m->system_heap_id, 0, &(shared_fd));

			if (ret != 0)
			{
				AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
				return -1;
			}
		}
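
		/*
		 * Note (descriptive): at this point both paths have produced a dma-buf fd
		 * in shared_fd. The legacy ION path allocates an ion handle and exports it
		 * via ion_share(), while the 4.12+ path gets the fd directly from
		 * ion_alloc_fd().
		 */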

		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
			map_mask = PROT_READ | PROT_WRITE;
		}
		else
		{
			map_mask = PROT_WRITE;
		}

		cpu_ptr = mmap(NULL, size, map_mask, MAP_SHARED, shared_fd, 0);

		if (MAP_FAILED == cpu_ptr)
		{
			AERR("mmap() failed for ion_client:%d", m->ion_client);

			close(shared_fd);
			return -1;
		}

		lock_state = private_handle_t::LOCK_STATE_MAPPED;

		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, cpu_ptr, lock_state);

		if (NULL != hnd)
		{
			hnd->share_fd = shared_fd;
			*pHandle = hnd;
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		close(shared_fd);

		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %lu", cpu_ptr, (unsigned long)size);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	MALI_IGNORE(dev);
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		ump_alloc_constraints constraints;

		size = round_up_to_page_size(size);

		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}

#ifdef GRALLOC_SIMULATE_FAILURES

		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			if (usage & GRALLOC_USAGE_PROTECTED)
			{
				AERR("gralloc_alloc_buffer() does not support allocating protected UMP memory.");
			}
			else
			{
				ump_mem_handle = ump_ref_drv_allocate(size, constraints);

				if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
				{
					cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

					if (NULL != cpu_ptr)
					{
						ump_id = ump_secure_id_get(ump_mem_handle);

						if (UMP_INVALID_SECURE_ID != ump_id)
						{
							private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, cpu_ptr,
							        private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

							if (NULL != hnd)
							{
								*pHandle = hnd;
								return 0;
							}
							else
							{
								AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
							}
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
						}

						ump_mapped_pointer_release(ump_mem_handle);
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
					}

					ump_reference_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%zu constraints: %d", size, constraints);
				}
			}
		}

		return -1;
	}
#endif

}

#ifndef DISABLE_FRAMEBUFFER_HAL
static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

	// allocate the framebuffer
	if (m->framebuffer == NULL)
	{
		// initialize the framebuffer; it is mapped once and stays mapped for the lifetime of the module
		int err = init_frame_buffer_locked(m);

		if (err < 0)
		{
			return err;
		}
	}

	uint32_t bufferMask = m->bufferMask;
	const uint32_t numBuffers = m->numBuffers;
	const size_t bufferSize = m->finfo.line_length * m->info.yres;

	if (numBuffers == 1)
	{
		// If we have only one buffer, we never use page-flipping. Instead,
		// we return a regular buffer which will be memcpy'ed to the main
		// screen when post is called.
		int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
		AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
		return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
	}

	if (bufferMask >= ((1LU << numBuffers) - 1))
	{
		// We ran out of buffers, reset bufferMask.
		bufferMask = 0;
		m->bufferMask = 0;
	}

	void *vaddr = m->framebuffer->base;

	// find a free slot
	for (uint32_t i = 0 ; i < numBuffers ; i++)
	{
		if ((bufferMask & (1LU << i)) == 0)
		{
			m->bufferMask |= (1LU << i);
			break;
		}

		vaddr = (void *)((uintptr_t)vaddr + bufferSize);
	}
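
	/*
	 * Example (illustrative): with numBuffers = 2 and bufferMask = 0b01, slot 0 is
	 * taken, so the loop above advances vaddr by one bufferSize, claims slot 1 and
	 * the new handle ends up pointing at the second buffer in the mapping.
	 */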

	// The entire framebuffer memory is already mapped; now create a buffer object for part of this memory
	private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
	        0, m->framebuffer->fd, (uintptr_t)vaddr - (uintptr_t) m->framebuffer->base, m->framebuffer->fb_paddr);

#if GRALLOC_ARM_UMP_MODULE
	hnd->ump_id = m->framebuffer->ump_id;

	/* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
	if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
	{
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
		{
			AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
		}
	}

#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
	{
#ifdef FBIOGET_DMABUF
		/*
		 * Perform allocator specific actions. If these fail we fall back to a regular buffer
		 * which will be memcpy'ed to the main screen when fb_post is called.
		 */
		if (fb_get_framebuffer_dmabuf(m, hnd) == -1)
		{
			int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;

			AINF("Fallback to single buffering. Unable to map framebuffer memory to handle:%p", hnd);
			return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
		}
#endif
	}

	// correct numFds/numInts when there is no dmabuf fd
	if (hnd->share_fd < 0)
	{
		hnd->numFds--;
		hnd->numInts++;
	}
#endif

	*pHandle = hnd;

	return 0;
}

static int gralloc_alloc_framebuffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
	pthread_mutex_lock(&m->lock);
	int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
	pthread_mutex_unlock(&m->lock);
	return err;
}
#endif /* DISABLE_FRAMEBUFFER_HAL */

static int alloc_device_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *pHandle, int *pStride)
{
	if (!pHandle || !pStride)
	{
		return -EINVAL;
	}

	size_t size;
	size_t stride;
	int bpp = 1;

	if (format == HAL_PIXEL_FORMAT_YCrCb_420_SP || format == HAL_PIXEL_FORMAT_YV12
	        /* HAL_PIXEL_FORMAT_YCbCr_420_SP, HAL_PIXEL_FORMAT_YCbCr_420_P and HAL_PIXEL_FORMAT_YCbCr_422_I are not defined in Android.
	         * To enable Mali DDK EGLImage support for these formats, first add them to Android's system/core/include/system/graphics.h,
	         * then define SUPPORT_LEGACY_FORMAT in the same header file (the Mali DDK also checks this definition).
	         */
#ifdef SUPPORT_LEGACY_FORMAT
	        || format == HAL_PIXEL_FORMAT_YCbCr_420_SP || format == HAL_PIXEL_FORMAT_YCbCr_420_P || format == HAL_PIXEL_FORMAT_YCbCr_422_I
#endif
	   )
	{
		switch (format)
		{
			case HAL_PIXEL_FORMAT_YCrCb_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YV12:
#ifdef SUPPORT_LEGACY_FORMAT
			case HAL_PIXEL_FORMAT_YCbCr_420_P:
#endif
				/*
				 * Utgard enforces 64-byte alignment on texture and mipmap addresses, so make sure
				 * the V and U plane start addresses are 64-byte aligned.
				 */
				stride = GRALLOC_ALIGN(w, (h % 8 == 0) ? GRALLOC_ALIGN_BASE_16 :
										 ((h % 4 == 0) ? GRALLOC_ALIGN_BASE_64 : GRALLOC_ALIGN_BASE_128));
				size = GRALLOC_ALIGN(h, 2) * (stride + GRALLOC_ALIGN(stride / 2, 16));
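
				/*
				 * Worked example (illustrative, assuming GRALLOC_ALIGN_BASE_16 == 16):
				 * for w = 640, h = 480, h % 8 == 0, so stride = GRALLOC_ALIGN(640, 16) = 640.
				 * size = GRALLOC_ALIGN(480, 2) * (640 + GRALLOC_ALIGN(320, 16))
				 *      = 480 * (640 + 320) = 460800 bytes, i.e. w * h * 3 / 2.
				 */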

				break;
#ifdef SUPPORT_LEGACY_FORMAT

			case HAL_PIXEL_FORMAT_YCbCr_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YCbCr_422_I:
				stride = GRALLOC_ALIGN(w, 16);
				size = h * stride * 2;

				break;
#endif

			default:
				return -EINVAL;
		}
	}
	else
	{

		switch (format)
		{
			case HAL_PIXEL_FORMAT_RGBA_8888:
			case HAL_PIXEL_FORMAT_RGBX_8888:
			case HAL_PIXEL_FORMAT_BGRA_8888:
				bpp = 4;
				break;

			case HAL_PIXEL_FORMAT_RGB_888:
				bpp = 3;
				break;

			case HAL_PIXEL_FORMAT_RGB_565:
#if PLATFORM_SDK_VERSION < 19
			case HAL_PIXEL_FORMAT_RGBA_5551:
			case HAL_PIXEL_FORMAT_RGBA_4444:
#endif
				bpp = 2;
				break;

			case HAL_PIXEL_FORMAT_BLOB:
				if (h != 1) {
					AERR("Height for HAL_PIXEL_FORMAT_BLOB must be 1. h=%d", h);
					return -EINVAL;
				}
				break;

			default:
				AERR("The format is not supported yet: format=%d\n", format);
				return -EINVAL;
		}

		if (format == HAL_PIXEL_FORMAT_BLOB) {
			stride = 0; /* no 'rows'; the buffer is effectively one long one-dimensional array */
			size = w;
		} else {
			size_t bpr = GRALLOC_ALIGN(w * bpp, 64);
			size = bpr * h;
			stride = bpr / bpp;
		}
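
		/*
		 * Worked example (illustrative): for HAL_PIXEL_FORMAT_RGBA_8888 with w = 100,
		 * bpp = 4, so bpr = GRALLOC_ALIGN(400, 64) = 448 bytes per row,
		 * stride = 448 / 4 = 112 pixels and size = 448 * h bytes.
		 */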
	}

	int err;

#ifndef DISABLE_FRAMEBUFFER_HAL

	if (usage & GRALLOC_USAGE_HW_FB)
	{
		err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
	}
	else
#endif

	{
		err = gralloc_alloc_buffer(dev, size, usage, pHandle);
	}

	if (err < 0)
	{
		return err;
	}

	/* match the framebuffer format */
	if (usage & GRALLOC_USAGE_HW_FB)
	{
#ifdef GRALLOC_16_BITS
		format = HAL_PIXEL_FORMAT_RGB_565;
#else
		format = HAL_PIXEL_FORMAT_BGRA_8888;
#endif
	}

	private_handle_t *hnd = (private_handle_t *)*pHandle;
	int               private_usage = usage & (GRALLOC_USAGE_PRIVATE_0 |
	                                  GRALLOC_USAGE_PRIVATE_1);

	switch (private_usage)
	{
		case 0:
			hnd->yuv_info = MALI_YUV_BT601_NARROW;
			break;

		case GRALLOC_USAGE_PRIVATE_1:
			hnd->yuv_info = MALI_YUV_BT601_WIDE;
			break;

		case GRALLOC_USAGE_PRIVATE_0:
			hnd->yuv_info = MALI_YUV_BT709_NARROW;
			break;

		case (GRALLOC_USAGE_PRIVATE_0 | GRALLOC_USAGE_PRIVATE_1):
			hnd->yuv_info = MALI_YUV_BT709_WIDE;
			break;
	}

	hnd->width = w;
	hnd->height = h;
	hnd->format = format;
	hnd->stride = stride;
	hnd->byte_stride = GRALLOC_ALIGN(w * bpp, 64);
	*pStride = stride;
	return 0;
}

static int alloc_device_free(alloc_device_t __unused *dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
#if GRALLOC_ARM_UMP_MODULE

		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* Buffer might be unregistered, so we need to check for an invalid ump handle */
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#else
		AERR("Can't free ump memory for handle:%p. Not supported.", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		/* Buffer might be unregistered, so only unmap when a valid base mapping exists */
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);

		memset((void *)hnd, 0, sizeof(*hnd));
#else
		AERR("Can't free dma_buf memory for handle:%p. Not supported.", hnd);
#endif

	}

	delete hnd;

	return 0;
}

static int alloc_device_close(struct hw_device_t *device)
{
	alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);

	if (dev)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

		/* ion_close() already closes the underlying fd, so no separate close() is needed */
		if (0 != ion_close(m->ion_client))
		{
			AERR("Failed to close ion_client: %d", m->ion_client);
		}
#endif
		delete dev;
#if GRALLOC_ARM_UMP_MODULE
		ump_close(); // Our UMP memory refs will be released automatically here...
#endif
	}

	return 0;
}

#if GRALLOC_ARM_DMA_BUF_MODULE
static int find_ion_heap_id(int ion_client, char *name)
{
	int i, ret, cnt, heap_id = -1;
	struct ion_heap_data *data;

	ret = ion_query_heap_cnt(ion_client, &cnt);

	if (ret)
	{
		AERR("ion count query failed with %s", strerror(errno));
		return -1;
	}

	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
	if (!data)
	{
		AERR("Error allocating data %s\n", strerror(errno));
		return -1;
	}

	ret = ion_query_get_heaps(ion_client, cnt, data);
	if (ret)
	{
		AERR("Error querying heaps from ion %s", strerror(errno));
	}
	else
	{
		for (i = 0; i < cnt; i++) {
			if (strcmp(data[i].name, name) == 0) {
				heap_id = data[i].heap_id;
				break;
			}
		}

		if (i == cnt)
		{
			AERR("No heap matching '%s' found amongst %d heaps\n", name, cnt);
			heap_id = -1;
		}
	}

	free(data);
	return heap_id;
}
#endif

int alloc_device_open(hw_module_t const *module, const char *name, hw_device_t **device)
{
	MALI_IGNORE(name);
	alloc_device_t *dev;

	dev = new alloc_device_t;

	if (NULL == dev)
	{
		return -1;
	}

#if GRALLOC_ARM_UMP_MODULE
	ump_result ump_res = ump_open();

	if (UMP_OK != ump_res)
	{
		AERR("UMP open failed with %d", ump_res);
		delete dev;
		return -1;
	}

#endif

	/* initialize our state here */
	memset(dev, 0, sizeof(*dev));

	/* initialize the procs */
	dev->common.tag = HARDWARE_DEVICE_TAG;
	dev->common.version = 0;
	dev->common.module = const_cast<hw_module_t *>(module);
	dev->common.close = alloc_device_close;
	dev->alloc = alloc_device_alloc;
	dev->free = alloc_device_free;

#if GRALLOC_ARM_DMA_BUF_MODULE
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
	m->ion_client = ion_open();

	if (m->ion_client < 0)
	{
		AERR("ion_open failed with %s", strerror(errno));
		delete dev;
		return -1;
	}

	m->gralloc_legacy_ion = ion_is_legacy(m->ion_client);

	if (!m->gralloc_legacy_ion)
	{
		m->system_heap_id = find_ion_heap_id(m->ion_client, ION_SYSTEM);
		m->cma_heap_id = find_ion_heap_id(m->ion_client, ION_CMA);
		if (m->system_heap_id < 0 || m->cma_heap_id < 0)
		{
			delete dev;
			ion_close(m->ion_client);
			m->ion_client = -1;
			return -1;
		}
	}

#endif

	*device = &dev->common;

	return 0;
}