1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #define ATRACE_TAG (ATRACE_TAG_GRAPHICS | ATRACE_TAG_HAL)
19 #include <limits.h>
20 #include <errno.h>
21 #include <pthread.h>
22 #include <unistd.h>
23 #include <string.h>
24 #include <stdarg.h>
25
26 #include <sys/mman.h>
27 #include <sys/stat.h>
28 #include <sys/types.h>
29 #include <sys/ioctl.h>
30
31 #include <cutils/log.h>
32 #include <cutils/atomic.h>
33 #include <utils/Trace.h>
34
35 #include <hardware/hardware.h>
36 #include <hardware/gralloc.h>
37
38 #include <gralloc1-adapter.h>
39
40 #include "gralloc_priv.h"
41 #include "gr.h"
42 #include "alloc_controller.h"
43 #include "memalloc.h"
44 #include <qdMetaData.h>
45
46
47 using namespace gralloc;
48 /*****************************************************************************/
49
50 // Return the type of allocator -
51 // these are used for mapping/unmapping
getAllocator(int flags)52 static IMemAlloc* getAllocator(int flags)
53 {
54 IMemAlloc* memalloc;
55 IAllocController* alloc_ctrl = IAllocController::getInstance();
56 memalloc = alloc_ctrl->getAllocator(flags);
57 return memalloc;
58 }
59
gralloc_map_metadata(buffer_handle_t handle)60 static int gralloc_map_metadata(buffer_handle_t handle) {
61 private_handle_t* hnd = (private_handle_t*)handle;
62 hnd->base_metadata = 0;
63 IMemAlloc* memalloc = getAllocator(hnd->flags) ;
64 void *mappedAddress = MAP_FAILED;
65 unsigned int size = 0;
66 if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
67 mappedAddress = MAP_FAILED;
68 size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
69 int ret = memalloc->map_buffer(&mappedAddress, size,
70 hnd->offset_metadata, hnd->fd_metadata);
71 if(ret || mappedAddress == MAP_FAILED) {
72 ALOGE("Could not mmap metadata for handle %p, fd=%d (%s)",
73 hnd, hnd->fd_metadata, strerror(errno));
74 return -errno;
75 }
76 hnd->base_metadata = uint64_t(mappedAddress);
77 }
78 return 0;
79 }
80
gralloc_map(gralloc_module_t const * module,buffer_handle_t handle)81 static int gralloc_map(gralloc_module_t const* module,
82 buffer_handle_t handle)
83 {
84 ATRACE_CALL();
85 if(!module)
86 return -EINVAL;
87
88 private_handle_t* hnd = (private_handle_t*)handle;
89 unsigned int size = 0;
90 int err = 0;
91 IMemAlloc* memalloc = getAllocator(hnd->flags) ;
92 void *mappedAddress = MAP_FAILED;
93 hnd->base = 0;
94
95 // Dont map framebuffer and secure buffers
96 if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) &&
97 !(hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_BUFFER)) {
98 size = hnd->size;
99 err = memalloc->map_buffer(&mappedAddress, size,
100 hnd->offset, hnd->fd);
101 if(err || mappedAddress == MAP_FAILED) {
102 ALOGE("Could not mmap handle %p, fd=%d (%s)",
103 handle, hnd->fd, strerror(errno));
104 return -errno;
105 }
106
107 hnd->base = uint64_t(mappedAddress);
108 } else {
109 // Cannot map secure buffers or framebuffers, but still need to map
110 // metadata for secure buffers.
111 // If mapping a secure buffers fails, the framework needs to get
112 // an error code.
113 err = -EACCES;
114 }
115
116 //Allow mapping of metadata for all buffers including secure ones, but not
117 //of framebuffer
118 int metadata_err = gralloc_map_metadata(handle);
119 if(!err) {
120 err = metadata_err;
121 }
122 return err;
123 }
124
gralloc_unmap(gralloc_module_t const * module,buffer_handle_t handle)125 static int gralloc_unmap(gralloc_module_t const* module,
126 buffer_handle_t handle)
127 {
128 ATRACE_CALL();
129 int err = -EINVAL;
130 if(!module)
131 return err;
132
133 private_handle_t* hnd = (private_handle_t*)handle;
134 IMemAlloc* memalloc = getAllocator(hnd->flags) ;
135 if(!memalloc)
136 return err;
137
138 if(hnd->base) {
139 err = memalloc->unmap_buffer((void*)hnd->base, hnd->size, hnd->offset);
140 if (err) {
141 ALOGE("Could not unmap memory at address %p, %s", (void*) hnd->base,
142 strerror(errno));
143 return -errno;
144 }
145 hnd->base = 0;
146 }
147
148 if(hnd->base_metadata) {
149 unsigned int size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
150 err = memalloc->unmap_buffer((void*)hnd->base_metadata,
151 size, hnd->offset_metadata);
152 if (err) {
153 ALOGE("Could not unmap memory at address %p, %s",
154 (void*) hnd->base_metadata, strerror(errno));
155 return -errno;
156 }
157 hnd->base_metadata = 0;
158 }
159
160 return 0;
161 }
162
163 /*****************************************************************************/
164
// Serializes the lazy gralloc_map() performed on the lock path so two threads
// locking the same not-yet-mapped buffer do not race to map it.
static pthread_mutex_t sMapLock = PTHREAD_MUTEX_INITIALIZER;
166
167 /*****************************************************************************/
168
gralloc_register_buffer(gralloc_module_t const * module,buffer_handle_t handle)169 int gralloc_register_buffer(gralloc_module_t const* module,
170 buffer_handle_t handle)
171 {
172 ATRACE_CALL();
173 if (!module || private_handle_t::validate(handle) < 0)
174 return -EINVAL;
175
176 int err = gralloc_map(module, handle);
177 /* Do not fail register_buffer for secure buffers*/
178 if (err == -EACCES)
179 err = 0;
180 return err;
181 }
182
gralloc_unregister_buffer(gralloc_module_t const * module,buffer_handle_t handle)183 int gralloc_unregister_buffer(gralloc_module_t const* module,
184 buffer_handle_t handle)
185 {
186 ATRACE_CALL();
187 if (!module || private_handle_t::validate(handle) < 0)
188 return -EINVAL;
189
190 /*
191 * If the buffer has been mapped during a lock operation, it's time
192 * to un-map it. It's an error to be here with a locked buffer.
193 * NOTE: the framebuffer is handled differently and is never unmapped.
194 * Also base and base_metadata are reset.
195 */
196 return gralloc_unmap(module, handle);
197 }
198
terminateBuffer(gralloc_module_t const * module,private_handle_t * hnd)199 int terminateBuffer(gralloc_module_t const* module,
200 private_handle_t* hnd)
201 {
202 ATRACE_CALL();
203 if(!module)
204 return -EINVAL;
205
206 /*
207 * If the buffer has been mapped during a lock operation, it's time
208 * to un-map it. It's an error to be here with a locked buffer.
209 * NOTE: the framebuffer is handled differently and is never unmapped.
210 * Also base and base_metadata are reset.
211 */
212 return gralloc_unmap(module, hnd);
213 }
214
gralloc_map_and_invalidate(gralloc_module_t const * module,buffer_handle_t handle,int usage)215 static int gralloc_map_and_invalidate (gralloc_module_t const* module,
216 buffer_handle_t handle, int usage)
217 {
218 ATRACE_CALL();
219 if (!module || private_handle_t::validate(handle) < 0)
220 return -EINVAL;
221
222 int err = 0;
223 private_handle_t* hnd = (private_handle_t*)handle;
224 if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
225 if (hnd->base == 0) {
226 // we need to map for real
227 pthread_mutex_t* const lock = &sMapLock;
228 pthread_mutex_lock(lock);
229 err = gralloc_map(module, handle);
230 pthread_mutex_unlock(lock);
231 }
232 if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION and
233 hnd->flags & private_handle_t::PRIV_FLAGS_CACHED) {
234 //Invalidate if CPU reads in software and there are non-CPU
235 //writers. No need to do this for the metadata buffer as it is
236 //only read/written in software.
237 if ((usage & GRALLOC_USAGE_SW_READ_MASK) and
238 (hnd->flags & private_handle_t::PRIV_FLAGS_NON_CPU_WRITER))
239 {
240 IMemAlloc* memalloc = getAllocator(hnd->flags) ;
241 err = memalloc->clean_buffer((void*)hnd->base,
242 hnd->size, hnd->offset, hnd->fd,
243 CACHE_INVALIDATE);
244 }
245 //Mark the buffer to be flushed after CPU write.
246 if (usage & GRALLOC_USAGE_SW_WRITE_MASK) {
247 hnd->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
248 }
249 }
250 }
251
252 return err;
253 }
254
gralloc_lock(gralloc_module_t const * module,buffer_handle_t handle,int usage,int,int,int,int,void ** vaddr)255 int gralloc_lock(gralloc_module_t const* module,
256 buffer_handle_t handle, int usage,
257 int /*l*/, int /*t*/, int /*w*/, int /*h*/,
258 void** vaddr)
259 {
260 ATRACE_CALL();
261 private_handle_t* hnd = (private_handle_t*)handle;
262 int err = gralloc_map_and_invalidate(module, handle, usage);
263 if(!err)
264 *vaddr = (void*)hnd->base;
265 return err;
266 }
267
gralloc_lock_ycbcr(gralloc_module_t const * module,buffer_handle_t handle,int usage,int,int,int,int,struct android_ycbcr * ycbcr)268 int gralloc_lock_ycbcr(gralloc_module_t const* module,
269 buffer_handle_t handle, int usage,
270 int /*l*/, int /*t*/, int /*w*/, int /*h*/,
271 struct android_ycbcr *ycbcr)
272 {
273 ATRACE_CALL();
274 private_handle_t* hnd = (private_handle_t*)handle;
275 int err = gralloc_map_and_invalidate(module, handle, usage);
276 if(!err)
277 err = getYUVPlaneInfo(hnd, ycbcr);
278 return err;
279 }
280
gralloc_unlock(gralloc_module_t const * module,buffer_handle_t handle)281 int gralloc_unlock(gralloc_module_t const* module,
282 buffer_handle_t handle)
283 {
284 ATRACE_CALL();
285 if (!module || private_handle_t::validate(handle) < 0)
286 return -EINVAL;
287
288 int err = 0;
289 private_handle_t* hnd = (private_handle_t*)handle;
290
291 IMemAlloc* memalloc = getAllocator(hnd->flags);
292 if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
293 err = memalloc->clean_buffer((void*)hnd->base,
294 hnd->size, hnd->offset, hnd->fd,
295 CACHE_CLEAN);
296 hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
297 }
298
299 return err;
300 }
301
302 /*****************************************************************************/
303
isYUV(private_handle_t * hnd)304 static bool isYUV(private_handle_t* hnd)
305 {
306 bool is_yuv;
307
308 switch (hnd->format) {
309 //Semiplanar
310 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
311 case HAL_PIXEL_FORMAT_YCbCr_422_SP:
312 case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
313 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE: //Same as YCbCr_420_SP_VENUS
314 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
315 case HAL_PIXEL_FORMAT_YCrCb_422_SP:
316 case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO:
317 case HAL_PIXEL_FORMAT_NV21_ZSL:
318 case HAL_PIXEL_FORMAT_RAW10:
319 case HAL_PIXEL_FORMAT_RAW16:
320 //Planar
321 case HAL_PIXEL_FORMAT_YV12:
322 is_yuv = true;
323 break;
324 //Unsupported formats
325 case HAL_PIXEL_FORMAT_YCbCr_422_I:
326 case HAL_PIXEL_FORMAT_YCrCb_422_I:
327 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
328 default:
329 is_yuv = false;
330 break;
331 }
332
333 return is_yuv;
334 }
335
ycbcr_to_flexible_layout(const struct android_ycbcr * ycbcr,struct android_flex_layout * layout)336 static void ycbcr_to_flexible_layout(const struct android_ycbcr* ycbcr,
337 struct android_flex_layout* layout)
338 {
339 layout->format = FLEX_FORMAT_YCbCr;
340 layout->num_planes = 3;
341
342 for (uint32_t i = 0; i < layout->num_planes; i++) {
343 layout->planes[i].bits_per_component = 8;
344 layout->planes[i].bits_used = 8;
345 layout->planes[i].h_increment = 1;
346 layout->planes[i].v_increment = 1;
347 layout->planes[i].h_subsampling = 2;
348 layout->planes[i].v_subsampling = 2;
349 }
350
351 layout->planes[0].top_left = (uint8_t*)ycbcr->y;
352 layout->planes[0].component = FLEX_COMPONENT_Y;
353 layout->planes[0].v_increment = (int32_t)ycbcr->ystride;
354
355 layout->planes[1].top_left = (uint8_t*)ycbcr->cb;
356 layout->planes[1].component = FLEX_COMPONENT_Cb;
357 layout->planes[1].h_increment = (int32_t)ycbcr->chroma_step;
358 layout->planes[1].v_increment = (int32_t)ycbcr->cstride;
359
360 layout->planes[2].top_left = (uint8_t*)ycbcr->cr;
361 layout->planes[2].component = FLEX_COMPONENT_Cr;
362 layout->planes[2].h_increment = (int32_t)ycbcr->chroma_step;
363 layout->planes[2].v_increment = (int32_t)ycbcr->cstride;
364 }
365
gralloc_perform(struct gralloc_module_t const * module,int operation,...)366 int gralloc_perform(struct gralloc_module_t const* module,
367 int operation, ... )
368 {
369 int res = -EINVAL;
370 va_list args;
371 if(!module)
372 return res;
373
374 va_start(args, operation);
375 switch (operation) {
376 case GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER:
377 {
378 int fd = va_arg(args, int);
379 unsigned int size = va_arg(args, unsigned int);
380 unsigned int offset = va_arg(args, unsigned int);
381 void* base = va_arg(args, void*);
382 int width = va_arg(args, int);
383 int height = va_arg(args, int);
384 int format = va_arg(args, int);
385
386 native_handle_t** handle = va_arg(args, native_handle_t**);
387 private_handle_t* hnd = (private_handle_t*)native_handle_create(
388 private_handle_t::sNumFds, private_handle_t::sNumInts());
389 hnd->magic = private_handle_t::sMagic;
390 hnd->fd = fd;
391 hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
392 hnd->size = size;
393 hnd->offset = offset;
394 hnd->base = uint64_t(base) + offset;
395 hnd->gpuaddr = 0;
396 hnd->width = width;
397 hnd->height = height;
398 hnd->format = format;
399 *handle = (native_handle_t *)hnd;
400 res = 0;
401 break;
402
403 }
404 case GRALLOC_MODULE_PERFORM_GET_STRIDE:
405 {
406 int width = va_arg(args, int);
407 int format = va_arg(args, int);
408 int *stride = va_arg(args, int *);
409 int alignedw = 0, alignedh = 0;
410 AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
411 0, format, 0, alignedw, alignedh);
412 *stride = alignedw;
413 res = 0;
414 } break;
415
416 case GRALLOC_MODULE_PERFORM_GET_CUSTOM_STRIDE_FROM_HANDLE:
417 {
418 private_handle_t* hnd = va_arg(args, private_handle_t*);
419 int *stride = va_arg(args, int *);
420 if (private_handle_t::validate(hnd)) {
421 return res;
422 }
423 MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
424 if(metadata && metadata->operation & UPDATE_BUFFER_GEOMETRY) {
425 *stride = metadata->bufferDim.sliceWidth;
426 } else {
427 *stride = hnd->width;
428 }
429 res = 0;
430 } break;
431
432 case GRALLOC_MODULE_PERFORM_GET_CUSTOM_STRIDE_AND_HEIGHT_FROM_HANDLE:
433 {
434 private_handle_t* hnd = va_arg(args, private_handle_t*);
435 int *stride = va_arg(args, int *);
436 int *height = va_arg(args, int *);
437 if (private_handle_t::validate(hnd)) {
438 return res;
439 }
440 MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
441 if(metadata && metadata->operation & UPDATE_BUFFER_GEOMETRY) {
442 *stride = metadata->bufferDim.sliceWidth;
443 *height = metadata->bufferDim.sliceHeight;
444 } else {
445 *stride = hnd->width;
446 *height = hnd->height;
447 }
448 res = 0;
449 } break;
450
451 case GRALLOC_MODULE_PERFORM_GET_ATTRIBUTES:
452 {
453 int width = va_arg(args, int);
454 int height = va_arg(args, int);
455 int format = va_arg(args, int);
456 int usage = va_arg(args, int);
457 int *alignedWidth = va_arg(args, int *);
458 int *alignedHeight = va_arg(args, int *);
459 int *tileEnabled = va_arg(args,int *);
460 *tileEnabled = isMacroTileEnabled(format, usage);
461 AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
462 height, format, usage, *alignedWidth, *alignedHeight);
463 res = 0;
464 } break;
465
466 case GRALLOC_MODULE_PERFORM_GET_COLOR_SPACE_FROM_HANDLE:
467 {
468 private_handle_t* hnd = va_arg(args, private_handle_t*);
469 int *color_space = va_arg(args, int *);
470 if (private_handle_t::validate(hnd)) {
471 return res;
472 }
473 MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
474 if(metadata && metadata->operation & UPDATE_COLOR_SPACE) {
475 *color_space = metadata->colorSpace;
476 res = 0;
477 }
478 } break;
479
480 case GRALLOC_MODULE_PERFORM_GET_YUV_PLANE_INFO:
481 {
482 private_handle_t* hnd = va_arg(args, private_handle_t*);
483 android_ycbcr* ycbcr = va_arg(args, struct android_ycbcr *);
484 if (!private_handle_t::validate(hnd)) {
485 res = getYUVPlaneInfo(hnd, ycbcr);
486 }
487 } break;
488
489 case GRALLOC_MODULE_PERFORM_GET_MAP_SECURE_BUFFER_INFO:
490 {
491 private_handle_t* hnd = va_arg(args, private_handle_t*);
492 int *map_secure_buffer = va_arg(args, int *);
493 if (private_handle_t::validate(hnd)) {
494 return res;
495 }
496 MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
497 if(metadata && metadata->operation & MAP_SECURE_BUFFER) {
498 *map_secure_buffer = metadata->mapSecureBuffer;
499 res = 0;
500 } else {
501 *map_secure_buffer = 0;
502 }
503 } break;
504
505 case GRALLOC1_ADAPTER_PERFORM_GET_REAL_MODULE_API_VERSION_MINOR:
506 {
507 auto outMinorVersion = va_arg(args, int*);
508 *outMinorVersion = 1; // GRALLOC_MODULE_API_VERSION_0_1
509 } break;
510
511 case GRALLOC1_ADAPTER_PERFORM_SET_USAGES:
512 {
513 auto hnd = va_arg(args, private_handle_t*);
514 auto producerUsage = va_arg(args, int);
515 auto consumerUsage = va_arg(args, int);
516 hnd->producer_usage = producerUsage;
517 hnd->consumer_usage = consumerUsage;
518 } break;
519
520 case GRALLOC1_ADAPTER_PERFORM_GET_DIMENSIONS:
521 {
522 auto hnd = va_arg(args, private_handle_t*);
523 auto outWidth = va_arg(args, int*);
524 auto outHeight = va_arg(args, int*);
525 *outWidth = hnd->original_width;
526 *outHeight = hnd->height;
527 } break;
528
529 case GRALLOC1_ADAPTER_PERFORM_GET_FORMAT:
530 {
531 auto hnd = va_arg(args, private_handle_t*);
532 auto outFormat = va_arg(args, int*);
533 *outFormat = hnd->original_format;
534 } break;
535
536 case GRALLOC1_ADAPTER_PERFORM_GET_PRODUCER_USAGE:
537 {
538 auto hnd = va_arg(args, private_handle_t*);
539 auto outUsage = va_arg(args, int*);
540 *outUsage = hnd->producer_usage;
541 } break;
542
543 case GRALLOC1_ADAPTER_PERFORM_GET_CONSUMER_USAGE:
544 {
545 auto hnd = va_arg(args, private_handle_t*);
546 auto outUsage = va_arg(args, int*);
547 *outUsage = hnd->consumer_usage;
548 } break;
549
550 case GRALLOC1_ADAPTER_PERFORM_GET_BACKING_STORE:
551 {
552 auto hnd = va_arg(args, private_handle_t*);
553 auto outBackingStore = va_arg(args, uint64_t*);
554 *outBackingStore = hnd->backing_store;
555 } break;
556
557 case GRALLOC1_ADAPTER_PERFORM_GET_NUM_FLEX_PLANES:
558 {
559 auto hnd = va_arg(args, private_handle_t*);
560 auto outNumFlexPlanes = va_arg(args, int*);
561
562 (void) hnd;
563 // for simpilicity
564 *outNumFlexPlanes = 4;
565 } break;
566
567 case GRALLOC1_ADAPTER_PERFORM_GET_STRIDE:
568 {
569 auto hnd = va_arg(args, private_handle_t*);
570 auto outStride = va_arg(args, int*);
571 *outStride = hnd->width;
572 } break;
573
574 case GRALLOC1_ADAPTER_PERFORM_LOCK_FLEX:
575 {
576 auto hnd = va_arg(args, private_handle_t*);
577 auto producerUsage = va_arg(args, int);
578 auto consumerUsage = va_arg(args, int);
579 auto left = va_arg(args, int);
580 auto top = va_arg(args, int);
581 auto width = va_arg(args, int);
582 auto height = va_arg(args, int);
583 auto outLayout = va_arg(args, android_flex_layout*);
584 // always -1
585 auto acquireFence = va_arg(args, int);
586 (void) acquireFence;
587
588 // TODO lock RGB as a flexible format
589 if (!isYUV(hnd)) {
590 return -EINVAL;
591 }
592
593 struct android_ycbcr ycbcr;
594 res = gralloc_lock_ycbcr(module, hnd,
595 producerUsage | consumerUsage,
596 left, top, width, height, &ycbcr);
597 if (res != 0) {
598 return res;
599 }
600
601 ycbcr_to_flexible_layout(&ycbcr, outLayout);
602 } break;
603
604 default:
605 break;
606 }
607 va_end(args);
608 return res;
609 }
610