/*
 * Copyright (C) 2008 The Android Open Source Project
 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG (ATRACE_TAG_GRAPHICS | ATRACE_TAG_HAL)
#include <limits.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>
#include <stdarg.h>

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <utils/Trace.h>

#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include "gralloc_priv.h"
#include "gr.h"
#include "alloc_controller.h"
#include "memalloc.h"
#include <qdMetaData.h>


using namespace gralloc;
/*****************************************************************************/

// Return the allocator for the given buffer flags -
// it is used for mapping/unmapping
static IMemAlloc* getAllocator(int flags)
{
    IMemAlloc* memalloc;
    IAllocController* alloc_ctrl = IAllocController::getInstance();
    memalloc = alloc_ctrl->getAllocator(flags);
    return memalloc;
}

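// Map the metadata blob associated with the buffer into this process.
// Skipped for framebuffer handles; on success hnd->base_metadata points to
// the mapped MetaData_t.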
static int gralloc_map_metadata(buffer_handle_t handle) {
    private_handle_t* hnd = (private_handle_t*)handle;
    hnd->base_metadata = 0;
    IMemAlloc* memalloc = getAllocator(hnd->flags);
    void *mappedAddress = MAP_FAILED;
    unsigned int size = 0;
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
        mappedAddress = MAP_FAILED;
        size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
        int ret = memalloc->map_buffer(&mappedAddress, size,
                                       hnd->offset_metadata, hnd->fd_metadata);
        if(ret || mappedAddress == MAP_FAILED) {
            ALOGE("Could not mmap metadata for handle %p, fd=%d (%s)",
                  hnd, hnd->fd_metadata, strerror(errno));
            return -errno;
        }
        hnd->base_metadata = uint64_t(mappedAddress) + hnd->offset_metadata;
    }
    return 0;
}

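// Map the buffer payload (and its metadata) into the calling process.
// Framebuffer and secure buffers are not mapped; secure buffers return
// -EACCES so callers can decide how to handle them.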
static int gralloc_map(gralloc_module_t const* module,
                       buffer_handle_t handle)
{
    ATRACE_CALL();
    if(!module)
        return -EINVAL;

    private_handle_t* hnd = (private_handle_t*)handle;
    unsigned int size = 0;
    int err = 0;
    IMemAlloc* memalloc = getAllocator(hnd->flags);
    void *mappedAddress = MAP_FAILED;
    hnd->base = 0;

    // Don't map framebuffer or secure buffers
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) &&
        !(hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_BUFFER)) {
        size = hnd->size;
        err = memalloc->map_buffer(&mappedAddress, size,
                                   hnd->offset, hnd->fd);
        if(err || mappedAddress == MAP_FAILED) {
            ALOGE("Could not mmap handle %p, fd=%d (%s)",
                  handle, hnd->fd, strerror(errno));
            return -errno;
        }

        hnd->base = uint64_t(mappedAddress) + hnd->offset;
    } else {
        // Cannot map secure buffers or framebuffers, but we still need to map
        // metadata for secure buffers.
        // If mapping a secure buffer fails, the framework needs to get
        // an error code.
        err = -EACCES;
    }

    // Allow mapping of metadata for all buffers, including secure ones, but
    // not for the framebuffer.
    int metadata_err = gralloc_map_metadata(handle);
    if(!err) {
        err = metadata_err;
    }
    return err;
}

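// Undo gralloc_map(): unmap the buffer payload and its metadata if they are
// currently mapped, and reset hnd->base / hnd->base_metadata.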
static int gralloc_unmap(gralloc_module_t const* module,
                         buffer_handle_t handle)
{
    ATRACE_CALL();
    int err = -EINVAL;
    if(!module)
        return err;

    private_handle_t* hnd = (private_handle_t*)handle;
    IMemAlloc* memalloc = getAllocator(hnd->flags);
    if(!memalloc)
        return err;

    if(hnd->base) {
        err = memalloc->unmap_buffer((void*)hnd->base, hnd->size, hnd->offset);
        if (err) {
            ALOGE("Could not unmap memory at address %p, %s",
                  (void*)hnd->base, strerror(errno));
            return -errno;
        }
        hnd->base = 0;
    }

    if(hnd->base_metadata) {
        unsigned int size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
        err = memalloc->unmap_buffer((void*)hnd->base_metadata,
                                     size, hnd->offset_metadata);
        if (err) {
            ALOGE("Could not unmap memory at address %p, %s",
                  (void*)hnd->base_metadata, strerror(errno));
            return -errno;
        }
        hnd->base_metadata = 0;
    }

    return 0;
}

/*****************************************************************************/

static pthread_mutex_t sMapLock = PTHREAD_MUTEX_INITIALIZER;

/*****************************************************************************/

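// Register a buffer received from another process: map it into this process
// so it can be locked later. Secure buffers are registered without a mapping.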
int gralloc_register_buffer(gralloc_module_t const* module,
                            buffer_handle_t handle)
{
    ATRACE_CALL();
    if (!module || private_handle_t::validate(handle) < 0)
        return -EINVAL;

    int err = gralloc_map(module, handle);
    /* Do not fail register_buffer for secure buffers */
    if (err == -EACCES)
        err = 0;
    return err;
}

int gralloc_unregister_buffer(gralloc_module_t const* module,
                              buffer_handle_t handle)
{
    ATRACE_CALL();
    if (!module || private_handle_t::validate(handle) < 0)
        return -EINVAL;

    /*
     * If the buffer has been mapped during a lock operation, it's time
     * to un-map it. It's an error to be here with a locked buffer.
     * NOTE: the framebuffer is handled differently and is never unmapped.
     * Also base and base_metadata are reset.
     */
    return gralloc_unmap(module, handle);
}

int terminateBuffer(gralloc_module_t const* module,
                    private_handle_t* hnd)
{
    ATRACE_CALL();
    if(!module)
        return -EINVAL;

    /*
     * If the buffer has been mapped during a lock operation, it's time
     * to un-map it. It's an error to be here with a locked buffer.
     * NOTE: the framebuffer is handled differently and is never unmapped.
     * Also base and base_metadata are reset.
     */
    return gralloc_unmap(module, hnd);
}

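// Ensure the buffer is mapped for CPU access and keep its CPU caches
// coherent: invalidate before software reads when a non-CPU writer may have
// touched the buffer, and mark it for a flush after software writes.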
static int gralloc_map_and_invalidate(gralloc_module_t const* module,
                                      buffer_handle_t handle, int usage)
{
    ATRACE_CALL();
    if (!module || private_handle_t::validate(handle) < 0)
        return -EINVAL;

    int err = 0;
    private_handle_t* hnd = (private_handle_t*)handle;
    if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
        if (hnd->base == 0) {
            // we need to map for real
            pthread_mutex_t* const lock = &sMapLock;
            pthread_mutex_lock(lock);
            err = gralloc_map(module, handle);
            pthread_mutex_unlock(lock);
        }
        if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION and
            hnd->flags & private_handle_t::PRIV_FLAGS_CACHED) {
            // Invalidate if CPU reads in software and there are non-CPU
            // writers. No need to do this for the metadata buffer as it is
            // only read/written in software.
            if ((usage & GRALLOC_USAGE_SW_READ_MASK) and
                (hnd->flags & private_handle_t::PRIV_FLAGS_NON_CPU_WRITER))
            {
                IMemAlloc* memalloc = getAllocator(hnd->flags);
                err = memalloc->clean_buffer((void*)hnd->base,
                                             hnd->size, hnd->offset, hnd->fd,
                                             CACHE_INVALIDATE);
            }
            // Mark the buffer to be flushed after CPU write.
            if (usage & GRALLOC_USAGE_SW_WRITE_MASK) {
                hnd->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
            }
        }
    }

    return err;
}

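// Lock the buffer for CPU access and return its virtual address in *vaddr.
// The l/t/w/h region arguments are ignored; the whole buffer is mapped.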
int gralloc_lock(gralloc_module_t const* module,
                 buffer_handle_t handle, int usage,
                 int /*l*/, int /*t*/, int /*w*/, int /*h*/,
                 void** vaddr)
{
    ATRACE_CALL();
    private_handle_t* hnd = (private_handle_t*)handle;
    int err = gralloc_map_and_invalidate(module, handle, usage);
    if(!err)
        *vaddr = (void*)hnd->base;
    return err;
}

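// Same as gralloc_lock(), but fill out per-plane pointers and strides for
// YUV buffers instead of returning a single base address.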
int gralloc_lock_ycbcr(gralloc_module_t const* module,
                       buffer_handle_t handle, int usage,
                       int /*l*/, int /*t*/, int /*w*/, int /*h*/,
                       struct android_ycbcr *ycbcr)
{
    ATRACE_CALL();
    private_handle_t* hnd = (private_handle_t*)handle;
    int err = gralloc_map_and_invalidate(module, handle, usage);
    if(!err)
        err = getYUVPlaneInfo(hnd, ycbcr);
    return err;
}

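// Finish CPU access: if the buffer was written by the CPU, clean (flush) its
// caches so other clients see the updated contents.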
int gralloc_unlock(gralloc_module_t const* module,
                   buffer_handle_t handle)
{
    ATRACE_CALL();
    if (!module || private_handle_t::validate(handle) < 0)
        return -EINVAL;

    int err = 0;
    private_handle_t* hnd = (private_handle_t*)handle;

    IMemAlloc* memalloc = getAllocator(hnd->flags);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
        err = memalloc->clean_buffer((void*)hnd->base,
                                     hnd->size, hnd->offset, hnd->fd,
                                     CACHE_CLEAN);
        hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
    }

    return err;
}

/*****************************************************************************/

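// Entry point for vendor-specific GRALLOC_MODULE_PERFORM_* operations; the
// variadic arguments depend on the operation code.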
int gralloc_perform(struct gralloc_module_t const* module,
                    int operation, ... )
{
    int res = -EINVAL;
    va_list args;
    if(!module)
        return res;

    va_start(args, operation);
    switch (operation) {
        case GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER:
            {
                int fd = va_arg(args, int);
                unsigned int size = va_arg(args, unsigned int);
                unsigned int offset = va_arg(args, unsigned int);
                void* base = va_arg(args, void*);
                int width = va_arg(args, int);
                int height = va_arg(args, int);
                int format = va_arg(args, int);

                native_handle_t** handle = va_arg(args, native_handle_t**);
                private_handle_t* hnd = (private_handle_t*)native_handle_create(
                    private_handle_t::sNumFds, private_handle_t::sNumInts());
                hnd->magic = private_handle_t::sMagic;
                hnd->fd = fd;
                hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
                hnd->size = size;
                hnd->offset = offset;
                hnd->base = uint64_t(base) + offset;
                hnd->gpuaddr = 0;
                hnd->width = width;
                hnd->height = height;
                hnd->format = format;
                *handle = (native_handle_t *)hnd;
                res = 0;
                break;
            }
        case GRALLOC_MODULE_PERFORM_GET_STRIDE:
            {
                int width = va_arg(args, int);
                int format = va_arg(args, int);
                int *stride = va_arg(args, int *);
                int alignedw = 0, alignedh = 0;
                AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
                        0, format, 0, alignedw, alignedh);
                *stride = alignedw;
                res = 0;
            } break;

        case GRALLOC_MODULE_PERFORM_GET_CUSTOM_STRIDE_FROM_HANDLE:
            {
                private_handle_t* hnd = va_arg(args, private_handle_t*);
                int *stride = va_arg(args, int *);
                if (private_handle_t::validate(hnd)) {
                    return res;
                }
                MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
                if(metadata && metadata->operation & UPDATE_BUFFER_GEOMETRY) {
                    *stride = metadata->bufferDim.sliceWidth;
                } else {
                    *stride = hnd->width;
                }
                res = 0;
            } break;

        case GRALLOC_MODULE_PERFORM_GET_CUSTOM_STRIDE_AND_HEIGHT_FROM_HANDLE:
            {
                private_handle_t* hnd = va_arg(args, private_handle_t*);
                int *stride = va_arg(args, int *);
                int *height = va_arg(args, int *);
                if (private_handle_t::validate(hnd)) {
                    return res;
                }
                MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
                if(metadata && metadata->operation & UPDATE_BUFFER_GEOMETRY) {
                    *stride = metadata->bufferDim.sliceWidth;
                    *height = metadata->bufferDim.sliceHeight;
                } else {
                    *stride = hnd->width;
                    *height = hnd->height;
                }
                res = 0;
            } break;

        case GRALLOC_MODULE_PERFORM_GET_ATTRIBUTES:
            {
                int width = va_arg(args, int);
                int height = va_arg(args, int);
                int format = va_arg(args, int);
                int usage = va_arg(args, int);
                int *alignedWidth = va_arg(args, int *);
                int *alignedHeight = va_arg(args, int *);
                int *tileEnabled = va_arg(args, int *);
                *tileEnabled = isMacroTileEnabled(format, usage);
                AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
                        height, format, usage, *alignedWidth, *alignedHeight);
                res = 0;
            } break;

        case GRALLOC_MODULE_PERFORM_GET_COLOR_SPACE_FROM_HANDLE:
            {
                private_handle_t* hnd = va_arg(args, private_handle_t*);
                int *color_space = va_arg(args, int *);
                if (private_handle_t::validate(hnd)) {
                    return res;
                }
                MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
                if(metadata && metadata->operation & UPDATE_COLOR_SPACE) {
                    *color_space = metadata->colorSpace;
                    res = 0;
                }
            } break;

        case GRALLOC_MODULE_PERFORM_GET_YUV_PLANE_INFO:
            {
                private_handle_t* hnd = va_arg(args, private_handle_t*);
                android_ycbcr* ycbcr = va_arg(args, struct android_ycbcr *);
                if (!private_handle_t::validate(hnd)) {
                    res = getYUVPlaneInfo(hnd, ycbcr);
                }
            } break;

        case GRALLOC_MODULE_PERFORM_GET_MAP_SECURE_BUFFER_INFO:
            {
                private_handle_t* hnd = va_arg(args, private_handle_t*);
                int *map_secure_buffer = va_arg(args, int *);
                if (private_handle_t::validate(hnd)) {
                    return res;
                }
                MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
                if(metadata && metadata->operation & MAP_SECURE_BUFFER) {
                    *map_secure_buffer = metadata->mapSecureBuffer;
                    res = 0;
                } else {
                    *map_secure_buffer = 0;
                }
            } break;

        default:
            break;
    }
    va_end(args);
    return res;
}