1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <limits.h>
18 #include <unistd.h>
19 #include <fcntl.h>
20 #include <errno.h>
21 #include <pthread.h>
22 #include <stdlib.h>
23 #include <string.h>
24
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/ioctl.h>
29
30 #include <cutils/ashmem.h>
31 #include <cutils/log.h>
32 #include <cutils/atomic.h>
33
34 #include <hardware/hardware.h>
35 #include <hardware/gralloc.h>
36
37 #include <linux/msm_hw3d.h>
38 #include <linux/android_pmem.h>
39
40 #include "gralloc_priv.h"
41 #include "allocator.h"
42
43 /*****************************************************************************/
44
// NOTE: must be the same as in oem.h
#define ALLOCATORREGION_RESERVED_SIZE (1200<<10)

// Best-fit sub-allocators: sAllocator carves buffers out of the pmem master
// heap (its size is set at runtime in init_pmem_area_locked); sAllocatorGPU
// manages the reserved region of the GPU (hw3d EBI) heap.
static SimpleBestFitAllocator sAllocator;
static SimpleBestFitAllocator sAllocatorGPU(ALLOCATORREGION_RESERVED_SIZE);
50
51 /*****************************************************************************/
52
// Per-open device context; wraps the alloc_device_t vtable handed back to
// callers of gralloc_device_open().
struct gralloc_context_t {
    alloc_device_t  device;
    /* our private data here */
};
57
// Forward declaration -- defined later in this file.
static int gralloc_alloc_buffer(alloc_device_t* dev,
        size_t size, int usage, buffer_handle_t* pHandle);

/*****************************************************************************/

// Opens the framebuffer device; defined in a companion source file of this
// module (not visible in this file).
int fb_device_open(const hw_module_t* module, const char* name,
        hw_device_t** device);

static int gralloc_device_open(const hw_module_t* module, const char* name,
        hw_device_t** device);

// The lock/unlock/register/perform entry points below are implemented in a
// sibling source file of this module.
// NOTE(review): defining file not visible here -- confirm.
extern int gralloc_lock(gralloc_module_t const* module,
        buffer_handle_t handle, int usage,
        int l, int t, int w, int h,
        void** vaddr);

extern int gralloc_unlock(gralloc_module_t const* module,
        buffer_handle_t handle);

extern int gralloc_register_buffer(gralloc_module_t const* module,
        buffer_handle_t handle);

extern int gralloc_unregister_buffer(gralloc_module_t const* module,
        buffer_handle_t handle);

extern int gralloc_perform(struct gralloc_module_t const* module,
        int operation, ... );
85
86 /*****************************************************************************/
87
// Module entry points (GNU designated-initializer syntax, matching the
// rest of this file).
static struct hw_module_methods_t gralloc_module_methods = {
        open: gralloc_device_open
};
91
/*
 * The HAL module descriptor looked up by hw_get_module(). The trailing
 * fields are this module's private, lazily-initialized state.
 */
struct private_module_t HAL_MODULE_INFO_SYM = {
    base: {
        common: {
            tag: HARDWARE_MODULE_TAG,
            version_major: 1,
            version_minor: 0,
            id: GRALLOC_HARDWARE_MODULE_ID,
            name: "Graphics Memory Allocator Module",
            author: "The Android Open Source Project",
            methods: &gralloc_module_methods
        },
        registerBuffer: gralloc_register_buffer,
        unregisterBuffer: gralloc_unregister_buffer,
        lock: gralloc_lock,
        unlock: gralloc_unlock,
        perform: gralloc_perform,
    },
    framebuffer: 0,     // set once by mapFrameBufferLocked()
    flags: 0,
    numBuffers: 0,
    bufferMask: 0,      // bitmask of framebuffer slots currently in use
    lock: PTHREAD_MUTEX_INITIALIZER,
    currentBuffer: 0,
    pmem_master: -1,    // -1: not tried yet; other negatives: cached error
    pmem_master_base: 0,
    master_phys: 0,
    gpu: -1,            // same convention as pmem_master
    gpu_base: 0
};
121
122 /*****************************************************************************/
123
/*
 * Allocate one buffer out of the (already mapped) framebuffer memory.
 * Must be called with m->lock held; see gralloc_alloc_framebuffer() for
 * the locking wrapper. Returns 0 on success or a negative errno.
 */
static int gralloc_alloc_framebuffer_locked(alloc_device_t* dev,
        size_t size, int usage, buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(
            dev->common.module);

    // allocate the framebuffer
    if (m->framebuffer == NULL) {
        // initialize the framebuffer, the framebuffer is mapped once
        // and forever.
        int err = mapFrameBufferLocked(m);
        if (err < 0) {
            return err;
        }
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    const size_t bufferSize = m->finfo.line_length * m->info.yres;
    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
    }

    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers: all numBuffers slot bits are already set.
        return -ENOMEM;
    }

    // create a "fake" handle for it, backed by a dup of the fb fd
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), size,
            private_handle_t::PRIV_FLAGS_USES_PMEM |
            private_handle_t::PRIV_FLAGS_FRAMEBUFFER);

    // find a free slot; vaddr advances by one buffer per occupied slot
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;

    return 0;
}
177
gralloc_alloc_framebuffer(alloc_device_t * dev,size_t size,int usage,buffer_handle_t * pHandle)178 static int gralloc_alloc_framebuffer(alloc_device_t* dev,
179 size_t size, int usage, buffer_handle_t* pHandle)
180 {
181 private_module_t* m = reinterpret_cast<private_module_t*>(
182 dev->common.module);
183 pthread_mutex_lock(&m->lock);
184 int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
185 pthread_mutex_unlock(&m->lock);
186 return err;
187 }
188
189
init_pmem_area_locked(private_module_t * m)190 static int init_pmem_area_locked(private_module_t* m)
191 {
192 int err = 0;
193 int master_fd = open("/dev/pmem", O_RDWR, 0);
194 if (master_fd >= 0) {
195
196 size_t size;
197 pmem_region region;
198 if (ioctl(master_fd, PMEM_GET_TOTAL_SIZE, ®ion) < 0) {
199 LOGE("PMEM_GET_TOTAL_SIZE failed, limp mode");
200 size = 8<<20; // 8 MiB
201 } else {
202 size = region.len;
203 }
204 sAllocator.setSize(size);
205
206 void* base = mmap(0, size,
207 PROT_READ|PROT_WRITE, MAP_SHARED, master_fd, 0);
208 if (base == MAP_FAILED) {
209 err = -errno;
210 base = 0;
211 close(master_fd);
212 master_fd = -1;
213 }
214 m->pmem_master = master_fd;
215 m->pmem_master_base = base;
216 } else {
217 err = -errno;
218 }
219 return err;
220 }
221
init_pmem_area(private_module_t * m)222 static int init_pmem_area(private_module_t* m)
223 {
224 pthread_mutex_lock(&m->lock);
225 int err = m->pmem_master;
226 if (err == -1) {
227 // first time, try to initialize pmem
228 err = init_pmem_area_locked(m);
229 if (err) {
230 m->pmem_master = err;
231 }
232 } else if (err < 0) {
233 // pmem couldn't be initialized, never use it
234 } else {
235 // pmem OK
236 err = 0;
237 }
238 pthread_mutex_unlock(&m->lock);
239 return err;
240 }
241
/*
 * One-time hw3d setup: open /dev/msm_hw3dm, query its memory regions and
 * map the EBI region into this process. On success m->gpu / m->gpu_base
 * are valid; returns 0 or a negative errno. Must be called with m->lock
 * held (see init_gpu_area).
 */
static int init_gpu_area_locked(private_module_t* m)
{
    int err = 0;
    int gpu = open("/dev/msm_hw3dm", O_RDWR, 0);
    LOGE_IF(gpu<0, "could not open hw3dm (%s)", strerror(errno));
    if (gpu >= 0) {
        struct hw3d_region regions[HW3D_NUM_REGIONS];
        if (ioctl(gpu, HW3D_GET_REGIONS, regions) < 0) {
            LOGE("HW3D_GET_REGIONS failed (%s)", strerror(errno));
            err = -errno;
        } else {
            // log all three regions for debugging; only EBI is mapped below
            LOGD("smi: offset=%08lx, len=%08lx, phys=%p",
                    regions[HW3D_SMI].map_offset,
                    regions[HW3D_SMI].len,
                    regions[HW3D_SMI].phys);
            LOGD("ebi: offset=%08lx, len=%08lx, phys=%p",
                    regions[HW3D_EBI].map_offset,
                    regions[HW3D_EBI].len,
                    regions[HW3D_EBI].phys);
            LOGD("reg: offset=%08lx, len=%08lx, phys=%p",
                    regions[HW3D_REGS].map_offset,
                    regions[HW3D_REGS].len,
                    regions[HW3D_REGS].phys);

            void* base = mmap(0, regions[HW3D_EBI].len,
                    PROT_READ|PROT_WRITE, MAP_SHARED,
                    gpu, regions[HW3D_EBI].map_offset);

            if (base == MAP_FAILED) {
                LOGE("mmap EBI1 (%s)", strerror(errno));
                err = -errno;
                base = 0;
                close(gpu);
                gpu = -1;
            }

            m->gpu = gpu;
            m->gpu_base = base;
        }
    } else {
        err = -errno;
        // NOTE(review): m->gpu is set to 0 here but the caller
        // (init_gpu_area) immediately overwrites it with the error code,
        // so this assignment appears to be redundant.
        m->gpu = 0;
        m->gpu_base = 0;
    }
    return err;
}
288
init_gpu_area(private_module_t * m)289 static int init_gpu_area(private_module_t* m)
290 {
291 pthread_mutex_lock(&m->lock);
292 int err = m->gpu;
293 if (err == -1) {
294 // first time, try to initialize gpu
295 err = init_gpu_area_locked(m);
296 if (err) {
297 m->gpu = err;
298 }
299 } else if (err < 0) {
300 // gpu couldn't be initialized, never use it
301 } else {
302 // gpu OK
303 err = 0;
304 }
305 pthread_mutex_unlock(&m->lock);
306 return err;
307 }
308
gralloc_alloc_buffer(alloc_device_t * dev,size_t size,int usage,buffer_handle_t * pHandle)309 static int gralloc_alloc_buffer(alloc_device_t* dev,
310 size_t size, int usage, buffer_handle_t* pHandle)
311 {
312 int err = 0;
313 int flags = 0;
314
315 int fd = -1;
316 int gpu_fd = -1;
317 void* base = 0;
318 int offset = 0;
319 int lockState = 0;
320
321 size = roundUpToPageSize(size);
322
323 if (usage & GRALLOC_USAGE_HW_TEXTURE) {
324 // enable pmem in that case, so our software GL can fallback to
325 // the copybit module.
326 flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
327 }
328
329 if (usage & GRALLOC_USAGE_HW_2D) {
330 flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
331 }
332
333 if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) == 0) {
334 try_ashmem:
335 fd = ashmem_create_region("gralloc-buffer", size);
336 if (fd < 0) {
337 LOGE("couldn't create ashmem (%s)", strerror(errno));
338 err = -errno;
339 }
340 } else if ((usage & GRALLOC_USAGE_HW_RENDER) == 0) {
341 private_module_t* m = reinterpret_cast<private_module_t*>(
342 dev->common.module);
343
344 err = init_pmem_area(m);
345 if (err == 0) {
346 // PMEM buffers are always mmapped
347 base = m->pmem_master_base;
348 lockState |= private_handle_t::LOCK_STATE_MAPPED;
349
350 offset = sAllocator.allocate(size);
351 if (offset < 0) {
352 // no more pmem memory
353 err = -ENOMEM;
354 } else {
355 struct pmem_region sub = { offset, size };
356
357 // now create the "sub-heap"
358 fd = open("/dev/pmem", O_RDWR, 0);
359 err = fd < 0 ? fd : 0;
360
361 // and connect to it
362 if (err == 0)
363 err = ioctl(fd, PMEM_CONNECT, m->pmem_master);
364
365 // and make it available to the client process
366 if (err == 0)
367 err = ioctl(fd, PMEM_MAP, &sub);
368
369 if (err < 0) {
370 err = -errno;
371 close(fd);
372 sAllocator.deallocate(offset);
373 fd = -1;
374 }
375 memset((char*)base + offset, 0, size);
376 //LOGD_IF(!err, "allocating pmem size=%d, offset=%d", size, offset);
377 }
378 } else {
379 if ((usage & GRALLOC_USAGE_HW_2D) == 0) {
380 // the caller didn't request PMEM, so we can try something else
381 flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
382 err = 0;
383 goto try_ashmem;
384 } else {
385 LOGE("couldn't open pmem (%s)", strerror(errno));
386 }
387 }
388 } else {
389 // looks like we want 3D...
390 flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
391 flags |= private_handle_t::PRIV_FLAGS_USES_GPU;
392
393 private_module_t* m = reinterpret_cast<private_module_t*>(
394 dev->common.module);
395
396 err = init_gpu_area(m);
397 if (err == 0) {
398 // GPU buffers are always mmapped
399 base = m->gpu_base;
400 lockState |= private_handle_t::LOCK_STATE_MAPPED;
401 offset = sAllocatorGPU.allocate(size);
402 if (offset < 0) {
403 // no more pmem memory
404 err = -ENOMEM;
405 } else {
406 LOGD("allocating GPU size=%d, offset=%d", size, offset);
407 fd = open("/dev/null", O_RDONLY); // just so marshalling doesn't fail
408 gpu_fd = m->gpu;
409 memset((char*)base + offset, 0, size);
410 }
411 } else {
412 // not enough memory, try ashmem
413 flags &= ~private_handle_t::PRIV_FLAGS_USES_GPU;
414 err = 0;
415 goto try_ashmem;
416 }
417 }
418
419 if (err == 0) {
420 private_handle_t* hnd = new private_handle_t(fd, size, flags);
421 hnd->offset = offset;
422 hnd->base = int(base)+offset;
423 hnd->lockState = lockState;
424 hnd->gpu_fd = gpu_fd;
425 *pHandle = hnd;
426 }
427
428 LOGE_IF(err, "gralloc failed err=%s", strerror(-err));
429
430 return err;
431 }
432
433 /*****************************************************************************/
434
/*
 * alloc_device_t::alloc implementation. Computes the byte size and pixel
 * stride for the requested format, then allocates either from the
 * framebuffer (GRALLOC_USAGE_HW_FB) or via gralloc_alloc_buffer().
 * Returns 0 and fills *pHandle / *pStride on success, -EINVAL or a
 * propagated negative errno otherwise.
 */
static int gralloc_alloc(alloc_device_t* dev,
        int w, int h, int format, int usage,
        buffer_handle_t* pHandle, int* pStride)
{
    if (!pHandle || !pStride)
        return -EINVAL;

    size_t size, stride;
    if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP ||
        format == HAL_PIXEL_FORMAT_YCbCr_422_SP)
    {
        // FIXME: there is no way to return the vstride
        int vstride;
        stride = (w + 1) & ~1;      // luma stride, rounded up to even
        switch (format) {
            case HAL_PIXEL_FORMAT_YCbCr_420_SP:
                // NOTE(review): 2 bytes/pixel is generous for 4:2:0 (1.5
                // would suffice); presumably a hardware padding
                // requirement -- confirm before changing.
                size = stride * h * 2;
                break;
            case HAL_PIXEL_FORMAT_YCbCr_422_SP:
                vstride = (h+1) & ~1;
                size = (stride * vstride) + (w/2 * h/2) * 2;
                break;
            default:
                return -EINVAL;
        }
    } else {
        int align = 4;      // row alignment, in bytes
        int bpp = 0;        // bytes per pixel
        switch (format) {
            case HAL_PIXEL_FORMAT_RGBA_8888:
            case HAL_PIXEL_FORMAT_RGBX_8888:
            case HAL_PIXEL_FORMAT_BGRA_8888:
                bpp = 4;
                break;
            case HAL_PIXEL_FORMAT_RGB_888:
                bpp = 3;
                break;
            case HAL_PIXEL_FORMAT_RGB_565:
            case HAL_PIXEL_FORMAT_RGBA_5551:
            case HAL_PIXEL_FORMAT_RGBA_4444:
                bpp = 2;
                break;
            default:
                return -EINVAL;
        }
        size_t bpr = (w*bpp + (align-1)) & ~(align-1);  // bytes per row
        size = bpr * h;
        stride = bpr / bpp;     // stride is returned in pixels, not bytes
    }

    int err;
    if (usage & GRALLOC_USAGE_HW_FB) {
        err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
    } else {
        err = gralloc_alloc_buffer(dev, size, usage, pHandle);
    }

    if (err < 0) {
        return err;
    }

    *pStride = stride;
    return 0;
}
499
/*
 * Release a buffer returned by gralloc_alloc(). Framebuffer handles give
 * their slot back to bufferMask; pmem/GPU handles return their range to
 * the matching sub-allocator before the per-process mapping is torn down.
 */
static int gralloc_free(alloc_device_t* dev,
        buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // free this buffer
        private_module_t* m = reinterpret_cast<private_module_t*>(
                dev->common.module);
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        // recover the slot index from the handle's offset into the fb
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1<<index);
    } else {
        if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
            if (hnd->fd >= 0) {
                struct pmem_region sub = { hnd->offset, hnd->size };
                int err = ioctl(hnd->fd, PMEM_UNMAP, &sub);
                LOGE_IF(err<0, "PMEM_UNMAP failed (%s), "
                        "fd=%d, sub.offset=%lu, sub.size=%lu",
                        strerror(errno), hnd->fd, hnd->offset, hnd->size);
                if (err == 0) {
                    // we can't deallocate the memory in case of UNMAP failure
                    // because it would give that process access to someone else's
                    // surfaces, which would be a security breach.
                    sAllocator.deallocate(hnd->offset);
                }
            }
        } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_GPU) {
            LOGD("freeing GPU buffer at %d", hnd->offset);
            sAllocatorGPU.deallocate(hnd->offset);
        }

        // unmap the buffer from this process (terminateBuffer is defined
        // elsewhere in this module -- NOTE(review): file not visible here)
        gralloc_module_t* module = reinterpret_cast<gralloc_module_t*>(
                dev->common.module);
        terminateBuffer(module, const_cast<private_handle_t*>(hnd));
    }

    close(hnd->fd);
    delete hnd;
    return 0;
}
543
544 /*****************************************************************************/
545
gralloc_close(struct hw_device_t * dev)546 static int gralloc_close(struct hw_device_t *dev)
547 {
548 gralloc_context_t* ctx = reinterpret_cast<gralloc_context_t*>(dev);
549 if (ctx) {
550 /* TODO: keep a list of all buffer_handle_t created, and free them
551 * all here.
552 */
553 free(ctx);
554 }
555 return 0;
556 }
557
gralloc_device_open(const hw_module_t * module,const char * name,hw_device_t ** device)558 int gralloc_device_open(const hw_module_t* module, const char* name,
559 hw_device_t** device)
560 {
561 int status = -EINVAL;
562 if (!strcmp(name, GRALLOC_HARDWARE_GPU0)) {
563 gralloc_context_t *dev;
564 dev = (gralloc_context_t*)malloc(sizeof(*dev));
565
566 /* initialize our state here */
567 memset(dev, 0, sizeof(*dev));
568
569 /* initialize the procs */
570 dev->device.common.tag = HARDWARE_DEVICE_TAG;
571 dev->device.common.version = 0;
572 dev->device.common.module = const_cast<hw_module_t*>(module);
573 dev->device.common.close = gralloc_close;
574
575 dev->device.alloc = gralloc_alloc;
576 dev->device.free = gralloc_free;
577
578 *device = &dev->device.common;
579 status = 0;
580 } else {
581 status = fb_device_open(module, name, device);
582 }
583 return status;
584 }
585