/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>

#include <cutils/ashmem.h>
#include <cutils/log.h>
#include <cutils/atomic.h>

#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include <linux/msm_hw3d.h>
#include <linux/android_pmem.h>

#include "gralloc_priv.h"
#include "allocator.h"

/*****************************************************************************/

// NOTE: must be the same as in oem.h
#define ALLOCATORREGION_RESERVED_SIZE           (1200<<10)
#define FB_ARENA                                HW3D_EBI


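// sAllocator parcels out offsets within the pmem master heap mapped in
// init_pmem_area_locked(); sAllocatorGPU manages the fixed carve-out of
// hw3d memory mapped in init_gpu_area_locked().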
static SimpleBestFitAllocator sAllocator;
static SimpleBestFitAllocator sAllocatorGPU(ALLOCATORREGION_RESERVED_SIZE);

/*****************************************************************************/

struct gralloc_context_t {
    alloc_device_t  device;
    /* our private data here */
};

static int gralloc_alloc_buffer(alloc_device_t* dev,
        size_t size, int usage, buffer_handle_t* pHandle);

/*****************************************************************************/

int fb_device_open(const hw_module_t* module, const char* name,
        hw_device_t** device);

static int gralloc_device_open(const hw_module_t* module, const char* name,
        hw_device_t** device);

extern int gralloc_lock(gralloc_module_t const* module,
        buffer_handle_t handle, int usage,
        int l, int t, int w, int h,
        void** vaddr);

extern int gralloc_unlock(gralloc_module_t const* module,
        buffer_handle_t handle);

extern int gralloc_register_buffer(gralloc_module_t const* module,
        buffer_handle_t handle);

extern int gralloc_unregister_buffer(gralloc_module_t const* module,
        buffer_handle_t handle);

extern int gralloc_perform(struct gralloc_module_t const* module,
        int operation, ... );

/*****************************************************************************/

static struct hw_module_methods_t gralloc_module_methods = {
        open: gralloc_device_open
};

struct private_module_t HAL_MODULE_INFO_SYM = {
    base: {
        common: {
            tag: HARDWARE_MODULE_TAG,
            version_major: 1,
            version_minor: 0,
            id: GRALLOC_HARDWARE_MODULE_ID,
            name: "Graphics Memory Allocator Module",
            author: "The Android Open Source Project",
            methods: &gralloc_module_methods
        },
        registerBuffer: gralloc_register_buffer,
        unregisterBuffer: gralloc_unregister_buffer,
        lock: gralloc_lock,
        unlock: gralloc_unlock,
        perform: gralloc_perform,
    },
    framebuffer: 0,
    flags: 0,
    numBuffers: 0,
    bufferMask: 0,
    lock: PTHREAD_MUTEX_INITIALIZER,
    currentBuffer: 0,
    pmem_master: -1,
    pmem_master_base: 0,
    master_phys: 0,
    gpu: -1,
    gpu_base: 0,
    fb_map_offset: 0
};

/*****************************************************************************/

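// Framebuffer "allocations" hand out page-flip slots inside the single
// mapped framebuffer rather than new memory: a free slot is claimed in
// m->bufferMask and the handle simply dup()s the framebuffer fd with an
// offset into the existing mapping.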
static int gralloc_alloc_framebuffer_locked(alloc_device_t* dev,
        size_t size, int usage, buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(
            dev->common.module);

    // allocate the framebuffer
    if (m->framebuffer == NULL) {
        // initialize the framebuffer, the framebuffer is mapped once
        // and forever.
        int err = mapFrameBufferLocked(m);
        if (err < 0) {
            return err;
        }
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    const size_t bufferSize = m->finfo.line_length * m->info.yres;
    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
    }

    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handle for it
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), size,
            private_handle_t::PRIV_FLAGS_USES_PMEM |
            private_handle_t::PRIV_FLAGS_FRAMEBUFFER);

    // find a free slot
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;

    return 0;
}

static int gralloc_alloc_framebuffer(alloc_device_t* dev,
        size_t size, int usage, buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(
            dev->common.module);
    pthread_mutex_lock(&m->lock);
    int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
    pthread_mutex_unlock(&m->lock);
    return err;
}


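// The pmem heap is opened and mapped once per process as a "master"
// region; individual buffers are then carved out of it by sAllocator
// and exported through per-buffer fds in gralloc_alloc_buffer().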
static int init_pmem_area_locked(private_module_t* m)
{
    int err = 0;
    int master_fd = open("/dev/pmem", O_RDWR, 0);
    if (master_fd >= 0) {

        size_t size;
        pmem_region region;
        if (ioctl(master_fd, PMEM_GET_TOTAL_SIZE, &region) < 0) {
            ALOGE("PMEM_GET_TOTAL_SIZE failed, limp mode");
            size = 8<<20;   // 8 MiB
        } else {
            size = region.len;
        }
        sAllocator.setSize(size);

        void* base = mmap(0, size,
                PROT_READ|PROT_WRITE, MAP_SHARED, master_fd, 0);
        if (base == MAP_FAILED) {
            err = -errno;
            base = 0;
            close(master_fd);
            master_fd = -1;
        }
        m->pmem_master = master_fd;
        m->pmem_master_base = base;
    } else {
        err = -errno;
    }
    return err;
}

static int init_pmem_area(private_module_t* m)
{
    pthread_mutex_lock(&m->lock);
    int err = m->pmem_master;
    if (err == -1) {
        // first time, try to initialize pmem
        err = init_pmem_area_locked(m);
        if (err) {
            m->pmem_master = err;
        }
    } else if (err < 0) {
        // pmem couldn't be initialized, never use it
    } else {
        // pmem OK
        err = 0;
    }
    pthread_mutex_unlock(&m->lock);
    return err;
}

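// The hw3d driver exposes several mappable regions (SMI, EBI and the
// register space); only ALLOCATORREGION_RESERVED_SIZE bytes of the
// FB_ARENA region are mapped here, to be parceled out by sAllocatorGPU.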
static int init_gpu_area_locked(private_module_t* m)
{
    int err = 0;
    int gpu = open("/dev/msm_hw3dm", O_RDWR, 0);
    ALOGE_IF(gpu<0, "could not open hw3dm (%s)", strerror(errno));
    if (gpu >= 0) {
        struct hw3d_region regions[HW3D_NUM_REGIONS];
        if (ioctl(gpu, HW3D_GET_REGIONS, regions) < 0) {
            ALOGE("HW3D_GET_REGIONS failed (%s)", strerror(errno));
            err = -errno;
        } else {
            ALOGD("smi: offset=%08lx, len=%08lx, phys=%p",
                    regions[HW3D_SMI].map_offset,
                    regions[HW3D_SMI].len,
                    regions[HW3D_SMI].phys);
            ALOGD("ebi: offset=%08lx, len=%08lx, phys=%p",
                    regions[HW3D_EBI].map_offset,
                    regions[HW3D_EBI].len,
                    regions[HW3D_EBI].phys);
            ALOGD("reg: offset=%08lx, len=%08lx, phys=%p",
                    regions[HW3D_REGS].map_offset,
                    regions[HW3D_REGS].len,
                    regions[HW3D_REGS].phys);

            void* base = mmap(0, ALLOCATORREGION_RESERVED_SIZE,
                    PROT_READ|PROT_WRITE, MAP_SHARED,
                    gpu, regions[FB_ARENA].map_offset);

            if (base == MAP_FAILED) {
                ALOGE("mmap EBI1 (%s)", strerror(errno));
                err = -errno;
                base = 0;
                close(gpu);
                gpu = -1;
            }

            m->fb_map_offset = regions[FB_ARENA].map_offset;
            m->gpu = gpu;
            m->gpu_base = base;
        }
    } else {
        err = -errno;
        m->gpu = 0;
        m->gpu_base = 0;
    }
    return err;
}

static int init_gpu_area(private_module_t* m)
{
    pthread_mutex_lock(&m->lock);
    int err = m->gpu;
    if (err == -1) {
        // first time, try to initialize gpu
        err = init_gpu_area_locked(m);
        if (err) {
            m->gpu = err;
        }
    } else if (err < 0) {
        // gpu couldn't be initialized, never use it
    } else {
        // gpu OK
        err = 0;
    }
    pthread_mutex_unlock(&m->lock);
    return err;
}

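// Chooses a backing store from the usage bits: ashmem for software-only
// buffers, a pmem sub-allocation for 2D/texture buffers, and the
// reserved GPU arena for HW_RENDER targets, falling back to ashmem
// when the preferred heap is unavailable.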
static int gralloc_alloc_buffer(alloc_device_t* dev,
        size_t size, int usage, buffer_handle_t* pHandle)
{
    int err = 0;
    int flags = 0;

    int fd = -1;
    int gpu_fd = -1;
    void* base = 0;
    int offset = 0;

    size = roundUpToPageSize(size);

    if (usage & GRALLOC_USAGE_HW_TEXTURE) {
        // enable pmem in that case, so our software GL can fall back to
        // the copybit module.
        flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
    }

    if (usage & GRALLOC_USAGE_HW_2D) {
        flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
    }

    if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) == 0) {
try_ashmem:
        fd = ashmem_create_region("gralloc-buffer", size);
        if (fd < 0) {
            ALOGE("couldn't create ashmem (%s)", strerror(errno));
            err = -errno;
        }
    } else if ((usage & GRALLOC_USAGE_HW_RENDER) == 0) {
        private_module_t* m = reinterpret_cast<private_module_t*>(
                dev->common.module);

        err = init_pmem_area(m);
        if (err == 0) {
            // PMEM buffers are always mmapped
            base = m->pmem_master_base;
            offset = sAllocator.allocate(size);
            if (offset < 0) {
                // no more pmem memory
                err = -ENOMEM;
            } else {
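                // Each pmem buffer gets its own fd: a fresh /dev/pmem fd
                // is attached to the master heap with PMEM_CONNECT, then
                // restricted to this buffer's [offset, offset+size)
                // window with PMEM_MAP so it can be passed to client
                // processes.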
                struct pmem_region sub = { offset, size };

                // now create the "sub-heap"
                fd = open("/dev/pmem", O_RDWR, 0);
                err = fd < 0 ? fd : 0;

                // and connect to it
                if (err == 0)
                    err = ioctl(fd, PMEM_CONNECT, m->pmem_master);

                // and make it available to the client process
                if (err == 0)
                    err = ioctl(fd, PMEM_MAP, &sub);

                if (err < 0) {
                    err = -errno;
                    close(fd);
                    sAllocator.deallocate(offset);
                    fd = -1;
                }
                memset((char*)base + offset, 0, size);
                //ALOGD_IF(!err, "allocating pmem size=%d, offset=%d", size, offset);
            }
        } else {
            if ((usage & GRALLOC_USAGE_HW_2D) == 0) {
                // the caller didn't request PMEM, so we can try something else
                flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
                err = 0;
                goto try_ashmem;
            } else {
                ALOGE("couldn't open pmem (%s)", strerror(errno));
            }
        }
    } else {
        // looks like we want 3D...
        flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
        flags |= private_handle_t::PRIV_FLAGS_USES_GPU;

        private_module_t* m = reinterpret_cast<private_module_t*>(
                dev->common.module);

        err = init_gpu_area(m);
        if (err == 0) {
            // GPU buffers are always mmapped
            base = m->gpu_base;

            // When a process holding GPU surfaces gets killed, it may take
            // up to a few seconds until SurfaceFlinger is notified and can
            // release the memory. So it's useful to wait a little bit here.
            long sleeptime = 0;
            int retry = 8; // up to ~9 seconds of back-off, worst case
            do {
                offset = sAllocatorGPU.allocate(size);
                if (offset < 0) {
                    // no more GPU memory
                    ALOGW("%d KiB allocation failed in GPU memory, retrying...",
                            size/1024);
                    err = -ENOMEM;
                    sleeptime += 250000;
                    usleep(sleeptime);
                } else {
                    ALOGD("allocating GPU size=%d, offset=%d", size, offset);
                    fd = open("/dev/null", O_RDONLY); // just so marshalling doesn't fail
                    gpu_fd = m->gpu;
                    memset((char*)base + offset, 0, size);
                    err = 0;
                }
            } while ((err == -ENOMEM) && (retry-- > 0));

        } else {
            // not enough memory, try ashmem
            flags &= ~private_handle_t::PRIV_FLAGS_USES_GPU;
            err = 0;
            goto try_ashmem;
        }
    }

    if (err == 0) {
        private_handle_t* hnd = new private_handle_t(fd, size, flags);
        if (base == NULL) {
            gralloc_module_t* module = reinterpret_cast<gralloc_module_t*>(
                    dev->common.module);
            err = mapBuffer(module, hnd);
            if (err == 0) {
                *pHandle = hnd;
            }
        } else {
            private_module_t* m = reinterpret_cast<private_module_t*>(
                    dev->common.module);
            hnd->offset = offset;
            hnd->base = int(base)+offset;
            hnd->gpu_fd = gpu_fd;
            hnd->map_offset = m->fb_map_offset;
            *pHandle = hnd;
        }
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));

    return err;
}

/*****************************************************************************/

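// Computes the size and stride implied by the requested format and
// usage, then dispatches to the framebuffer or regular allocator
// depending on GRALLOC_USAGE_HW_FB.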
static int gralloc_alloc(alloc_device_t* dev,
        int w, int h, int format, int usage,
        buffer_handle_t* pHandle, int* pStride)
{
    if (!pHandle || !pStride)
        return -EINVAL;

    size_t size, stride;

    int bpp = 0;
    switch (format) {
        case HAL_PIXEL_FORMAT_RGBA_8888:
        case HAL_PIXEL_FORMAT_RGBX_8888:
        case HAL_PIXEL_FORMAT_BGRA_8888:
            bpp = 4;
            break;
        case HAL_PIXEL_FORMAT_RGB_888:
            bpp = 3;
            break;
        case HAL_PIXEL_FORMAT_RGB_565:
        case HAL_PIXEL_FORMAT_RGBA_5551:
        case HAL_PIXEL_FORMAT_RGBA_4444:
            bpp = 2;
            break;
        default:
            return -EINVAL;
    }

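    // Example: w=101 at RGB_565 (bpp=2). The HW_RENDER path below rounds
    // the stride up to 104 pixels; the generic path rounds the bytes per
    // row up to 204, i.e. a stride of 102 pixels.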
    if (usage & GRALLOC_USAGE_HW_RENDER) {
        /* buffers MUST be aligned to the next multiple of 8 pixels;
         * any other alignment will fail due to assumptions in the driver */
        const int pixelAlignment = 8;
        const int mask = pixelAlignment - 1;
        stride = (w + mask) & ~mask;
        size = stride * h * bpp;
    } else {
        const int align = 4;
        size_t bpr = (w*bpp + (align-1)) & ~(align-1);
        size = bpr * h;
        stride = bpr / bpp;
    }

    int err;
    if (usage & GRALLOC_USAGE_HW_FB) {
        err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
    } else {
        err = gralloc_alloc_buffer(dev, size, usage, pHandle);
    }

    if (err < 0) {
        return err;
    }

    *pStride = stride;
    return 0;
}

static int gralloc_free(alloc_device_t* dev,
        buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // free this buffer
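        // The handle's base is an offset into the mapped framebuffer, so
        // the page-flip slot index falls out of the pointer arithmetic;
        // clearing its bit in bufferMask makes the slot reusable.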
        private_module_t* m = reinterpret_cast<private_module_t*>(
                dev->common.module);
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1<<index);
    } else {
        if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
            if (hnd->fd >= 0) {
                struct pmem_region sub = { hnd->offset, hnd->size };
                int err = ioctl(hnd->fd, PMEM_UNMAP, &sub);
                ALOGE_IF(err<0, "PMEM_UNMAP failed (%s), "
                        "fd=%d, sub.offset=%lu, sub.size=%lu",
                        strerror(errno), hnd->fd, hnd->offset, hnd->size);
                if (err == 0) {
                    // we can't deallocate the memory in case of UNMAP failure
                    // because it would give that process access to someone else's
                    // surfaces, which would be a security breach.
                    sAllocator.deallocate(hnd->offset);
                }
            }
        } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_GPU) {
            ALOGD("freeing GPU buffer at %d", hnd->offset);
            sAllocatorGPU.deallocate(hnd->offset);
        }

        gralloc_module_t* module = reinterpret_cast<gralloc_module_t*>(
                dev->common.module);
        terminateBuffer(module, const_cast<private_handle_t*>(hnd));
    }

    close(hnd->fd);
    delete hnd;
    return 0;
}

/*****************************************************************************/

static int gralloc_close(struct hw_device_t *dev)
{
    gralloc_context_t* ctx = reinterpret_cast<gralloc_context_t*>(dev);
    if (ctx) {
        /* TODO: keep a list of all buffer_handle_t created, and free them
         * all here.
         */
        free(ctx);
    }
    return 0;
}

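// Both device names exported by this module come through here: the GPU0
// allocator device is created locally, anything else is forwarded to
// fb_device_open().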
int gralloc_device_open(const hw_module_t* module, const char* name,
        hw_device_t** device)
{
    int status = -EINVAL;
    if (!strcmp(name, GRALLOC_HARDWARE_GPU0)) {
        gralloc_context_t *dev;
        dev = (gralloc_context_t*)malloc(sizeof(*dev));

        /* initialize our state here */
        memset(dev, 0, sizeof(*dev));

        /* initialize the procs */
        dev->device.common.tag = HARDWARE_DEVICE_TAG;
        dev->device.common.version = 0;
        dev->device.common.module = const_cast<hw_module_t*>(module);
        dev->device.common.close = gralloc_close;

        dev->device.alloc   = gralloc_alloc;
        dev->device.free    = gralloc_free;

        *device = &dev->device.common;
        status = 0;
    } else {
        status = fb_device_open(module, name, device);
    }
    return status;
}