/*
 * Copyright (C) 2010 The Android Open Source Project
 * Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <cutils/properties.h>
#include <sys/mman.h>

#include <genlock.h>

#include "gr.h"
#include "gpu.h"
#include "memalloc.h"
#include "alloc_controller.h"

using namespace gralloc;

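/*
 * gpu_context_t wires this object up as the gralloc alloc_device_t: the
 * embedded alloc_device_t is zeroed, the hw_device_t header is filled in,
 * and the alloc/free/close entry points are routed to the static hooks
 * defined below. A minimal usage sketch (assuming the device is opened via
 * the standard helpers from <hardware/gralloc.h>; the actual open path lives
 * in gralloc.cpp, not in this file):
 *
 *   alloc_device_t* dev = NULL;
 *   gralloc_open(&module->base.common, &dev);   // ends up in this constructor
 *   buffer_handle_t handle; int stride = 0;
 *   dev->alloc(dev, 1280, 720, HAL_PIXEL_FORMAT_RGBA_8888,
 *              GRALLOC_USAGE_HW_TEXTURE, &handle, &stride);
 *   // ... use the buffer ...
 *   dev->free(dev, handle);
 *   gralloc_close(dev);
 */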
gpu_context_t::gpu_context_t(const private_module_t* module,
                             IAllocController* alloc_ctrl) :
    mAllocCtrl(alloc_ctrl)
{
    // Zero out the alloc_device_t
    memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));

    // Initialize the procs
    common.tag = HARDWARE_DEVICE_TAG;
    common.version = 0;
    common.module = const_cast<hw_module_t*>(&module->base.common);
    common.close = gralloc_close;
    alloc = gralloc_alloc;
    free = gralloc_free;
}

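/*
 * Hands out one slot of the pre-mapped framebuffer. With a single buffer we
 * fall back to a regular HW_2D allocation that gets copied to the screen on
 * post; with multiple buffers we dup() the framebuffer fd, mark the handle as
 * PMEM-backed framebuffer memory, and claim the first free slot recorded in
 * m->bufferMask (e.g. with numBuffers == 2 the mask goes 0b00 -> 0b01 -> 0b11,
 * after which -ENOMEM is returned). Caller must hold m->lock.
 */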
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // we don't support framebuffer allocations with graphics heap flags
    if (usage & GRALLOC_HEAP_MASK) {
        return -EINVAL;
    }

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    size_t bufferSize = m->finfo.line_length * m->info.yres;

    // adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    if (bufferMask >= ((1LU << numBuffers) - 1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // Create a "fake" handle for it.
    // Set the PMEM flag as well, since adreno
    // treats the FB memory as pmem.
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), bufferSize,
                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
                                                 BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
                                                 m->info.yres);

    // find a free slot
    for (uint32_t i = 0; i < numBuffers; i++) {
        if ((bufferMask & (1LU << i)) == 0) {
            m->bufferMask |= (1LU << i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}

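// Thread-safe wrapper: takes the module lock around the framebuffer slot
// bookkeeping done in gralloc_alloc_framebuffer_locked().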
int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
                                             buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    pthread_mutex_lock(&m->lock);
    int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
    pthread_mutex_unlock(&m->lock);
    return err;
}

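/*
 * Generic (non-framebuffer) allocation: rounds the size up to a page,
 * asks the IAllocController for backing memory (8K alignment for the tiled
 * NV12 format, page alignment otherwise), translates the usage bits into
 * private_handle_t flags, and wraps the result in a new private_handle_t.
 */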
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    data.size = size;
    if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();
    data.pHandle = (unsigned int) pHandle;
    err = mAllocCtrl->allocate(data, usage);

    if (usage & GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED) {
        flags |= private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED;
    }

    if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_ONLY) {
        flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        // The EXTERNAL_BLOCK flag is always an add-on
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_BLOCK) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK;
        }
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_CC) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_CC;
        }
    }

    if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
        flags |= private_handle_t::PRIV_FLAGS_VIDEO_ENCODER;
    }

    if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
        flags |= private_handle_t::PRIV_FLAGS_CAMERA_WRITE;
    }

    if (usage & GRALLOC_USAGE_HW_CAMERA_READ) {
        flags |= private_handle_t::PRIV_FLAGS_CAMERA_READ;
    }

    if (err == 0) {
        flags |= data.allocType;
        private_handle_t* hnd = new private_handle_t(data.fd, size, flags,
                                                     bufferType, format, width,
                                                     height);

        hnd->offset = data.offset;
        hnd->base = int(data.base) + data.offset;
        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
    return err;
}

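// Classifies a pixel format as a UI or video buffer. RGB formats (enum values
// below 0x7) and the R_8/RG_88 formats are treated as UI; everything else
// defaults to video.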
void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
                                                    int *bufferType)
{
    *bufferType = BUFFER_TYPE_VIDEO;

    if (inputFormat < 0x7) {
        // RGB formats
        *bufferType = BUFFER_TYPE_UI;
    } else if ((inputFormat == HAL_PIXEL_FORMAT_R_8) ||
               (inputFormat == HAL_PIXEL_FORMAT_RG_88)) {
        *bufferType = BUFFER_TYPE_UI;
    }
}

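/*
 * Common allocation path shared by gralloc_alloc() and gralloc_alloc_size().
 * Resolves IMPLEMENTATION_DEFINED formats from the usage bits, computes the
 * aligned size and dimensions, forces protected/external-display buffers to
 * the video (overlay) type, allocates the buffer, and attaches a genlock lock
 * to the new handle. If genlock creation fails, the freshly allocated buffer
 * is released again.
 */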
int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
                              buffer_handle_t* pHandle, int* pStride,
                              size_t bufferSize) {
    if (!pHandle || !pStride)
        return -EINVAL;

    size_t size;
    int alignedw, alignedh;
    int grallocFormat = format;
    int bufferType;

    // If input format is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED then, based
    // on the usage bits, gralloc assigns a format.
    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
        if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
            grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP; // NV12
        else if (usage & GRALLOC_USAGE_HW_CAMERA_READ)
            grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; // NV21
        else if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE)
            grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; // NV21
    }

    getGrallocInformationFromFormat(grallocFormat, &bufferType);
    size = getBufferSizeAndDimensions(w, h, grallocFormat, alignedw, alignedh);

    if ((ssize_t)size <= 0)
        return -EINVAL;
    size = (bufferSize >= size) ? bufferSize : size;

    // All buffers marked as protected or for external
    // display need to go to overlay
    if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
        (usage & GRALLOC_USAGE_PROTECTED) ||
        (usage & GRALLOC_USAGE_PRIVATE_CP_BUFFER)) {
        bufferType = BUFFER_TYPE_VIDEO;
    }

    int err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
                                   grallocFormat, alignedw, alignedh);

    if (err < 0) {
        return err;
    }

    // Create a genlock lock for this buffer handle.
    err = genlock_create_lock((native_handle_t*)(*pHandle));
    if (err) {
        ALOGE("%s: genlock_create_lock failed", __FUNCTION__);
        free_impl(reinterpret_cast<private_handle_t const*>(*pHandle));
        return err;
    }
    *pStride = alignedw;
    return 0;
}

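/*
 * Releases a buffer previously returned by alloc_impl(): framebuffer handles
 * simply give their slot back to m->bufferMask, while normal buffers are
 * unmapped and returned to their allocator. In both cases the genlock lock is
 * released and the handle deleted.
 */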
int gpu_context_t::free_impl(private_handle_t const* hnd) {
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // Free this framebuffer slot; use the same page-aligned size as the
        // allocation path so the computed slot index matches.
        size_t bufferSize = m->finfo.line_length * m->info.yres;
        bufferSize = roundUpToPageSize(bufferSize);
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1LU << index);
    } else {
        terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
        IMemAlloc* memalloc = mAllocCtrl->getAllocator(hnd->flags);
        int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
                                        hnd->offset, hnd->fd);
        if (err)
            return err;
    }

    // Release the genlock
    int err = genlock_release_lock((native_handle_t*)hnd);
    if (err) {
        ALOGE("%s: genlock_release_lock failed", __FUNCTION__);
    }

    delete hnd;
    return 0;
}

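// HAL entry points: thin static wrappers that recover the gpu_context_t from
// the alloc_device_t pointer and forward to alloc_impl(). gralloc_alloc_size()
// additionally lets the caller request a minimum buffer size.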
int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
                                 int usage, buffer_handle_t* pHandle,
                                 int* pStride)
{
    if (!dev) {
        return -EINVAL;
    }
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
}

int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h,
                                      int format, int usage,
                                      buffer_handle_t* pHandle, int* pStride,
                                      int bufferSize)
{
    if (!dev) {
        return -EINVAL;
    }
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
}

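// HAL free hook: validates the handle before handing it to free_impl().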
int gpu_context_t::gralloc_free(alloc_device_t* dev,
                                buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->free_impl(hnd);
}

/*****************************************************************************/

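// Device close hook registered in the constructor; simply destroys the
// gpu_context_t. Outstanding buffer handles are not tracked here (see TODO).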
int gpu_context_t::gralloc_close(struct hw_device_t *dev)
{
    gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
    if (ctx) {
        /* TODO: keep a list of all buffer_handle_t created, and free them
         * all here.
         */
        delete ctx;
    }
    return 0;
}