1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <limits.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <cutils/properties.h>
22 #include <sys/mman.h>
23
24 #include "gr.h"
25 #include "gpu.h"
26 #include "memalloc.h"
27 #include "alloc_controller.h"
28 #include <qdMetaData.h>
29 #include <linux/msm_ion.h>
30
31 using namespace gralloc;
32
gpu_context_t(const private_module_t * module,IAllocController * alloc_ctrl)33 gpu_context_t::gpu_context_t(const private_module_t* module,
34 IAllocController* alloc_ctrl ) :
35 mAllocCtrl(alloc_ctrl)
36 {
37 // Zero out the alloc_device_t
38 memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));
39
40 // Initialize the procs
41 common.tag = HARDWARE_DEVICE_TAG;
42 common.version = 0;
43 common.module = const_cast<hw_module_t*>(&module->base.common);
44 common.close = gralloc_close;
45 alloc = gralloc_alloc;
46 free = gralloc_free;
47
48 }
49
/*
 * Allocates a graphics buffer (plus a companion metadata buffer) through the
 * ION-backed allocator controller and wraps the result in a private_handle_t.
 *
 * @param size       requested size in bytes (rounded up to a page here, and
 *                   further aligned for protected buffers)
 * @param usage      GRALLOC_USAGE_* bit mask from the client
 * @param pHandle    out: receives the new private_handle_t on success
 * @param bufferType BUFFER_TYPE_UI or BUFFER_TYPE_VIDEO
 * @param format     concrete HAL pixel format of the buffer
 * @param width      unaligned width requested by the client
 * @param height     unaligned height requested by the client
 * @return 0 on success; the allocator's (negative) error code on failure
 */
int gpu_context_t::gralloc_alloc_buffer(unsigned int size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    int alignedw = 0;
    int alignedh = 0;

    // Query the HW-aligned dimensions for this format/usage; they are stored
    // in the handle so consumers know the real stride / slice height.
    AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
                                                          height,
                                                          format,
                                                          usage,
                                                          alignedw,
                                                          alignedh);

    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    // Tiled 4:2:0 content gets 8K alignment; everything else is page-aligned.
    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();

    if (usage & GRALLOC_USAGE_PROTECTED) {
        if ((usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) ||
            (usage & GRALLOC_USAGE_HW_CAMERA_MASK)) {
            /* The alignment here reflects qsee mmu V7L/V8L requirement */
            data.align = SZ_2M;
        } else {
            data.align = SECURE_ALIGN;
        }
        size = ALIGN(size, data.align);
    }

    data.size = size;
    data.pHandle = (uintptr_t) pHandle;
    err = mAllocCtrl->allocate(data, usage);

    if (!err) {
        /* allocate memory for enhancement data */
        // A second, page-sized buffer holds the MetaData_t blob (color space
        // etc.) associated with this buffer. Failure to allocate it is logged
        // but not fatal: the handle is still created (with eData.fd == -1).
        alloc_data eData;
        eData.fd = -1;
        eData.base = 0;
        eData.offset = 0;
        eData.size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
        eData.pHandle = data.pHandle;
        eData.align = getpagesize();
        int eDataUsage = 0;
        int eDataErr = mAllocCtrl->allocate(eData, eDataUsage);
        ALOGE_IF(eDataErr, "gralloc failed for eDataErr=%s",
                 strerror(-eDataErr));

        // Translate the caller's usage bits into private_handle_t flags.
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_ONLY) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        }

        if (usage & GRALLOC_USAGE_PRIVATE_INTERNAL_ONLY) {
            flags |= private_handle_t::PRIV_FLAGS_INTERNAL_ONLY;
        }

        if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER ) {
            flags |= private_handle_t::PRIV_FLAGS_VIDEO_ENCODER;
        }

        if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
            flags |= private_handle_t::PRIV_FLAGS_CAMERA_WRITE;
        }

        if (usage & GRALLOC_USAGE_HW_CAMERA_READ) {
            flags |= private_handle_t::PRIV_FLAGS_CAMERA_READ;
        }

        if (usage & GRALLOC_USAGE_HW_COMPOSER) {
            flags |= private_handle_t::PRIV_FLAGS_HW_COMPOSER;
        }

        if (usage & GRALLOC_USAGE_HW_TEXTURE) {
            flags |= private_handle_t::PRIV_FLAGS_HW_TEXTURE;
        }

        if(usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) {
            flags |= private_handle_t::PRIV_FLAGS_SECURE_DISPLAY;
        }

        if(isMacroTileEnabled(format, usage)) {
            flags |= private_handle_t::PRIV_FLAGS_TILE_RENDERED;
        }

        if (isUBwcEnabled(format, usage)) {
            flags |= private_handle_t::PRIV_FLAGS_UBWC_ALIGNED;
        }

        if(usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
            flags |= private_handle_t::PRIV_FLAGS_CPU_RENDERED;
        }

        if (usage & (GRALLOC_USAGE_HW_VIDEO_ENCODER |
                     GRALLOC_USAGE_HW_CAMERA_WRITE |
                     GRALLOC_USAGE_HW_RENDER |
                     GRALLOC_USAGE_HW_FB)) {
            flags |= private_handle_t::PRIV_FLAGS_NON_CPU_WRITER;
        }

        if(usage & GRALLOC_USAGE_HW_COMPOSER) {
            flags |= private_handle_t::PRIV_FLAGS_DISP_CONSUMER;
        }

        // data.uncached is filled in by the allocator during allocate().
        if(false == data.uncached) {
            flags |= private_handle_t::PRIV_FLAGS_CACHED;
        }

        flags |= data.allocType;
        uint64_t eBaseAddr = (uint64_t)(eData.base) + eData.offset;
        private_handle_t *hnd = new private_handle_t(data.fd, size, flags,
                bufferType, format, alignedw, alignedh,
                eData.fd, eData.offset, eBaseAddr, width, height);

        hnd->offset = data.offset;
        hnd->base = (uint64_t)(data.base) + data.offset;
        hnd->gpuaddr = 0;
        // Default color space is ITU-R BT.601; consumers may update it later
        // through the metadata buffer.
        ColorSpace_t colorSpace = ITU_R_601;
        setMetaData(hnd, UPDATE_COLOR_SPACE, (void*) &colorSpace);

        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));

    return err;
}
183
getGrallocInformationFromFormat(int inputFormat,int * bufferType)184 void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
185 int *bufferType)
186 {
187 *bufferType = BUFFER_TYPE_VIDEO;
188
189 if (isUncompressedRgbFormat(inputFormat) == TRUE) {
190 // RGB formats
191 *bufferType = BUFFER_TYPE_UI;
192 }
193 }
194
/*
 * Allocates one of the buffers backing the framebuffer device.
 * Caller must hold m->lock (see gralloc_alloc_framebuffer).
 *
 * @param usage   GRALLOC_USAGE_* bits from the client
 * @param pHandle out: receives the handle on success
 * @return 0 on success, -EINVAL if there is no framebuffer, -ENOMEM when
 *         all framebuffer slots are in use
 */
int gpu_context_t::gralloc_alloc_framebuffer_locked(int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // This allocation will only happen when gralloc is in fb mode

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const unsigned int bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    unsigned int bufferSize = m->finfo.line_length * m->info.yres;

    //adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    // All slot bits set means every buffer is handed out.
    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handle for it
    uint64_t vaddr = uint64_t(m->framebuffer->base);
    // As GPU needs ION FD, the private handle is created
    // using ION fd and ION flags are set
    private_handle_t* hnd = new private_handle_t(
        dup(m->framebuffer->fd), bufferSize,
        private_handle_t::PRIV_FLAGS_USES_ION |
        private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
        BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
        m->info.yres);

    // find a free slot: vaddr is advanced past each occupied slot so it ends
    // up pointing at the first free one, whose bit is then claimed.
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (uint32_t)(1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }
    hnd->base = vaddr;
    hnd->offset = (unsigned int)(vaddr - m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}
252
253
gralloc_alloc_framebuffer(int usage,buffer_handle_t * pHandle)254 int gpu_context_t::gralloc_alloc_framebuffer(int usage,
255 buffer_handle_t* pHandle)
256 {
257 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
258 pthread_mutex_lock(&m->lock);
259 int err = gralloc_alloc_framebuffer_locked(usage, pHandle);
260 pthread_mutex_unlock(&m->lock);
261 return err;
262 }
263
alloc_impl(int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride,unsigned int bufferSize)264 int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
265 buffer_handle_t* pHandle, int* pStride,
266 unsigned int bufferSize) {
267 if (!pHandle || !pStride)
268 return -EINVAL;
269
270 unsigned int size;
271 int alignedw, alignedh;
272 int grallocFormat = format;
273 int bufferType;
274
275 //If input format is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED then based on
276 //the usage bits, gralloc assigns a format.
277 if(format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED ||
278 format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
279 if (usage & GRALLOC_USAGE_PRIVATE_ALLOC_UBWC)
280 grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC;
281 else if(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
282 if(MDPCapabilityInfo::getInstance().isWBUBWCSupportedByMDP() &&
283 usage & GRALLOC_USAGE_HW_COMPOSER)
284 grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC;
285 else
286 grallocFormat = HAL_PIXEL_FORMAT_NV12_ENCODEABLE; //NV12
287 } else if((usage & GRALLOC_USAGE_HW_CAMERA_MASK)
288 == GRALLOC_USAGE_HW_CAMERA_ZSL)
289 grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL; //NV21 ZSL
290 else if(usage & GRALLOC_USAGE_HW_CAMERA_READ)
291 grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; //NV21
292 else if(usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
293 if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
294 grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL; //NV21
295 } else {
296 grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS; //NV12 preview
297 }
298 } else if(usage & GRALLOC_USAGE_HW_COMPOSER)
299 //XXX: If we still haven't set a format, default to RGBA8888
300 grallocFormat = HAL_PIXEL_FORMAT_RGBA_8888;
301 else if(format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
302 //If no other usage flags are detected, default the
303 //flexible YUV format to NV21_ZSL
304 grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL;
305 }
306 }
307
308 bool useFbMem = false;
309 char property[PROPERTY_VALUE_MAX];
310 char isUBWC[PROPERTY_VALUE_MAX];
311 if (usage & GRALLOC_USAGE_HW_FB) {
312 if ((property_get("debug.gralloc.map_fb_memory", property, NULL) > 0) &&
313 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
314 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
315 useFbMem = true;
316 } else {
317 usage &= ~GRALLOC_USAGE_PRIVATE_ALLOC_UBWC;
318 if (property_get("debug.gralloc.enable_fb_ubwc", isUBWC, NULL) > 0){
319 if ((!strncmp(isUBWC, "1", PROPERTY_VALUE_MAX)) ||
320 (!strncasecmp(isUBWC, "true", PROPERTY_VALUE_MAX))) {
321 // Allocate UBWC aligned framebuffer
322 usage |= GRALLOC_USAGE_PRIVATE_ALLOC_UBWC;
323 }
324 }
325 }
326 }
327
328 getGrallocInformationFromFormat(grallocFormat, &bufferType);
329 size = getBufferSizeAndDimensions(w, h, grallocFormat, usage, alignedw,
330 alignedh);
331
332 if ((unsigned int)size <= 0)
333 return -EINVAL;
334 size = (bufferSize >= size)? bufferSize : size;
335
336 int err = 0;
337 if(useFbMem) {
338 err = gralloc_alloc_framebuffer(usage, pHandle);
339 } else {
340 err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
341 grallocFormat, w, h);
342 }
343
344 if (err < 0) {
345 return err;
346 }
347
348 *pStride = alignedw;
349 return 0;
350 }
351
free_impl(private_handle_t const * hnd)352 int gpu_context_t::free_impl(private_handle_t const* hnd) {
353 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
354 if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
355 const unsigned int bufferSize = m->finfo.line_length * m->info.yres;
356 unsigned int index = (unsigned int) ((hnd->base - m->framebuffer->base)
357 / bufferSize);
358 m->bufferMask &= (uint32_t)~(1LU<<index);
359 } else {
360
361 terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
362 IMemAlloc* memalloc = mAllocCtrl->getAllocator(hnd->flags);
363 int err = memalloc->free_buffer((void*)hnd->base, hnd->size,
364 hnd->offset, hnd->fd);
365 if(err)
366 return err;
367 // free the metadata space
368 unsigned int size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
369 err = memalloc->free_buffer((void*)hnd->base_metadata,
370 size, hnd->offset_metadata,
371 hnd->fd_metadata);
372 if (err)
373 return err;
374 }
375 delete hnd;
376 return 0;
377 }
378
gralloc_alloc(alloc_device_t * dev,int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride)379 int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
380 int usage, buffer_handle_t* pHandle,
381 int* pStride)
382 {
383 if (!dev) {
384 return -EINVAL;
385 }
386 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
387 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
388 }
gralloc_alloc_size(alloc_device_t * dev,int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride,int bufferSize)389 int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h,
390 int format, int usage,
391 buffer_handle_t* pHandle, int* pStride,
392 int bufferSize)
393 {
394 if (!dev) {
395 return -EINVAL;
396 }
397 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
398 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
399 }
400
401
gralloc_free(alloc_device_t * dev,buffer_handle_t handle)402 int gpu_context_t::gralloc_free(alloc_device_t* dev,
403 buffer_handle_t handle)
404 {
405 if (private_handle_t::validate(handle) < 0)
406 return -EINVAL;
407
408 private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
409 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
410 return gpu->free_impl(hnd);
411 }
412
413 /*****************************************************************************/
414
gralloc_close(struct hw_device_t * dev)415 int gpu_context_t::gralloc_close(struct hw_device_t *dev)
416 {
417 gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
418 if (ctx) {
419 /* TODO: keep a list of all buffer_handle_t created, and free them
420 * all here.
421 */
422 delete ctx;
423 }
424 return 0;
425 }
426
427