1 /*
2 * Copyright (C) 2010 The Android Open Source Project
3 * Copyright (c) 2011-2014,2017 The Linux Foundation. All rights reserved.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <limits.h>
19 #include <unistd.h>
20 #include <fcntl.h>
21 #include <cutils/properties.h>
22 #include <sys/mman.h>
23 #include <linux/msm_ion.h>
24 #include <qdMetaData.h>
25 #include <algorithm>
26
27 #include "gr.h"
28 #include "gpu.h"
29 #include "memalloc.h"
30 #include "alloc_controller.h"
31
32 using namespace gralloc;
33
gpu_context_t(const private_module_t * module,IAllocController * alloc_ctrl)34 gpu_context_t::gpu_context_t(const private_module_t* module,
35 IAllocController* alloc_ctrl ) :
36 mAllocCtrl(alloc_ctrl)
37 {
38 // Zero out the alloc_device_t
39 memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));
40
41 // Initialize the procs
42 common.tag = HARDWARE_DEVICE_TAG;
43 common.version = 0;
44 common.module = const_cast<hw_module_t*>(&module->base.common);
45 common.close = gralloc_close;
46 alloc = gralloc_alloc;
47 free = gralloc_free;
48
49 }
50
// Allocate one graphics buffer plus its companion metadata buffer, and
// publish the result through *pHandle as a private_handle_t.
// Returns 0 on success or the negative errno from the allocator.
// NOTE: a failure to allocate the metadata buffer is logged but does not
// fail the main allocation; the handle is still created with eData's
// (possibly invalid) fd/offset/base.
int gpu_context_t::gralloc_alloc_buffer(unsigned int size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    int alignedw = 0;
    int alignedh = 0;

    // Hardware-aligned dimensions (stride/scanline padding) for this
    // format+usage combination; stored in the handle for consumers.
    AdrenoMemInfo::getInstance().getAlignedWidthAndHeight(width,
            height,
            format,
            usage,
            alignedw,
            alignedh);

    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    // Legacy tiled YUV needs 8K alignment; everything else is fine with
    // page alignment (may be raised below for protected buffers).
    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();

    if (usage & GRALLOC_USAGE_PROTECTED) {
        if ((usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) ||
            (usage & GRALLOC_USAGE_HW_CAMERA_MASK)) {
            /* The alignment here reflects qsee mmu V7L/V8L requirement */
            data.align = SZ_2M;
        } else {
            data.align = SECURE_ALIGN;
        }
        // Secure buffers must be a whole multiple of their alignment.
        size = ALIGN(size, data.align);
    }

    data.size = size;
    data.pHandle = (uintptr_t) pHandle;
    // Main allocation; on success the allocator fills in data.fd/base/
    // offset/allocType (and presumably data.uncached — read below but not
    // initialized here; TODO confirm allocate() always sets it).
    err = mAllocCtrl->allocate(data, usage);

    if (!err) {
        /* allocate memory for enhancement data */
        // Separate page-sized ION buffer holding the MetaData_t blob
        // (color space, interlace info, etc.) shared across processes.
        alloc_data eData;
        eData.fd = -1;
        eData.base = 0;
        eData.offset = 0;
        eData.size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
        eData.pHandle = data.pHandle;
        eData.align = getpagesize();
        int eDataUsage = 0;
        int eDataErr = mAllocCtrl->allocate(eData, eDataUsage);
        ALOGE_IF(eDataErr, "gralloc failed for eDataErr=%s",
                                          strerror(-eDataErr));

        // Translate the caller's usage bits into private handle flags so
        // consumers can recover the buffer's properties later.
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_ONLY) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        }

        if (usage & GRALLOC_USAGE_PRIVATE_INTERNAL_ONLY) {
            flags |= private_handle_t::PRIV_FLAGS_INTERNAL_ONLY;
        }

        if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER ) {
            flags |= private_handle_t::PRIV_FLAGS_VIDEO_ENCODER;
        }

        if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
            flags |= private_handle_t::PRIV_FLAGS_CAMERA_WRITE;
        }

        if (usage & GRALLOC_USAGE_HW_CAMERA_READ) {
            flags |= private_handle_t::PRIV_FLAGS_CAMERA_READ;
        }

        if (usage & GRALLOC_USAGE_HW_COMPOSER) {
            flags |= private_handle_t::PRIV_FLAGS_HW_COMPOSER;
        }

        if (usage & GRALLOC_USAGE_HW_TEXTURE) {
            flags |= private_handle_t::PRIV_FLAGS_HW_TEXTURE;
        }

        if(usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) {
            flags |= private_handle_t::PRIV_FLAGS_SECURE_DISPLAY;
        }

        if (isUBwcEnabled(format, usage)) {
            flags |= private_handle_t::PRIV_FLAGS_UBWC_ALIGNED;
        }

        if(usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
            flags |= private_handle_t::PRIV_FLAGS_CPU_RENDERED;
        }

        // Any hardware writer means the CPU cannot assume cache coherence.
        if (usage & (GRALLOC_USAGE_HW_VIDEO_ENCODER |
                GRALLOC_USAGE_HW_CAMERA_WRITE |
                GRALLOC_USAGE_HW_RENDER |
                GRALLOC_USAGE_HW_FB)) {
            flags |= private_handle_t::PRIV_FLAGS_NON_CPU_WRITER;
        }

        if(usage & GRALLOC_USAGE_HW_COMPOSER) {
            flags |= private_handle_t::PRIV_FLAGS_DISP_CONSUMER;
        }

        if(false == data.uncached) {
            flags |= private_handle_t::PRIV_FLAGS_CACHED;
        }

        // allocType carries allocator-specific flags (e.g. USES_ION).
        flags |= data.allocType;
        uint64_t eBaseAddr = (uint64_t)(eData.base) + eData.offset;
        private_handle_t *hnd = new private_handle_t(data.fd, size, flags,
                bufferType, format, alignedw, alignedh,
                eData.fd, eData.offset, eBaseAddr, width, height);

        hnd->offset = data.offset;
        hnd->base = (uint64_t)(data.base) + data.offset;
        hnd->gpuaddr = 0;
        // Default color space for new buffers; callers may override via
        // metadata later.
        ColorSpace_t colorSpace = ITU_R_601;
        setMetaData(hnd, UPDATE_COLOR_SPACE, (void*) &colorSpace);
        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
    return err;
}
178
getGrallocInformationFromFormat(int inputFormat,int * bufferType)179 void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
180 int *bufferType)
181 {
182 *bufferType = BUFFER_TYPE_VIDEO;
183
184 if (isUncompressedRgbFormat(inputFormat) == TRUE) {
185 // RGB formats
186 *bufferType = BUFFER_TYPE_UI;
187 }
188 }
189
// Hand out one framebuffer slot (fb mode only). Caller must hold m->lock.
// With multiple buffers, slots are carved from the single contiguous
// framebuffer region and tracked via m->bufferMask; with one buffer a
// regular gralloc buffer is returned instead (memcpy'ed on post).
// Returns 0 on success, -EINVAL if there is no framebuffer, -ENOMEM when
// all slots are taken.
int gpu_context_t::gralloc_alloc_framebuffer_locked(int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // This allocation will only happen when gralloc is in fb mode

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const unsigned int bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    unsigned int bufferSize = m->finfo.line_length * m->info.yres;

    //adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handle for it
    uint64_t vaddr = uint64_t(m->framebuffer->base);
    // As GPU needs ION FD, the private handle is created
    // using ION fd and ION flags are set
    private_handle_t* hnd = new private_handle_t(
        dup(m->framebuffer->fd), bufferSize,
        private_handle_t::PRIV_FLAGS_USES_ION |
        private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
        BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
        m->info.yres);

    // find a free slot
    // vaddr advances one page-aligned bufferSize per occupied slot, so on
    // break it points at the chosen slot's base address.
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (uint32_t)(1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }
    hnd->base = vaddr;
    hnd->offset = (unsigned int)(vaddr - m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}
247
248
gralloc_alloc_framebuffer(int usage,buffer_handle_t * pHandle)249 int gpu_context_t::gralloc_alloc_framebuffer(int usage,
250 buffer_handle_t* pHandle)
251 {
252 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
253 pthread_mutex_lock(&m->lock);
254 int err = gralloc_alloc_framebuffer_locked(usage, pHandle);
255 pthread_mutex_unlock(&m->lock);
256 return err;
257 }
258
alloc_impl(int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride,unsigned int bufferSize)259 int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
260 buffer_handle_t* pHandle, int* pStride,
261 unsigned int bufferSize) {
262 if (!pHandle || !pStride)
263 return -EINVAL;
264
265 unsigned int size;
266 int alignedw, alignedh;
267 int grallocFormat = format;
268 int bufferType;
269
270 //If input format is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED then based on
271 //the usage bits, gralloc assigns a format.
272 if(format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED ||
273 format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
274 if (usage & GRALLOC_USAGE_PRIVATE_ALLOC_UBWC)
275 grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC;
276 else if(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
277 if(MDPCapabilityInfo::getInstance().isWBUBWCSupportedByMDP() &&
278 !IAllocController::getInstance()->isDisableUBWCForEncoder() &&
279 usage & GRALLOC_USAGE_HW_COMPOSER)
280 grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS_UBWC;
281 else
282 grallocFormat = HAL_PIXEL_FORMAT_NV12_ENCODEABLE; //NV12
283 } else if((usage & GRALLOC_USAGE_HW_CAMERA_MASK)
284 == GRALLOC_USAGE_HW_CAMERA_ZSL)
285 grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL; //NV21 ZSL
286 else if(usage & GRALLOC_USAGE_HW_CAMERA_READ)
287 grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; //NV21
288 else if(usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
289 if (format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
290 grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL; //NV21
291 } else {
292 grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS; //NV12 preview
293 }
294 } else if(usage & GRALLOC_USAGE_HW_COMPOSER)
295 //XXX: If we still haven't set a format, default to RGBA8888
296 grallocFormat = HAL_PIXEL_FORMAT_RGBA_8888;
297 else if(format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
298 //If no other usage flags are detected, default the
299 //flexible YUV format to NV21_ZSL
300 grallocFormat = HAL_PIXEL_FORMAT_NV21_ZSL;
301 }
302 }
303
304 bool useFbMem = false;
305 char property[PROPERTY_VALUE_MAX];
306 char isUBWC[PROPERTY_VALUE_MAX];
307 if (usage & GRALLOC_USAGE_HW_FB) {
308 if ((property_get("debug.gralloc.map_fb_memory", property, NULL) > 0) &&
309 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
310 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
311 useFbMem = true;
312 } else {
313 usage &= ~GRALLOC_USAGE_PRIVATE_ALLOC_UBWC;
314 if (property_get("debug.gralloc.enable_fb_ubwc", isUBWC, NULL) > 0){
315 if ((!strncmp(isUBWC, "1", PROPERTY_VALUE_MAX)) ||
316 (!strncasecmp(isUBWC, "true", PROPERTY_VALUE_MAX))) {
317 // Allocate UBWC aligned framebuffer
318 usage |= GRALLOC_USAGE_PRIVATE_ALLOC_UBWC;
319 }
320 }
321 }
322 }
323
324 getGrallocInformationFromFormat(grallocFormat, &bufferType);
325 size = getBufferSizeAndDimensions(w, h, grallocFormat, usage, alignedw,
326 alignedh);
327
328 if ((unsigned int)size <= 0)
329 return -EINVAL;
330 size = (bufferSize >= size)? bufferSize : size;
331
332 int err = 0;
333 if(useFbMem) {
334 err = gralloc_alloc_framebuffer(usage, pHandle);
335 } else {
336 err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
337 grallocFormat, w, h);
338 }
339
340 if (err < 0) {
341 return err;
342 }
343
344 *pStride = alignedw;
345 return 0;
346 }
347
free_impl(private_handle_t const * hnd)348 int gpu_context_t::free_impl(private_handle_t const* hnd) {
349 private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
350 if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
351 const unsigned int bufferSize = m->finfo.line_length * m->info.yres;
352 unsigned int index = (unsigned int) ((hnd->base - m->framebuffer->base)
353 / bufferSize);
354 m->bufferMask &= (uint32_t)~(1LU<<index);
355 } else {
356
357 terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
358 IMemAlloc* memalloc = mAllocCtrl->getAllocator(hnd->flags);
359 int err = memalloc->free_buffer((void*)hnd->base, hnd->size,
360 hnd->offset, hnd->fd);
361 if(err)
362 return err;
363 // free the metadata space
364 unsigned int size = ROUND_UP_PAGESIZE(sizeof(MetaData_t));
365 err = memalloc->free_buffer((void*)hnd->base_metadata,
366 size, hnd->offset_metadata,
367 hnd->fd_metadata);
368 if (err)
369 return err;
370 }
371
372 delete hnd;
373 return 0;
374 }
375
gralloc_alloc(alloc_device_t * dev,int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride)376 int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
377 int usage, buffer_handle_t* pHandle,
378 int* pStride)
379 {
380 if (!dev) {
381 return -EINVAL;
382 }
383 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
384 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
385 }
gralloc_alloc_size(alloc_device_t * dev,int w,int h,int format,int usage,buffer_handle_t * pHandle,int * pStride,int bufferSize)386 int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h,
387 int format, int usage,
388 buffer_handle_t* pHandle, int* pStride,
389 int bufferSize)
390 {
391 if (!dev) {
392 return -EINVAL;
393 }
394 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
395 return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
396 }
397
398
gralloc_free(alloc_device_t * dev,buffer_handle_t handle)399 int gpu_context_t::gralloc_free(alloc_device_t* dev,
400 buffer_handle_t handle)
401 {
402 if (private_handle_t::validate(handle) < 0)
403 return -EINVAL;
404
405 private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
406 gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
407 return gpu->free_impl(hnd);
408 }
409
410 /*****************************************************************************/
411
gralloc_close(struct hw_device_t * dev)412 int gpu_context_t::gralloc_close(struct hw_device_t *dev)
413 {
414 gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
415 if (ctx) {
416 /* TODO: keep a list of all buffer_handle_t created, and free them
417 * all here.
418 */
419 delete ctx;
420 }
421 return 0;
422 }
423
424