/*
 * Copyright (C) 2010 The Android Open Source Project
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define ATRACE_TAG (ATRACE_TAG_GRAPHICS | ATRACE_TAG_HAL)
#include <fcntl.h>
#include <errno.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <EGL/egl.h>
#include <utils/Trace.h>
#include <sys/ioctl.h>
#include <overlay.h>
#include <overlayRotator.h>
#include <mdp_version.h>
#include "hwc_utils.h"
#include "hwc_fbupdate.h"
#include "hwc_mdpcomp.h"
#include "external.h"
#include "hwc_copybit.h"
#include "profiler.h"

using namespace qhwc;
#define VSYNC_DEBUG 0
#define BLANK_DEBUG 0

static int hwc_device_open(const struct hw_module_t* module,
                           const char* name,
                           struct hw_device_t** device);

static struct hw_module_methods_t hwc_module_methods = {
    open: hwc_device_open
};

hwc_module_t HAL_MODULE_INFO_SYM = {
    common: {
        tag: HARDWARE_MODULE_TAG,
        version_major: 2,
        version_minor: 0,
        id: HWC_HARDWARE_MODULE_ID,
        name: "Qualcomm Hardware Composer Module",
        author: "CodeAurora Forum",
        methods: &hwc_module_methods,
        dso: 0,
        reserved: {0},
    }
};

/*
 * Save callback functions registered to HWC
 */
static void hwc_registerProcs(struct hwc_composer_device_1* dev,
                              hwc_procs_t const* procs)
{
    ALOGI("%s", __FUNCTION__);
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    if(!ctx) {
        ALOGE("%s: Invalid context", __FUNCTION__);
        return;
    }
    ctx->proc = procs;

    // Now that we have the functions needed, kick off
    // the uevent & vsync threads
    init_uevent_thread(ctx);
    init_vsync_thread(ctx);
}

//Helper: resets per-frame state before each prepare() pass
static void reset(hwc_context_t *ctx, int numDisplays,
                  hwc_display_contents_1_t** displays) {
    memset(ctx->listStats, 0, sizeof(ctx->listStats));
    for(int i = 0; i < MAX_DISPLAYS; i++) {
        hwc_display_contents_1_t *list = displays[i];
        // XXX: SurfaceFlinger no longer guarantees that this
        // value is reset on every prepare. However, for the layer
        // cache we need to reset it.
        // We can probably rethink that later on
        if (LIKELY(list && list->numHwLayers > 1)) {
            for(uint32_t j = 0; j < list->numHwLayers; j++) {
                if(list->hwLayers[j].compositionType != HWC_FRAMEBUFFER_TARGET)
                    list->hwLayers[j].compositionType = HWC_FRAMEBUFFER;
            }
        }

        if(ctx->mFBUpdate[i])
            ctx->mFBUpdate[i]->reset();
        if(ctx->mCopyBit[i])
            ctx->mCopyBit[i]->reset();
        if(ctx->mLayerRotMap[i])
            ctx->mLayerRotMap[i]->reset();
    }
}

//Clear previous layer prop flags and reallocate for the current frame
static void reset_layer_prop(hwc_context_t* ctx, int dpy, int numAppLayers) {
    if(ctx->layerProp[dpy]) {
        delete[] ctx->layerProp[dpy];
        ctx->layerProp[dpy] = NULL;
    }
    ctx->layerProp[dpy] = new LayerProp[numAppLayers];
}

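// Commits the frame on the given display by issuing the MSMFB_DISPLAY_COMMIT
// ioctl on its framebuffer fd, so the overlay pipes programmed for this
// frame take effect on the panel.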
static int display_commit(hwc_context_t *ctx, int dpy) {
    int fbFd = ctx->dpyAttr[dpy].fd;
    if(fbFd == -1) {
        ALOGE("%s: Invalid FB fd for display: %d", __FUNCTION__, dpy);
        return -1;
    }

    struct mdp_display_commit commit_info;
    memset(&commit_info, 0, sizeof(struct mdp_display_commit));
    commit_info.flags = MDP_DISPLAY_COMMIT_OVERLAY;
    if(ioctl(fbFd, MSMFB_DISPLAY_COMMIT, &commit_info) == -1) {
        ALOGE("%s: MSMFB_DISPLAY_COMMIT failed for display: %d",
              __FUNCTION__, dpy);
        return -errno;
    }
    return 0;
}

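// prepare() for the primary display: collect per-frame layer statistics,
// let MDP composition claim what it can, and stage the framebuffer target
// at the Z order it returns.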
static int hwc_prepare_primary(hwc_composer_device_1 *dev,
                               hwc_display_contents_1_t *list) {
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    const int dpy = HWC_DISPLAY_PRIMARY;
    if(UNLIKELY(!ctx->mBasePipeSetup))
        setupBasePipe(ctx);
    if (LIKELY(list && list->numHwLayers > 1) &&
            ctx->dpyAttr[dpy].isActive) {
        reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        if(fbLayer->handle) {
            setListStats(ctx, list, dpy);
            int fbZOrder = ctx->mMDPComp[dpy]->prepare(ctx, list);
            if(fbZOrder >= 0)
                ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZOrder);

            /* Temporarily commenting out C2D until we support partial
               copybit composition for mixed mode MDP

            // Use Copybit, when MDP comp fails
            if((fbZOrder >= 0) && ctx->mCopyBit[dpy])
                ctx->mCopyBit[dpy]->prepare(ctx, list, dpy);
            */
        }
    }
    return 0;
}

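// prepare() for the external display: same flow as the primary, but only
// when the display is connected, active and not paused.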
static int hwc_prepare_external(hwc_composer_device_1 *dev,
                                hwc_display_contents_1_t *list, int dpy) {
    hwc_context_t* ctx = (hwc_context_t*)(dev);

    if (LIKELY(list && list->numHwLayers > 1) &&
            ctx->dpyAttr[dpy].isActive &&
            ctx->dpyAttr[dpy].connected) {
        reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        if(!ctx->dpyAttr[dpy].isPause) {
            if(fbLayer->handle) {
                ctx->mExtDispConfiguring = false;
                setListStats(ctx, list, dpy);
                int fbZOrder = ctx->mMDPComp[dpy]->prepare(ctx, list);
                if(fbZOrder >= 0)
                    ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZOrder);

                /* Temporarily commenting out C2D until we support partial
                   copybit composition for mixed mode MDP

                if((fbZOrder >= 0) && ctx->mCopyBit[dpy])
                    ctx->mCopyBit[dpy]->prepare(ctx, list, dpy);
                */
            }
        } else {
            // The external display is in the Pause state.
            // ToDo: mark all application layers as OVERLAY so that the GPU
            // does not compose them. This is a power optimization.
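            // A minimal sketch of that optimization (hypothetical and left
            // disabled, since the ToDo above is not implemented yet): every
            // layer except the FB target at index 'last' would be claimed by
            // HWC so SurfaceFlinger skips GPU composition for them.
            //
            //     for(uint32_t j = 0; j < last; j++)
            //         list->hwLayers[j].compositionType = HWC_OVERLAY;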
        }
    }
    return 0;
}

static int hwc_prepare_virtual(hwc_composer_device_1 *dev,
                               hwc_display_contents_1_t *list, int dpy) {
    //XXX: Fix when framework support is added
    return 0;
}

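// Top-level prepare(): resets per-frame state, opens an overlay and rotator
// configuration transaction, runs prepare for each display (walking the
// displays in reverse order), and finally closes the transaction.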
static int hwc_prepare(hwc_composer_device_1 *dev, size_t numDisplays,
                       hwc_display_contents_1_t** displays)
{
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    Locker::Autolock _l(ctx->mBlankLock);
    reset(ctx, numDisplays, displays);

    ctx->mOverlay->configBegin();
    ctx->mRotMgr->configBegin();
    ctx->mNeedsRotator = false;

    for (int32_t i = numDisplays - 1; i >= 0; i--) {
        hwc_display_contents_1_t *list = displays[i];
        switch(i) {
            case HWC_DISPLAY_PRIMARY:
                ret = hwc_prepare_primary(dev, list);
                break;
            case HWC_DISPLAY_EXTERNAL:
                ret = hwc_prepare_external(dev, list, i);
                break;
            case HWC_DISPLAY_VIRTUAL:
                ret = hwc_prepare_virtual(dev, list, i);
                break;
            default:
                ret = -EINVAL;
        }
    }

    ctx->mOverlay->configDone();
    ctx->mRotMgr->configDone();

    return ret;
}

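// Enables or disables hardware events for a display. Only HWC_EVENT_VSYNC
// is supported; the request is forwarded to hwc_vsync_control().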
static int hwc_eventControl(struct hwc_composer_device_1* dev, int dpy,
                            int event, int enable)
{
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    Locker::Autolock _l(ctx->mBlankLock);
    if(!ctx->dpyAttr[dpy].isActive) {
        ALOGE("Display is blanked - Cannot %s vsync",
              enable ? "enable" : "disable");
        return -EINVAL;
    }

    switch(event) {
        case HWC_EVENT_VSYNC:
            if (ctx->vstate.enable == enable)
                break;
            ret = hwc_vsync_control(ctx, dpy, enable);
            if(ret == 0)
                ctx->vstate.enable = !!enable;
            ALOGD_IF(VSYNC_DEBUG, "VSYNC state changed to %s",
                     (enable) ? "ENABLED" : "DISABLED");
            break;
        default:
            ret = -EINVAL;
    }
    return ret;
}

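// Blanks or unblanks a display. On blank, all overlay pipes are released up
// front; the primary panel is then toggled via the FBIOBLANK ioctl, while
// the external/virtual path just commits once so the pipe unsets reach the
// hardware.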
static int hwc_blank(struct hwc_composer_device_1* dev, int dpy, int blank)
{
    ATRACE_CALL();
    hwc_context_t* ctx = (hwc_context_t*)(dev);

    Locker::Autolock _l(ctx->mBlankLock);
    int ret = 0;
    ALOGD_IF(BLANK_DEBUG, "%s: %s display: %d", __FUNCTION__,
             blank == 1 ? "Blanking" : "Unblanking", dpy);
    if(blank) {
        // Free up all the overlay pipes in use when we get a blank for
        // either display; this makes sure that all pipes are freed.
        ctx->mOverlay->configBegin();
        ctx->mOverlay->configDone();
        ctx->mRotMgr->clear();
    }
    switch(dpy) {
        case HWC_DISPLAY_PRIMARY:
            if(blank) {
                ret = ioctl(ctx->dpyAttr[dpy].fd, FBIOBLANK,
                            FB_BLANK_POWERDOWN);
            } else {
                ret = ioctl(ctx->dpyAttr[dpy].fd, FBIOBLANK, FB_BLANK_UNBLANK);
            }
            break;
        case HWC_DISPLAY_EXTERNAL:
        case HWC_DISPLAY_VIRTUAL:
            if(blank) {
                // Call external framebuffer commit on blank,
                // so that any pipe unsets get committed.
                if (display_commit(ctx, dpy) < 0) {
                    ret = -1;
                    ALOGE("%s: post failed for external display!",
                          __FUNCTION__);
                }
            } else {
            }
            break;
        default:
            return -EINVAL;
    }
    // Enable HPD here, as during bootup unblank is called
    // when SF is completely initialized
    ctx->mExtDisplay->setHPD(1);
    if(ret == 0) {
        ctx->dpyAttr[dpy].isActive = !blank;
    } else {
        ALOGE("%s: Failed in %s display: %d error:%s", __FUNCTION__,
              blank == 1 ? "blanking" : "unblanking", dpy, strerror(errno));
        return ret;
    }

    ALOGD_IF(BLANK_DEBUG, "%s: Done %s display: %d", __FUNCTION__,
             blank == 1 ? "blanking" : "unblanking", dpy);
    return 0;
}

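// Answers SurfaceFlinger capability queries: no background layer support,
// and external display support only when the MDP has overlay hardware.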
static int hwc_query(struct hwc_composer_device_1* dev,
                     int param, int* value)
{
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    int supported = HWC_DISPLAY_PRIMARY_BIT;

    switch (param) {
        case HWC_BACKGROUND_LAYER_SUPPORTED:
            // Not supported for now
            value[0] = 0;
            break;
        case HWC_DISPLAY_TYPES_SUPPORTED:
            if(ctx->mMDP.hasOverlay)
                supported |= HWC_DISPLAY_EXTERNAL_BIT;
            value[0] = supported;
            break;
        default:
            return -EINVAL;
    }
    return 0;
}

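// set() for the primary display: optionally draw through CopyBit, wait on
// the acquire fences, let MDP composition and the FB update object queue
// their buffers, and finally commit the frame to the panel.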
static int hwc_set_primary(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    ATRACE_CALL();
    int ret = 0;
    const int dpy = HWC_DISPLAY_PRIMARY;

    if (LIKELY(list) && ctx->dpyAttr[dpy].isActive) {
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        int fd = -1; //Fence fd from CopyBit (valid in async mode)
        bool copybitDone = false;
        if(ctx->mCopyBit[dpy])
            copybitDone = ctx->mCopyBit[dpy]->draw(ctx, list, dpy, &fd);
        if(list->numHwLayers > 1)
            hwc_sync(ctx, list, dpy, fd);

        if (!ctx->mMDPComp[dpy]->draw(ctx, list)) {
            ALOGE("%s: MDPComp draw failed", __FUNCTION__);
            ret = -1;
        }

        //TODO: We don't check the SKIP flag on this layer because we always
        //need a PAN. The last layer is always the FB.
        private_handle_t *hnd = (private_handle_t *)fbLayer->handle;
        if(copybitDone) {
            hnd = ctx->mCopyBit[dpy]->getCurrentRenderBuffer();
        }

        if(hnd) {
            if (!ctx->mFBUpdate[dpy]->draw(ctx, hnd)) {
                ALOGE("%s: FBUpdate draw failed", __FUNCTION__);
                ret = -1;
            }
        }

        if (display_commit(ctx, dpy) < 0) {
            ALOGE("%s: display commit fail!", __FUNCTION__);
            return -1;
        }
    }

    closeAcquireFds(list);
    return ret;
}

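// set() for the external display: same flow as the primary, guarded
// additionally by the connected and pause states and by the external set
// lock.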
static int hwc_set_external(hwc_context_t *ctx,
                            hwc_display_contents_1_t* list, int dpy)
{
    ATRACE_CALL();
    int ret = 0;
    Locker::Autolock _l(ctx->mExtSetLock);

    if (LIKELY(list) && ctx->dpyAttr[dpy].isActive &&
            !ctx->dpyAttr[dpy].isPause &&
            ctx->dpyAttr[dpy].connected) {
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        int fd = -1; //Fence fd from CopyBit (valid in async mode)
        bool copybitDone = false;
        if(ctx->mCopyBit[dpy])
            copybitDone = ctx->mCopyBit[dpy]->draw(ctx, list, dpy, &fd);

        if(list->numHwLayers > 1)
            hwc_sync(ctx, list, dpy, fd);

        if (!ctx->mMDPComp[dpy]->draw(ctx, list)) {
            ALOGE("%s: MDPComp draw failed", __FUNCTION__);
            ret = -1;
        }

        private_handle_t *hnd = (private_handle_t *)fbLayer->handle;
        if(copybitDone) {
            hnd = ctx->mCopyBit[dpy]->getCurrentRenderBuffer();
        }

        if(hnd) {
            if (!ctx->mFBUpdate[dpy]->draw(ctx, hnd)) {
                ALOGE("%s: FBUpdate::draw fail!", __FUNCTION__);
                ret = -1;
            }
        }

        if (display_commit(ctx, dpy) < 0) {
            ALOGE("%s: display commit fail!", __FUNCTION__);
            ret = -1;
        }
    }

    closeAcquireFds(list);
    return ret;
}

static int hwc_set_virtual(hwc_context_t *ctx,
                           hwc_display_contents_1_t* list, int dpy)
{
    //XXX: Implement set.
    closeAcquireFds(list);
    if (list) {
        // SF assumes HWC waits for the acquire fence and returns a new fence
        // that signals when we're done. Since we don't wait, and also don't
        // touch the buffer, we can just hand the acquire fence back to SF
        // as the retire fence.
        list->retireFenceFd = list->outbufAcquireFenceFd;
    }
    return 0;
}

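// Top-level set(): commits every display prepared in the preceding
// prepare() call, updates the FPS counter and resets the MDP idle fallback
// state.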
static int hwc_set(hwc_composer_device_1 *dev,
                   size_t numDisplays,
                   hwc_display_contents_1_t** displays)
{
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    Locker::Autolock _l(ctx->mBlankLock);
    for (uint32_t i = 0; i < numDisplays; i++) {
        hwc_display_contents_1_t* list = displays[i];
        switch(i) {
            case HWC_DISPLAY_PRIMARY:
                ret = hwc_set_primary(ctx, list);
                break;
            case HWC_DISPLAY_EXTERNAL:
                ret = hwc_set_external(ctx, list, i);
                break;
            case HWC_DISPLAY_VIRTUAL:
                ret = hwc_set_virtual(ctx, list, i);
                break;
            default:
                ret = -EINVAL;
        }
    }
    // This is only indicative of how many times SurfaceFlinger posts
    // frames to the display.
    CALC_FPS();
    MDPComp::resetIdleFallBack();
    return ret;
}

int hwc_getDisplayConfigs(struct hwc_composer_device_1* dev, int disp,
                          uint32_t* configs, size_t* numConfigs) {
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    //In HWC 1.1 there is no way to choose a config, so report config id #0.
    //This config is passed to getDisplayAttributes. Ignore for now.
    switch(disp) {
        case HWC_DISPLAY_PRIMARY:
            if(*numConfigs > 0) {
                configs[0] = 0;
                *numConfigs = 1;
            }
            ret = 0; //NO_ERROR
            break;
        case HWC_DISPLAY_EXTERNAL:
            ret = -1; //Not connected
            if(ctx->dpyAttr[HWC_DISPLAY_EXTERNAL].connected) {
                ret = 0; //NO_ERROR
                if(*numConfigs > 0) {
                    configs[0] = 0;
                    *numConfigs = 1;
                }
            }
            break;
    }
    return ret;
}

int hwc_getDisplayAttributes(struct hwc_composer_device_1* dev, int disp,
        uint32_t config, const uint32_t* attributes, int32_t* values) {

    hwc_context_t* ctx = (hwc_context_t*)(dev);
    //If hotpluggable displays are inactive return error
    if(disp == HWC_DISPLAY_EXTERNAL && !ctx->dpyAttr[disp].connected) {
        return -1;
    }

    //From HWComposer
    static const uint32_t DISPLAY_ATTRIBUTES[] = {
        HWC_DISPLAY_VSYNC_PERIOD,
        HWC_DISPLAY_WIDTH,
        HWC_DISPLAY_HEIGHT,
        HWC_DISPLAY_DPI_X,
        HWC_DISPLAY_DPI_Y,
        HWC_DISPLAY_NO_ATTRIBUTE,
    };

    const int NUM_DISPLAY_ATTRIBUTES = (sizeof(DISPLAY_ATTRIBUTES) /
                                        sizeof(DISPLAY_ATTRIBUTES[0]));

    for (size_t i = 0; i < NUM_DISPLAY_ATTRIBUTES - 1; i++) {
        switch (attributes[i]) {
            case HWC_DISPLAY_VSYNC_PERIOD:
                values[i] = ctx->dpyAttr[disp].vsync_period;
                break;
            case HWC_DISPLAY_WIDTH:
                values[i] = ctx->dpyAttr[disp].xres;
                ALOGD("%s disp = %d, width = %d", __FUNCTION__, disp,
                      ctx->dpyAttr[disp].xres);
                break;
            case HWC_DISPLAY_HEIGHT:
                values[i] = ctx->dpyAttr[disp].yres;
                ALOGD("%s disp = %d, height = %d", __FUNCTION__, disp,
                      ctx->dpyAttr[disp].yres);
                break;
            case HWC_DISPLAY_DPI_X:
                values[i] = (int32_t) (ctx->dpyAttr[disp].xdpi * 1000.0);
                break;
            case HWC_DISPLAY_DPI_Y:
                values[i] = (int32_t) (ctx->dpyAttr[disp].ydpi * 1000.0);
                break;
            default:
                ALOGE("Unknown display attribute %d", attributes[i]);
                return -EINVAL;
        }
    }
    return 0;
}

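// dumpsys hook: collects HWC, MDP composition, overlay and rotator state
// into the buffer supplied by SurfaceFlinger.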
void hwc_dump(struct hwc_composer_device_1* dev, char *buff, int buff_len)
{
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    android::String8 aBuf("");
    dumpsys_log(aBuf, "Qualcomm HWC state:\n");
    dumpsys_log(aBuf, " MDPVersion=%d\n", ctx->mMDP.version);
    dumpsys_log(aBuf, " DisplayPanel=%c\n", ctx->mMDP.panel);
    for(int dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
        if(ctx->mMDPComp[dpy])
            ctx->mMDPComp[dpy]->dump(aBuf);
    }
    char ovDump[2048] = {'\0'};
    ctx->mOverlay->getDump(ovDump, 2048);
    dumpsys_log(aBuf, ovDump);
    ovDump[0] = '\0';
    ctx->mRotMgr->getDump(ovDump, 2048);
    dumpsys_log(aBuf, ovDump);
    strlcpy(buff, aBuf.string(), buff_len);
}

static int hwc_device_close(struct hw_device_t *dev)
{
    if(!dev) {
        ALOGE("%s: NULL device pointer", __FUNCTION__);
        return -1;
    }
    closeContext((hwc_context_t*)dev);
    free(dev);

    return 0;
}

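// HAL open hook: allocates the HWC context, initializes it, and fills in the
// hwc_composer_device_1 function table before handing it back to the
// framework.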
static int hwc_device_open(const struct hw_module_t* module, const char* name,
                           struct hw_device_t** device)
{
    int status = -EINVAL;

    if (!strcmp(name, HWC_HARDWARE_COMPOSER)) {
        struct hwc_context_t *dev;
        dev = (hwc_context_t*)malloc(sizeof(*dev));
        memset(dev, 0, sizeof(*dev));

        //Initialize hwc context
        initContext(dev);

        //Setup HWC methods
        dev->device.common.tag = HARDWARE_DEVICE_TAG;
        dev->device.common.version = HWC_DEVICE_API_VERSION_1_2;
        dev->device.common.module = const_cast<hw_module_t*>(module);
        dev->device.common.close = hwc_device_close;
        dev->device.prepare = hwc_prepare;
        dev->device.set = hwc_set;
        dev->device.eventControl = hwc_eventControl;
        dev->device.blank = hwc_blank;
        dev->device.query = hwc_query;
        dev->device.registerProcs = hwc_registerProcs;
        dev->device.dump = hwc_dump;
        dev->device.getDisplayConfigs = hwc_getDisplayConfigs;
        dev->device.getDisplayAttributes = hwc_getDisplayAttributes;
        *device = &dev->device.common;
        status = 0;
    }
    return status;
}