/*
 * Copyright (C) 2010 The Android Open Source Project
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define ATRACE_TAG (ATRACE_TAG_GRAPHICS | ATRACE_TAG_HAL)
#include <fcntl.h>
#include <errno.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <EGL/egl.h>
#include <utils/Trace.h>
#include <sys/ioctl.h>
#include <overlay.h>
#include <overlayRotator.h>
#include <mdp_version.h>
#include "hwc_utils.h"
#include "hwc_fbupdate.h"
#include "hwc_mdpcomp.h"
#include "external.h"
#include "hwc_copybit.h"
#include "profiler.h"

using namespace qhwc;
#define VSYNC_DEBUG 0
#define BLANK_DEBUG 0

static int hwc_device_open(const struct hw_module_t* module,
                           const char* name,
                           struct hw_device_t** device);

static struct hw_module_methods_t hwc_module_methods = {
    .open = hwc_device_open
};

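/*
 * Module descriptor exported under the well-known HAL_MODULE_INFO_SYM symbol.
 * The Android HAL loader (hw_get_module) resolves this symbol by
 * HWC_HARDWARE_MODULE_ID and opens the device through hwc_device_open().
 */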
hwc_module_t HAL_MODULE_INFO_SYM = {
    .common = {
        .tag = HARDWARE_MODULE_TAG,
        .version_major = 2,
        .version_minor = 0,
        .id = HWC_HARDWARE_MODULE_ID,
        .name = "Qualcomm Hardware Composer Module",
        .author = "CodeAurora Forum",
        .methods = &hwc_module_methods,
        .dso = 0,
        .reserved = {0},
    }
};

/*
 * Save callback functions registered to HWC
 */
static void hwc_registerProcs(struct hwc_composer_device_1* dev,
                              hwc_procs_t const* procs)
{
    ALOGI("%s", __FUNCTION__);
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    if(!ctx) {
        ALOGE("%s: Invalid context", __FUNCTION__);
        return;
    }
    ctx->proc = procs;

    // Now that we have the functions needed, kick off
    // the uevent & vsync threads
    init_uevent_thread(ctx);
    init_vsync_thread(ctx);
}

//Helper
static void reset(hwc_context_t *ctx, int numDisplays,
                  hwc_display_contents_1_t** displays) {
    for(int i = 0; i < MAX_DISPLAYS; i++) {
        hwc_display_contents_1_t *list = displays[i];
        // XXX: SurfaceFlinger no longer guarantees that this
        // value is reset on every prepare. However, for the layer
        // cache we need to reset it.
        // We can probably rethink that later on
        if (LIKELY(list && list->numHwLayers > 1)) {
            for(uint32_t j = 0; j < list->numHwLayers; j++) {
                if(list->hwLayers[j].compositionType != HWC_FRAMEBUFFER_TARGET)
                    list->hwLayers[j].compositionType = HWC_FRAMEBUFFER;
            }
        }

        if(ctx->mFBUpdate[i])
            ctx->mFBUpdate[i]->reset();
        if(ctx->mCopyBit[i])
            ctx->mCopyBit[i]->reset();
        if(ctx->mLayerRotMap[i])
            ctx->mLayerRotMap[i]->reset();
    }
}

//Clear prev layer prop flags and realloc for current frame
static void reset_layer_prop(hwc_context_t* ctx, int dpy, int numAppLayers) {
    if(ctx->layerProp[dpy]) {
        delete[] ctx->layerProp[dpy];
        ctx->layerProp[dpy] = NULL;
    }
    ctx->layerProp[dpy] = new LayerProp[numAppLayers];
}

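// Flush the overlay pipes staged for |dpy| to the panel by issuing an
// MSMFB_DISPLAY_COMMIT ioctl on that display's framebuffer fd.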
static int display_commit(hwc_context_t *ctx, int dpy) {
    int fbFd = ctx->dpyAttr[dpy].fd;
    if(fbFd == -1) {
        ALOGE("%s: Invalid FB fd for display: %d", __FUNCTION__, dpy);
        return -1;
    }

    struct mdp_display_commit commit_info;
    memset(&commit_info, 0, sizeof(struct mdp_display_commit));
    commit_info.flags = MDP_DISPLAY_COMMIT_OVERLAY;
    if(ioctl(fbFd, MSMFB_DISPLAY_COMMIT, &commit_info) == -1) {
        ALOGE("%s: MSMFB_DISPLAY_COMMIT failed for display: %d", __FUNCTION__,
              dpy);
        return -errno;
    }
    return 0;
}

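// Prepare pass for the primary display: gather per-frame layer statistics,
// let MDP composition claim layers, and position the framebuffer target at
// the z-order returned by MDPComp.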
static int hwc_prepare_primary(hwc_composer_device_1 *dev,
                               hwc_display_contents_1_t *list) {
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    const int dpy = HWC_DISPLAY_PRIMARY;
    if(UNLIKELY(!ctx->mBasePipeSetup))
        setupBasePipe(ctx);
    if (LIKELY(list && list->numHwLayers > 1) &&
            ctx->dpyAttr[dpy].isActive) {
        reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        if(fbLayer->handle) {
            setListStats(ctx, list, dpy);
            int fbZOrder = ctx->mMDPComp[dpy]->prepare(ctx, list);
            if(fbZOrder >= 0)
                ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZOrder);

            /* Temporarily commenting out C2D until we support partial
               copybit composition for mixed mode MDP

            // Use Copybit, when MDP comp fails
            if((fbZOrder >= 0) && ctx->mCopyBit[dpy])
                ctx->mCopyBit[dpy]->prepare(ctx, list, dpy);
            */
        }
    }
    return 0;
}

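// Prepare pass for the external display. Runs only when the display is
// active and connected; while the display is paused the layer list is left
// untouched (see the ToDo below about marking layers as OVERLAY).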
static int hwc_prepare_external(hwc_composer_device_1 *dev,
                                hwc_display_contents_1_t *list, int dpy) {
    hwc_context_t* ctx = (hwc_context_t*)(dev);

    if (LIKELY(list && list->numHwLayers > 1) &&
            ctx->dpyAttr[dpy].isActive &&
            ctx->dpyAttr[dpy].connected) {
        reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        if(!ctx->dpyAttr[dpy].isPause) {
            if(fbLayer->handle) {
                ctx->mExtDispConfiguring = false;
                setListStats(ctx, list, dpy);
                int fbZOrder = ctx->mMDPComp[dpy]->prepare(ctx, list);
                if(fbZOrder >= 0)
                    ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZOrder);

                /* Temporarily commenting out C2D until we support partial
                   copybit composition for mixed mode MDP

                if((fbZOrder >= 0) && ctx->mCopyBit[dpy])
                    ctx->mCopyBit[dpy]->prepare(ctx, list, dpy);
                */
            }
        } else {
            // External Display is in Pause state.
            // ToDo:
            // Mark all application layers as OVERLAY so that
            // GPU will not compose. This is done for power
            // optimization
        }
    }
    return 0;
}

static int hwc_prepare_virtual(hwc_composer_device_1 *dev,
                               hwc_display_contents_1_t *list, int dpy) {
    //XXX: Fix when framework support is added
    return 0;
}

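// HWC 1.x prepare entry point. Resets per-display state, then walks the
// displays in reverse order (virtual/external before primary) within a single
// overlay/rotator configuration pass (configBegin/configDone).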
static int hwc_prepare(hwc_composer_device_1 *dev, size_t numDisplays,
                       hwc_display_contents_1_t** displays)
{
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    Locker::Autolock _l(ctx->mBlankLock);
    reset(ctx, numDisplays, displays);

    ctx->mOverlay->configBegin();
    ctx->mRotMgr->configBegin();
    ctx->mNeedsRotator = false;

    for (int32_t i = numDisplays - 1; i >= 0; i--) {
        hwc_display_contents_1_t *list = displays[i];
        switch(i) {
            case HWC_DISPLAY_PRIMARY:
                ret = hwc_prepare_primary(dev, list);
                break;
            case HWC_DISPLAY_EXTERNAL:
                ret = hwc_prepare_external(dev, list, i);
                break;
            case HWC_DISPLAY_VIRTUAL:
                ret = hwc_prepare_virtual(dev, list, i);
                break;
            default:
                ret = -EINVAL;
        }
    }

    ctx->mOverlay->configDone();
    ctx->mRotMgr->configDone();

    return ret;
}

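// Enable or disable hardware event generation (currently only VSYNC) for a
// display. Fails if the display is blanked.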
static int hwc_eventControl(struct hwc_composer_device_1* dev, int dpy,
                            int event, int enable)
{
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    if(!ctx->dpyAttr[dpy].isActive) {
        ALOGE("Display is blanked - Cannot %s vsync",
              enable ? "enable" : "disable");
        return -EINVAL;
    }

    switch(event) {
        case HWC_EVENT_VSYNC:
            if (ctx->vstate.enable == enable)
                break;
            ret = hwc_vsync_control(ctx, dpy, enable);
            if(ret == 0)
                ctx->vstate.enable = !!enable;
            ALOGD_IF(VSYNC_DEBUG, "VSYNC state changed to %s",
                     (enable)?"ENABLED":"DISABLED");
            break;
        default:
            ret = -EINVAL;
    }
    return ret;
}

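// Blank or unblank a display. On blank, all overlay pipes and rotator
// sessions are released; the primary display is powered down via FBIOBLANK,
// while external/virtual displays get a commit so that any pipe unsets take
// effect.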
static int hwc_blank(struct hwc_composer_device_1* dev, int dpy, int blank)
{
    ATRACE_CALL();
    hwc_context_t* ctx = (hwc_context_t*)(dev);

    Locker::Autolock _l(ctx->mBlankLock);
    int ret = 0;
    ALOGD_IF(BLANK_DEBUG, "%s: %s display: %d", __FUNCTION__,
             blank==1 ? "Blanking":"Unblanking", dpy);
    if(blank) {
        // free up all the overlay pipes in use
        // when we get a blank for either display
        // makes sure that all pipes are freed
        ctx->mOverlay->configBegin();
        ctx->mOverlay->configDone();
        ctx->mRotMgr->clear();
    }
    switch(dpy) {
        case HWC_DISPLAY_PRIMARY:
            if(blank) {
                ret = ioctl(ctx->dpyAttr[dpy].fd, FBIOBLANK,
                            FB_BLANK_POWERDOWN);
            } else {
                ret = ioctl(ctx->dpyAttr[dpy].fd, FBIOBLANK, FB_BLANK_UNBLANK);
            }
            break;
        case HWC_DISPLAY_EXTERNAL:
        case HWC_DISPLAY_VIRTUAL:
            if(blank) {
                // call external framebuffer commit on blank,
                // so that any pipe unsets gets committed
                if (display_commit(ctx, dpy) < 0) {
                    ret = -1;
                    ALOGE("%s: post failed for external display!!",
                          __FUNCTION__);
                }
            } else {
            }
            break;
        default:
            return -EINVAL;
    }
    // Enable HPD here, as during bootup unblank is called
    // when SF is completely initialized
    ctx->mExtDisplay->setHPD(1);
    if(ret == 0) {
        ctx->dpyAttr[dpy].isActive = !blank;
    } else {
        ALOGE("%s: Failed in %s display: %d error:%s", __FUNCTION__,
              blank==1 ? "blanking":"unblanking", dpy, strerror(errno));
        return ret;
    }

    ALOGD_IF(BLANK_DEBUG, "%s: Done %s display: %d", __FUNCTION__,
             blank==1 ? "blanking":"unblanking", dpy);
    return 0;
}

static int hwc_query(struct hwc_composer_device_1* dev,
                     int param, int* value)
{
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    int supported = HWC_DISPLAY_PRIMARY_BIT;

    switch (param) {
        case HWC_BACKGROUND_LAYER_SUPPORTED:
            // Not supported for now
            value[0] = 0;
            break;
        case HWC_DISPLAY_TYPES_SUPPORTED:
            if(ctx->mMDP.hasOverlay)
                supported |= HWC_DISPLAY_EXTERNAL_BIT;
            value[0] = supported;
            break;
        default:
            return -EINVAL;
    }
    return 0;
}

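// Set (commit) pass for the primary display: optionally compose through
// CopyBit, sync acquire fences via hwc_sync, draw the MDP-composed layers and
// the framebuffer target, then commit the frame to the panel.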
static int hwc_set_primary(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    ATRACE_CALL();
    int ret = 0;
    const int dpy = HWC_DISPLAY_PRIMARY;

    if (LIKELY(list) && ctx->dpyAttr[dpy].isActive) {
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        int fd = -1; //FenceFD from the Copybit (valid in async mode)
        bool copybitDone = false;
        if(ctx->mCopyBit[dpy])
            copybitDone = ctx->mCopyBit[dpy]->draw(ctx, list, dpy, &fd);
        if(list->numHwLayers > 1)
            hwc_sync(ctx, list, dpy, fd);

        if (!ctx->mMDPComp[dpy]->draw(ctx, list)) {
            ALOGE("%s: MDPComp draw failed", __FUNCTION__);
            ret = -1;
        }

        //TODO: We don't check for the SKIP flag on this layer because we
        //always need the PAN. The last layer is always the FB.
        private_handle_t *hnd = (private_handle_t *)fbLayer->handle;
        if(copybitDone) {
            hnd = ctx->mCopyBit[dpy]->getCurrentRenderBuffer();
        }

        if(hnd) {
            if (!ctx->mFBUpdate[dpy]->draw(ctx, hnd)) {
                ALOGE("%s: FBUpdate draw failed", __FUNCTION__);
                ret = -1;
            }
        }

        if (display_commit(ctx, dpy) < 0) {
            ALOGE("%s: display commit fail!", __FUNCTION__);
            return -1;
        }
    }

    closeAcquireFds(list);
    return ret;
}

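// Set (commit) pass for the external display. Mirrors hwc_set_primary, but
// runs only while the display is active, connected and not paused, and is
// serialized with external-display configuration via mExtSetLock.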
static int hwc_set_external(hwc_context_t *ctx,
                            hwc_display_contents_1_t* list, int dpy)
{
    ATRACE_CALL();
    int ret = 0;
    Locker::Autolock _l(ctx->mExtSetLock);

    if (LIKELY(list) && ctx->dpyAttr[dpy].isActive &&
            !ctx->dpyAttr[dpy].isPause &&
            ctx->dpyAttr[dpy].connected) {
        uint32_t last = list->numHwLayers - 1;
        hwc_layer_1_t *fbLayer = &list->hwLayers[last];
        int fd = -1; //FenceFD from the Copybit (valid in async mode)
        bool copybitDone = false;
        if(ctx->mCopyBit[dpy])
            copybitDone = ctx->mCopyBit[dpy]->draw(ctx, list, dpy, &fd);

        if(list->numHwLayers > 1)
            hwc_sync(ctx, list, dpy, fd);

        if (!ctx->mMDPComp[dpy]->draw(ctx, list)) {
            ALOGE("%s: MDPComp draw failed", __FUNCTION__);
            ret = -1;
        }

        private_handle_t *hnd = (private_handle_t *)fbLayer->handle;
        if(copybitDone) {
            hnd = ctx->mCopyBit[dpy]->getCurrentRenderBuffer();
        }

        if(hnd) {
            if (!ctx->mFBUpdate[dpy]->draw(ctx, hnd)) {
                ALOGE("%s: FBUpdate::draw fail!", __FUNCTION__);
                ret = -1;
            }
        }

        if (display_commit(ctx, dpy) < 0) {
            ALOGE("%s: display commit fail!", __FUNCTION__);
            ret = -1;
        }
    }

    closeAcquireFds(list);
    return ret;
}

static int hwc_set_virtual(hwc_context_t *ctx,
                           hwc_display_contents_1_t* list, int dpy)
{
    //XXX: Implement set.
    closeAcquireFds(list);
    if (list) {
        // SF assumes HWC waits for the acquire fence and returns a new fence
        // that signals when we're done. Since we don't wait, and also don't
        // touch the buffer, we can just hand the acquire fence back to SF
        // as the retire fence.
        list->retireFenceFd = list->outbufAcquireFenceFd;
    }
    return 0;
}

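// HWC 1.x set entry point. Commits every display prepared in the last
// prepare() call and updates the frame statistics (CALC_FPS).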
static int hwc_set(hwc_composer_device_1 *dev,
                   size_t numDisplays,
                   hwc_display_contents_1_t** displays)
{
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    Locker::Autolock _l(ctx->mBlankLock);
    for (uint32_t i = 0; i < numDisplays; i++) {
        hwc_display_contents_1_t* list = displays[i];
        switch(i) {
            case HWC_DISPLAY_PRIMARY:
                ret = hwc_set_primary(ctx, list);
                break;
            case HWC_DISPLAY_EXTERNAL:
                ret = hwc_set_external(ctx, list, i);
                break;
            case HWC_DISPLAY_VIRTUAL:
                ret = hwc_set_virtual(ctx, list, i);
                break;
            default:
                ret = -EINVAL;
        }
    }
    // This is only indicative of how many times SurfaceFlinger posts
    // frames to the display.
    CALC_FPS();
    MDPComp::resetIdleFallBack();
    ctx->mVideoTransFlag = false;
    return ret;
}

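// Report the available configs for a display. HWC 1.1 has no real config
// selection, so a single config with id 0 is reported; the external display
// reports an error until it is connected.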
int hwc_getDisplayConfigs(struct hwc_composer_device_1* dev, int disp,
                          uint32_t* configs, size_t* numConfigs) {
    int ret = 0;
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    //in 1.1 there is no way to choose a config, report as config id #0
    //This config is passed to getDisplayAttributes. Ignore for now.
    switch(disp) {
        case HWC_DISPLAY_PRIMARY:
            if(*numConfigs > 0) {
                configs[0] = 0;
                *numConfigs = 1;
            }
            ret = 0; //NO_ERROR
            break;
        case HWC_DISPLAY_EXTERNAL:
            ret = -1; //Not connected
            if(ctx->dpyAttr[HWC_DISPLAY_EXTERNAL].connected) {
                ret = 0; //NO_ERROR
                if(*numConfigs > 0) {
                    configs[0] = 0;
                    *numConfigs = 1;
                }
            }
            break;
    }
    return ret;
}

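// Fill |values| for the attributes requested by SurfaceFlinger (vsync period,
// resolution and dpi). DPI values are reported in pixels per thousand inches,
// as the HWC interface expects.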
int hwc_getDisplayAttributes(struct hwc_composer_device_1* dev, int disp,
        uint32_t config, const uint32_t* attributes, int32_t* values) {

    hwc_context_t* ctx = (hwc_context_t*)(dev);
    //If hotpluggable displays are not connected, return error
    if(disp == HWC_DISPLAY_EXTERNAL && !ctx->dpyAttr[disp].connected) {
        return -1;
    }

    //From HWComposer
    static const uint32_t DISPLAY_ATTRIBUTES[] = {
        HWC_DISPLAY_VSYNC_PERIOD,
        HWC_DISPLAY_WIDTH,
        HWC_DISPLAY_HEIGHT,
        HWC_DISPLAY_DPI_X,
        HWC_DISPLAY_DPI_Y,
        HWC_DISPLAY_NO_ATTRIBUTE,
    };

    const int NUM_DISPLAY_ATTRIBUTES = (sizeof(DISPLAY_ATTRIBUTES) /
            sizeof(DISPLAY_ATTRIBUTES[0]));

    for (size_t i = 0; i < NUM_DISPLAY_ATTRIBUTES - 1; i++) {
        switch (attributes[i]) {
        case HWC_DISPLAY_VSYNC_PERIOD:
            values[i] = ctx->dpyAttr[disp].vsync_period;
            break;
        case HWC_DISPLAY_WIDTH:
            values[i] = ctx->dpyAttr[disp].xres;
            ALOGD("%s disp = %d, width = %d", __FUNCTION__, disp,
                    ctx->dpyAttr[disp].xres);
            break;
        case HWC_DISPLAY_HEIGHT:
            values[i] = ctx->dpyAttr[disp].yres;
            ALOGD("%s disp = %d, height = %d", __FUNCTION__, disp,
                    ctx->dpyAttr[disp].yres);
            break;
        case HWC_DISPLAY_DPI_X:
            values[i] = (int32_t) (ctx->dpyAttr[disp].xdpi*1000.0);
            break;
        case HWC_DISPLAY_DPI_Y:
            values[i] = (int32_t) (ctx->dpyAttr[disp].ydpi*1000.0);
            break;
        default:
            ALOGE("Unknown display attribute %d",
                    attributes[i]);
            return -EINVAL;
        }
    }
    return 0;
}

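// dumpsys hook: collects MDP composition, overlay and rotator state into the
// caller-provided buffer.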
void hwc_dump(struct hwc_composer_device_1* dev, char *buff, int buff_len)
{
    hwc_context_t* ctx = (hwc_context_t*)(dev);
    android::String8 aBuf("");
    dumpsys_log(aBuf, "Qualcomm HWC state:\n");
    dumpsys_log(aBuf, " MDPVersion=%d\n", ctx->mMDP.version);
    dumpsys_log(aBuf, " DisplayPanel=%c\n", ctx->mMDP.panel);
    for(int dpy = 0; dpy < MAX_DISPLAYS; dpy++) {
        if(ctx->mMDPComp[dpy])
            ctx->mMDPComp[dpy]->dump(aBuf);
    }
    char ovDump[2048] = {'\0'};
    ctx->mOverlay->getDump(ovDump, 2048);
    dumpsys_log(aBuf, ovDump);
    ovDump[0] = '\0';
    ctx->mRotMgr->getDump(ovDump, 2048);
    dumpsys_log(aBuf, ovDump);
    strlcpy(buff, aBuf.string(), buff_len);
}

static int hwc_device_close(struct hw_device_t *dev)
{
    if(!dev) {
        ALOGE("%s: NULL device pointer", __FUNCTION__);
        return -1;
    }
    closeContext((hwc_context_t*)dev);
    free(dev);

    return 0;
}

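// hw_module_methods_t::open hook: allocates and initializes the hwc context
// and publishes the HWC 1.2 device entry points to the caller.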
static int hwc_device_open(const struct hw_module_t* module, const char* name,
                           struct hw_device_t** device)
{
    int status = -EINVAL;

    if (!strcmp(name, HWC_HARDWARE_COMPOSER)) {
        struct hwc_context_t *dev;
        dev = (hwc_context_t*)malloc(sizeof(*dev));
        memset(dev, 0, sizeof(*dev));

        //Initialize hwc context
        initContext(dev);

        //Setup HWC methods
        dev->device.common.tag = HARDWARE_DEVICE_TAG;
        dev->device.common.version = HWC_DEVICE_API_VERSION_1_2;
        dev->device.common.module = const_cast<hw_module_t*>(module);
        dev->device.common.close = hwc_device_close;
        dev->device.prepare = hwc_prepare;
        dev->device.set = hwc_set;
        dev->device.eventControl = hwc_eventControl;
        dev->device.blank = hwc_blank;
        dev->device.query = hwc_query;
        dev->device.registerProcs = hwc_registerProcs;
        dev->device.dump = hwc_dump;
        dev->device.getDisplayConfigs = hwc_getDisplayConfigs;
        dev->device.getDisplayAttributes = hwc_getDisplayAttributes;
        *device = &dev->device.common;
        status = 0;
    }
    return status;
}