/*
 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "virtual.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sHandleTimeout = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sSimulationFlags = 0;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
bool MDPComp::sEnable4k2kYUVSplit = false;
bool MDPComp::sSrcSplitEnabled = false;
MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}

MDPComp::MDPComp(int dpy) : mDpy(dpy) {}

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s  pipesUsed:%2d  MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf," ---------------------------------------------  \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype  |  Z  \n");
    dumpsys_log(buf," ---------------------------------------------  \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                     mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                    (mCurrentFrame.drop[index] ? "DROP" :
                    (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
                     mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]]
                         .pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
        if(atoi(property) != 0)
            sDebugLogs = true;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        // Create the idle invalidator only when not disabled via property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx,
                                  (unsigned int)idle_timeout);
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
            !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) {
        sEnable4k2kYUVSplit = true;
    }
    return true;
}

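/* Example usage (hypothetical shell session): the tunables read above are
 * ordinary Android system properties, so MDP composition can be tuned
 * without rebuilding, e.g.:
 *     adb shell setprop persist.hwc.mdpcomp.enable 1
 *     adb shell setprop debug.mdpcomp.maxpermixer 4
 *     adb shell setprop debug.mdpcomp.idletime 70
 * init() runs once when the HWC module loads, so a change typically takes
 * effect only after SurfaceFlinger restarts. */
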
void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
}

void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }
    Locker::Autolock _l(ctx->mDrawLock);
    // Handle timeout event only if the previous composition is MDP or MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
                                   hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in the FB, or when it
             * lies outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
            (list->flags & HWC_GEOMETRY_CHANGED) ||
            isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_PIPES_PER_MIXER; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            // We don't own the rotator, so only detach it, don't delete it
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // Populate the layer-to-MDP and MDP-to-layer maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}

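/* Illustrative walk-through (hypothetical frame): with layerCount = 4 and
 * isFBComposed = {true, false, true, false}, map() yields
 * layerToMDP = {-1, 0, -1, 1}, mdpToLayer[0].listIndex = 1 and
 * mdpToLayer[1].listIndex = 3, i.e. only MDP-marked layers get pipe slots. */
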
MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
                                      hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
                (curFrame.drop[i] != drop[i])) {
            return false;
        }
        hwc_layer_1_t const* layer = &list->hwLayers[i];
        if(curFrame.isFBComposed[i] && layerUpdating(layer)){
            return false;
        }
    }
    return true;
}

bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((not isYuvBuffer(hnd) and has90Transform(layer)) or
        (not isValidDimension(ctx,layer))
        //More conditions here: SKIP, sRGB+Blend, etc.
        ) {
        return false;
    }
    return true;
}

bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int crop_w = crop.right - crop.left;
    int crop_h = crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);

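    /* Worked example (hypothetical layer): a 1920x1080 source crop rendered
     * into a 960x540 displayFrame gives w_scale = h_scale = 2.0, i.e. a 2x
     * downscale, which is validated against getMaxMDPDownscale() below. */
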
    /* Workaround for an MDP HW limitation on DSI command mode panels, where
     * FPS will not go beyond 30 if buffers on RGB pipes have a width or
     * height of less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2,
     * so fall back to GPU if the height is less than 2.
     */
    if(qdutils::MDPVersion::getInstance().hasMinCropWidthLimitation() and
            (crop_w < 5 or crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
                /* On targets that don't support decimation (e.g., 8x26),
                 * the maximum downscale supported is the overlay pipe
                 * downscale.
                 */
                if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale ||
                        h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* MDP can read a maximum width of MAX_DISPLAY_DIM.
                     * Bail out if
                     *      1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp
                     *      2. it exceeds the maximum downscale limit
                     */
                    if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) ||
                            w_dscale > maxMDPDownscale ||
                            h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale =
            qdutils::MDPVersion::getInstance().getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}

bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().is8x26() &&
            ctx->mVideoTransFlag &&
            isSecondaryConnected(ctx)) {
        //1 padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                __FUNCTION__);
        ret = false;
    } else if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF(isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__,mDpy);
        ret = false;
    }
    return ret;
}

hwc_rect_t MDPComp::calculateDirtyRect(const hwc_layer_1_t* layer,
                    hwc_rect_t& scissor) {
    hwc_region_t surfDamage = layer->surfaceDamage;
    hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int x_off = dst.left - src.left;
    int y_off = dst.top - src.top;
    hwc_rect dirtyRect = (hwc_rect){0, 0, 0, 0};
    hwc_rect_t updatingRect = dst;

    if (surfDamage.numRects == 0) {
        // Full layer is updating, so the dirty rect is the full frame
        dirtyRect = getIntersection(layer->displayFrame, scissor);
    } else {
        for(uint32_t i = 0; i < surfDamage.numRects; i++) {
            updatingRect = moveRect(surfDamage.rects[i], x_off, y_off);
            hwc_rect_t intersect = getIntersection(updatingRect, scissor);
            if(isValidRect(intersect)) {
                dirtyRect = getUnion(intersect, dirtyRect);
            }
        }
    }

    return dirtyRect;
}

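/* Worked example (hypothetical values): a layer with two damage rects that
 * map to {0, 0, 100, 100} and {500, 500, 600, 600} after offsetting, both
 * inside the scissor, yields a dirty rect of {0, 0, 600, 600}, the union of
 * the two intersections. */
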
void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    fbRect = getIntersection(fbRect, roi);
}

/* 1) Identify layers that are not visible or lying outside the updating ROI
 *    and drop them from composition.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
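/* Example (hypothetical): with lRoi = {0, 0, 540, 960}, a layer whose
 * displayFrame is {600, 0, 1080, 960} never intersects the visible rect and
 * is dropped; an opaque (HWC_BLENDING_NONE) layer covering the whole ROI
 * shrinks visibleRect to empty, dropping every layer beneath it. */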
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs ROI
             * cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* Deduct any opaque region from visibleRect */
            if (layer->blending == HWC_BLENDING_NONE)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting for every updating layer's
 * displayFrame. If DirtyRegion is applicable, calculate it by accounting for
 * each changing layer's dirtyRegion. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if (layerUpdating(layer) ||
                isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dirtyRect = (struct hwc_rect){0, 0, 0, 0};
            if(!needsScaling(layer) && !layer->transform) {
                dirtyRect = calculateDirtyRect(layer, fullFrame);
            }

            roi = getUnion(roi, dirtyRect);
        }
    }

    /* No layer is updating, but SF still wants a refresh. */
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect r_roi = ctx->listStats[mDpy].rRoi;

    hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi);
    hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi);
    fbRect = getUnion(l_fbRect, r_fbRect);
}
/* 1) Identify layers that are not visible or lying outside BOTH of the
 *    updating ROIs and drop them from composition. If a layer spans both
 *    halves of the screen but is needed by only one ROI, the non-contributing
 *    half will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs ROI
             * cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if (layer->blending == HWC_BLENDING_NONE) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting for every updating layer's
 * displayFrame. If DirtyRegion is applicable, calculate it by accounting for
 * each changing layer's dirtyRegion. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if (layerUpdating(layer) || isYuvBuffer(hnd)) {
            hwc_rect_t l_dirtyRect = (struct hwc_rect){0, 0, 0, 0};
            hwc_rect_t r_dirtyRect = (struct hwc_rect){0, 0, 0, 0};

            if(!needsScaling(layer) && !layer->transform) {
                l_dirtyRect = calculateDirtyRect(layer, l_frame);
                r_dirtyRect = calculateDirtyRect(layer, r_frame);
            }
            if(isValidRect(l_dirtyRect))
                l_roi = getUnion(l_roi, l_dirtyRect);

            if(isValidRect(r_dirtyRect))
                r_roi = getUnion(r_roi, r_dirtyRect);
        }
    }

    /* For panels that cannot accept commands on both interfaces, we cannot
     * send two ROIs (one for each half). We merge them into a single ROI and
     * split it across lSplit for MDP mixer use. The ROIs are merged again
     * finally, before updating the panel in the driver. */
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }

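    /* Worked example (hypothetical): lSplit = 540, l_roi = {0, 0, 100, 100},
     * r_roi = {900, 0, 1000, 100}. The union is {0, 0, 1000, 100}, so after
     * the merge l_roi = {0, 0, 540, 100} and r_roi = {540, 0, 1000, 100}. */
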
    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]"
            "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
            ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
            ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
                                hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                __FUNCTION__,
                isSkipPresent(ctx, mDpy));
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) &&
                              (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) {
        // Disable MDP comp on the secondary when the primary is a high-res
        // panel and the secondary is a normal 1080p. With MDP comp on the
        // secondary in such a use case, decimation gets used for downscale,
        // and there would be a quality mismatch whenever composition falls
        // back to the GPU.
        ALOGD_IF(isDebug(), "%s: Disable MDP Composition for Secondary Disp",
              __FUNCTION__);
        return false;
    }

    // Check for the action safe flag and downscale mode, which require
    // scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
            || ctx->dpyAttr[mDpy].mDownScaleMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(isYuvBuffer(hnd) && has90Transform(layer)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        // For 8x26 with panel width > 1k, fail MDP comp if an RGB layer needs
        // HFLIP. This may not be needed if Gfx pre-rotation can handle all
        // flips & rotations.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                                (ctx->dpyAttr[mDpy].xres > 1024) &&
                                (layer->transform & HWC_TRANSFORM_FLIP_H) &&
                                (!isYuvBuffer(hnd)))
                   return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all of the above hard conditions are met, we can do full or partial
    //MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}

bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    //Will benefit presentation / secondary-only layers.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
            (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
           not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }

        //For 8x26: if only one layer needs scaling, on the secondary while
        //none does on the primary display, the DMA pipe is occupied by the
        //primary. If we then need to fall back to GLES composition, the
        //virtual display lacks a DMA pipe and an error is reported.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                                mDpy >= HWC_DISPLAY_EXTERNAL &&
                                qhwc::needsScaling(layer))
            return false;
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
        ret =   loadBasedComp(ctx, list) or
                cacheBasedComp(ctx, list);
    } else {
        ret =   cacheBasedComp(ctx, list) or
                loadBasedComp(ctx, list);
    }

    return ret;
}

bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);

    //If a layer marked for MDP is unsupported, we cannot do partial MDP comp
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                        __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    //Will benefit cases where a video has a non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
            (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    for(int i = 0; i < numAppLayers;i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
            "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
            mCurrentFrame.dropCount);

    //Start at a point where the fb batch has at least 2 layers, for this
    //mode to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }

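    /* Worked example (hypothetical): with 6 non-dropped layers and
     * stagesForMDP = 4, the initial split is mdpBatchSize = 3, fbBatchSize = 3;
     * with only 4 layers, fbBatchSize would start at 1 and the loop above
     * rebalances to mdpBatchSize = 2, fbBatchSize = 2. */
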
    //If there are no layers for MDP, this mode doesn't make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try with successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                __FUNCTION__, mdpBatchSize, fbBatchSize,
                mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                     __FUNCTION__);
            ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED",
                     __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}

bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
    if(mDpy or isSecurePresent(ctx, mDpy) or
            isYuvPresent(ctx, mDpy)) {
        return false;
    }
    return true;
}

bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
            isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
            mDpy ) {
        return false;
    }
    return true;
}

bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    const bool secureOnly = true;
    return videoOnlyComp(ctx, list, not secureOnly) or
            videoOnlyComp(ctx, list, secureOnly);
}

bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list, bool secureOnly) {
    if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
        return false;
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    mCurrentFrame.reset(numAppLayers);
    mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
    updateYUV(ctx, list, secureOnly);
    int mdpCount = mCurrentFrame.mdpCount;

    if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
        reset(ctx);
        return false;
    }

    /* Bail out if we are processing only secured video layers
     * and we don't have any */
    if(!isSecurePresent(ctx, mDpy) && secureOnly){
        reset(ctx);
        return false;
    }

    if(mCurrentFrame.fbCount)
        mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
        return false;
    }

    if(layer->transform & HWC_TRANSFORM_ROT_90 && !canUseRotator(ctx,mDpy)) {
        ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
        return false;
    }

    if(isSecuring(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
        return false;
    }

    if(!isValidDimension(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
            __FUNCTION__);
        return false;
    }

    if(layer->planeAlpha < 0xFF) {
        ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha "
                 "in video only mode",
                 __FUNCTION__);
        return false;
    }

    return true;
}

/* Starts at fromIndex and checks each layer to find whether it has any
 * overlap with any updating layer above it in z-order, up to the end of
 * the batch. Returns false if it finds any intersection. */
bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex) {
    for(int i = fromIndex; i < toIndex; i++) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
                return false;
            }
        }
    }
    return true;
}

/* Checks if the given layer at targetLayerIndex has any intersection with
 * the updating layers between fromIndex and toIndex. Returns true if it
 * finds an intersection. */
bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex, int targetLayerIndex) {
    for(int i = fromIndex; i <= toIndex; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            if(areLayersIntersecting(&list->hwLayers[i],
                        &list->hwLayers[targetLayerIndex]))  {
                return true;
            }
        }
    }
    return false;
}

int MDPComp::getBatch(hwc_display_contents_1_t* list,
        int& maxBatchStart, int& maxBatchEnd,
        int& maxBatchCount) {
    int i = 0;
    int fbZOrder = -1;
    int droppedLayerCt = 0;
    while (i < mCurrentFrame.layerCount) {
        int batchCount = 0;
        int batchStart = i;
        int batchEnd = i;
        /* Adjust batch Z order with the dropped layers so far */
        int fbZ = batchStart - droppedLayerCt;
        int firstZReverseIndex = -1;
        int updatingLayersAbove = 0;//Updating layer count in middle of batch
        while(i < mCurrentFrame.layerCount) {
            if(!mCurrentFrame.isFBComposed[i]) {
                if(!batchCount) {
                    i++;
                    break;
                }
                updatingLayersAbove++;
                i++;
                continue;
            } else {
                if(mCurrentFrame.drop[i]) {
                    i++;
                    droppedLayerCt++;
                    continue;
                } else if(updatingLayersAbove <= 0) {
                    batchCount++;
                    batchEnd = i;
                    i++;
                    continue;
                } else { //Layer is FBComposed, not a drop & updatingLayer > 0

                    // We already have a valid updating layer. If layer-i does
                    // not overlap with any updating layer between batch-start
                    // and i, then we can add layer-i to the batch.
                    if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
                        batchCount++;
                        batchEnd = i;
                        i++;
                        continue;
                    } else if(canPushBatchToTop(list, batchStart, i)) {
                        //If all of the non-updating layers within this batch
                        //have no intersection with the updating layers above
                        //them in z-order, then we can safely move the batch to
                        //a higher z-order. Increment fbZ as it moves up.
                        if( firstZReverseIndex < 0) {
                            firstZReverseIndex = i;
                        }
                        batchCount++;
                        batchEnd = i;
                        fbZ += updatingLayersAbove;
                        i++;
                        updatingLayersAbove = 0;
                        continue;
                    } else {
                        //Both failed. Start the loop again from here.
                        if(firstZReverseIndex >= 0) {
                            i = firstZReverseIndex;
                        }
                        break;
                    }
                }
            }
        }
        if(batchCount > maxBatchCount) {
            maxBatchCount = batchCount;
            maxBatchStart = batchStart;
            maxBatchEnd = batchEnd;
            fbZOrder = fbZ;
        }
    }
    return fbZOrder;
}

bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    /* The idea is to keep as many non-updating (cached) layers in the FB as
     * possible and send the rest through MDP. This is done in 2 steps.
     *   1. Find the maximum contiguous batch of non-updating layers.
     *   2. See if we can improve this batch size for caching by adding
     *      opaque layers around the batch, if they don't have
     *      any overlap with the updating layers in between.
     * NEVER mark an updating layer for caching.
     * But cached ones can be marked for MDP. */

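    /* Example (hypothetical frame): with isFBComposed = {1, 1, 0, 1, 1}, the
     * batch search keeps the largest contiguous cached run in the FB target;
     * cached layers that don't overlap updating layer 2 may join that batch,
     * and any cached layer left outside the batch is pulled out to MDP below,
     * provided isSupportedForMDPComp() allows it. */
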
    int maxBatchStart = -1;
    int maxBatchEnd = -1;
    int maxBatchCount = 0;
    int fbZ = -1;

    /* Nothing is cached. No batching needed */
    if(mCurrentFrame.fbCount == 0) {
        return true;
    }

    /* No MDP comp layers, try to use other comp modes */
    if(mCurrentFrame.mdpCount == 0) {
        return false;
    }

    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);

    /* Reset the rest of the layers lying inside the ROI for MDP comp */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if((i < maxBatchStart || i > maxBatchEnd) &&
                mCurrentFrame.isFBComposed[i]){
            if(!mCurrentFrame.drop[i]){
                //If an unsupported layer is being attempted to
                //be pulled out we should fail
                if(not isSupportedForMDPComp(ctx, layer)) {
                    return false;
                }
                mCurrentFrame.isFBComposed[i] = false;
            }
        }
    }

    // Update the frame data
    mCurrentFrame.fbZ = fbZ;
    mCurrentFrame.fbCount = maxBatchCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
            mCurrentFrame.fbCount);

    return true;
}

void MDPComp::updateLayerCache(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int fbCount = 0;

    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t * layer = &list->hwLayers[i];
        if (!layerUpdating(layer)) {
            if(!mCurrentFrame.drop[i])
                fbCount++;
            mCurrentFrame.isFBComposed[i] = true;
        } else {
            mCurrentFrame.isFBComposed[i] = false;
        }
    }

    mCurrentFrame.fbCount = fbCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
                                                    - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d"
             ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
            mCurrentFrame.dropCount);
}

void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
        bool secureOnly) {
    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0;index < nYuvCount; index++){
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];

        if(!isYUVDoable(ctx, layer)) {
            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = true;
                mCurrentFrame.fbCount++;
            }
        } else {
            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(!secureOnly || isSecureBuffer(hnd)) {
                    mCurrentFrame.isFBComposed[nYuvIndex] = false;
                    mCurrentFrame.fbCount--;
                }
            }
        }
    }

    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount - mCurrentFrame.dropCount;
    ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);
}

hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};

    /* Update only the region of the FB needed for composition */
    for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            fbRect = getUnion(fbRect, dst);
        }
    }
    trimAgainstROI(ctx, fbRect);
    return fbRect;
}

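/* Example (hypothetical): two FB-composed layers with displayFrames
 * {0, 0, 100, 100} and {50, 50, 400, 400} produce a union of {0, 0, 400, 400},
 * which trimAgainstROI() then clips against the display's programmed ROI. */
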
bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    //Capability checks
    if(!resourceCheck()) {
        ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
        return false;
    }

    //Limitations checks
    if(!hwLimitationsCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
        return false;
    }

    //Configure the framebuffer first, if applicable
    if(mCurrentFrame.fbZ >= 0) {
        hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
        if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
        {
            ALOGD_IF(isDebug(), "%s configure framebuffer failed",
                    __FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.map();

    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }

    for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
            index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            //Leave fbZ for the framebuffer. CACHE/GLES layers go there.
            if(mdpNextZOrder == mCurrentFrame.fbZ) {
                mdpNextZOrder++;
            }
            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
                if(configure4k2kYuv(ctx, layer,
                            mCurrentFrame.mdpToLayer[mdpIndex])
                        != 0 ){
                    ALOGD_IF(isDebug(), "%s: Failed to configure split pipes "
                            "for layer %d",__FUNCTION__, index);
                    return false;
                }
                else{
                    mdpNextZOrder++;
                }
                continue;
            }
            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for "
                        "layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }

    if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
        ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
                ,__FUNCTION__, mDpy);
        return false;
    }

    setRedraw(ctx, list);
    return true;
}

bool MDPComp::resourceCheck() {
    const bool fbUsed = (mCurrentFrame.fbCount != 0);
    if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }
    return true;
}

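/*
 * Target-specific restrictions that rule out MDP composition for the
 * whole frame. For example, two MDP layers that both downscale and whose
 * destination rects overlap hit the 8x26/8974 downscale+blend limitation
 * checked below, forcing a GPU fallback.
 */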
bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {

    //A-family hw limitation:
    //MDP cannot support a layer that needs alpha scaling.
    if(ctx->mMDP.version < qdutils::MDSS_V5) {
        for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
            if(!mCurrentFrame.isFBComposed[i] &&
                    isAlphaScaled( &list->hwLayers[i])) {
                ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
                return false;
            }
        }
    }

    //On 8x26 & 8974 hw, we have a limitation of downscaling+blending.
    //If multiple layers require downscaling and they overlap, fall back
    //to GPU since MDSS cannot handle it.
    if(qdutils::MDPVersion::getInstance().is8x74v2() ||
            qdutils::MDPVersion::getInstance().is8x26()) {
        for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
            hwc_layer_1_t* botLayer = &list->hwLayers[i];
            if(!mCurrentFrame.isFBComposed[i] &&
                    isDownscaleRequired(botLayer)) {
                //if layer-i is marked for MDP and needs downscaling,
                //check if any MDP layer on top of i overlaps with layer-i
                for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
                    hwc_layer_1_t* topLayer = &list->hwLayers[j];
                    if(!mCurrentFrame.isFBComposed[j] &&
                            isDownscaleRequired(topLayer)) {
                        hwc_rect_t r = getIntersection(botLayer->displayFrame,
                                topLayer->displayFrame);
                        if(isValidRect(r))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}

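/*
 * Per-frame entry point for the prepare phase. Decides between full MDP
 * composition, mixed mode, video-only mode, or a complete GPU fallback.
 * Returns 0 when an MDP/mixed configuration was successfully staged and
 * -1 when the frame must be composed entirely by the GPU.
 */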
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int ret = 0;
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    char property[PROPERTY_VALUE_MAX];

    if(property_get("debug.hwc.simulate", property, NULL) > 0) {
        int currentFlags = atoi(property);
        if(currentFlags != sSimulationFlags) {
            sSimulationFlags = currentFlags;
            ALOGE("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
                    sSimulationFlags, sSimulationFlags);
        }
    }

    //Do not cache the information for next draw cycle.
    if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
        ALOGI("%s: Unsupported layer count for mdp composition",
                __FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    //reset old data
    mCurrentFrame.reset(numLayers);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;

    // Detect the start of animation and fall back to GPU only once to cache
    // all the layers in FB and display FB content until animation completes.
    if(ctx->listStats[mDpy].isDisplayAnimating) {
        mCurrentFrame.needsRedraw = false;
        if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
            mCurrentFrame.needsRedraw = true;
            ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
        }
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);
        ret = -1;
        return ret;
    } else {
        ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
    }

    //Hard conditions, if not met, cannot do MDP comp
    if(isFrameDoable(ctx)) {
        generateROI(ctx, list);

        if(tryFullFrame(ctx, list) || tryVideoOnly(ctx, list)) {
            setMDPCompLayerFlags(ctx, list);
        } else {
            resetROI(ctx, mDpy);
            reset(ctx);
            memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
            mCurrentFrame.dropCount = 0;
            ret = -1;
        }
    } else {
        ALOGD_IF(isDebug(),"%s: MDP Comp not possible for this frame",
                __FUNCTION__);
        ret = -1;
    }

    if(isDebug()) {
        ALOGD("GEOMETRY change: %d",
                (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump, ctx);
        ALOGD("%s",sDump.string());
    }

    mCachedFrame.updateCounts(mCurrentFrame);
    return ret;
}

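/*
 * Reserves two VG pipes for a 4k2k YUV layer that will be split at the
 * source: the left and right halves of the buffer are fed through
 * separate pipes to stay within per-pipe width limits.
 */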
bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {

    bool bRet = true;
    int mdpIndex = mCurrentFrame.layerToMDP[index];
    PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
    info.pipeInfo = new MdpYUVPipeInfo;
    info.rot = NULL;
    MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;

    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = Overlay::FORMAT_YUV;
    pipeSpecs.needsScaling = true;
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
                __FUNCTION__);
    }
    pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.rIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
                __FUNCTION__);
    }
    return bRet;
}
//=============MDPCompNonSplit==================================================

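/*
 * Example: with one 4k2k YUV layer at z-order 0 and the framebuffer at
 * fbZ == 1, splitting the YUV layer consumes z-orders 0 and 1 (left and
 * right halves), so fbZ is bumped to 2 and mdpCount grows by one.
 */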
void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    //If a 4k2k YUV layer split is possible, and if fbz is above the 4k2k
    //layer, increment the fb zorder by 1, as we split the 4k2k layer and
    //increment the zorder for the right half of the layer
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                    //As we split the 4kx2k yuv layer and program it to 2
                    //VG pipes (if available), increase mdpCount by 1.
                    mCurrentFrame.mdpCount++;
                }
            }
        }
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                             PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoNonSplit& mdp_info =
        *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest dest = mdp_info.index;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
             __FUNCTION__, layer, zOrder, dest);

    return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
                           &PipeLayerPair.rot);
}

bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if(allocSplitVGPipesfor4k2k(ctx, index)){
                continue;
            }
        }

        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoNonSplit;
        info.rot = NULL;
        MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;

        Overlay::PipeSpecs pipeSpecs;
        pipeSpecs.formatClass = isYuvBuffer(hnd) ?
                Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
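        // On 8x26 with a primary panel wider than 1024, every pipe is
        // requested as a scaling pipe; presumably the smaller non-scaling
        // pipes on that target cannot cover the full panel width.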
        pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
                (qdutils::MDPVersion::getInstance().is8x26() and
                ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
        pipeSpecs.dpy = mDpy;
        pipeSpecs.fb = false;

        pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);

        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
            return false;
        }
    }
    return true;
}

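/*
 * Configures the pair of VG pipes reserved for a source-split 4k2k YUV
 * layer; the left and right halves of the buffer go to lIndex and rIndex.
 */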
int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpYUVPipeInfo& mdp_info =
            *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
            lDest, rDest, &PipeLayerPair.rot);
}

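/*
 * Draw phase: queues the buffer of every MDP-composed layer to its
 * configured pipe(s). When pre-rotation was set up during prepare, the
 * rotator's output buffer is queued instead of the original.
 */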
bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid context or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the Handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            if (!(layer->flags & HWC_COLOR_FILL)) {
                ALOGE("%s handle null", __FUNCTION__);
                return false;
            }
            // No PLAY for Color layer
            layerProp[i].mFlags &= ~HWC_MDPCOMP;
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else{
            MdpPipeInfoNonSplit& pipe_info =
            *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            ovutils::eDest dest = pipe_info.index;
            if(dest == ovutils::OV_INVALID) {
                ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
                return false;
            }

            if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
                continue;
            }

            ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                    using  pipe: %d", __FUNCTION__, layer,
                    hnd, dest );

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;

            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            if(rot) {
                if(!rot->queueBuffer(fd, offset))
                    return false;
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            if (!ov.queueBuffer(fd, offset, dest)) {
                ALOGE("%s: queueBuffer failed for display:%d ",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}

//=============MDPCompSplit===================================================

void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
         hwc_display_contents_1_t* list){
    //if a 4kx2k yuv layer lies entirely within either the left half or
    //the right half, try splitting the yuv layer to avoid decimation
    const int lSplit = getLeftSplit(ctx, mDpy);
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    hwc_rect_t dst = layer->displayFrame;
                    if((dst.left > lSplit) || (dst.right < lSplit)) {
                        mCurrentFrame.mdpCount += 1;
                    }
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                }
            }
        }
    }
}

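/*
 * Acquires up to two pipes for a layer on a dual-mixer display: one per
 * mixer, but only for the half (or halves) of the panel whose ROI the
 * layer actually intersects.
 */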
bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {

    const int lSplit = getLeftSplit(ctx, mDpy);
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.mixer = Overlay::MIXER_LEFT;
    pipeSpecs.fb = false;

    // Acquire pipe only for the updating half
    hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;

    if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
        pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
        pipeSpecs.mixer = Overlay::MIXER_RIGHT;
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}

bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        hwc_rect_t dst = layer->displayFrame;
        const int lSplit = getLeftSplit(ctx, mDpy);
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if((dst.left > lSplit)||(dst.right < lSplit)){
                if(allocSplitVGPipesfor4k2k(ctx, index)){
                    continue;
                }
            }
        }
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoSplit;
        info.rot = NULL;
        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;

        if(!acquireMDPPipes(ctx, layer, pipe_info)) {
            ALOGD_IF(isDebug(), "%s: Unable to acquire pipes",
                    __FUNCTION__);
            return false;
        }
    }
    return true;
}

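/*
 * A 4k2k YUV layer confined to one half of a split display is source
 * split across its two reserved VG pipes; a layer spanning both halves
 * falls through to the regular split configure() path.
 */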
int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    const int lSplit = getLeftSplit(ctx, mDpy);
    hwc_rect_t dst = layer->displayFrame;
    if((dst.left > lSplit)||(dst.right < lSplit)){
        MdpYUVPipeInfo& mdp_info =
                *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
        eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
        eIsFg isFg = IS_FG_OFF;
        eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
        eDest lDest = mdp_info.lIndex;
        eDest rDest = mdp_info.rIndex;

        return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
                lDest, rDest, &PipeLayerPair.rot);
    }
    else{
        return configure(ctx, layer, PipeLayerPair);
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
             "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);

    return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest,
                            rDest, &PipeLayerPair.rot);
}

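/*
 * Draw phase for split displays: queues each MDP layer's buffer to its
 * left and/or right mixer pipes. In the non-4k2k path, assertive display
 * (AD) output is substituted for the layer buffer when AD mode is on.
 */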
bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid context or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the Handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else{
            MdpPipeInfoSplit& pipe_info =
                *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;

            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;

            if(ctx->mAD->isModeOn()) {
                if(ctx->mAD->draw(ctx, fd, offset)) {
                    fd = ctx->mAD->getDstFd();
                    offset = ctx->mAD->getDstOffset();
                }
            }

            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            //************* play left mixer **********
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for left mixer",
                            __FUNCTION__);
                    return false;
                }
            }

            //************* play right mixer **********
            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for right mixer",
                            __FUNCTION__);
                    return false;
                }
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }

    return true;
}

//================MDPCompSrcSplit==============================================
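/*
 * On source-split hardware both pipes feeding a wide layer sit on the
 * same mixer stage, so a single layer can exceed one pipe's width limit
 * without requiring a dual-mixer panel.
 */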
bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    //If 2 pipes are staged on a single stage of a mixer, then the left pipe
    //should have a higher priority than the right one. Pipe priorities
    //start with VG0, VG1 ..., then RGB0 ..., then DMA1.

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScaling(layer);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    //1 pipe by default for a layer
    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        return false;
    }

    /* Use 2 pipes IF
        a) Layer's crop width is > 2048 or
        b) Layer's dest width > 2048 or
        c) On primary, driver has indicated with caps to split always. This is
           based on an empirically derived value of panel height. Applied only
           if the layer's crop width exceeds the left mixer's width.
    */

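    // Example: a 4096-wide video layer displayed at 4096 px exceeds the
    // 2048-pixel per-pipe limit (qdutils::MAX_DISPLAY_DIM) in both crop
    // and dest width, so a second pipe is acquired and the two halves
    // are staged side by side on the same mixer.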
    bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
            qdutils::MDPVersion::getInstance().isSrcSplitAlways();
    int lSplit = getLeftSplit(ctx, mDpy);
    int dstWidth = dst.right - dst.left;
    int cropWidth = has90Transform(layer) ? crop.bottom - crop.top :
            crop.right - crop.left;

    if(dstWidth > qdutils::MAX_DISPLAY_DIM or
            cropWidth > qdutils::MAX_DISPLAY_DIM or
            (primarySplitAlways and (cropWidth > lSplit))) {
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID) {
            return false;
        }

        // comparePipePriority() return values:
        // 1  Left pipe is higher priority, do nothing.
        // 0  Pipes of same priority.
        //-1  Right pipe is of higher priority, needs swap.
        if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex,
                pipe_info.rIndex) == -1) {
            qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
        }
    }

    return true;
}

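/*
 * Full pipe configuration for one layer: applies R/B swap and MDP flags,
 * sets up a rotator session for 90-rotated YUV sources, splits crop and
 * destination in half when two pipes were acquired, and commits each
 * half to its pipe.
 */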
int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    Rotator **rot = &PipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    const int downscale = 0;
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
             "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }

    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(layer, mdpFlags, 0, transform);

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if(isYuvBuffer(hnd) && (transform & HWC_TRANSFORM_ROT_90)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(crop, dst, transform, mdpFlags);
        }
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        whf.format = (*rot)->getDstFormat();
        updateSource(orient, whf, crop);
        rotFlags |= ROT_PREROTATED;
    }

    //If 2 pipes are being used, split the layer's crop and dst in half
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
        }

        dstL.right = (dst.right + dst.left) / 2;
        dstR.left = dstL.right;
    }

    //Pass no transform to the MDP: rotation was handled by pre-rotation
    //above, and flips are already encoded in mdpFlags
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                    cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                    cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

}; //namespace