/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "LayerBuilder.h"

#include "BakedOpState.h"
#include "RenderNode.h"
#include "utils/PaintUtils.h"
#include "utils/TraceUtils.h"

#include <utils/TypeHelpers.h>

namespace android {
namespace uirenderer {

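// Common base for deferred op batches: tracks the batch id, the union of the clipped
// bounds of all ops added so far, and whether the batch attempts to merge its ops.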
class BatchBase {
public:
    BatchBase(batchid_t batchId, BakedOpState* op, bool merging)
            : mBatchId(batchId), mMerging(merging) {
        mBounds = op->computedState.clippedBounds;
        mOps.push_back(op);
    }

    bool intersects(const Rect& rect) const {
        if (!rect.intersects(mBounds)) return false;

        for (const BakedOpState* op : mOps) {
            if (rect.intersects(op->computedState.clippedBounds)) {
                return true;
            }
        }
        return false;
    }

    batchid_t getBatchId() const { return mBatchId; }
    bool isMerging() const { return mMerging; }

    const std::vector<BakedOpState*>& getOps() const { return mOps; }

    void dump() const {
        ALOGD("    Batch %p, id %d, merging %d, count %d, bounds " RECT_STRING, this, mBatchId,
              mMerging, (int)mOps.size(), RECT_ARGS(mBounds));
    }

protected:
    batchid_t mBatchId;
    Rect mBounds;
    std::vector<BakedOpState*> mOps;
    bool mMerging;
};

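// A batch that never merges: ops are grouped under one batch id so similar ops are issued
// together, but each op is still drawn individually.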
class OpBatch : public BatchBase {
public:
    OpBatch(batchid_t batchId, BakedOpState* op) : BatchBase(batchId, op, false) {}

    void batchOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);
    }
};

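// A batch whose ops may be drawn with a single merged call (e.g. multiDraw for text), as long
// as each new op passes canMergeWith() against the ops already in the batch.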
class MergingOpBatch : public BatchBase {
public:
    MergingOpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, true), mClipSideFlags(op->computedState.clipSideFlags) {}

    /*
     * Helper for determining if a new op can merge with a MergingDrawBatch based on their bounds
     * and clip side flags. Positive bounds delta means new bounds fit in old.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
                                 float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }

    static bool paintIsDefault(const SkPaint& paint) {
        return paint.getAlpha() == 255 && paint.getColorFilter() == nullptr &&
               paint.getShader() == nullptr;
    }

    static bool paintsAreEquivalent(const SkPaint& a, const SkPaint& b) {
        // Note: don't check color, since all currently mergeable ops can merge across colors
        return a.getAlpha() == b.getAlpha() && a.getColorFilter() == b.getColorFilter() &&
               a.getShader() == b.getShader();
    }

    /*
     * Checks if a (mergeable) op can be merged into this batch
     *
     * If true, the op's multiDraw must be guaranteed to handle both ops simultaneously, so it is
     * important to consider all paint attributes used in the draw calls in deciding both a) if an
     * op tries to merge at all, and b) if the op can merge with another set of ops
     *
     * False positives can lead to information from the paints of subsequent merged operations being
     * dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
    bool canMergeWith(BakedOpState* op) const {
        bool isTextBatch =
                getBatchId() == OpBatchType::Text || getBatchId() == OpBatchType::ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || PaintUtils::hasTextShadow(op->op->paint)) {
            if (intersects(op->computedState.clippedBounds)) return false;
        }

        const BakedOpState* lhs = op;
        const BakedOpState* rhs = mOps[0];

        if (!MathUtils::areEqual(lhs->alpha, rhs->alpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->roundRectClipState != rhs->roundRectClipState) return false;

        // Local masks prevent merge, since they're potentially in different coordinate spaces
        if (lhs->computedState.localProjectionPathMask ||
            rhs->computedState.localProjectionPathMask)
            return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if an op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = op->computedState.clipSideFlags;
        if (currentFlags != OpClipSideFlags::None || newFlags != OpClipSideFlags::None) {
            const Rect& opBounds = op->computedState.clippedBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Left, boundsDelta))
                return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Right, boundsDelta))
                return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Bottom, boundsDelta))
                return false;
        }

        const SkPaint* newPaint = op->op->paint;
        const SkPaint* oldPaint = mOps[0]->op->paint;

        if (newPaint == oldPaint) {
            // if paints are equal, then modifiers + paint attribs don't need to be compared
            return true;
        } else if (newPaint && !oldPaint) {
            return paintIsDefault(*newPaint);
        } else if (!newPaint && oldPaint) {
            return paintIsDefault(*oldPaint);
        }
        return paintsAreEquivalent(*newPaint, *oldPaint);
    }

    void mergeOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);

        // Because a new op must have passed canMergeWith(), we know it's passed the clipping compat
        // check, and doesn't extend past a side of the clip that's in use by the merged batch.
        // Therefore it's safe to simply always merge flags, and use the bounds as the clip rect.
        mClipSideFlags |= op->computedState.clipSideFlags;
    }

    int getClipSideFlags() const { return mClipSideFlags; }
    const Rect& getClipRect() const { return mBounds; }

private:
    int mClipSideFlags;
};

LayerBuilder::LayerBuilder(uint32_t width, uint32_t height, const Rect& repaintRect,
                           const BeginLayerOp* beginLayerOp, RenderNode* renderNode)
        : width(width)
        , height(height)
        , repaintRect(repaintRect)
        , repaintClip(repaintRect)
        , offscreenBuffer(renderNode ? renderNode->getLayer() : nullptr)
        , beginLayerOp(beginLayerOp)
        , renderNode(renderNode) {}

// iterate back toward target to see if anything drawn since should overlap the new op
// if no target, merging ops still iterate to find similar batch to insert after
void LayerBuilder::locateInsertIndex(int batchId, const Rect& clippedBounds,
                                     BatchBase** targetBatch, size_t* insertBatchIndex) const {
    for (int i = mBatches.size() - 1; i >= 0; i--) {
        BatchBase* overBatch = mBatches[i];

        if (overBatch == *targetBatch) break;

        // TODO: also consider shader shared between batch types
        if (batchId == overBatch->getBatchId()) {
            *insertBatchIndex = i + 1;
            if (!*targetBatch) break;  // found insert position, quit
        }

        if (overBatch->intersects(clippedBounds)) {
            // NOTE: it may be possible to optimize for special cases where two operations
            // of the same batch/paint could swap order, such as with a non-mergeable
            // (clipped) and a mergeable text operation
            *targetBatch = nullptr;
            break;
        }
    }
}

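// Record a rect to be cleared in this layer; clears are stashed so that multiple unclipped
// save layers can be cleared together in a single draw by flushLayerClears().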
void LayerBuilder::deferLayerClear(const Rect& rect) {
    mClearRects.push_back(rect);
}

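// Called before each op is deferred into this layer: flushes any pending layer clears, and
// discards all previously deferred ops when the new op opaquely covers the repaint area
// (and no unclipped save layers are active).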
void LayerBuilder::onDeferOp(LinearAllocator& allocator, const BakedOpState* bakedState) {
    if (bakedState->op->opId != RecordedOpId::CopyToLayerOp) {
        // First non-CopyToLayer, so stop stashing up layer clears for unclipped save layers,
        // and issue them together in one draw.
        flushLayerClears(allocator);

        if (CC_UNLIKELY(activeUnclippedSaveLayers.empty() &&
                        bakedState->computedState.opaqueOverClippedBounds &&
                        bakedState->computedState.clippedBounds.contains(repaintRect) &&
                        !Properties::debugOverdraw)) {
            // discard all deferred drawing ops, since new one will occlude them
            clear();
        }
    }
}

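// Convert all deferred clear rects into a single SimpleRectsOp drawn with a clear blend mode,
// and defer it like any other unmergeable op.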
void LayerBuilder::flushLayerClears(LinearAllocator& allocator) {
    if (CC_UNLIKELY(!mClearRects.empty())) {
        const int vertCount = mClearRects.size() * 4;
        // put the verts in the frame allocator, since
        //     1) SimpleRectsOps needs verts, not rects
        //     2) even if mClearRects stored verts, std::vectors will move their contents
        Vertex* const verts = (Vertex*)allocator.create_trivial_array<Vertex>(vertCount);

        Vertex* currentVert = verts;
        Rect bounds = mClearRects[0];
        for (auto&& rect : mClearRects) {
            bounds.unionWith(rect);
            Vertex::set(currentVert++, rect.left, rect.top);
            Vertex::set(currentVert++, rect.right, rect.top);
            Vertex::set(currentVert++, rect.left, rect.bottom);
            Vertex::set(currentVert++, rect.right, rect.bottom);
        }
        mClearRects.clear();  // discard rects before drawing so this method isn't reentrant

        // One or more unclipped saveLayers have been enqueued, with deferred clears.
        // Flush all of these clears with a single draw
        SkPaint* paint = allocator.create<SkPaint>();
        paint->setBlendMode(SkBlendMode::kClear);
        SimpleRectsOp* op = allocator.create_trivial<SimpleRectsOp>(
                bounds, Matrix4::identity(), nullptr, paint, verts, vertCount);
        BakedOpState* bakedState =
                BakedOpState::directConstruct(allocator, &repaintClip, bounds, *op);
        deferUnmergeableOp(allocator, bakedState, OpBatchType::Vertices);
    }
}

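// Defer an op that can't be merged: append it to the existing batch with the same batch id
// if nothing drawn since that batch overlaps it, otherwise start a new OpBatch.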
void LayerBuilder::deferUnmergeableOp(LinearAllocator& allocator, BakedOpState* op,
                                      batchid_t batchId) {
    onDeferOp(allocator, op);
    OpBatch* targetBatch = mBatchLookup[batchId];

    size_t insertBatchIndex = mBatches.size();
    if (targetBatch) {
        locateInsertIndex(batchId, op->computedState.clippedBounds, (BatchBase**)(&targetBatch),
                          &insertBatchIndex);
    }

    if (targetBatch) {
        targetBatch->batchOp(op);
    } else {
        // new non-merging batch
        targetBatch = allocator.create<OpBatch>(batchId, op);
        mBatchLookup[batchId] = targetBatch;
        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}

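// Defer an op that may merge: reuse the MergingOpBatch registered under (batchId, mergeId)
// when canMergeWith() and the overlap check allow it, otherwise start a new merging batch.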
void LayerBuilder::deferMergeableOp(LinearAllocator& allocator, BakedOpState* op, batchid_t batchId,
                                    mergeid_t mergeId) {
    onDeferOp(allocator, op);
    MergingOpBatch* targetBatch = nullptr;

    // Try to merge with any existing batch with same mergeId
    auto getResult = mMergingBatchLookup[batchId].find(mergeId);
    if (getResult != mMergingBatchLookup[batchId].end()) {
        targetBatch = getResult->second;
        if (!targetBatch->canMergeWith(op)) {
            targetBatch = nullptr;
        }
    }

    size_t insertBatchIndex = mBatches.size();
    locateInsertIndex(batchId, op->computedState.clippedBounds, (BatchBase**)(&targetBatch),
                      &insertBatchIndex);

    if (targetBatch) {
        targetBatch->mergeOp(op);
    } else {
        // new merging batch
        targetBatch = allocator.create<MergingOpBatch>(batchId, op);
        mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));

        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}

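// Replay the deferred batches in order: merging batches with more than one op are handed to
// the merged receiver for that op type, everything else is issued op by op.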
void LayerBuilder::replayBakedOpsImpl(void* arg, BakedOpReceiver* unmergedReceivers,
                                      MergedOpReceiver* mergedReceivers) const {
    if (renderNode) {
        ATRACE_FORMAT_BEGIN("Issue HW Layer DisplayList %s %ux%u", renderNode->getName(), width,
                            height);
    } else {
        ATRACE_BEGIN("flush drawing commands");
    }

    for (const BatchBase* batch : mBatches) {
        size_t size = batch->getOps().size();
        if (size > 1 && batch->isMerging()) {
            int opId = batch->getOps()[0]->op->opId;
            const MergingOpBatch* mergingBatch = static_cast<const MergingOpBatch*>(batch);
            MergedBakedOpList data = {batch->getOps().data(), size,
                                      mergingBatch->getClipSideFlags(),
                                      mergingBatch->getClipRect()};
            mergedReceivers[opId](arg, data);
        } else {
            for (const BakedOpState* op : batch->getOps()) {
                unmergedReceivers[op->op->opId](arg, *op);
            }
        }
    }
    ATRACE_END();
}

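// Drop all deferred batches and reset the per-batch-type lookup tables.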
void LayerBuilder::clear() {
    mBatches.clear();
    for (int i = 0; i < OpBatchType::Count; i++) {
        mBatchLookup[i] = nullptr;
        mMergingBatchLookup[i].clear();
    }
}

void LayerBuilder::dump() const {
    ALOGD("LayerBuilder %p, %ux%u buffer %p, blo %p, rn %p (%s)", this, width, height,
          offscreenBuffer, beginLayerOp, renderNode, renderNode ? renderNode->getName() : "-");
    for (const BatchBase* batch : mBatches) {
        batch->dump();
    }
}

}  // namespace uirenderer
}  // namespace android