/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrRenderTargetOpList.h"
#include "GrAuditTrail.h"
#include "GrCaps.h"
#include "GrGpu.h"
#include "GrGpuCommandBuffer.h"
#include "GrRect.h"
#include "GrRenderTargetContext.h"
#include "instanced/InstancedRendering.h"
#include "ops/GrClearOp.h"
#include "ops/GrCopySurfaceOp.h"

using gr_instanced::InstancedRendering;

////////////////////////////////////////////////////////////////////////////////

// Experimentally we have found that most combining occurs within the first 10 comparisons.
static const int kMaxOpLookback = 10;
static const int kMaxOpLookahead = 10;
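
// When the backend supports instanced draws, the op list owns an InstancedRendering instance
// that is flushed (see prepareOps) and reset (see reset) along with the recorded ops.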
GrRenderTargetOpList::GrRenderTargetOpList(GrRenderTargetProxy* proxy, GrGpu* gpu,
                                           GrAuditTrail* auditTrail)
        : INHERITED(gpu->getContext()->resourceProvider(), proxy, auditTrail)
        , fLastClipStackGenID(SK_InvalidUniqueID)
        SkDEBUGCODE(, fNumClips(0)) {
    if (GrCaps::InstancedSupport::kNone != gpu->caps()->instancedSupport()) {
        fInstancedRendering.reset(gpu->createInstancedRendering());
    }
}

GrRenderTargetOpList::~GrRenderTargetOpList() {
}

////////////////////////////////////////////////////////////////////////////////
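
// Debug-only dump of the recorded ops. Entries whose fOp is null were merged into a later op
// by forwardCombine() and print as "<combined forward>".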
#ifdef SK_DEBUG
void GrRenderTargetOpList::dump() const {
    INHERITED::dump();

    SkDebugf("ops (%d):\n", fRecordedOps.count());
    for (int i = 0; i < fRecordedOps.count(); ++i) {
        SkDebugf("*******************************\n");
        if (!fRecordedOps[i].fOp) {
            SkDebugf("%d: <combined forward>\n", i);
        } else {
            SkDebugf("%d: %s\n", i, fRecordedOps[i].fOp->name());
            SkString str = fRecordedOps[i].fOp->dumpInfo();
            SkDebugf("%s\n", str.c_str());
            const SkRect& bounds = fRecordedOps[i].fOp->bounds();
            SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
                     bounds.fTop, bounds.fRight, bounds.fBottom);
        }
    }
}
#endif
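
// First half of flush: let each recorded op create and upload the resources it will need
// before any command buffer is opened, then kick off the instanced rendering flush.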
void GrRenderTargetOpList::prepareOps(GrOpFlushState* flushState) {
    SkASSERT(fTarget.get()->priv().peekRenderTarget());
    SkASSERT(this->isClosed());

    // Loop over the ops that haven't yet been prepared.
    for (int i = 0; i < fRecordedOps.count(); ++i) {
        if (fRecordedOps[i].fOp) {
            GrOpFlushState::DrawOpArgs opArgs = {
                fTarget.get()->priv().peekRenderTarget(),
                fRecordedOps[i].fAppliedClip,
                fRecordedOps[i].fDstProxy
            };

            flushState->setDrawOpArgs(&opArgs);
            fRecordedOps[i].fOp->prepare(flushState);
            flushState->setDrawOpArgs(nullptr);
        }
    }

    if (fInstancedRendering) {
        fInstancedRendering->beginFlush(flushState->resourceProvider());
    }
}
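
// The command buffer is created with load/store ops that preserve the existing contents of the
// render target; clears are issued as explicit GrClearOps rather than via the load op (for now).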
static std::unique_ptr<GrGpuCommandBuffer> create_command_buffer(GrGpu* gpu) {
    static const GrGpuCommandBuffer::LoadAndStoreInfo kBasicLoadStoreInfo {
        GrGpuCommandBuffer::LoadOp::kLoad,
        GrGpuCommandBuffer::StoreOp::kStore,
        GrColor_ILLEGAL
    };

    std::unique_ptr<GrGpuCommandBuffer> buffer(
            gpu->createCommandBuffer(kBasicLoadStoreInfo,   // Color
                                     kBasicLoadStoreInfo)); // Stencil
    return buffer;
}
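
// Ends and submits a command buffer, tolerating a null pointer so callers don't have to check.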
static inline void finish_command_buffer(GrGpuCommandBuffer* buffer) {
    if (!buffer) {
        return;
    }

    buffer->end();
    buffer->submit();
}
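
// Second half of flush: replay the recorded ops into one or more command buffers. Ops that
// need command buffer isolation get the current buffer submitted before they execute.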
// TODO: this is where GrOp::renderTarget is used (which is fine since it
// is at flush time). However, we need to store the RenderTargetProxy in the
// Ops and instantiate them here.
bool GrRenderTargetOpList::executeOps(GrOpFlushState* flushState) {
    if (0 == fRecordedOps.count()) {
        return false;
    }

    SkASSERT(fTarget.get()->priv().peekRenderTarget());

    std::unique_ptr<GrGpuCommandBuffer> commandBuffer = create_command_buffer(flushState->gpu());
    flushState->setCommandBuffer(commandBuffer.get());

    // Draw all the generated geometry.
    for (int i = 0; i < fRecordedOps.count(); ++i) {
        if (!fRecordedOps[i].fOp) {
            continue;
        }

        if (fRecordedOps[i].fOp->needsCommandBufferIsolation()) {
            // This op is a special snowflake and must occur between command buffers
            // TODO: make this go through the command buffer
            finish_command_buffer(commandBuffer.get());

            commandBuffer.reset();
            flushState->setCommandBuffer(commandBuffer.get());
        } else if (!commandBuffer) {
            commandBuffer = create_command_buffer(flushState->gpu());
            flushState->setCommandBuffer(commandBuffer.get());
        }

        GrOpFlushState::DrawOpArgs opArgs {
            fTarget.get()->priv().peekRenderTarget(),
            fRecordedOps[i].fAppliedClip,
            fRecordedOps[i].fDstProxy
        };

        flushState->setDrawOpArgs(&opArgs);
        fRecordedOps[i].fOp->execute(flushState);
        flushState->setDrawOpArgs(nullptr);
    }

    finish_command_buffer(commandBuffer.get());
    flushState->setCommandBuffer(nullptr);

    return true;
}
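
// Drops the recorded ops and per-flush bookkeeping so the op list can be reused or discarded
// after a flush.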
void GrRenderTargetOpList::reset() {
    fLastFullClearOp = nullptr;
    fLastClipStackGenID = SK_InvalidUniqueID;
    fRecordedOps.reset();
    if (fInstancedRendering) {
        fInstancedRendering->endFlush();
        fInstancedRendering = nullptr;
    }

    INHERITED::reset();
}
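
// The instanced rendering subsystem owns GPU resources of its own, so it must be told when the
// context abandons or frees its GPU resources.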
void GrRenderTargetOpList::abandonGpuResources() {
    if (fInstancedRendering) {
        fInstancedRendering->resetGpuResources(InstancedRendering::ResetType::kAbandon);
    }
}

void GrRenderTargetOpList::freeGpuResources() {
    if (fInstancedRendering) {
        fInstancedRendering->resetGpuResources(InstancedRendering::ResetType::kDestroy);
    }
}

void GrRenderTargetOpList::fullClear(const GrCaps& caps, GrColor color) {
    // Currently this just inserts or updates the last clear op. However, once in MDB this can
    // remove all the previously recorded ops and change the load op to clear with the supplied
    // color.
    if (fLastFullClearOp) {
        // As currently implemented, fLastFullClearOp should be the last op because we would
        // have cleared it when another op was recorded.
        SkASSERT(fRecordedOps.back().fOp.get() == fLastFullClearOp);
        GrOP_INFO("opList: %d Fusing clears (opID: %d Color: 0x%08x -> 0x%08x)\n",
                  this->uniqueID(),
                  fLastFullClearOp->uniqueID(),
                  fLastFullClearOp->color(), color);
        fLastFullClearOp->setColor(color);
        return;
    }
    std::unique_ptr<GrClearOp> op(GrClearOp::Make(GrFixedClip::Disabled(), color, fTarget.get()));
    if (!op) {
        return;
    }

    if (GrOp* clearOp = this->recordOp(std::move(op), caps)) {
        // This is either the clear op we just created or another one that it combined with.
        fLastFullClearOp = static_cast<GrClearOp*>(clearOp);
    }
}

////////////////////////////////////////////////////////////////////////////////

// This closely parallels GrTextureOpList::copySurface but renderTargetOpLists
// also store the applied clip and dest proxy with the op.
bool GrRenderTargetOpList::copySurface(const GrCaps& caps,
                                       GrSurfaceProxy* dst,
                                       GrSurfaceProxy* src,
                                       const SkIRect& srcRect,
                                       const SkIPoint& dstPoint) {
    SkASSERT(dst->asRenderTargetProxy() == fTarget.get());
    std::unique_ptr<GrOp> op = GrCopySurfaceOp::Make(dst, src, srcRect, dstPoint);
    if (!op) {
        return false;
    }
#ifdef ENABLE_MDB
    this->addDependency(src);
#endif

    this->recordOp(std::move(op), caps);
    return true;
}
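
// Two ops can be reordered past one another only if their device-space bounds do not overlap;
// otherwise reordering could violate painter's order.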
static inline bool can_reorder(const SkRect& a, const SkRect& b) { return !GrRectsOverlap(a, b); }
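
// Ops are only allowed to fuse when their applied clips and dst-proxy setups match; the final
// decision is delegated to the op subclass's combineIfPossible().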
bool GrRenderTargetOpList::combineIfPossible(const RecordedOp& a, GrOp* b,
                                             const GrAppliedClip* bClip,
                                             const DstProxy* bDstProxy,
                                             const GrCaps& caps) {
    if (a.fAppliedClip) {
        if (!bClip) {
            return false;
        }
        if (*a.fAppliedClip != *bClip) {
            return false;
        }
    } else if (bClip) {
        return false;
    }
    if (bDstProxy) {
        if (a.fDstProxy != *bDstProxy) {
            return false;
        }
    } else if (a.fDstProxy.proxy()) {
        return false;
    }
    return a.fOp->combineIfPossible(b, caps);
}
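
// Adds 'op' to the list, first trying to fuse it with a previously recorded op (searching
// backwards up to kMaxOpLookback entries). Returns the op that ultimately holds the work,
// which may be an existing op that absorbed the new one.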
GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
                                     const GrCaps& caps,
                                     GrAppliedClip* clip,
                                     const DstProxy* dstProxy) {
    SkASSERT(fTarget.get());

    // A closed GrOpList should never receive new/more ops
    SkASSERT(!this->isClosed());

    // Check if there is an op we can combine with by linearly searching back until we either
    // 1) check every op
    // 2) intersect with something
    // 3) find a 'blocker'
    GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget.get()->uniqueID());
    GrOP_INFO("opList: %d Recording (%s, opID: %u)\n"
              "\tBounds [L: %.2f, T: %.2f R: %.2f B: %.2f]\n",
              this->uniqueID(),
              op->name(),
              op->uniqueID(),
              op->bounds().fLeft, op->bounds().fTop,
              op->bounds().fRight, op->bounds().fBottom);
    GrOP_INFO(SkTabString(op->dumpInfo(), 1).c_str());
    GrOP_INFO("\tOutcome:\n");
    int maxCandidates = SkTMin(kMaxOpLookback, fRecordedOps.count());
    // If we don't have a valid destination render target then we cannot reorder.
    if (maxCandidates) {
        int i = 0;
        while (true) {
            const RecordedOp& candidate = fRecordedOps.fromBack(i);

            if (this->combineIfPossible(candidate, op.get(), clip, dstProxy, caps)) {
                GrOP_INFO("\t\tBackward: Combining with (%s, opID: %u)\n", candidate.fOp->name(),
                          candidate.fOp->uniqueID());
                GrOP_INFO("\t\t\tBackward: Combined op info:\n");
                GrOP_INFO(SkTabString(candidate.fOp->dumpInfo(), 4).c_str());
                GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(fAuditTrail, candidate.fOp.get(), op.get());
                return candidate.fOp.get();
            }
            // Stop going backwards if we would cause a painter's order violation.
            if (!can_reorder(fRecordedOps.fromBack(i).fOp->bounds(), op->bounds())) {
                GrOP_INFO("\t\tBackward: Intersects with (%s, opID: %u)\n", candidate.fOp->name(),
                          candidate.fOp->uniqueID());
                break;
            }
            ++i;
            if (i == maxCandidates) {
                GrOP_INFO("\t\tBackward: Reached max lookback or beginning of op array %d\n", i);
                break;
            }
        }
    } else {
        GrOP_INFO("\t\tBackward: FirstOp\n");
    }
    GR_AUDIT_TRAIL_OP_RESULT_NEW(fAuditTrail, op);
    if (clip) {
        clip = fClipAllocator.make<GrAppliedClip>(std::move(*clip));
        SkDEBUGCODE(fNumClips++;)
    }
    fRecordedOps.emplace_back(std::move(op), clip, dstProxy);
    fRecordedOps.back().fOp->wasRecorded(this);
    fLastFullClearOp = nullptr;
    return fRecordedOps.back().fOp.get();
}
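
// Forward pass over the recorded ops: for each op, look ahead up to kMaxOpLookahead entries for
// an op it can merge with. When ops i and j merge, the combined op is moved into slot j and
// slot i is left null (see dump()).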
void GrRenderTargetOpList::forwardCombine(const GrCaps& caps) {
    SkASSERT(!this->isClosed());

    GrOP_INFO("opList: %d ForwardCombine %d ops:\n", this->uniqueID(), fRecordedOps.count());

    for (int i = 0; i < fRecordedOps.count() - 1; ++i) {
        GrOp* op = fRecordedOps[i].fOp.get();

        int maxCandidateIdx = SkTMin(i + kMaxOpLookahead, fRecordedOps.count() - 1);
        int j = i + 1;
        while (true) {
            const RecordedOp& candidate = fRecordedOps[j];

            if (this->combineIfPossible(fRecordedOps[i], candidate.fOp.get(),
                                        candidate.fAppliedClip, &candidate.fDstProxy, caps)) {
                GrOP_INFO("\t\t%d: (%s opID: %u) -> Combining with (%s, opID: %u)\n",
                          i, op->name(), op->uniqueID(),
                          candidate.fOp->name(), candidate.fOp->uniqueID());
                GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(fAuditTrail, op, candidate.fOp.get());
                fRecordedOps[j].fOp = std::move(fRecordedOps[i].fOp);
                break;
            }
            // Stop traversing if we would cause a painter's order violation.
            if (!can_reorder(fRecordedOps[j].fOp->bounds(), op->bounds())) {
                GrOP_INFO("\t\t%d: (%s opID: %u) -> Intersects with (%s, opID: %u)\n",
                          i, op->name(), op->uniqueID(),
                          candidate.fOp->name(), candidate.fOp->uniqueID());
                break;
            }
            ++j;
            if (j > maxCandidateIdx) {
                GrOP_INFO("\t\t%d: (%s opID: %u) -> Reached max lookahead or end of array\n",
                          i, op->name(), op->uniqueID());
                break;
            }
        }
    }
}