/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/ops/GrOp.h"

const int GrAuditTrail::kGrAuditTrailInvalidID = -1;

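// Records a newly created op in the audit trail: the op gets its own OpNode entry in fOpsTask
// and, when a client ID is active, is also indexed under that client for later lookup.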
void GrAuditTrail::addOp(const GrOp* op, GrRenderTargetProxy::UniqueID proxyID) {
    SkASSERT(fEnabled);
    Op* auditOp = new Op;
    fOpPool.emplace_back(auditOp);
    auditOp->fName = op->name();
    auditOp->fBounds = op->bounds();
    auditOp->fClientID = kGrAuditTrailInvalidID;
    auditOp->fOpsTaskID = kGrAuditTrailInvalidID;
    auditOp->fChildID = kGrAuditTrailInvalidID;

    // consume the current stack trace if any
    auditOp->fStackTrace = fCurrentStackTrace;
    fCurrentStackTrace.clear();

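    // Associate the op with the active client ID (if any) so getBoundsByClientID() can find it.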
    if (fClientID != kGrAuditTrailInvalidID) {
        auditOp->fClientID = fClientID;
        Ops** opsLookup = fClientIDLookup.find(fClientID);
        Ops* ops = nullptr;
        if (!opsLookup) {
            ops = new Ops;
            fClientIDLookup.set(fClientID, ops);
        } else {
            ops = *opsLookup;
        }

        ops->push_back(auditOp);
    }

    // Our algorithm doesn't bother to reorder inside an OpNode, so the ChildID will start at 0
    auditOp->fOpsTaskID = fOpsTask.size();
    auditOp->fChildID = 0;

    // We use the op pointer as a key to find the OpNode we are 'glomming' ops onto
    fIDLookup.set(op->uniqueID(), auditOp->fOpsTaskID);
    OpNode* opNode = new OpNode(proxyID);
    opNode->fBounds = op->bounds();
    opNode->fChildren.push_back(auditOp);
    fOpsTask.emplace_back(opNode);
}

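// Called when 'consumer' merges 'consumed' during op combining: moves all of the consumed
// node's audit ops onto the consumer's OpNode and leaves a nullptr sentinel in its place.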
void GrAuditTrail::opsCombined(const GrOp* consumer, const GrOp* consumed) {
    // Look up the op we are going to glom onto
    int* indexPtr = fIDLookup.find(consumer->uniqueID());
    SkASSERT(indexPtr);
    int index = *indexPtr;
    SkASSERT(index < fOpsTask.size() && fOpsTask[index]);
    OpNode& consumerOp = *fOpsTask[index];

    // Look up the op which will be glommed
    int* consumedPtr = fIDLookup.find(consumed->uniqueID());
    SkASSERT(consumedPtr);
    int consumedIndex = *consumedPtr;
    SkASSERT(consumedIndex < fOpsTask.size() && fOpsTask[consumedIndex]);
    OpNode& consumedOp = *fOpsTask[consumedIndex];

    // steal all of consumed's ops
    for (int i = 0; i < consumedOp.fChildren.size(); i++) {
        Op* childOp = consumedOp.fChildren[i];

        // set the ids for the child op
        childOp->fOpsTaskID = index;
        childOp->fChildID = consumerOp.fChildren.size();
        consumerOp.fChildren.push_back(childOp);
    }

    // Update the bounds for the consumer node
    consumerOp.fBounds = consumer->bounds();

    // remove the old node from our ops task and clear the combinee's lookup
    // NOTE: because we can't change the shape of the ops task, we use a nullptr sentinel
    fOpsTask[consumedIndex].reset(nullptr);
    fIDLookup.remove(consumed->uniqueID());
}

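// Copies the bounds, proxy ID, and per-child op info of one OpNode into an OpInfo struct.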
void GrAuditTrail::copyOutFromOpsTask(OpInfo* outOpInfo, int opsTaskID) {
    SkASSERT(opsTaskID < fOpsTask.size());
    const OpNode* bn = fOpsTask[opsTaskID].get();
    SkASSERT(bn);
    outOpInfo->fBounds = bn->fBounds;
    outOpInfo->fProxyUniqueID = bn->fProxyUniqueID;
    for (int j = 0; j < bn->fChildren.size(); j++) {
        OpInfo::Op& outOp = outOpInfo->fOps.push_back();
        const Op* currentOp = bn->fChildren[j];
        outOp.fBounds = currentOp->fBounds;
        outOp.fClientID = currentOp->fClientID;
    }
}

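// Gathers an OpInfo for every OpNode that contains at least one op recorded under 'clientID'.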
void GrAuditTrail::getBoundsByClientID(SkTArray<OpInfo>* outInfo, int clientID) {
    Ops** opsLookup = fClientIDLookup.find(clientID);
    if (opsLookup) {
        // We track which opsTaskID we're currently looking at. If it changes, then we need to
        // push back a new OpInfo struct. We happen to know that ops are in sequential order in
        // the ops task, otherwise we'd have to do more bookkeeping.
        int currentOpsTaskID = kGrAuditTrailInvalidID;
        for (int i = 0; i < (*opsLookup)->size(); i++) {
            const Op* op = (**opsLookup)[i];

            // Because copyOutFromOpsTask() copies out every op associated with a given ops task
            // ID, we only need to push a new OpInfo when the ID changes.
            if (kGrAuditTrailInvalidID == currentOpsTaskID || op->fOpsTaskID != currentOpsTaskID) {
                OpInfo& outOpInfo = outInfo->push_back();

                // copy out all of the ops so the client can display them even if they have a
                // different clientID
                this->copyOutFromOpsTask(&outOpInfo, op->fOpsTaskID);

                // Remember which ops task we just copied so later ops from the same task don't
                // emit duplicate entries.
                currentOpsTaskID = op->fOpsTaskID;
            }
        }
    }
}

void GrAuditTrail::getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID) {
    this->copyOutFromOpsTask(outInfo, opsTaskID);
}

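// Drops every recorded op, node, and lookup table; the audit trail must currently be enabled.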
void GrAuditTrail::fullReset() {
    SkASSERT(fEnabled);
    fOpsTask.clear();
    fIDLookup.reset();
    // free all client ops
    fClientIDLookup.foreach([](const int&, Ops** ops) { delete *ops; });
    fClientIDLookup.reset();
    fOpPool.clear();  // must be last, frees all of the memory
}

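// JSON serialization of the audit trail, compiled in only when SK_ENABLE_DUMP_GPU is defined.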
#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

template <typename T>
void GrAuditTrail::JsonifyTArray(SkJSONWriter& writer, const char* name, const T& array) {
    if (array.size()) {
        writer.beginArray(name);
        for (int i = 0; i < array.size(); i++) {
            // Handle sentinel nullptrs
            if (array[i]) {
                array[i]->toJson(writer);
            }
        }
        writer.endArray();
    }
}

void GrAuditTrail::toJson(SkJSONWriter& writer) const {
    writer.beginObject();
    JsonifyTArray(writer, "Ops", fOpsTask);
    writer.endObject();
}

void GrAuditTrail::toJson(SkJSONWriter& writer, int clientID) const {
    writer.beginObject();
    Ops** ops = fClientIDLookup.find(clientID);
    if (ops) {
        JsonifyTArray(writer, "Ops", **ops);
    }
    writer.endObject();
}

static void skrect_to_json(SkJSONWriter& writer, const char* name, const SkRect& rect) {
    writer.beginObject(name);
    writer.appendFloat("Left", rect.fLeft);
    writer.appendFloat("Right", rect.fRight);
    writer.appendFloat("Top", rect.fTop);
    writer.appendFloat("Bottom", rect.fBottom);
    writer.endObject();
}

void GrAuditTrail::Op::toJson(SkJSONWriter& writer) const {
    writer.beginObject();
    writer.appendString("Name", fName);
    writer.appendS32("ClientID", fClientID);
    writer.appendS32("OpsTaskID", fOpsTaskID);
    writer.appendS32("ChildID", fChildID);
    skrect_to_json(writer, "Bounds", fBounds);
    if (fStackTrace.size()) {
        writer.beginArray("Stack");
        for (int i = 0; i < fStackTrace.size(); i++) {
            writer.appendString(fStackTrace[i]);
        }
        writer.endArray();
    }
    writer.endObject();
}

void GrAuditTrail::OpNode::toJson(SkJSONWriter& writer) const {
    writer.beginObject();
    writer.appendU32("ProxyID", fProxyUniqueID.asUInt());
    skrect_to_json(writer, "Bounds", fBounds);
    JsonifyTArray(writer, "Ops", fChildren);
    writer.endObject();
}
#else
template <typename T>
void GrAuditTrail::JsonifyTArray(SkJSONWriter& writer, const char* name, const T& array) {}
void GrAuditTrail::toJson(SkJSONWriter& writer) const {}
void GrAuditTrail::toJson(SkJSONWriter& writer, int clientID) const {}
void GrAuditTrail::Op::toJson(SkJSONWriter& writer) const {}
void GrAuditTrail::OpNode::toJson(SkJSONWriter& writer) const {}
#endif // SK_ENABLE_DUMP_GPU