/*
 * Copyright 2021 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
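
// This file reorders a topologically-sorted list of GrRenderTasks so that tasks which render to
// the same single target end up adjacent ("clusters"), without breaking any formal dependency or
// any read of a surface that another task writes to.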

#include "src/gpu/GrRenderTaskCluster.h"

#include "include/private/SkTHash.h"
#include "src/gpu/GrRenderTask.h"

// Uncomment to get lots of logging.
#define CLUSTER_DEBUGF(...) //SkDebugf(__VA_ARGS__)

static GrSurfaceProxy* first_target(GrRenderTask* task) { return task->target(0); }

#ifdef SK_DEBUG
[[maybe_unused]] static SkString describe_task(GrRenderTask* t) {
    if (GrSurfaceProxy* target = first_target(t)) {
        return SkStringPrintf("%s(%d)", target->getDebugName().c_str(), t->uniqueID());
    } else {
        return SkStringPrintf("%d", t->uniqueID());
    }
}

[[maybe_unused]] static SkString describe_tasks(SkSpan<const sk_sp<GrRenderTask>> collection) {
    SkString s;
    for (const sk_sp<GrRenderTask>& t : collection) {
        s.appendf("%s ", describe_task(t.get()).c_str());
    }
    return s;
}

[[maybe_unused]] static SkString describe_tasks(const SkTInternalLList<GrRenderTask>& collection) {
    SkString s;
    for (GrRenderTask* t : collection) {
        s.appendf("%s ", describe_task(t).c_str());
    }
    return s;
}

static void validate(SkSpan<const sk_sp<GrRenderTask>> input,
                     const SkTInternalLList<GrRenderTask>& llist) {
    // Check that we didn't break dependencies.
    SkTHashSet<GrRenderTask*> seen;
    for (GrRenderTask* t : llist) {
        seen.add(t);
        for (GrRenderTask* dep : t->dependencies()) {
            SkASSERTF(seen.contains(dep),
                      "%s came before dependency %s",
                      describe_task(t).c_str(),
                      describe_task(dep).c_str());
        }
    }
    // Check that llist has the same entries as the input.
    for (const auto& orig : input) {
        seen.remove(orig.get());
    }
    SkASSERT(seen.empty());
}

#endif  // SK_DEBUG

// Returns whether `depender` formally depends on `dependee`, or whether `dependee` reads a
// surface that `depender` targets.
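// E.g. if `dependee` samples a texture that `depender` renders into, moving `depender` earlier
// than `dependee` would change what `dependee` reads, so that pair also counts as a dependency.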
static bool depends_on(GrRenderTask* depender, GrRenderTask* dependee) {
    // Check if depender writes to something dependee reads.
    // TODO: Establish real DAG dependencies for this during recording? E.g. when a task adds a
    // target, search backward for the last task that uses the target and add a dep.
    for (int i = 0; i < depender->numTargets(); i++) {
        if (dependee->isUsed(depender->target(i))) {
            CLUSTER_DEBUGF("Cluster: Bail, %s can't write before %s reads from %s.\n",
                           describe_task(depender).c_str(),
                           describe_task(dependee).c_str(),
                           depender->target(i)->getDebugName().c_str());
            return true;
        }
    }
    // Check for a formal dependency.
    if (depender->dependsOn(dependee)) {
        CLUSTER_DEBUGF("Cluster: Bail, %s depends on %s.\n",
                       describe_task(depender).c_str(),
                       describe_task(dependee).c_str());
        return true;
    }
    return false;
}

// Returns whether reordering occurred.
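// Tries to let `task` (which the caller is about to append to `llist`) join the cluster that
// ends at the most recently emitted task with the same single target, by pulling every task
// emitted since that cluster in front of it, provided none of those tasks depends on the cluster.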
static bool task_cluster_visit(GrRenderTask* task, SkTInternalLList<GrRenderTask>* llist,
                               SkTHashMap<GrSurfaceProxy*, GrRenderTask*>* lastTaskMap) {
    CLUSTER_DEBUGF("Cluster: ***Step***\nLooking at %s\n",
                   describe_task(task).c_str());
    if (task->numTargets() != 1) {
        CLUSTER_DEBUGF("Cluster: %d targets. Emitting barriers.\n", task->numTargets());
        // Tasks with 0 or multiple targets are treated as full barriers
        // for all their targets.
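        // E.g. a task with zero targets, or one that writes to several proxies, cannot join any
        // single-target cluster, so forget the last task seen for each of its targets.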
        for (int j = 0; j < task->numTargets(); j++) {
            if (lastTaskMap->find(task->target(j))) {
                lastTaskMap->remove(task->target(j));
            }
        }
        return false;
    }

    GrSurfaceProxy* target = first_target(task);
    GrRenderTask* clusterTail = (lastTaskMap->find(target) ? *lastTaskMap->find(target) : nullptr);
    lastTaskMap->set(target, task);

    if (!clusterTail) {
        CLUSTER_DEBUGF("Cluster: Bail, no cluster to extend.\n");
        return false;
    }

    CLUSTER_DEBUGF("Cluster: clusterTail is %s.\n", describe_task(clusterTail).c_str());

    if (clusterTail == llist->tail()) {
        CLUSTER_DEBUGF("Cluster: Bail, cluster is already tail.\n");
        return false;
    }
    GrRenderTask* movedHead = clusterTail->fNext;

    // Now, let's refer to the "cluster" as the chain of tasks with the same target, that we're
    // hoping to extend by reordering. Let "moved tasks" be the ones we want to move to extend the
    // cluster.
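    // Example: if llist is currently [A1 A2 B] and `task` is A3 (where the An tasks target
    // surface A and B targets surface B), then clusterTail is A2, clusterHead is A1, and the only
    // moved task is B. If B does not depend on A1 or A2 and neither A1 nor A2 reads surface B,
    // B is pulled in front of A1, giving [B A1 A2]; appending A3 then extends the A cluster.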
    GrRenderTask* clusterHead = clusterTail;
    while (clusterHead->fPrev
           && 1 == clusterHead->fPrev->numTargets()
           && target == first_target(clusterHead->fPrev)) {
        clusterHead = clusterHead->fPrev;
    }

    // We can't reorder if any moved task depends on anything in the cluster.
    // Time complexity here is high, but making a hash set is worse in profiling.
    for (GrRenderTask* moved = movedHead; moved; moved = moved->fNext) {
        for (GrRenderTask* passed = clusterHead; passed != movedHead; passed = passed->fNext) {
            if (depends_on(moved, passed)) {
                return false;
            }
        }
    }

    // Grab the moved tasks and pull them before clusterHead.
    for (GrRenderTask* moved = movedHead; moved;) {
        CLUSTER_DEBUGF("Cluster: Reorder %s behind %s.\n",
                       describe_task(moved).c_str(),
                       describe_task(clusterHead).c_str());
        // Be careful to save fNext before each move.
        GrRenderTask* nextMoved = moved->fNext;
        llist->remove(moved);
        llist->addBefore(moved, clusterHead);
        moved = nextMoved;
    }
    return true;
}

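// Single pass over `input`: each task is appended to the tail of `llist`, and task_cluster_visit
// may first reorder the already-emitted tasks so that the new task ends up immediately after the
// previous task with the same target. `lastTaskMap` tracks the most recent task for each target.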
bool GrClusterRenderTasks(SkSpan<const sk_sp<GrRenderTask>> input,
                          SkTInternalLList<GrRenderTask>* llist) {
    SkASSERT(llist->isEmpty());

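    // With fewer than three tasks there can be no cluster, no task to move out of its way, and
    // no task to extend it with, so no reorder is possible.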
    if (input.size() < 3) {
        return false;
    }

    CLUSTER_DEBUGF("Cluster: Original order is %s\n", describe_tasks(input).c_str());

    SkTHashMap<GrSurfaceProxy*, GrRenderTask*> lastTaskMap;
    bool didReorder = false;
    for (const auto& t : input) {
        didReorder |= task_cluster_visit(t.get(), llist, &lastTaskMap);
        llist->addToTail(t.get());
        CLUSTER_DEBUGF("Cluster: Output order is now: %s\n", describe_tasks(*llist).c_str());
    }

#ifdef SK_DEBUG
    if (didReorder) {
        validate(input, *llist);
    }
#endif

    return didReorder;
}

#undef CLUSTER_DEBUGF