1 /*
2  * Copyright 2019 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "include/gpu/GrRecordingContext.h"
9 
10 #include "include/core/SkString.h"
11 #include "include/core/SkTypes.h"
12 #include "include/gpu/GpuTypes.h"
13 #include "include/gpu/GrBackendSurface.h"
14 #include "include/gpu/GrContextOptions.h"
15 #include "include/gpu/GrContextThreadSafeProxy.h"
16 #include "include/gpu/GrTypes.h"
17 #include "include/private/base/SkDebug.h"
18 #include "include/private/gpu/ganesh/GrTypesPriv.h"
19 #include "src/base/SkArenaAlloc.h"
20 #include "src/gpu/ganesh/GrAuditTrail.h"
21 #include "src/gpu/ganesh/GrCaps.h"
22 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
23 #include "src/gpu/ganesh/GrDrawingManager.h"
24 #include "src/gpu/ganesh/GrProgramDesc.h"
25 #include "src/gpu/ganesh/GrProxyProvider.h"
26 #include "src/gpu/ganesh/PathRendererChain.h"
27 #include "src/gpu/ganesh/ops/AtlasTextOp.h"
28 #include "src/text/gpu/SubRunAllocator.h"
29 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
30 
31 #include <utility>
32 
using namespace skia_private;

// Local shorthand for the GPU text-blob cache coordinator type used below.
using TextBlobRedrawCoordinator = sktext::gpu::TextBlobRedrawCoordinator;
36 
// Takes ownership of the program descriptor. fInfo is stored as a raw,
// non-owning pointer.
GrRecordingContext::ProgramData::ProgramData(std::unique_ptr<const GrProgramDesc> desc,
                                             const GrProgramInfo* info)
        : fDesc(std::move(desc))
        , fInfo(info) {
}
42 
// Move constructor: steals the owned descriptor; the raw info pointer is
// simply copied.
GrRecordingContext::ProgramData::ProgramData(ProgramData&& other)
        : fDesc(std::move(other.fDesc))
        , fInfo(other.fInfo) {
}
47 
// Defined out of line so fDesc's unique_ptr destructor is instantiated here,
// where GrProgramDesc is a complete type.
GrRecordingContext::ProgramData::~ProgramData() = default;
49 
// `proxy` is the thread-safe state shared with the owning context;
// `ddlRecording` configures the arena ownership mode (see OwnedArenas).
GrRecordingContext::GrRecordingContext(sk_sp<GrContextThreadSafeProxy> proxy, bool ddlRecording)
        : GrImageContext(std::move(proxy))
        , fAuditTrail(new GrAuditTrail())
        , fArenas(ddlRecording) {
    fProxyProvider = std::make_unique<GrProxyProvider>(this);
}
56 
GrRecordingContext::~GrRecordingContext() {
    // Release AtlasTextOp's cached memory (static hook on AtlasTextOp).
    skgpu::ganesh::AtlasTextOp::ClearCache();
}
60 
init()61 bool GrRecordingContext::init() {
62     if (!GrImageContext::init()) {
63         return false;
64     }
65 
66     skgpu::ganesh::PathRendererChain::Options prcOptions;
67     prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
68 #if defined(GR_TEST_UTILS)
69     prcOptions.fGpuPathRenderers = this->options().fGpuPathRenderers;
70 #endif
71     // FIXME: Once this is removed from Chrome and Android, rename to fEnable"".
72     if (this->options().fDisableDistanceFieldPaths) {
73         prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
74     }
75 
76     bool reduceOpsTaskSplitting = true;
77     if (this->caps()->avoidReorderingRenderTasks()) {
78         reduceOpsTaskSplitting = false;
79     } else if (GrContextOptions::Enable::kYes == this->options().fReduceOpsTaskSplitting) {
80         reduceOpsTaskSplitting = true;
81     } else if (GrContextOptions::Enable::kNo == this->options().fReduceOpsTaskSplitting) {
82         reduceOpsTaskSplitting = false;
83     }
84     fDrawingManager.reset(new GrDrawingManager(this,
85                                                prcOptions,
86                                                reduceOpsTaskSplitting));
87     return true;
88 }
89 
abandonContext()90 void GrRecordingContext::abandonContext() {
91     GrImageContext::abandonContext();
92 
93     this->destroyDrawingManager();
94 }
95 
drawingManager()96 GrDrawingManager* GrRecordingContext::drawingManager() {
97     return fDrawingManager.get();
98 }
99 
destroyDrawingManager()100 void GrRecordingContext::destroyDrawingManager() {
101     fDrawingManager.reset();
102 }
103 
// Borrowed view of the record-time allocators; OwnedArenas retains ownership.
GrRecordingContext::Arenas::Arenas(SkArenaAlloc* recordTimeAllocator,
                                   sktext::gpu::SubRunAllocator* subRunAllocator)
        : fRecordTimeAllocator(recordTimeAllocator)
        , fRecordTimeSubRunAllocator(subRunAllocator) {
    // OwnedArenas should instantiate these before passing the bare pointer off to this struct.
    SkASSERT(subRunAllocator);
}
111 
// Must be defined here so that std::unique_ptr can see the sizes of the various pools, otherwise
// it can't generate a default destructor for them.
GrRecordingContext::OwnedArenas::OwnedArenas(bool ddlRecording) : fDDLRecording(ddlRecording) {}
GrRecordingContext::OwnedArenas::~OwnedArenas() {}
116 
operator =(OwnedArenas && a)117 GrRecordingContext::OwnedArenas& GrRecordingContext::OwnedArenas::operator=(OwnedArenas&& a) {
118     fDDLRecording = a.fDDLRecording;
119     fRecordTimeAllocator = std::move(a.fRecordTimeAllocator);
120     fRecordTimeSubRunAllocator = std::move(a.fRecordTimeSubRunAllocator);
121     return *this;
122 }
123 
get()124 GrRecordingContext::Arenas GrRecordingContext::OwnedArenas::get() {
125     if (!fRecordTimeAllocator && fDDLRecording) {
126         // TODO: empirically determine a better number for SkArenaAlloc's firstHeapAllocation param
127         fRecordTimeAllocator = std::make_unique<SkArenaAlloc>(1024);
128     }
129 
130     if (!fRecordTimeSubRunAllocator) {
131         fRecordTimeSubRunAllocator = std::make_unique<sktext::gpu::SubRunAllocator>();
132     }
133 
134     return {fRecordTimeAllocator.get(), fRecordTimeSubRunAllocator.get()};
135 }
136 
// Hands the arenas off to the caller. Note this returns an rvalue reference:
// the actual move out of fArenas happens only when the caller binds the
// result to an OwnedArenas value.
GrRecordingContext::OwnedArenas&& GrRecordingContext::detachArenas() {
    return std::move(fArenas);
}
140 
// The text-blob redraw coordinator is owned by the thread-safe proxy.
TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() {
    return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
}
144 
// Const access to the text-blob redraw coordinator owned by the proxy.
const TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() const {
    return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
}
148 
// The thread-safe cache is owned by the thread-safe proxy.
GrThreadSafeCache* GrRecordingContext::threadSafeCache() {
    return fThreadSafeProxy->priv().threadSafeCache();
}
152 
// Const access to the thread-safe cache owned by the proxy.
const GrThreadSafeCache* GrRecordingContext::threadSafeCache() const {
    return fThreadSafeProxy->priv().threadSafeCache();
}
156 
// Forwards the on-flush callback object to the drawing manager.
void GrRecordingContext::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    this->drawingManager()->addOnFlushCallbackObject(onFlushCBObject);
}
160 
161 ////////////////////////////////////////////////////////////////////////////////
162 
// The caps object doubles as this context's SkCapabilities; hand out a ref.
sk_sp<const SkCapabilities> GrRecordingContext::skCapabilities() const {
    return this->refCaps();
}
166 
// Maximum texture dimension, as reported by the caps.
int GrRecordingContext::maxTextureSize() const { return this->caps()->maxTextureSize(); }
168 
// Maximum render-target dimension, as reported by the caps.
int GrRecordingContext::maxRenderTargetSize() const { return this->caps()->maxRenderTargetSize(); }
170 
colorTypeSupportedAsImage(SkColorType colorType) const171 bool GrRecordingContext::colorTypeSupportedAsImage(SkColorType colorType) const {
172     GrBackendFormat format =
173             this->caps()->getDefaultBackendFormat(SkColorTypeToGrColorType(colorType),
174                                                   GrRenderable::kNo);
175     return format.isValid();
176 }
177 
// Whether protected content is supported, as reported by the caps.
bool GrRecordingContext::supportsProtectedContent() const {
    return this->caps()->supportsProtectedContent();
}
181 
182 ///////////////////////////////////////////////////////////////////////////////////////////////////
183 
#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

// Writes this context's stats as a JSON object. Only the path-mask counters
// are emitted, and only when GR_GPU_STATS is enabled; otherwise the object
// is empty.
void GrRecordingContext::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

#if GR_GPU_STATS
    writer->appendS32("path_masks_generated", this->stats()->numPathMasksGenerated());
    writer->appendS32("path_mask_cache_hits", this->stats()->numPathMaskCacheHits());
#endif

    writer->endObject();
}
#else
// Dumping compiled out: keep the symbol so callers link unconditionally.
void GrRecordingContext::dumpJSON(SkJSONWriter*) const { }
#endif
200 
201 #if defined(GR_TEST_UTILS)
202 
203 #if GR_GPU_STATS
204 
// Appends a human-readable summary of the path-mask counters to `out`.
void GrRecordingContext::Stats::dump(SkString* out) const {
    out->appendf("Num Path Masks Generated: %d\n", fNumPathMasksGenerated);
    out->appendf("Num Path Mask Cache Hits: %d\n", fNumPathMaskCacheHits);
}
209 
// Appends the path-mask counters as parallel key/value arrays
// (keys[i] corresponds to values[i]).
void GrRecordingContext::Stats::dumpKeyValuePairs(TArray<SkString>* keys,
                                                  TArray<double>* values) const {
    keys->push_back(SkString("path_masks_generated"));
    values->push_back(fNumPathMasksGenerated);

    keys->push_back(SkString("path_mask_cache_hits"));
    values->push_back(fNumPathMaskCacheHits);
}
218 
// Appends the DMSAA counters — plus one entry per recorded trigger — as
// parallel key/value arrays.
void GrRecordingContext::DMSAAStats::dumpKeyValuePairs(TArray<SkString>* keys,
                                                       TArray<double>* values) const {
    keys->push_back(SkString("dmsaa_render_passes"));
    values->push_back(fNumRenderPasses);

    keys->push_back(SkString("dmsaa_multisample_render_passes"));
    values->push_back(fNumMultisampleRenderPasses);

    for (const auto& [name, count] : fTriggerCounts) {
        keys->push_back(SkStringPrintf("dmsaa_trigger_%s", name.c_str()));
        values->push_back(count);
    }
}
232 
// Prints the DMSAA counters to debug output. The per-trigger section is
// printed only when at least one trigger has been recorded.
void GrRecordingContext::DMSAAStats::dump() const {
    SkDebugf("DMSAA Render Passes: %d\n", fNumRenderPasses);
    SkDebugf("DMSAA Multisample Render Passes: %d\n", fNumMultisampleRenderPasses);
    if (!fTriggerCounts.empty()) {
        SkDebugf("DMSAA Triggers:\n");
        for (const auto& [name, count] : fTriggerCounts) {
            SkDebugf("    %s: %d\n", name.c_str(), count);
        }
    }
}
243 
merge(const DMSAAStats & stats)244 void GrRecordingContext::DMSAAStats::merge(const DMSAAStats& stats) {
245     fNumRenderPasses += stats.fNumRenderPasses;
246     fNumMultisampleRenderPasses += stats.fNumMultisampleRenderPasses;
247     for (const auto& [name, count] : stats.fTriggerCounts) {
248         fTriggerCounts[name] += count;
249     }
250 }
251 
252 #endif // GR_GPU_STATS
253 #endif // defined(GR_TEST_UTILS)
254