/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrRecordingContext_DEFINED
#define GrRecordingContext_DEFINED

#include "include/core/SkColorType.h"
#include "include/core/SkRefCnt.h"
#include "include/core/SkString.h" // IWYU pragma: keep
#include "include/core/SkTypes.h"
#include "include/private/base/SkTArray.h"
#include "include/private/gpu/ganesh/GrContext_Base.h"
#include "include/private/gpu/ganesh/GrImageContext.h"

#include <map>
#include <memory>
#include <string>

class GrAuditTrail;
class GrContextThreadSafeProxy;
class GrDirectContext;
class GrDrawingManager;
class GrOnFlushCallbackObject;
class GrProgramDesc;
class GrProgramInfo;
class GrProxyProvider;
class GrRecordingContextPriv;
class GrThreadSafeCache;
class SkArenaAlloc;
class SkCapabilities;
class SkJSONWriter;

namespace sktext::gpu {
class SubRunAllocator;
class TextBlobRedrawCoordinator;
}

42 class GrRecordingContext : public GrImageContext {
43 public:
44 ~GrRecordingContext() override;
45
46 /**
47 * Reports whether the GrDirectContext associated with this GrRecordingContext is abandoned.
48 * When called on a GrDirectContext it may actively check whether the underlying 3D API
49 * device/context has been disconnected before reporting the status. If so, calling this
50 * method will transition the GrDirectContext to the abandoned state.
51 */
abandoned()52 bool abandoned() override { return GrImageContext::abandoned(); }
53
54 /*
55 * Can a SkSurface be created with the given color type. To check whether MSAA is supported
56 * use maxSurfaceSampleCountForColorType().
57 */
colorTypeSupportedAsSurface(SkColorType colorType)58 SK_API bool colorTypeSupportedAsSurface(SkColorType colorType) const {
59 if (kR16G16_unorm_SkColorType == colorType ||
60 kA16_unorm_SkColorType == colorType ||
61 kA16_float_SkColorType == colorType ||
62 kR16G16_float_SkColorType == colorType ||
63 kR16G16B16A16_unorm_SkColorType == colorType ||
64 kGray_8_SkColorType == colorType) {
65 return false;
66 }
67
68 return this->maxSurfaceSampleCountForColorType(colorType) > 0;
69 }
70
71 /**
72 * Gets the maximum supported texture size.
73 */
74 SK_API int maxTextureSize() const;
75
76 /**
77 * Gets the maximum supported render target size.
78 */
79 SK_API int maxRenderTargetSize() const;
80
81 /**
82 * Can a SkImage be created with the given color type.
83 */
84 SK_API bool colorTypeSupportedAsImage(SkColorType) const;
85
86 /**
87 * Does this context support protected content?
88 */
89 SK_API bool supportsProtectedContent() const;
90
91 /**
92 * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
93 * rendering is supported for the color type. 0 is returned if rendering to this color type
94 * is not supported at all.
95 */
maxSurfaceSampleCountForColorType(SkColorType colorType)96 SK_API int maxSurfaceSampleCountForColorType(SkColorType colorType) const {
97 return GrImageContext::maxSurfaceSampleCountForColorType(colorType);
98 }
99
100 SK_API sk_sp<const SkCapabilities> skCapabilities() const;
101
102 // Provides access to functions that aren't part of the public API.
103 GrRecordingContextPriv priv();
104 const GrRecordingContextPriv priv() const; // NOLINT(readability-const-return-type)
105
106 // The collection of specialized memory arenas for different types of data recorded by a
107 // GrRecordingContext. Arenas does not maintain ownership of the pools it groups together.
108 class Arenas {
109 public:
110 Arenas(SkArenaAlloc*, sktext::gpu::SubRunAllocator*);
111
112 // For storing pipelines and other complex data as-needed by ops
recordTimeAllocator()113 SkArenaAlloc* recordTimeAllocator() { return fRecordTimeAllocator; }
114
115 // For storing GrTextBlob SubRuns
recordTimeSubRunAllocator()116 sktext::gpu::SubRunAllocator* recordTimeSubRunAllocator() {
117 return fRecordTimeSubRunAllocator;
118 }
119
120 private:
121 SkArenaAlloc* fRecordTimeAllocator;
122 sktext::gpu::SubRunAllocator* fRecordTimeSubRunAllocator;
123 };
124
125 // OH ISSUE: check whether the PID is abnormal.
isPidAbnormal()126 virtual bool isPidAbnormal() const { return false; }
127
128 protected:
129 friend class GrRecordingContextPriv; // for hidden functions
130 friend class GrDeferredDisplayList; // for OwnedArenas
131 friend class GrDeferredDisplayListPriv; // for ProgramData
132
133 // Like Arenas, but preserves ownership of the underlying pools.
134 class OwnedArenas {
135 public:
136 OwnedArenas(bool ddlRecording);
137 ~OwnedArenas();
138
139 Arenas get();
140
141 OwnedArenas& operator=(OwnedArenas&&);
142
143 private:
144 bool fDDLRecording;
145 std::unique_ptr<SkArenaAlloc> fRecordTimeAllocator;
146 std::unique_ptr<sktext::gpu::SubRunAllocator> fRecordTimeSubRunAllocator;
147 };
148
149 GrRecordingContext(sk_sp<GrContextThreadSafeProxy>, bool ddlRecording);
150
151 bool init() override;
152
153 void abandonContext() override;
154
155 GrDrawingManager* drawingManager();
156
157 // There is no going back from this method. It should only be called to control the timing
158 // during abandon or destruction of the context.
159 void destroyDrawingManager();
160
arenas()161 Arenas arenas() { return fArenas.get(); }
162 // This entry point should only be used for DDL creation where we want the ops' lifetime to
163 // match that of the DDL.
164 OwnedArenas&& detachArenas();
165
proxyProvider()166 GrProxyProvider* proxyProvider() { return fProxyProvider.get(); }
proxyProvider()167 const GrProxyProvider* proxyProvider() const { return fProxyProvider.get(); }
168
169 struct ProgramData {
170 ProgramData(std::unique_ptr<const GrProgramDesc>, const GrProgramInfo*);
171 ProgramData(ProgramData&&); // for SkTArray
172 ProgramData(const ProgramData&) = delete;
173 ~ProgramData();
174
descProgramData175 const GrProgramDesc& desc() const { return *fDesc; }
infoProgramData176 const GrProgramInfo& info() const { return *fInfo; }
177
178 private:
179 // TODO: store the GrProgramDescs in the 'fRecordTimeData' arena
180 std::unique_ptr<const GrProgramDesc> fDesc;
181 // The program infos should be stored in 'fRecordTimeData' so do not need to be ref
182 // counted or deleted in the destructor.
183 const GrProgramInfo* fInfo = nullptr;
184 };
185
186 // This entry point gives the recording context a chance to cache the provided
187 // programInfo. The DDL context takes this opportunity to store programInfos as a sidecar
188 // to the DDL.
recordProgramInfo(const GrProgramInfo *)189 virtual void recordProgramInfo(const GrProgramInfo*) {}
190 // This asks the recording context to return any programInfos it may have collected
191 // via the 'recordProgramInfo' call. It is up to the caller to ensure that the lifetime
192 // of the programInfos matches the intended use. For example, in DDL-record mode it
193 // is known that all the programInfos will have been allocated in an arena with the
194 // same lifetime at the DDL itself.
detachProgramData(skia_private::TArray<ProgramData> *)195 virtual void detachProgramData(skia_private::TArray<ProgramData>*) {}
196
197 sktext::gpu::TextBlobRedrawCoordinator* getTextBlobRedrawCoordinator();
198 const sktext::gpu::TextBlobRedrawCoordinator* getTextBlobRedrawCoordinator() const;
199
200 GrThreadSafeCache* threadSafeCache();
201 const GrThreadSafeCache* threadSafeCache() const;
202
203 /**
204 * Registers an object for flush-related callbacks. (See GrOnFlushCallbackObject.)
205 *
206 * NOTE: the drawing manager tracks this object as a raw pointer; it is up to the caller to
207 * ensure its lifetime is tied to that of the context.
208 */
209 void addOnFlushCallbackObject(GrOnFlushCallbackObject*);
210
asRecordingContext()211 GrRecordingContext* asRecordingContext() override { return this; }
212
213 class Stats {
214 public:
215 Stats() = default;
216
217 #if GR_GPU_STATS
reset()218 void reset() { *this = {}; }
219
numPathMasksGenerated()220 int numPathMasksGenerated() const { return fNumPathMasksGenerated; }
incNumPathMasksGenerated()221 void incNumPathMasksGenerated() { fNumPathMasksGenerated++; }
222
numPathMaskCacheHits()223 int numPathMaskCacheHits() const { return fNumPathMaskCacheHits; }
incNumPathMasksCacheHits()224 void incNumPathMasksCacheHits() { fNumPathMaskCacheHits++; }
225
226 #if defined(GPU_TEST_UTILS)
227 void dump(SkString* out) const;
228 void dumpKeyValuePairs(skia_private::TArray<SkString>* keys,
229 skia_private::TArray<double>* values) const;
230 #endif
231
232 private:
233 int fNumPathMasksGenerated{0};
234 int fNumPathMaskCacheHits{0};
235
236 #else // GR_GPU_STATS
incNumPathMasksGenerated()237 void incNumPathMasksGenerated() {}
incNumPathMasksCacheHits()238 void incNumPathMasksCacheHits() {}
239
240 #if defined(GPU_TEST_UTILS)
dump(SkString *)241 void dump(SkString*) const {}
dumpKeyValuePairs(skia_private::TArray<SkString> * keys,skia_private::TArray<double> * values)242 void dumpKeyValuePairs(skia_private::TArray<SkString>* keys,
243 skia_private::TArray<double>* values) const {}
244 #endif
245 #endif // GR_GPU_STATS
246 } fStats;
247
248 #if GR_GPU_STATS && defined(GPU_TEST_UTILS)
249 struct DMSAAStats {
250 void dumpKeyValuePairs(skia_private::TArray<SkString>* keys,
251 skia_private::TArray<double>* values) const;
252 void dump() const;
253 void merge(const DMSAAStats&);
254 int fNumRenderPasses = 0;
255 int fNumMultisampleRenderPasses = 0;
256 std::map<std::string, int> fTriggerCounts;
257 };
258
259 DMSAAStats fDMSAAStats;
260 #endif
261
stats()262 Stats* stats() { return &fStats; }
stats()263 const Stats* stats() const { return &fStats; }
264 void dumpJSON(SkJSONWriter*) const;
265
266 protected:
267 // Delete last in case other objects call it during destruction.
268 std::unique_ptr<GrAuditTrail> fAuditTrail;
269
270 private:
271 OwnedArenas fArenas;
272
273 std::unique_ptr<GrDrawingManager> fDrawingManager;
274 std::unique_ptr<GrProxyProvider> fProxyProvider;
275
276 #if defined(GPU_TEST_UTILS)
277 int fSuppressWarningMessages = 0;
278 #endif
279 };
281 /**
282 * Safely cast a possibly-null base context to direct context.
283 */
GrAsDirectContext(GrContext_Base * base)284 static inline GrDirectContext* GrAsDirectContext(GrContext_Base* base) {
285 return base ? base->asDirectContext() : nullptr;
286 }

#endif