1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7 #include "src/gpu/ganesh/mock/GrMockGpu.h"
8
9 #include "include/gpu/GpuTypes.h"
10 #include "include/private/base/SkDebug.h"
11 #include "include/private/base/SkMath.h"
12 #include "src/gpu/ganesh/GrCaps.h"
13 #include "src/gpu/ganesh/GrGpuBuffer.h"
14 #include "src/gpu/ganesh/GrRenderTarget.h"
15 #include "src/gpu/ganesh/GrTexture.h"
16 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h" // IWYU pragma: keep
17 #include "src/gpu/ganesh/mock/GrMockAttachment.h"
18 #include "src/gpu/ganesh/mock/GrMockBuffer.h"
19 #include "src/gpu/ganesh/mock/GrMockCaps.h"
20 #include "src/gpu/ganesh/mock/GrMockOpsRenderPass.h"
21 #include "src/gpu/ganesh/mock/GrMockTexture.h"
22
23 #include <atomic>
24
25 using namespace skia_private;
26
NextInternalTextureID()27 int GrMockGpu::NextInternalTextureID() {
28 static std::atomic<int> nextID{1};
29 int id;
30 do {
31 id = nextID.fetch_add(1, std::memory_order_relaxed);
32 } while (0 == id); // Reserve 0 for an invalid ID.
33 return id;
34 }
35
NextExternalTextureID()36 int GrMockGpu::NextExternalTextureID() {
37 // We use negative ints for the "testing only external textures" so they can easily be
38 // identified when debugging.
39 static std::atomic<int> nextID{-1};
40 return nextID.fetch_add(-1, std::memory_order_relaxed);
41 }
42
NextInternalRenderTargetID()43 int GrMockGpu::NextInternalRenderTargetID() {
44 // We start off with large numbers to differentiate from texture IDs, even though they're
45 // technically in a different space.
46 static std::atomic<int> nextID{SK_MaxS32};
47 return nextID.fetch_add(-1, std::memory_order_relaxed);
48 }
49
NextExternalRenderTargetID()50 int GrMockGpu::NextExternalRenderTargetID() {
51 // We use large negative ints for the "testing only external render targets" so they can easily
52 // be identified when debugging.
53 static std::atomic<int> nextID{SK_MinS32};
54 return nextID.fetch_add(1, std::memory_order_relaxed);
55 }
56
Make(const GrMockOptions * mockOptions,const GrContextOptions & contextOptions,GrDirectContext * direct)57 std::unique_ptr<GrGpu> GrMockGpu::Make(const GrMockOptions* mockOptions,
58 const GrContextOptions& contextOptions,
59 GrDirectContext* direct) {
60 static const GrMockOptions kDefaultOptions = GrMockOptions();
61 if (!mockOptions) {
62 mockOptions = &kDefaultOptions;
63 }
64 return std::unique_ptr<GrGpu>(new GrMockGpu(direct, *mockOptions, contextOptions));
65 }
66
onGetOpsRenderPass(GrRenderTarget * rt,bool,GrAttachment *,GrSurfaceOrigin origin,const SkIRect & bounds,const GrOpsRenderPass::LoadAndStoreInfo & colorInfo,const GrOpsRenderPass::StencilLoadAndStoreInfo &,const TArray<GrSurfaceProxy *,true> & sampledProxies,GrXferBarrierFlags renderPassXferBarriers)67 GrOpsRenderPass* GrMockGpu::onGetOpsRenderPass(GrRenderTarget* rt,
68 bool /*useMSAASurface*/,
69 GrAttachment*,
70 GrSurfaceOrigin origin,
71 const SkIRect& bounds,
72 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
73 const GrOpsRenderPass::StencilLoadAndStoreInfo&,
74 const TArray<GrSurfaceProxy*,true>& sampledProxies,
75 GrXferBarrierFlags renderPassXferBarriers) {
76 return new GrMockOpsRenderPass(this, rt, origin, colorInfo);
77 }
78
submit(GrOpsRenderPass * renderPass)79 void GrMockGpu::submit(GrOpsRenderPass* renderPass) {
80 for (int i = 0; i < static_cast<GrMockOpsRenderPass*>(renderPass)->numDraws(); ++i) {
81 fStats.incNumDraws();
82 }
83 delete renderPass;
84 }
85
// Construct a mock GPU; copies the mock options and installs GrMockCaps built
// from both the mock options and the context options.
GrMockGpu::GrMockGpu(GrDirectContext* direct, const GrMockOptions& options,
                     const GrContextOptions& contextOptions)
        : INHERITED(direct)
        , fMockOptions(options) {
    this->initCaps(sk_make_sp<GrMockCaps>(contextOptions, options));
}
92
~GrMockGpu()93 GrMockGpu::~GrMockGpu() {}
94
// The mock backend has no pipeline compilation, so there is no builder to expose.
GrThreadSafePipelineBuilder* GrMockGpu::pipelineBuilder() {
    return nullptr;
}
98
refPipelineBuilder()99 sk_sp<GrThreadSafePipelineBuilder> GrMockGpu::refPipelineBuilder() {
100 return nullptr;
101 }
102
onCreateTexture(SkISize dimensions,const GrBackendFormat & format,GrRenderable renderable,int renderTargetSampleCnt,skgpu::Budgeted budgeted,GrProtected isProtected,int mipLevelCount,uint32_t levelClearMask,std::string_view label)103 sk_sp<GrTexture> GrMockGpu::onCreateTexture(SkISize dimensions,
104 const GrBackendFormat& format,
105 GrRenderable renderable,
106 int renderTargetSampleCnt,
107 skgpu::Budgeted budgeted,
108 GrProtected isProtected,
109 int mipLevelCount,
110 uint32_t levelClearMask,
111 std::string_view label) {
112 if (fMockOptions.fFailTextureAllocations) {
113 return nullptr;
114 }
115
116 // Compressed formats should go through onCreateCompressedTexture
117 SkASSERT(format.asMockCompressionType() == SkTextureCompressionType::kNone);
118
119 GrColorType ct = format.asMockColorType();
120 SkASSERT(ct != GrColorType::kUnknown);
121
122 GrMipmapStatus mipmapStatus =
123 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
124 GrMockTextureInfo texInfo(ct, SkTextureCompressionType::kNone, NextInternalTextureID(),
125 isProtected);
126 if (renderable == GrRenderable::kYes) {
127 GrMockRenderTargetInfo rtInfo(ct, NextInternalRenderTargetID(), isProtected);
128 return sk_sp<GrTexture>(new GrMockTextureRenderTarget(this, budgeted, dimensions,
129 renderTargetSampleCnt,
130 mipmapStatus,
131 texInfo,
132 rtInfo,
133 label));
134 }
135 return sk_sp<GrTexture>(new GrMockTexture(
136 this, budgeted, dimensions, mipmapStatus, texInfo, label));
137 }
138
// NOTE(review): 'isProtected' IS accepted and forwarded below; the old
// "why no isProtected?" TODO appears stale — confirm and remove.
onCreateCompressedTexture(SkISize dimensions,const GrBackendFormat & format,skgpu::Budgeted budgeted,skgpu::Mipmapped mipmapped,GrProtected isProtected,const void * data,size_t dataSize)140 sk_sp<GrTexture> GrMockGpu::onCreateCompressedTexture(SkISize dimensions,
141 const GrBackendFormat& format,
142 skgpu::Budgeted budgeted,
143 skgpu::Mipmapped mipmapped,
144 GrProtected isProtected,
145 const void* data,
146 size_t dataSize) {
147 if (fMockOptions.fFailTextureAllocations) {
148 return nullptr;
149 }
150
151 #ifdef SK_DEBUG
152 // Uncompressed formats should go through onCreateTexture
153 SkTextureCompressionType compression = format.asMockCompressionType();
154 SkASSERT(compression != SkTextureCompressionType::kNone);
155 #endif
156
157 GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
158 ? GrMipmapStatus::kValid
159 : GrMipmapStatus::kNotAllocated;
160 GrMockTextureInfo texInfo(GrColorType::kUnknown,
161 format.asMockCompressionType(),
162 NextInternalTextureID(),
163 isProtected);
164
165 return sk_sp<GrTexture>(new GrMockTexture(
166 this, budgeted, dimensions, mipmapStatus, texInfo,
167 /*label=*/"MockGpu_CreateCompressedTexture"));
168 }
169
// Overload taking an OH_NativeBuffer: compressed textures backed by a native
// buffer are not implemented in the mock backend; asserts in debug builds and
// always returns nullptr.
sk_sp<GrTexture> GrMockGpu::onCreateCompressedTexture(SkISize dimensions,
                                                      const GrBackendFormat& format,
                                                      skgpu::Budgeted budgeted,
                                                      skgpu::Mipmapped mipMapped,
                                                      GrProtected isProtected,
                                                      OH_NativeBuffer* nativeBuffer,
                                                      size_t bufferSize) {
    SkASSERT(!"unimplemented");
    return nullptr;
}
180
onWrapBackendTexture(const GrBackendTexture & tex,GrWrapOwnership ownership,GrWrapCacheable wrapType,GrIOType ioType)181 sk_sp<GrTexture> GrMockGpu::onWrapBackendTexture(const GrBackendTexture& tex,
182 GrWrapOwnership ownership,
183 GrWrapCacheable wrapType,
184 GrIOType ioType) {
185 GrMockTextureInfo texInfo;
186 SkAssertResult(tex.getMockTextureInfo(&texInfo));
187
188 SkTextureCompressionType compression = texInfo.compressionType();
189 if (compression != SkTextureCompressionType::kNone) {
190 return nullptr;
191 }
192
193 GrMipmapStatus mipmapStatus = tex.hasMipmaps() ? GrMipmapStatus::kValid
194 : GrMipmapStatus::kNotAllocated;
195 return sk_sp<GrTexture>(new GrMockTexture(this,
196 tex.dimensions(),
197 mipmapStatus,
198 texInfo,
199 wrapType,
200 ioType,
201 /*label=*/"MockGpu_WrapBackendTexture"));
202 }
203
// Wrapping compressed backend textures is not supported by the mock backend.
sk_sp<GrTexture> GrMockGpu::onWrapCompressedBackendTexture(const GrBackendTexture& tex,
                                                           GrWrapOwnership ownership,
                                                           GrWrapCacheable wrapType) {
    return nullptr;
}
209
onWrapRenderableBackendTexture(const GrBackendTexture & tex,int sampleCnt,GrWrapOwnership ownership,GrWrapCacheable cacheable)210 sk_sp<GrTexture> GrMockGpu::onWrapRenderableBackendTexture(const GrBackendTexture& tex,
211 int sampleCnt,
212 GrWrapOwnership ownership,
213 GrWrapCacheable cacheable) {
214 GrMockTextureInfo texInfo;
215 SkAssertResult(tex.getMockTextureInfo(&texInfo));
216 SkASSERT(texInfo.compressionType() == SkTextureCompressionType::kNone);
217
218 GrMipmapStatus mipmapStatus =
219 tex.hasMipmaps() ? GrMipmapStatus::kValid : GrMipmapStatus::kNotAllocated;
220
221 // The client gave us the texture ID but we supply the render target ID.
222 GrMockRenderTargetInfo rtInfo(texInfo.colorType(), NextInternalRenderTargetID(),
223 texInfo.getProtected());
224
225 return sk_sp<GrTexture>(
226 new GrMockTextureRenderTarget(this,
227 tex.dimensions(),
228 sampleCnt,
229 mipmapStatus,
230 texInfo,
231 rtInfo,
232 cacheable,
233 /*label=*/"MockGpu_WrapRenderableBackendTexture"));
234 }
235
onWrapBackendRenderTarget(const GrBackendRenderTarget & rt)236 sk_sp<GrRenderTarget> GrMockGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& rt) {
237 GrMockRenderTargetInfo info;
238 SkAssertResult(rt.getMockRenderTargetInfo(&info));
239
240 return sk_sp<GrRenderTarget>(
241 new GrMockRenderTarget(this,
242 GrMockRenderTarget::kWrapped,
243 rt.dimensions(),
244 rt.sampleCnt(),
245 info,
246 /*label=*/"MockGpu_WrapBackendRenderTarget"));
247 }
248
onCreateBuffer(size_t sizeInBytes,GrGpuBufferType type,GrAccessPattern accessPattern)249 sk_sp<GrGpuBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes,
250 GrGpuBufferType type,
251 GrAccessPattern accessPattern) {
252 return sk_sp<GrGpuBuffer>(
253 new GrMockBuffer(this, sizeInBytes, type, accessPattern,
254 /*label=*/"MockGpu_CreateBuffer"));
255 }
256
makeStencilAttachment(const GrBackendFormat &,SkISize dimensions,int numStencilSamples)257 sk_sp<GrAttachment> GrMockGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
258 SkISize dimensions, int numStencilSamples) {
259 fStats.incStencilAttachmentCreates();
260 return sk_sp<GrAttachment>(new GrMockAttachment(this,
261 dimensions,
262 GrAttachment::UsageFlags::kStencilAttachment,
263 numStencilSamples,
264 /*label=*/"MockGpu_MakeStencilAttachment"));
265 }
266
onCreateBackendTexture(SkISize dimensions,const GrBackendFormat & format,GrRenderable,skgpu::Mipmapped mipmapped,GrProtected isProtected,std::string_view label)267 GrBackendTexture GrMockGpu::onCreateBackendTexture(SkISize dimensions,
268 const GrBackendFormat& format,
269 GrRenderable,
270 skgpu::Mipmapped mipmapped,
271 GrProtected isProtected,
272 std::string_view label) {
273 SkTextureCompressionType compression = format.asMockCompressionType();
274 if (compression != SkTextureCompressionType::kNone) {
275 return {}; // should go through onCreateCompressedBackendTexture
276 }
277
278 auto colorType = format.asMockColorType();
279 if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
280 return GrBackendTexture(); // invalid
281 }
282
283 GrMockTextureInfo info(colorType, SkTextureCompressionType::kNone, NextExternalTextureID(),
284 isProtected);
285
286 fOutstandingTestingOnlyTextureIDs.add(info.id());
287 return GrBackendTexture(dimensions.width(), dimensions.height(), mipmapped, info);
288 }
289
onCreateCompressedBackendTexture(SkISize dimensions,const GrBackendFormat & format,skgpu::Mipmapped mipmapped,GrProtected isProtected)290 GrBackendTexture GrMockGpu::onCreateCompressedBackendTexture(SkISize dimensions,
291 const GrBackendFormat& format,
292 skgpu::Mipmapped mipmapped,
293 GrProtected isProtected) {
294 SkTextureCompressionType compression = format.asMockCompressionType();
295 if (compression == SkTextureCompressionType::kNone) {
296 return {}; // should go through onCreateBackendTexture
297 }
298
299 if (!this->caps()->isFormatTexturable(format, GrTextureType::k2D)) {
300 return {};
301 }
302
303 GrMockTextureInfo info(GrColorType::kUnknown, compression, NextExternalTextureID(),
304 isProtected);
305
306 fOutstandingTestingOnlyTextureIDs.add(info.id());
307 return GrBackendTexture(dimensions.width(), dimensions.height(), mipmapped, info);
308 }
309
deleteBackendTexture(const GrBackendTexture & tex)310 void GrMockGpu::deleteBackendTexture(const GrBackendTexture& tex) {
311 SkASSERT(GrBackendApi::kMock == tex.backend());
312
313 GrMockTextureInfo info;
314 if (tex.getMockTextureInfo(&info)) {
315 fOutstandingTestingOnlyTextureIDs.remove(info.id());
316 }
317 }
318
319 #if defined(GPU_TEST_UTILS)
isTestingOnlyBackendTexture(const GrBackendTexture & tex) const320 bool GrMockGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
321 SkASSERT(GrBackendApi::kMock == tex.backend());
322
323 GrMockTextureInfo info;
324 if (!tex.getMockTextureInfo(&info)) {
325 return false;
326 }
327
328 return fOutstandingTestingOnlyTextureIDs.contains(info.id());
329 }
330
createTestingOnlyBackendRenderTarget(SkISize dimensions,GrColorType colorType,int sampleCnt,GrProtected isProtected)331 GrBackendRenderTarget GrMockGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
332 GrColorType colorType,
333 int sampleCnt,
334 GrProtected isProtected) {
335 GrMockRenderTargetInfo info(colorType, NextExternalRenderTargetID(), isProtected);
336 static constexpr int kStencilBits = 8;
337 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), sampleCnt, kStencilBits,
338 info);
339 }
340
deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget &)341 void GrMockGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) {}
342 #endif
343