/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDawnGpu_DEFINED
#define GrDawnGpu_DEFINED

#include "src/gpu/ganesh/GrGpu.h"

#include "src/core/SkLRUCache.h"
#include "src/core/SkTHash.h"
#include "src/gpu/ganesh/GrFinishCallbacks.h"
#include "src/gpu/ganesh/GrProgramDesc.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/dawn/GrDawnAsyncWait.h"
#include "src/gpu/ganesh/dawn/GrDawnRingBuffer.h"
#include "src/sksl/ir/SkSLProgram.h"
#include "webgpu/webgpu_cpp.h"

#include <unordered_map>

class GrDawnOpsRenderPass;
class GrDawnStagingBuffer;
class GrDirectContext;
class GrPipeline;
struct GrDawnProgram;

class GrDawnGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const wgpu::Device&, const GrContextOptions&, GrDirectContext*);

    ~GrDawnGpu() override;

    void disconnect(DisconnectType) override;

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    const wgpu::Device& device() const { return fDevice; }
    const wgpu::Queue&  queue() const { return fQueue; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeDawn(wgpu::TextureFormat::Depth24PlusStencil8);
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override {
        return nullptr;
    }

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
    void checkFinishProcs() override;
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

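    // Returns the compiled program for this render target / program-info pair, reusing an entry
    // from fRenderPipelineCache when one exists and inserting a newly built one otherwise.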
    sk_sp<GrDawnProgram> getOrCreateRenderPipeline(GrRenderTarget*, const GrProgramInfo&);

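    // Returns a wgpu::Sampler matching `samplerState`, memoized in fSamplers so each distinct
    // sampler state is created at most once.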
    wgpu::Sampler getOrCreateSampler(GrSamplerState samplerState);

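    // Uniform data is sub-allocated out of fUniformRingBuffer, and copy work is batched on a
    // shared command encoder: getCopyEncoder() returns it (creating it if needed), and
    // flushCopyEncoder() finishes it into fCommandBuffers (via appendCommandBuffer) so the
    // recorded copies land in the next submission.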
    GrDawnRingBuffer::Slice allocateUniformRingBufferSlice(int size);
    wgpu::CommandEncoder getCopyEncoder();
    void flushCopyEncoder();
    void appendCommandBuffer(wgpu::CommandBuffer commandBuffer);

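    // Shader compilation path: SkSL is first compiled to SPIR-V (reporting the program's inputs,
    // e.g. whether it reads the rt-flip uniform at `rtFlipOffset`), and the resulting SPIR-V is
    // then wrapped in a wgpu::ShaderModule.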
    std::string SkSLToSPIRV(const char* shaderString,
                            SkSL::ProgramKind,
                            uint32_t rtFlipOffset,
                            SkSL::Program::Inputs*);
    wgpu::ShaderModule createShaderModule(const std::string& spirvSource);

private:
    GrDawnGpu(GrDirectContext*, const GrContextOptions&, const wgpu::Device&);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     skgpu::Budgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask,
                                     std::string_view label) override;

    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               skgpu::Budgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data,
                                               size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected,
                                            std::string_view label) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t size) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void*,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool) override;

    bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                      size_t srcOffset,
                                      sk_sp<GrGpuBuffer> dst,
                                      size_t dstOffset,
                                      size_t size) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    void onResolveRenderTarget(GrRenderTarget*, const SkIRect&) override {}

    bool onRegenerateMipMapLevels(GrTexture*) override;

    bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                       GrSurface* src, const SkIRect& srcRect,
                       GrSamplerState::Filter) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment*,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

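    // Submission: onSubmitToGpu() hands the accumulated fCommandBuffers to fQueue.
    // onSubmittedWorkDone() is the queue's work-done callback; once the GPU has finished,
    // mapPendingStagingBuffers() starts the asynchronous re-mapping of the staging buffers
    // described at fSubmittedStagingBuffers below.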
    bool onSubmitToGpu(bool syncCpu) override;
    void onSubmittedWorkDone(WGPUQueueWorkDoneStatus status);
    void mapPendingStagingBuffers();

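    // Helpers backing insertFence()/deleteFence(); each fence is a GrDawnAsyncWait tracked in
    // fQueueFences until it is signaled or destroyed.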
    GrDawnAsyncWait* createFence();
    void destroyFence(GrDawnAsyncWait* fence);

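    // Writes `mipLevelCount` levels of `texels` into `rect` of `texture`; the data is staged
    // through the staging-buffer machinery and copied on the shared copy encoder (see
    // getCopyEncoder() above).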
    void uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount,
                           const SkIRect& rect, wgpu::Texture texture);

    wgpu::Device                                    fDevice;
    wgpu::Queue                                     fQueue;
    std::unique_ptr<GrDawnOpsRenderPass>            fOpsRenderPass;
    GrDawnRingBuffer                                fUniformRingBuffer;
    wgpu::CommandEncoder                            fCopyEncoder;
    std::vector<wgpu::CommandBuffer>                fCommandBuffers;
    GrStagingBufferManager                          fStagingBufferManager;

    // Temporary list that holds refs on staging buffers after they are detached from the
    // GrStagingBufferManager. During submission each buffer is asked to map asynchronously (Dawn
    // guarantees the map happens only after the submitted work completes) and the list is cleared;
    // each buffer is returned to its backing resource provider when its ref is dropped in the
    // map-completion callback.
    //
    // NOTE: In normal operation the buffers are mapped by the time they are made available again.
    // However, the map can fail (e.g. due to a lost connection to the GPU), in which case a buffer
    // is made available in an unmapped state; if a client then asks to map it, GrDawnBuffer retries
    // the map itself.
    std::vector<sk_sp<GrGpuBuffer>>                 fSubmittedStagingBuffers;

    // Tracks the completion of all outstanding asynchronous buffer-mapping requests. This is
    // needed for a clean shutdown: buffers must not be returned to the resource provider after the
    // provider has been destroyed.
    class PendingMapAsyncRequests {
    public:
        explicit PendingMapAsyncRequests(const wgpu::Device& device);
        void addOne();
        void completeOne();
        void waitUntilDone() const;

    private:
        int fCount = 0;
        GrDawnAsyncWait wait_;
    };
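
    // A sketch of the intended protocol (the actual call sites live in GrDawnGpu.cpp): addOne()
    // is called before each wgpu::Buffer::MapAsync() request, the map-completion callback calls
    // completeOne(), and shutdown calls waitUntilDone() to block until the count drains to zero.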
    PendingMapAsyncRequests fPendingMapAsyncRequests;

    // Every time command buffers are submitted to the queue (in onSubmitToGpu) we register a
    // single OnSubmittedWorkDone callback that is responsible for signaling all fences added via
    // insertFence().
    //
    // NOTE: We use a single callback, rather than one per fence, because Dawn currently provides
    // no way to unregister a callback; with per-fence callbacks, a callback firing after its fence
    // had been deleted would be a potential use-after-free.
    bool fSubmittedWorkDoneCallbackPending = false;
    SkTHashSet<GrDawnAsyncWait*> fQueueFences;

    struct ProgramDescHash {
        uint32_t operator()(const GrProgramDesc& desc) const {
            return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
        }
    };

    struct SamplerHash {
        size_t operator()(GrSamplerState samplerState) const {
            // In WebGPU it is required that minFilter, magFilter, and mipmapFilter are all
            // "linear" when maxAnisotropy is > 1.
            return samplerState.asKey(/*anisoIsOrthogonal=*/false);
        }
    };

    SkLRUCache<GrProgramDesc, sk_sp<GrDawnProgram>, ProgramDescHash>    fRenderPipelineCache;
    std::unordered_map<GrSamplerState, wgpu::Sampler, SamplerHash>      fSamplers;

    GrFinishCallbacks         fFinishCallbacks;

    using INHERITED = GrGpu;
};

#endif