/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDawnGpu_DEFINED
#define GrDawnGpu_DEFINED

#include "src/gpu/GrGpu.h"

#include "dawn/webgpu_cpp.h"
#include "src/core/SkLRUCache.h"
#include "src/gpu/GrFinishCallbacks.h"
#include "src/gpu/GrProgramDesc.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/dawn/GrDawnRingBuffer.h"
#include "src/sksl/ir/SkSLProgram.h"

#include <unordered_map>

class GrDawnOpsRenderPass;
class GrDawnStagingBuffer;
class GrDirectContext;
class GrPipeline;
struct GrDawnProgram;

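// The Dawn (WebGPU) backend implementation of the GrGpu interface.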
class GrDawnGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const wgpu::Device&, const GrContextOptions&, GrDirectContext*);

    ~GrDawnGpu() override;

    void disconnect(DisconnectType) override;

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    const wgpu::Device& device() const { return fDevice; }
    const wgpu::Queue&  queue() const { return fQueue; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeDawn(wgpu::TextureFormat::Depth24PlusStencil8);
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override {
        return nullptr;
    }

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) const override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
    void checkFinishProcs() override;
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

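    // Looks up (or creates and caches in fRenderPipelineCache) the GrDawnProgram for the given
    // render target and program info.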
    sk_sp<GrDawnProgram> getOrCreateRenderPipeline(GrRenderTarget*, const GrProgramInfo&);

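    // Looks up (or creates and caches in fSamplers) a wgpu::Sampler matching samplerState.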
    wgpu::Sampler getOrCreateSampler(GrSamplerState samplerState);

    GrDawnRingBuffer::Slice allocateUniformRingBufferSlice(int size);
    wgpu::CommandEncoder getCopyEncoder();
    void flushCopyEncoder();
    void appendCommandBuffer(wgpu::CommandBuffer commandBuffer);

    void waitOnAllBusyStagingBuffers();
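    // Compiles SkSL source to SPIR-V, which can then be turned into a wgpu::ShaderModule via
    // createShaderModule().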
    SkSL::String SkSLToSPIRV(const char* shaderString,
                             SkSL::ProgramKind,
                             uint32_t rtFlipOffset,
                             SkSL::Program::Inputs*);
    wgpu::ShaderModule createShaderModule(const SkSL::String& spirvSource);

private:
    GrDawnGpu(GrDirectContext*, const GrContextOptions&, const wgpu::Device&);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;

    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t size) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void*,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    void onResolveRenderTarget(GrRenderTarget*, const SkIRect&) override {}

    bool onRegenerateMipMapLevels(GrTexture*) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src,
                       const SkIRect& srcRect, const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment*,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount,
                           const SkIRect& rect, wgpu::Texture texture);

    void moveStagingBuffersToBusyAndMapAsync();
    void checkForCompletedStagingBuffers();

    wgpu::Device                                    fDevice;
    wgpu::Queue                                     fQueue;
    std::unique_ptr<GrDawnOpsRenderPass>            fOpsRenderPass;
    GrDawnRingBuffer                                fUniformRingBuffer;
    wgpu::CommandEncoder                            fCopyEncoder;
    std::vector<wgpu::CommandBuffer>                fCommandBuffers;
    GrStagingBufferManager                          fStagingBufferManager;
    std::list<sk_sp<GrGpuBuffer>>                   fBusyStagingBuffers;
    // Temporary array that holds refs on staging buffers between detaching them from the
    // GrStagingBufferManager and moving them to the busy list, which must happen after
    // submission.
    std::vector<sk_sp<GrGpuBuffer>>                 fSubmittedStagingBuffers;

    struct ProgramDescHash {
        uint32_t operator()(const GrProgramDesc& desc) const {
            return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
        }
    };

    struct SamplerHash {
        size_t operator()(GrSamplerState samplerState) const {
            return SkOpts::hash_fn(&samplerState, sizeof(samplerState), 0);
        }
    };

    SkLRUCache<GrProgramDesc, sk_sp<GrDawnProgram>, ProgramDescHash>    fRenderPipelineCache;
    std::unordered_map<GrSamplerState, wgpu::Sampler, SamplerHash>      fSamplers;

    GrFinishCallbacks         fFinishCallbacks;

    using INHERITED = GrGpu;
};

#endif