/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDawnGpu_DEFINED
#define GrDawnGpu_DEFINED

#include "src/gpu/GrGpu.h"

#include "dawn/webgpu_cpp.h"
#include "src/core/SkLRUCache.h"
#include "src/gpu/GrFinishCallbacks.h"
#include "src/gpu/GrProgramDesc.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/dawn/GrDawnRingBuffer.h"
#include "src/sksl/ir/SkSLProgram.h"

#include <unordered_map>

class GrDawnOpsRenderPass;
class GrDawnStagingBuffer;
class GrDirectContext;
class GrPipeline;
struct GrDawnProgram;

class GrDawnGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const wgpu::Device&, const GrContextOptions&, GrDirectContext*);

    ~GrDawnGpu() override;

    void disconnect(DisconnectType) override;

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    const wgpu::Device& device() const { return fDevice; }
    const wgpu::Queue&  queue() const { return fQueue; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeDawn(wgpu::TextureFormat::Depth24PlusStencil8);
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override {
        return nullptr;
    }

    void submit(GrOpsRenderPass*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) const override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
    void checkFinishProcs() override;
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    sk_sp<GrDawnProgram> getOrCreateRenderPipeline(GrRenderTarget*, const GrProgramInfo&);

    wgpu::Sampler getOrCreateSampler(GrSamplerState samplerState);

    GrDawnRingBuffer::Slice allocateUniformRingBufferSlice(int size);
    wgpu::CommandEncoder getCopyEncoder();
    void flushCopyEncoder();
    void appendCommandBuffer(wgpu::CommandBuffer commandBuffer);

    void waitOnAllBusyStagingBuffers();
    SkSL::String SkSLToSPIRV(const char* shaderString,
                             SkSL::ProgramKind,
                             uint32_t rtFlipOffset,
                             SkSL::Program::Inputs*);
    wgpu::ShaderModule createShaderModule(const SkSL::String& spirvSource);

private:
    GrDawnGpu(GrDirectContext*, const GrContextOptions&, const wgpu::Device&);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     SkBudgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask) override;

    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data, size_t dataSize) override;

    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               SkBudgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               OH_NativeBuffer* nativeBuffer,
                                               size_t bufferSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<GrRefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<GrRefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t size) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
                                      const void* data) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void*,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    void onResolveRenderTarget(GrRenderTarget*, const SkIRect&) override {}

    bool onRegenerateMipMapLevels(GrTexture*) override;

    bool onCopySurface(GrSurface* dst, GrSurface* src,
                       const SkIRect& srcRect, const SkIPoint& dstPoint) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment*,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    bool onSubmitToGpu(bool syncCpu) override;

    void uploadTextureData(GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount,
                           const SkIRect& rect, wgpu::Texture texture);

    void moveStagingBuffersToBusyAndMapAsync();
    void checkForCompletedStagingBuffers();

    wgpu::Device                                    fDevice;
    wgpu::Queue                                     fQueue;
    std::unique_ptr<GrDawnOpsRenderPass>            fOpsRenderPass;
    GrDawnRingBuffer                                fUniformRingBuffer;
    wgpu::CommandEncoder                            fCopyEncoder;
    std::vector<wgpu::CommandBuffer>                fCommandBuffers;
    GrStagingBufferManager                          fStagingBufferManager;
    std::list<sk_sp<GrGpuBuffer>>                   fBusyStagingBuffers;
    // Temporary array of staging buffers that holds refs on the staging buffers between detaching
    // them from the GrStagingBufferManager and moving them to the busy list, which must happen
    // after submission.
    std::vector<sk_sp<GrGpuBuffer>>                 fSubmittedStagingBuffers;

    struct ProgramDescHash {
        uint32_t operator()(const GrProgramDesc& desc) const {
            return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
        }
    };

    struct SamplerHash {
        size_t operator()(GrSamplerState samplerState) const {
            return SkOpts::hash_fn(&samplerState, sizeof(samplerState), 0);
        }
    };

    SkLRUCache<GrProgramDesc, sk_sp<GrDawnProgram>, ProgramDescHash>    fRenderPipelineCache;
    std::unordered_map<GrSamplerState, wgpu::Sampler, SamplerHash>      fSamplers;

    GrFinishCallbacks         fFinishCallbacks;

    using INHERITED = GrGpu;
};

#endif