/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrMtlGpu_DEFINED
#define GrMtlGpu_DEFINED

#include "include/gpu/mtl/GrMtlBackendContext.h"
#include "include/private/base/SkDeque.h"
#include "include/private/gpu/ganesh/GrMtlTypesPriv.h"

#include "src/gpu/ganesh/GrFinishCallbacks.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrRingBuffer.h"
#include "src/gpu/ganesh/GrSemaphore.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/GrTexture.h"

#include "src/gpu/ganesh/mtl/GrMtlAttachment.h"
#include "src/gpu/ganesh/mtl/GrMtlCaps.h"
#include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlResourceProvider.h"
#include "src/gpu/ganesh/mtl/GrMtlUtil.h"

#import <Metal/Metal.h>

class GrMtlOpsRenderPass;
class GrMtlTexture;
class GrSemaphore;
class GrMtlCommandBuffer;

class GrMtlGpu : public GrGpu {
public:
    static sk_sp<GrGpu> Make(const GrMtlBackendContext&, const GrContextOptions&, GrDirectContext*);
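    // Typical entry point (an illustrative sketch; `device`, `queue`, and `options` are
    // placeholders): clients normally reach this factory through GrDirectContext::MakeMetal
    // rather than calling it directly.
    //
    //   GrMtlBackendContext backendContext = {};
    //   backendContext.fDevice.retain((__bridge GrMTLHandle)device);
    //   backendContext.fQueue.retain((__bridge GrMTLHandle)queue);
    //   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeMetal(backendContext, options);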
    ~GrMtlGpu() override;

    void disconnect(DisconnectType) override;

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const GrMtlCaps& mtlCaps() const { return *fMtlCaps.get(); }

    id<MTLDevice> device() const { return fDevice; }

    GrMtlResourceProvider& resourceProvider() { return fResourceProvider; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }

    GrMtlCommandBuffer* commandBuffer();

    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };
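
    // How these map to submission, roughly (an illustrative sketch, not this header's
    // contract; the real logic lives in GrMtlGpu.mm): onSubmitToGpu(syncCpu) is expected to
    // forward as
    //
    //   return this->submitCommandBuffer(syncCpu ? kForce_SyncQueue : kSkip_SyncQueue);
    //
    // where kForce_SyncQueue blocks until the committed command buffer has finished executing
    // and kSkip_SyncQueue returns as soon as it is committed (see submitCommandBuffer() below).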

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

    bool precompileShader(const SkData& key, const SkData& data) override;

#if GR_TEST_UTILS
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src);

    void copySurfaceAsBlit(GrSurface* dst, GrSurface* src,
                           GrMtlAttachment* dstAttachment, GrMtlAttachment* srcAttachment,
                           const SkIRect& srcRect, const SkIPoint& dstPoint);

    bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                       GrSurface* src, const SkIRect& srcRect,
                       GrSamplerState::Filter) override;

#if GR_METAL_SDK_VERSION >= 230
    id<MTLBinaryArchive> binaryArchive() const SK_API_AVAILABLE(macos(11.0), ios(14.0)) {
        return fBinaryArchive;
    }
#endif

    void submit(GrOpsRenderPass* renderPass) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence) override;
    void deleteFence(GrFence) override;

    std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;
    void checkFinishProcs() override { this->checkForFinishedCommandBuffers(); }
    void finishOutstandingGpuWork() override;
    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    GrMtlRenderCommandEncoder* loadMSAAFromResolve(GrAttachment* dst,
                                                   GrMtlAttachment* src,
                                                   const SkIRect& srcRect,
                                                   MTLRenderPassStencilAttachmentDescriptor*);

    // When the Metal backend actually uses indirect command buffers, this function will do
    // what it says. For now, every command is encoded directly into the primary command
    // buffer, so this function is pretty useless, except for indicating that a render target
    // has been drawn to.
    void submitIndirectCommandBuffer(GrSurface* surface, GrSurfaceOrigin origin,
                                     const SkIRect* bounds) {
        this->didWriteToSurface(surface, origin, bounds);
    }

    GrRingBuffer* uniformsRingBuffer() override { return &fUniformsRingBuffer; }

private:
    GrMtlGpu(GrDirectContext*, const GrContextOptions&, id<MTLDevice>,
             id<MTLCommandQueue>, GrMTLHandle binaryArchive);

    void destroyResources();

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            GrMipmapped,
                                            GrProtected,
                                            std::string_view label) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      GrMipmapped,
                                                      GrProtected) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t size) override;

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     skgpu::Budgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask,
                                     std::string_view label) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               skgpu::Budgeted,
                                               GrMipmapped,
                                               GrProtected,
                                               const void* data,
                                               size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;

    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;

    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;

    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern) override;

    bool onReadPixels(GrSurface* surface,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType bufferColorType,
                      void*,
                      size_t rowBytes) override;

    bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                      size_t srcOffset,
                                      sk_sp<GrGpuBuffer> dst,
                                      size_t dstOffset,
                                      size_t size) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType bufferColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onRegenerateMipMapLevels(GrTexture*) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void resolve(GrMtlAttachment* resolveAttachment, GrMtlAttachment* msaaAttachment);

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;
    void addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment*,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    bool onSubmitToGpu(bool syncCpu) override;

    // Commits the current command buffer to the queue and then creates a new command buffer. If
    // sync is set to kForce_SyncQueue, the function will wait for all work in the committed
    // command buffer to finish before returning.
    bool submitCommandBuffer(SyncQueue sync);

    void checkForFinishedCommandBuffers();

    // Uploads data to textures with private storage mode (GPU access only).
    bool uploadToTexture(GrMtlTexture* tex,
                         SkIRect rect,
                         GrColorType dataColorType,
                         const GrMipLevel texels[],
                         int mipLevels);
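
    // Since private-storage textures cannot be written from the CPU, the upload above is
    // expected to be staged: texel data goes into a transfer buffer (see fStagingBufferManager)
    // and is then copied on the GPU via a blit. A sketch of the standard Metal pattern, where
    // `blit`, `staging`, and `mtlTex` are placeholders:
    //
    //   [blit copyFromBuffer:staging
    //           sourceOffset:offset
    //      sourceBytesPerRow:rowBytes
    //    sourceBytesPerImage:imageBytes
    //             sourceSize:MTLSizeMake(rect.width(), rect.height(), 1)
    //              toTexture:mtlTex
    //       destinationSlice:0
    //       destinationLevel:level
    //      destinationOrigin:MTLOriginMake(rect.left(), rect.top(), 0)];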

    // Fills texture levels with transparent black based on levelMask.
    bool clearTexture(GrMtlTexture*, size_t bpp, uint32_t levelMask);

    bool readOrTransferPixels(GrSurface* surface,
                              SkIRect rect,
                              GrColorType dstColorType,
                              id<MTLBuffer> transferBuffer,
                              size_t offset,
                              size_t imageBytes,
                              size_t rowBytes);

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormat::MakeMtl(this->mtlCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    bool createMtlTextureForBackendSurface(MTLPixelFormat,
                                           SkISize dimensions,
                                           int sampleCnt,
                                           GrTexturable,
                                           GrRenderable,
                                           GrMipmapped,
                                           GrMtlTextureInfo*);

#if GR_TEST_UTILS
    void testingOnly_startCapture() override;
    void testingOnly_stopCapture() override;
#endif

#ifdef SK_ENABLE_DUMP_GPU
    void onDumpJSON(SkJSONWriter*) const override;
#endif

    sk_sp<GrMtlCaps> fMtlCaps;

    id<MTLDevice> fDevice;
    id<MTLCommandQueue> fQueue;

    sk_sp<GrMtlCommandBuffer> fCurrentCmdBuffer;

    // Command buffers that have been committed but whose work may still be executing.
    using OutstandingCommandBuffer = sk_sp<GrMtlCommandBuffer>;
    SkDeque fOutstandingCommandBuffers;

#if GR_METAL_SDK_VERSION >= 230
    id<MTLBinaryArchive> fBinaryArchive SK_API_AVAILABLE(macos(11.0), ios(14.0));
#endif

    GrMtlResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;
    GrRingBuffer fUniformsRingBuffer;

    bool fDisconnected;

    using INHERITED = GrGpu;
};

#endif