/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrMtlResourceProvider_DEFINED
#define GrMtlResourceProvider_DEFINED

#include "include/private/SkSpinlock.h"
#include "include/private/SkTArray.h"
#include "src/core/SkLRUCache.h"
#include "src/gpu/mtl/GrMtlDepthStencil.h"
#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/mtl/GrMtlSampler.h"

#import <Metal/Metal.h>

class GrMtlGpu;
class GrMtlCommandBuffer;

class GrMtlResourceProvider {
public:
    GrMtlResourceProvider(GrMtlGpu* gpu);

    GrMtlPipelineState* findOrCreateCompatiblePipelineState(
            GrRenderTarget*, GrSurfaceOrigin,
            const GrPipeline&,
            const GrPrimitiveProcessor&,
            const GrTextureProxy* const primProcProxies[],
            GrPrimitiveType);

    // Finds or creates a compatible MTLDepthStencilState based on the GrStencilSettings.
    GrMtlDepthStencil* findOrCreateCompatibleDepthStencilState(const GrStencilSettings&,
                                                               GrSurfaceOrigin);

    // Finds or creates a compatible MTLSamplerState based on the GrSamplerState.
    GrMtlSampler* findOrCreateCompatibleSampler(const GrSamplerState&, uint32_t maxMipLevel);

    id<MTLBuffer> getDynamicBuffer(size_t size, size_t* offset);
    void addBufferCompletionHandler(GrMtlCommandBuffer* cmdBuffer);
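
    // Example use of the dynamic buffer (a sketch: `resourceProvider`, `cmdBuffer`,
    // `vertexData`, and `vertexDataSize` are hypothetical caller state, and it assumes
    // the underlying MTLBuffer uses CPU-visible storage so `contents` is non-nil):
    //
    //     size_t offset;
    //     id<MTLBuffer> buffer = resourceProvider->getDynamicBuffer(vertexDataSize, &offset);
    //     memcpy(static_cast<char*>(buffer.contents) + offset, vertexData, vertexDataSize);
    //     // ... encode a draw that reads vertex data from (buffer, offset) ...
    //     resourceProvider->addBufferCompletionHandler(cmdBuffer);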

    // Destroy any cached resources. To be called before releasing the MTLDevice.
    void destroyResources();

private:
#ifdef SK_DEBUG
#define GR_PIPELINE_STATE_CACHE_STATS
#endif

    class PipelineStateCache : public ::SkNoncopyable {
    public:
        PipelineStateCache(GrMtlGpu* gpu);
        ~PipelineStateCache();

        void release();
        GrMtlPipelineState* refPipelineState(GrRenderTarget*, GrSurfaceOrigin,
                                             const GrPrimitiveProcessor&,
                                             const GrTextureProxy* const primProcProxies[],
                                             const GrPipeline&,
                                             GrPrimitiveType);

    private:
        enum {
            // We may actually have kMaxEntries+1 PipelineStates in context because we create a
            // new PipelineState before evicting from the cache.
            kMaxEntries = 128,
        };

        struct Entry;

        struct DescHash {
            uint32_t operator()(const GrProgramDesc& desc) const {
                return SkOpts::hash_fn(desc.asKey(), desc.keyLength(), 0);
            }
        };

        SkLRUCache<const GrMtlPipelineStateBuilder::Desc, std::unique_ptr<Entry>, DescHash> fMap;

        GrMtlGpu* fGpu;

#ifdef GR_PIPELINE_STATE_CACHE_STATS
        int fTotalRequests;
        int fCacheMisses;
#endif
    };

    // Ring-buffer suballocator for dynamic buffer data: allocations advance fHead, and
    // completion handlers registered on command buffers advance fTail once the GPU is
    // done with the data.
    class BufferSuballocator : public SkRefCnt {
    public:
        BufferSuballocator(id<MTLDevice> device, size_t size);
        ~BufferSuballocator() {
            fBuffer = nil;
            fTotalSize = 0;
        }

        id<MTLBuffer> getAllocation(size_t size, size_t* offset);
        void addCompletionHandler(GrMtlCommandBuffer* cmdBuffer);
        size_t size() { return fTotalSize; }

    private:
        id<MTLBuffer> fBuffer;
        size_t fTotalSize;
        size_t fHead SK_GUARDED_BY(fMutex);     // where we start allocating
        size_t fTail SK_GUARDED_BY(fMutex);     // where we start deallocating
        SkSpinlock fMutex;
    };
    static constexpr size_t kBufferSuballocatorStartSize = 1024*1024;

    GrMtlGpu* fGpu;

    // Cache of GrMtlPipelineStates
    std::unique_ptr<PipelineStateCache> fPipelineStateCache;

    SkTDynamicHash<GrMtlSampler, GrMtlSampler::Key> fSamplers;
    SkTDynamicHash<GrMtlDepthStencil, GrMtlDepthStencil::Key> fDepthStencilStates;

    // This is ref-counted because we might delete the GrContext before the command buffer
    // finishes. The completion handler will retain a reference to this so it won't get
    // deleted along with the GrContext.
    sk_sp<BufferSuballocator> fBufferSuballocator;
    size_t fBufferSuballocatorMaxSize;
};

#endif