/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/mtl/GrMtlResourceProvider.h"

#include "include/gpu/GrContextOptions.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrProgramDesc.h"
#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlGpu.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#include "src/gpu/mtl/GrMtlUtil.h"

#include "src/sksl/SkSLCompiler.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

GR_NORETAIN_BEGIN

GrMtlResourceProvider::GrMtlResourceProvider(GrMtlGpu* gpu)
        : fGpu(gpu) {
    fPipelineStateCache.reset(new PipelineStateCache(gpu));
}

// Returns a ref'd pipeline state matching (programDesc, programInfo), building and
// caching one if necessary. 'stat' (may be null) receives the cache hit/miss result.
GrMtlPipelineState* GrMtlResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& programDesc,
        const GrProgramInfo& programInfo,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {
    return fPipelineStateCache->refPipelineState(programDesc, programInfo, stat);
}

// Pre-warms the pipeline cache from a serialized (key, data) pair; see
// PipelineStateCache::precompileShader for details.
bool GrMtlResourceProvider::precompileShader(const SkData& key, const SkData& data) {
    return fPipelineStateCache->precompileShader(key, data);
}

////////////////////////////////////////////////////////////////////////////////////////////////

// Returns a cached depth-stencil state matching (stencil, origin), creating and
// caching one on first use. The cache owns a ref; entries live until destroyResources().
GrMtlDepthStencil* GrMtlResourceProvider::findOrCreateCompatibleDepthStencilState(
        const GrStencilSettings& stencil, GrSurfaceOrigin origin) {
    GrMtlDepthStencil* depthStencilState;
    GrMtlDepthStencil::Key key = GrMtlDepthStencil::GenerateKey(stencil, origin);
    depthStencilState = fDepthStencilStates.find(key);
    if (!depthStencilState) {
        depthStencilState = GrMtlDepthStencil::Create(fGpu, stencil, origin);
        fDepthStencilStates.add(depthStencilState);
    }
    SkASSERT(depthStencilState);
    return depthStencilState;
}

// Returns a cached sampler matching 'params', creating and caching one on first use.
// The cache owns a ref; entries live until destroyResources().
GrMtlSampler* GrMtlResourceProvider::findOrCreateCompatibleSampler(GrSamplerState params) {
    GrMtlSampler* sampler;
    sampler = fSamplers.find(GrMtlSampler::GenerateKey(params));
    if (!sampler) {
        sampler = GrMtlSampler::Create(fGpu, params);
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    return sampler;
}

// Returns (building and caching on demand) the render pipeline used to load the MSAA
// attachment from its resolve texture, keyed on (colorFormat, sampleCount, stencilFormat).
// The shader library backing these pipelines is compiled lazily on first call.
// Returns nullptr if either the library or the pipeline state fails to compile.
const GrMtlRenderPipeline* GrMtlResourceProvider::findOrCreateMSAALoadPipeline(
        MTLPixelFormat colorFormat, int sampleCount, MTLPixelFormat stencilFormat) {
    if (!fMSAALoadLibrary) {
        TRACE_EVENT0("skia", TRACE_FUNC);

        // Fullscreen-quad vertex shader (4 vertices from vertex_id) plus a fragment
        // shader that reads the resolve texture at the pixel's own coordinates.
        SkSL::String shaderText;
        shaderText.append(
                "#include <metal_stdlib>\n"
                "#include <simd/simd.h>\n"
                "using namespace metal;\n"
                "\n"
                "typedef struct {\n"
                "    float4 position [[position]];\n"
                "} VertexOutput;\n"
                "\n"
                "typedef struct {\n"
                "    float4 uPosXform;\n"
                "    uint2 uTextureSize;\n"
                "} VertexUniforms;\n"
                "\n"
                "vertex VertexOutput vertexMain(constant VertexUniforms& uniforms [[buffer(0)]],\n"
                "                               uint vertexID [[vertex_id]]) {\n"
                "    VertexOutput out;\n"
                "    float2 position = float2(float(vertexID >> 1), float(vertexID & 1));\n"
                "    out.position.xy = position * uniforms.uPosXform.xy + uniforms.uPosXform.zw;\n"
                "    out.position.zw = float2(0.0, 1.0);\n"
                "    return out;\n"
                "}\n"
                "\n"
                "fragment float4 fragmentMain(VertexOutput in [[stage_in]],\n"
                "                             texture2d<half> colorMap [[texture(0)]]) {\n"
                "    uint2 coords = uint2(in.position.x, in.position.y);"
                "    half4 colorSample   = colorMap.read(coords);\n"
                "    return float4(colorSample);\n"
                "}"
        );

        auto errorHandler = fGpu->getContext()->priv().getShaderErrorHandler();
        fMSAALoadLibrary = GrCompileMtlShaderLibrary(fGpu, shaderText, errorHandler);
        if (!fMSAALoadLibrary) {
            return nullptr;
        }
    }

    // Linear search is fine here: only a handful of format combinations ever occur.
    for (int i = 0; i < fMSAALoadPipelines.count(); ++i) {
        if (fMSAALoadPipelines[i].fColorFormat == colorFormat &&
            fMSAALoadPipelines[i].fSampleCount == sampleCount &&
            fMSAALoadPipelines[i].fStencilFormat == stencilFormat) {
            return fMSAALoadPipelines[i].fPipeline.get();
        }
    }

    auto pipelineDescriptor = [[MTLRenderPipelineDescriptor alloc] init];

    pipelineDescriptor.label = @"loadMSAAFromResolve";

    pipelineDescriptor.vertexFunction =
            [fMSAALoadLibrary newFunctionWithName: @"vertexMain"];
    pipelineDescriptor.fragmentFunction =
            [fMSAALoadLibrary newFunctionWithName: @"fragmentMain"];

    auto mtlColorAttachment = [[MTLRenderPipelineColorAttachmentDescriptor alloc] init];

    // Straight copy from the resolve texture: no blending, write all channels.
    mtlColorAttachment.pixelFormat = colorFormat;
    mtlColorAttachment.blendingEnabled = FALSE;
    mtlColorAttachment.writeMask = MTLColorWriteMaskAll;

    pipelineDescriptor.colorAttachments[0] = mtlColorAttachment;
    pipelineDescriptor.sampleCount = sampleCount;

    pipelineDescriptor.stencilAttachmentPixelFormat = stencilFormat;

    NSError* error;
    auto pso =
            [fGpu->device() newRenderPipelineStateWithDescriptor: pipelineDescriptor
                                                           error: &error];
    if (!pso) {
        SkDebugf("Error creating pipeline: %s\n",
                 [[error localizedDescription] cStringUsingEncoding: NSASCIIStringEncoding]);
        // Bail out rather than caching a GrMtlRenderPipeline wrapping a nil pipeline
        // state; otherwise the broken entry would be returned for all future lookups
        // with this format/sample-count combination.
        return nullptr;
    }

    auto renderPipeline = GrMtlRenderPipeline::Make(pso);

    fMSAALoadPipelines.push_back({renderPipeline, colorFormat, sampleCount, stencilFormat});
    return fMSAALoadPipelines[fMSAALoadPipelines.count()-1].fPipeline.get();
}

// Releases all GPU-backed resources held by this provider. Called when the device is
// going away; the provider must not be used afterwards.
void GrMtlResourceProvider::destroyResources() {
    fMSAALoadLibrary = nil;
    fMSAALoadPipelines.reset();

    // The hash tables hold raw pointers with a ref owned by the table; drop those refs.
    fSamplers.foreach([&](GrMtlSampler* sampler) { sampler->unref(); });
    fSamplers.reset();

    fDepthStencilStates.foreach([&](GrMtlDepthStencil* stencil) { stencil->unref(); });
    fDepthStencilStates.reset();

    fPipelineStateCache->release();
}

////////////////////////////////////////////////////////////////////////////////////////////////

// A cache entry is either a fully-built pipeline state, or (for precompiled shaders)
// just the compiled MSL libraries from which a pipeline state can be built on demand.
struct GrMtlResourceProvider::PipelineStateCache::Entry {
    Entry(GrMtlPipelineState* pipelineState)
            : fPipelineState(pipelineState) {}
    Entry(const GrMtlPrecompiledLibraries& precompiledLibraries)
            : fPipelineState(nullptr)
            , fPrecompiledLibraries(precompiledLibraries) {}

    std::unique_ptr<GrMtlPipelineState> fPipelineState;

    // TODO: change to one library once we can build that
    GrMtlPrecompiledLibraries fPrecompiledLibraries;
};

GrMtlResourceProvider::PipelineStateCache::PipelineStateCache(GrMtlGpu* gpu)
        : fMap(gpu->getContext()->priv().options().fRuntimeProgramCacheSize)
        , fGpu(gpu) {}

GrMtlResourceProvider::PipelineStateCache::~PipelineStateCache() {
    // release() must have been called before destruction.
    SkASSERT(0 == fMap.count());
}

void GrMtlResourceProvider::PipelineStateCache::release() {
    fMap.reset();
}

// Public entry point: wraps onRefPipelineState so that inline (non-DDL) compilations,
// signalled by a null 'statPtr', still get their stats recorded.
GrMtlPipelineState* GrMtlResourceProvider::PipelineStateCache::refPipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        Stats::ProgramCacheResult* statPtr) {

    if (!statPtr) {
        // If stat is NULL we are using inline compilation rather than through DDL,
        // so we need to track those stats as well.
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
        auto tmp = this->onRefPipelineState(desc, programInfo, &stat);
        if (!tmp) {
            fStats.incNumInlineCompilationFailures();
        } else {
            fStats.incNumInlineProgramCacheResult(stat);
        }
        return tmp;
    } else {
        return this->onRefPipelineState(desc, programInfo, statPtr);
    }
}

// Cache lookup with three outcomes, reported through '*stat':
//   kHit     - a fully-built pipeline state was already cached;
//   kPartial - precompiled MSL libraries were cached and only the pipeline state
//              had to be built;
//   kMiss    - nothing was cached; shaders and pipeline state were built from scratch.
// Returns nullptr (after bumping the failure stat) if any build step fails.
GrMtlPipelineState* GrMtlResourceProvider::PipelineStateCache::onRefPipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        Stats::ProgramCacheResult* stat) {
    *stat = Stats::ProgramCacheResult::kHit;
    std::unique_ptr<Entry>* entry = fMap.find(desc);
    if (entry && !(*entry)->fPipelineState) {
        // We've pre-compiled the MSL shaders but don't yet have the pipelineState
        const GrMtlPrecompiledLibraries* precompiledLibs = &((*entry)->fPrecompiledLibraries);
        SkASSERT(precompiledLibs->fVertexLibrary);
        SkASSERT(precompiledLibs->fFragmentLibrary);
        (*entry)->fPipelineState.reset(
                GrMtlPipelineStateBuilder::CreatePipelineState(fGpu, desc, programInfo,
                                                               precompiledLibs));
        if (!(*entry)->fPipelineState) {
            // Should we purge the precompiled shaders from the cache at this point?
            SkDEBUGFAIL("Couldn't create pipelineState from precompiled shaders");
            fStats.incNumCompilationFailures();
            return nullptr;
        }
        // release the libraries
        (*entry)->fPrecompiledLibraries.fVertexLibrary = nil;
        (*entry)->fPrecompiledLibraries.fFragmentLibrary = nil;

        fStats.incNumPartialCompilationSuccesses();
        *stat = Stats::ProgramCacheResult::kPartial;
    } else if (!entry) {
        GrMtlPipelineState* pipelineState(
                GrMtlPipelineStateBuilder::CreatePipelineState(fGpu, desc, programInfo));
        if (!pipelineState) {
            fStats.incNumCompilationFailures();
            return nullptr;
        }
        fStats.incNumCompilationSuccesses();
        entry = fMap.insert(desc, std::unique_ptr<Entry>(new Entry(pipelineState)));
        *stat = Stats::ProgramCacheResult::kMiss;
        return (*entry)->fPipelineState.get();
    }
    return (*entry)->fPipelineState.get();
}

// Deserializes 'key' into a GrProgramDesc and, if the program is not already cached,
// compiles the MSL shaders in 'data' and caches them as a pipeline-state-less entry.
// Returns false if the key is malformed or shader compilation fails; returns true
// (without recompiling) if the program was already present.
bool GrMtlResourceProvider::PipelineStateCache::precompileShader(const SkData& key,
                                                                 const SkData& data) {
    GrProgramDesc desc;
    if (!GrProgramDesc::BuildFromData(&desc, key.data(), key.size())) {
        return false;
    }

    std::unique_ptr<Entry>* entry = fMap.find(desc);
    if (entry) {
        // We've already seen/compiled this shader
        return true;
    }

    GrMtlPrecompiledLibraries precompiledLibraries;
    if (!GrMtlPipelineStateBuilder::PrecompileShaders(fGpu, data, &precompiledLibraries)) {
        return false;
    }

    fMap.insert(desc, std::make_unique<Entry>(precompiledLibraries));
    return true;
}

GR_NORETAIN_END