/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "experimental/graphite/src/mtl/MtlGraphicsPipeline.h"

#include "experimental/graphite/include/TextureInfo.h"
#include "experimental/graphite/src/GraphicsPipelineDesc.h"
#include "experimental/graphite/src/Log.h"
#include "experimental/graphite/src/Renderer.h"
#include "experimental/graphite/src/mtl/MtlGpu.h"
#include "experimental/graphite/src/mtl/MtlResourceProvider.h"
#include "experimental/graphite/src/mtl/MtlUtils.h"
#include "include/core/SkSpan.h"
#include "include/gpu/ShaderErrorHandler.h"
#include "include/private/SkSLString.h"
#include "src/core/SkShaderCodeDictionary.h"

namespace skgpu::mtl {

namespace {

std::string get_uniform_header(int bufferID, const char* name) {
    std::string result;

    SkSL::String::appendf(&result, "layout (binding=%d) uniform %sUniforms {\n", bufferID, name);

    return result;
}

std::string get_uniforms(SkSpan<const SkUniform> uniforms, int* offset, int manglingSuffix) {
    std::string result;

    for (auto u : uniforms) {
        int count = u.count() ? u.count() : 1;
        // TODO: this is sufficient for the sprint but should be changed to use SkSL's
        // machinery
        SkSL::String::appendf(&result, "    layout(offset=%d) ", *offset);
        switch (u.type()) {
            case SkSLType::kFloat4:
                result.append("float4");
                *offset += 16 * count;
                break;
            case SkSLType::kFloat2:
                result.append("float2");
                *offset += 8 * count;
                break;
            case SkSLType::kFloat:
                result.append("float");
                *offset += 4 * count;
                break;
            case SkSLType::kFloat4x4:
                result.append("float4x4");
                *offset += 64 * count;
                break;
            case SkSLType::kHalf4:
                result.append("half4");
                *offset += 8 * count;
                break;
            default:
                SkASSERT(0);
        }

        result.append(" ");
        result.append(u.name());
        if (manglingSuffix >= 0) {
            result.append("_");
            result.append(std::to_string(manglingSuffix));
        }
        if (u.count()) {
            result.append("[");
            result.append(std::to_string(u.count()));
            result.append("]");
        }
        result.append(";\n");
    }

    return result;
}

std::string emit_SKSL_uniforms(int bufferID, const char* name, SkSpan<const SkUniform> uniforms) {
    int offset = 0;

    std::string result = get_uniform_header(bufferID, name);
    result += get_uniforms(uniforms, &offset, -1);
    result.append("};\n\n");

    return result;
}

std::string emit_SkSL_attributes(SkSpan<const Attribute> vertexAttrs,
                                 SkSpan<const Attribute> instanceAttrs) {
    std::string result;

    int attr = 0;
    auto add_attrs = [&](SkSpan<const Attribute> attrs) {
        for (auto a : attrs) {
            // TODO: this is sufficient for the sprint but should be changed to use SkSL's
            // machinery
            SkSL::String::appendf(&result, "    layout(location=%d) in ", attr++);
            switch (a.gpuType()) {
                case SkSLType::kFloat4:
                    result.append("float4");
                    break;
                case SkSLType::kFloat2:
                    result.append("float2");
                    break;
                case SkSLType::kFloat3:
                    result.append("float3");
                    break;
                case SkSLType::kFloat:
                    result.append("float");
                    break;
                case SkSLType::kHalf4:
                    result.append("half4");
                    break;
                default:
                    SkASSERT(0);
            }

            SkSL::String::appendf(&result, " %s;\n", a.name());
        }
    };

    if (!vertexAttrs.empty()) {
        result.append("// vertex attrs\n");
        add_attrs(vertexAttrs);
    }
    if (!instanceAttrs.empty()) {
        result.append("// instance attrs\n");
        add_attrs(instanceAttrs);
    }

    return result;
}
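// Builds the complete vertex-stage SkSL for a draw: the fixed intrinsic uniform
// block (rtAdjust), the RenderStep's vertex/instance attribute declarations, the
// step's own uniform block at binding=1, and a main() that maps the step's
// device-space position into normalized coordinates via rtAdjust.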
std::string get_sksl_vs(const GraphicsPipelineDesc& desc) {
    const RenderStep* step = desc.renderStep();
    // TODO: To more completely support end-to-end rendering, this will need to be updated so that
    // the RenderStep shader snippet can produce a device coord, a local coord, and depth.
    // If the paint combination doesn't need the local coord it can be ignored, otherwise we need
    // a varying for it. The fragment function's output will need to be updated to have a color and
    // the depth, or when there's no combination, just the depth. Lastly, we also should add the
    // static/intrinsic uniform binding point so that we can handle normalizing the device position
    // produced by the RenderStep automatically.

    // Fixed program header
    std::string sksl =
            "layout (binding=0) uniform intrinsicUniforms {\n"
            "    layout(offset=0) float4 rtAdjust;\n"
            "};\n"
            "\n";

    if (step->numVertexAttributes() > 0 || step->numInstanceAttributes() > 0) {
        sksl += emit_SkSL_attributes(step->vertexAttributes(), step->instanceAttributes());
    }

    // Uniforms needed by RenderStep
    if (step->numUniforms() > 0) {
        sksl += emit_SKSL_uniforms(1, "Step", step->uniforms());
    }

    // Vertex shader function declaration
    sksl += "void main() {\n";
    // Vertex shader body
    sksl += step->vertexSkSL();
    sksl += "sk_Position = float4(devPosition.xy * rtAdjust.xy + rtAdjust.zw, devPosition.zw);\n"
            "}\n";

    return sksl;
}

std::string get_sksl_fs(SkShaderCodeDictionary* dict,
                        const GraphicsPipelineDesc& desc,
                        bool* writesColor) {
    if (!desc.paintParamsID().isValid()) {
        *writesColor = false;
        return {};
    }

    SkShaderInfo shaderInfo;

    dict->getShaderInfo(desc.paintParamsID(), &shaderInfo);

    *writesColor = shaderInfo.writesColor();
#if SK_SUPPORT_GPU
    return shaderInfo.toSkSL();
#else
    return {};
#endif
}
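// Maps a Graphite CPU-side vertex attribute type to the matching Metal vertex
// format. Formats that require macOS 10.13 / iOS 11.0 resolve to
// MTLVertexFormatInvalid on older OS versions; create_vertex_descriptor() below
// asserts that no attribute ends up with the invalid format.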
inline MTLVertexFormat attribute_type_to_mtlformat(VertexAttribType type) {
    switch (type) {
        case VertexAttribType::kFloat:
            return MTLVertexFormatFloat;
        case VertexAttribType::kFloat2:
            return MTLVertexFormatFloat2;
        case VertexAttribType::kFloat3:
            return MTLVertexFormatFloat3;
        case VertexAttribType::kFloat4:
            return MTLVertexFormatFloat4;
        case VertexAttribType::kHalf:
            if (@available(macOS 10.13, iOS 11.0, *)) {
                return MTLVertexFormatHalf;
            } else {
                return MTLVertexFormatInvalid;
            }
        case VertexAttribType::kHalf2:
            return MTLVertexFormatHalf2;
        case VertexAttribType::kHalf4:
            return MTLVertexFormatHalf4;
        case VertexAttribType::kInt2:
            return MTLVertexFormatInt2;
        case VertexAttribType::kInt3:
            return MTLVertexFormatInt3;
        case VertexAttribType::kInt4:
            return MTLVertexFormatInt4;
        case VertexAttribType::kByte:
            if (@available(macOS 10.13, iOS 11.0, *)) {
                return MTLVertexFormatChar;
            } else {
                return MTLVertexFormatInvalid;
            }
        case VertexAttribType::kByte2:
            return MTLVertexFormatChar2;
        case VertexAttribType::kByte4:
            return MTLVertexFormatChar4;
        case VertexAttribType::kUByte:
            if (@available(macOS 10.13, iOS 11.0, *)) {
                return MTLVertexFormatUChar;
            } else {
                return MTLVertexFormatInvalid;
            }
        case VertexAttribType::kUByte2:
            return MTLVertexFormatUChar2;
        case VertexAttribType::kUByte4:
            return MTLVertexFormatUChar4;
        case VertexAttribType::kUByte_norm:
            if (@available(macOS 10.13, iOS 11.0, *)) {
                return MTLVertexFormatUCharNormalized;
            } else {
                return MTLVertexFormatInvalid;
            }
        case VertexAttribType::kUByte4_norm:
            return MTLVertexFormatUChar4Normalized;
        case VertexAttribType::kShort2:
            return MTLVertexFormatShort2;
        case VertexAttribType::kShort4:
            return MTLVertexFormatShort4;
        case VertexAttribType::kUShort2:
            return MTLVertexFormatUShort2;
        case VertexAttribType::kUShort2_norm:
            return MTLVertexFormatUShort2Normalized;
        case VertexAttribType::kInt:
            return MTLVertexFormatInt;
        case VertexAttribType::kUInt:
            return MTLVertexFormatUInt;
        case VertexAttribType::kUShort_norm:
            if (@available(macOS 10.13, iOS 11.0, *)) {
                return MTLVertexFormatUShortNormalized;
            } else {
                return MTLVertexFormatInvalid;
            }
        case VertexAttribType::kUShort4_norm:
            return MTLVertexFormatUShort4Normalized;
    }
    SK_ABORT("Unknown vertex attribute type");
}

MTLVertexDescriptor* create_vertex_descriptor(const RenderStep* step) {
    auto vertexDescriptor = [[MTLVertexDescriptor alloc] init];
    int attributeIndex = 0;

    int vertexAttributeCount = step->numVertexAttributes();
    size_t vertexAttributeOffset = 0;
    for (const auto& attribute : step->vertexAttributes()) {
        MTLVertexAttributeDescriptor* mtlAttribute = vertexDescriptor.attributes[attributeIndex];
        MTLVertexFormat format = attribute_type_to_mtlformat(attribute.cpuType());
        SkASSERT(MTLVertexFormatInvalid != format);
        mtlAttribute.format = format;
        mtlAttribute.offset = vertexAttributeOffset;
        mtlAttribute.bufferIndex = GraphicsPipeline::kVertexBufferIndex;

        vertexAttributeOffset += attribute.sizeAlign4();
        attributeIndex++;
    }
    SkASSERT(vertexAttributeOffset == step->vertexStride());

    if (vertexAttributeCount) {
        MTLVertexBufferLayoutDescriptor* vertexBufferLayout =
                vertexDescriptor.layouts[GraphicsPipeline::kVertexBufferIndex];
        vertexBufferLayout.stepFunction = MTLVertexStepFunctionPerVertex;
        vertexBufferLayout.stepRate = 1;
        vertexBufferLayout.stride = vertexAttributeOffset;
    }

    int instanceAttributeCount = step->numInstanceAttributes();
    size_t instanceAttributeOffset = 0;
    for (const auto& attribute : step->instanceAttributes()) {
        MTLVertexAttributeDescriptor* mtlAttribute = vertexDescriptor.attributes[attributeIndex];
        MTLVertexFormat format = attribute_type_to_mtlformat(attribute.cpuType());
        SkASSERT(MTLVertexFormatInvalid != format);
        mtlAttribute.format = format;
        mtlAttribute.offset = instanceAttributeOffset;
        mtlAttribute.bufferIndex = GraphicsPipeline::kInstanceBufferIndex;

        instanceAttributeOffset += attribute.sizeAlign4();
        attributeIndex++;
    }
    SkASSERT(instanceAttributeOffset == step->instanceStride());

    if (instanceAttributeCount) {
        MTLVertexBufferLayoutDescriptor* instanceBufferLayout =
                vertexDescriptor.layouts[GraphicsPipeline::kInstanceBufferIndex];
        instanceBufferLayout.stepFunction = MTLVertexStepFunctionPerInstance;
        instanceBufferLayout.stepRate = 1;
        instanceBufferLayout.stride = instanceAttributeOffset;
    }
    return vertexDescriptor;
}

}  // anonymous namespace
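// Emits one SkSL uniform interface block covering the uniforms of every snippet in
// the shader's code chain. Each uniform name is mangled with its snippet's index so
// that identically named uniforms from different snippets don't collide.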
std::string GetMtlUniforms(int bufferID,
                           const char* name,
                           const std::vector<SkShaderInfo::SnippetEntry>& codeSnippets) {
    size_t numUniforms = 0;
    for (auto e : codeSnippets) {
        numUniforms += e.fUniforms.size();
    }

    if (!numUniforms) {
        return {};
    }

    int offset = 0;

    std::string result = get_uniform_header(bufferID, name);
    for (int i = 0; i < (int) codeSnippets.size(); ++i) {
        result += get_uniforms(codeSnippets[i].fUniforms, &offset, i);
    }
    result.append("};\n\n");

    return result;
}

enum ShaderType {
    kVertex_ShaderType = 0,
    kFragment_ShaderType = 1,

    kLast_ShaderType = kFragment_ShaderType
};
static const int kShaderTypeCount = kLast_ShaderType + 1;

sk_sp<GraphicsPipeline> GraphicsPipeline::Make(ResourceProvider* resourceProvider,
                                               const Gpu* gpu,
                                               const skgpu::GraphicsPipelineDesc& pipelineDesc,
                                               const skgpu::RenderPassDesc& renderPassDesc) {
    sk_cfp<MTLRenderPipelineDescriptor*> psoDescriptor([[MTLRenderPipelineDescriptor alloc] init]);

    std::string msl[kShaderTypeCount];
    SkSL::Program::Inputs inputs[kShaderTypeCount];
    SkSL::Program::Settings settings;

    ShaderErrorHandler* errorHandler = DefaultShaderErrorHandler();
    if (!SkSLToMSL(gpu,
                   get_sksl_vs(pipelineDesc),
                   SkSL::ProgramKind::kVertex,
                   settings,
                   &msl[kVertex_ShaderType],
                   &inputs[kVertex_ShaderType],
                   errorHandler)) {
        return nullptr;
    }

    bool writesColor;
    auto dict = resourceProvider->shaderCodeDictionary();
    if (!SkSLToMSL(gpu,
                   get_sksl_fs(dict, pipelineDesc, &writesColor),
                   SkSL::ProgramKind::kFragment,
                   settings,
                   &msl[kFragment_ShaderType],
                   &inputs[kFragment_ShaderType],
                   errorHandler)) {
        return nullptr;
    }

    sk_cfp<id<MTLLibrary>> shaderLibraries[kShaderTypeCount];

    shaderLibraries[kVertex_ShaderType] = CompileShaderLibrary(gpu,
                                                               msl[kVertex_ShaderType],
                                                               errorHandler);
    shaderLibraries[kFragment_ShaderType] = CompileShaderLibrary(gpu,
                                                                 msl[kFragment_ShaderType],
                                                                 errorHandler);
    if (!shaderLibraries[kVertex_ShaderType] || !shaderLibraries[kFragment_ShaderType]) {
        return nullptr;
    }
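    // Wire the compiled functions and fixed-function state into the pipeline
    // descriptor. Blending is currently always disabled; a fragment stage that
    // produces no color (e.g. a depth/stencil-only step) keeps its color attachment
    // but masks off all color writes below.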
    (*psoDescriptor).label = @(pipelineDesc.renderStep()->name());

    (*psoDescriptor).vertexFunction =
            [shaderLibraries[kVertex_ShaderType].get() newFunctionWithName: @"vertexMain"];
    (*psoDescriptor).fragmentFunction =
            [shaderLibraries[kFragment_ShaderType].get() newFunctionWithName: @"fragmentMain"];

    // TODO: I *think* this gets cleaned up by the pipelineDescriptor?
    (*psoDescriptor).vertexDescriptor = create_vertex_descriptor(pipelineDesc.renderStep());

    // TODO: I *think* this gets cleaned up by the pipelineDescriptor as well?
    auto mtlColorAttachment = [[MTLRenderPipelineColorAttachmentDescriptor alloc] init];

    mtl::TextureInfo mtlTexInfo;
    renderPassDesc.fColorAttachment.fTextureInfo.getMtlTextureInfo(&mtlTexInfo);

    mtlColorAttachment.pixelFormat = (MTLPixelFormat)mtlTexInfo.fFormat;

    mtlColorAttachment.blendingEnabled = FALSE;

    mtlColorAttachment.writeMask = writesColor ? MTLColorWriteMaskAll : MTLColorWriteMaskNone;

    (*psoDescriptor).colorAttachments[0] = mtlColorAttachment;

    renderPassDesc.fDepthStencilAttachment.fTextureInfo.getMtlTextureInfo(&mtlTexInfo);
    MTLPixelFormat depthStencilFormat = (MTLPixelFormat)mtlTexInfo.fFormat;
    if (FormatIsStencil(depthStencilFormat)) {
        (*psoDescriptor).stencilAttachmentPixelFormat = depthStencilFormat;
    } else {
        (*psoDescriptor).stencilAttachmentPixelFormat = MTLPixelFormatInvalid;
    }
    if (FormatIsDepth(depthStencilFormat)) {
        (*psoDescriptor).depthAttachmentPixelFormat = depthStencilFormat;
    } else {
        (*psoDescriptor).depthAttachmentPixelFormat = MTLPixelFormatInvalid;
    }

    NSError* error;
    sk_cfp<id<MTLRenderPipelineState>> pso(
            [gpu->device() newRenderPipelineStateWithDescriptor:psoDescriptor.get()
                                                          error:&error]);
    if (!pso) {
        SKGPU_LOG_E("Pipeline creation failure:\n%s", error.debugDescription.UTF8String);
        return nullptr;
    }

    const DepthStencilSettings& depthStencilSettings =
            pipelineDesc.renderStep()->depthStencilSettings();
    id<MTLDepthStencilState> dss = resourceProvider->findOrCreateCompatibleDepthStencilState(
            depthStencilSettings);

    return sk_sp<GraphicsPipeline>(
            new GraphicsPipeline(gpu,
                                 std::move(pso),
                                 dss,
                                 depthStencilSettings.fStencilReferenceValue,
                                 pipelineDesc.renderStep()->vertexStride(),
                                 pipelineDesc.renderStep()->instanceStride()));
}

void GraphicsPipeline::onFreeGpuData() {
    fPipelineState.reset();
}

}  // namespace skgpu::mtl