/*
 * Copyright 2022 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_MtlComputeCommandEncoder_DEFINED
#define skgpu_graphite_MtlComputeCommandEncoder_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/ports/SkCFObject.h"
#include "src/gpu/graphite/ComputeTypes.h"
#include "src/gpu/graphite/Resource.h"

#import <Metal/Metal.h>

namespace skgpu::graphite {

/**
 * Wraps a MTLComputeCommandEncoder object and tracks the encoder state
 * (pipeline, bound buffers/offsets, textures, samplers) so that redundant
 * Metal API calls can be elided when the same object is re-bound to the
 * same slot.
 */
class MtlComputeCommandEncoder : public Resource {
public:
    /**
     * Creates a compute command encoder for `commandBuffer`, or an object
     * wrapping nil if Metal fails to vend one.
     */
    static sk_sp<MtlComputeCommandEncoder> Make(const SharedContext* sharedContext,
                                                id<MTLCommandBuffer> commandBuffer) {
        // Adding a retain here to keep our own ref separate from the autorelease pool
        sk_cfp<id<MTLComputeCommandEncoder>> encoder =
                sk_ret_cfp([commandBuffer computeCommandEncoder]);

        // TODO(armansito): Support concurrent dispatch of compute passes using
        // MTLDispatchTypeConcurrent on macOS 10.14+ and iOS 12.0+.
        return sk_sp<MtlComputeCommandEncoder>(
                new MtlComputeCommandEncoder(sharedContext, std::move(encoder)));
    }

    void setLabel(NSString* label) { [(*fCommandEncoder) setLabel:label]; }

    void pushDebugGroup(NSString* string) { [(*fCommandEncoder) pushDebugGroup:string]; }
    void popDebugGroup() { [(*fCommandEncoder) popDebugGroup]; }
    void insertDebugSignpost(NSString* string) { [(*fCommandEncoder) insertDebugSignpost:string]; }

    // Binds `pso` unless it is already the current pipeline state.
    void setComputePipelineState(id<MTLComputePipelineState> pso) {
        if (fCurrentComputePipelineState != pso) {
            [(*fCommandEncoder) setComputePipelineState:pso];
            fCurrentComputePipelineState = pso;
        }
    }

    // Binds `buffer` at `offset` to buffer slot `index`, skipping the Metal
    // call (or issuing only the cheaper setBufferOffset, where available)
    // when the binding is unchanged.
    void setBuffer(id<MTLBuffer> buffer, NSUInteger offset, NSUInteger index) {
        SkASSERT(buffer != nil);
        SkASSERT(index < kMaxExpectedBuffers);
        // If the same buffer is already bound, updating just the offset is
        // cheaper than a full rebind (requires macOS 10.11 / iOS 8.3).
        if (@available(macOS 10.11, iOS 8.3, *)) {
            if (fBuffers[index] == buffer) {
                this->setBufferOffset(offset, index);
                return;
            }
        }
        if (fBuffers[index] != buffer || fBufferOffsets[index] != offset) {
            [(*fCommandEncoder) setBuffer:buffer offset:offset atIndex:index];
            fBuffers[index] = buffer;
            fBufferOffsets[index] = offset;
        }
    }

    // Updates only the offset of the buffer bound at `index`, if it changed.
    // Note: availability corrected from ios(0.3) to ios(8.3) to match the
    // @available guard in setBuffer() and Apple's documented availability of
    // -[MTLComputeCommandEncoder setBufferOffset:atIndex:].
    void setBufferOffset(NSUInteger offset, NSUInteger index)
            SK_API_AVAILABLE(macos(10.11), ios(8.3)) {
        SkASSERT(index < kMaxExpectedBuffers);
        if (fBufferOffsets[index] != offset) {
            [(*fCommandEncoder) setBufferOffset:offset atIndex:index];
            fBufferOffsets[index] = offset;
        }
    }

    // Binds `texture` to texture slot `index` unless already bound there.
    void setTexture(id<MTLTexture> texture, NSUInteger index) {
        SkASSERT(index < kMaxExpectedTextures);
        if (fTextures[index] != texture) {
            [(*fCommandEncoder) setTexture:texture atIndex:index];
            fTextures[index] = texture;
        }
    }

    // Binds `sampler` to sampler slot `index` unless already bound there.
    void setSamplerState(id<MTLSamplerState> sampler, NSUInteger index) {
        SkASSERT(index < kMaxExpectedTextures);
        if (fSamplers[index] != sampler) {
            [(*fCommandEncoder) setSamplerState:sampler atIndex:index];
            fSamplers[index] = sampler;
        }
    }

    // Dispatches `globalSize` threadgroups, each of `localSize` threads.
    void dispatchThreadgroups(const WorkgroupSize& globalSize, const WorkgroupSize& localSize) {
        MTLSize threadgroupCount =
                MTLSizeMake(globalSize.fWidth, globalSize.fHeight, globalSize.fDepth);
        MTLSize threadsPerThreadgroup =
                MTLSizeMake(localSize.fWidth, localSize.fHeight, localSize.fDepth);
        [(*fCommandEncoder) dispatchThreadgroups:threadgroupCount
                           threadsPerThreadgroup:threadsPerThreadgroup];
    }

    void endEncoding() { [(*fCommandEncoder) endEncoding]; }

private:
    // Sizes of the state-tracking caches below; indices are asserted against
    // these bounds, they are not Metal hard limits.
    inline static constexpr int kMaxExpectedBuffers = 16;
    inline static constexpr int kMaxExpectedTextures = 16;

    MtlComputeCommandEncoder(const SharedContext* sharedContext,
                             sk_cfp<id<MTLComputeCommandEncoder>> encoder)
            : Resource(sharedContext, Ownership::kOwned, skgpu::Budgeted::kYes)
            , fCommandEncoder(std::move(encoder)) {
        for (int i = 0; i < kMaxExpectedBuffers; i++) {
            fBuffers[i] = nil;
        }
        for (int i = 0; i < kMaxExpectedTextures; i++) {
            fTextures[i] = nil;
            fSamplers[i] = nil;
        }
    }

    void freeGpuData() override { fCommandEncoder.reset(); }

    sk_cfp<id<MTLComputeCommandEncoder>> fCommandEncoder;

    // Cached state used to elide redundant Metal binding calls.
    id<MTLComputePipelineState> fCurrentComputePipelineState = nil;

    id<MTLBuffer> fBuffers[kMaxExpectedBuffers];
    NSUInteger fBufferOffsets[kMaxExpectedBuffers];

    id<MTLTexture> fTextures[kMaxExpectedTextures];
    id<MTLSamplerState> fSamplers[kMaxExpectedTextures];
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_MtlComputeCommandEncoder_DEFINED