/*
 * Copyright 2022 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_MtlComputeCommandEncoder_DEFINED
#define skgpu_graphite_MtlComputeCommandEncoder_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/ports/SkCFObject.h"
#include "src/gpu/graphite/ComputeTypes.h"
#include "src/gpu/graphite/Resource.h"

#import <Metal/Metal.h>

namespace skgpu::graphite {

/**
 * Wraps an MTLComputeCommandEncoder object and its associated tracked state.
 */
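// A minimal usage sketch (illustrative only; `sharedContext`, `commandBuffer`, `pso`,
// `inputBuffer`, `globalSize`, and `localSize` are assumed to exist in the caller):
//
//     sk_sp<MtlComputeCommandEncoder> encoder =
//             MtlComputeCommandEncoder::Make(sharedContext, commandBuffer);
//     encoder->setComputePipelineState(pso);
//     encoder->setBuffer(inputBuffer, /*offset=*/0, /*index=*/0);
//     encoder->dispatchThreadgroups(globalSize, localSize);
//     encoder->endEncoding();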
class MtlComputeCommandEncoder : public Resource {
public:
    static sk_sp<MtlComputeCommandEncoder> Make(const SharedContext* sharedContext,
                                                id<MTLCommandBuffer> commandBuffer) {
        // Inserting a pool here so the autorelease occurs when we return and the
        // only remaining ref is the retain below.
        @autoreleasepool {
            // Adding a retain here to keep our own ref separate from the autorelease pool
            sk_cfp<id<MTLComputeCommandEncoder>> encoder =
                    sk_ret_cfp([commandBuffer computeCommandEncoder]);

            // TODO(armansito): Support concurrent dispatch of compute passes using
            // MTLDispatchTypeConcurrent on macOS 10.14+ and iOS 12.0+.
            return sk_sp<MtlComputeCommandEncoder>(
                    new MtlComputeCommandEncoder(sharedContext, std::move(encoder)));
        }
    }

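    // Human-readable type name reported for this Resource.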
    const char* getResourceType() const override { return "Metal Compute Command Encoder"; }

    void setLabel(NSString* label) { [(*fCommandEncoder) setLabel:label]; }

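    // Debug-group and signpost calls are forwarded directly to the underlying encoder.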
    void pushDebugGroup(NSString* string) { [(*fCommandEncoder) pushDebugGroup:string]; }
    void popDebugGroup() { [(*fCommandEncoder) popDebugGroup]; }
    void insertDebugSignpost(NSString* string) { [(*fCommandEncoder) insertDebugSignpost:string]; }

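    // Binds the compute pipeline, skipping the Metal call if the pipeline is already current.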
    void setComputePipelineState(id<MTLComputePipelineState> pso) {
        if (fCurrentComputePipelineState != pso) {
            [(*fCommandEncoder) setComputePipelineState:pso];
            fCurrentComputePipelineState = pso;
        }
    }

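    // Binds `buffer` at `index`. If the same buffer is already bound (and setBufferOffset: is
    // available), only the offset is updated, which is cheaper than a full rebind.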
    void setBuffer(id<MTLBuffer> buffer, NSUInteger offset, NSUInteger index) {
        SkASSERT(buffer != nil);
        SkASSERT(index < kMaxExpectedBuffers);
        if (@available(macOS 10.11, iOS 8.3, tvOS 9.0, *)) {
            if (fBuffers[index] == buffer) {
                this->setBufferOffset(offset, index);
                return;
            }
        }
        if (fBuffers[index] != buffer || fBufferOffsets[index] != offset) {
            [(*fCommandEncoder) setBuffer:buffer offset:offset atIndex:index];
            fBuffers[index] = buffer;
            fBufferOffsets[index] = offset;
        }
    }

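    // Adjusts the offset of the buffer already bound at `index` without rebinding it.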
    void setBufferOffset(NSUInteger offset, NSUInteger index)
            SK_API_AVAILABLE(macos(10.11), ios(8.3), tvos(9.0)) {
        SkASSERT(index < kMaxExpectedBuffers);
        if (fBufferOffsets[index] != offset) {
            [(*fCommandEncoder) setBufferOffset:offset atIndex:index];
            fBufferOffsets[index] = offset;
        }
    }

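    // Texture and sampler bindings are likewise cached so redundant encoder calls are skipped.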
    void setTexture(id<MTLTexture> texture, NSUInteger index) {
        SkASSERT(index < kMaxExpectedTextures);
        if (fTextures[index] != texture) {
            [(*fCommandEncoder) setTexture:texture atIndex:index];
            fTextures[index] = texture;
        }
    }

    void setSamplerState(id<MTLSamplerState> sampler, NSUInteger index) {
        SkASSERT(index < kMaxExpectedTextures);
        if (fSamplers[index] != sampler) {
            [(*fCommandEncoder) setSamplerState:sampler atIndex:index];
            fSamplers[index] = sampler;
        }
    }

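    // Reserves `length` bytes of threadgroup (shared) memory at the given binding index.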
    // `length` must be 16-byte aligned
    void setThreadgroupMemoryLength(NSUInteger length, NSUInteger index) {
        SkASSERT(length % 16 == 0);
        [(*fCommandEncoder) setThreadgroupMemoryLength:length atIndex:index];
    }

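    // `globalSize` is the number of threadgroups in each grid dimension; `localSize` is the
    // number of threads per threadgroup.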
    void dispatchThreadgroups(const WorkgroupSize& globalSize, const WorkgroupSize& localSize) {
        MTLSize threadgroupCount =
                MTLSizeMake(globalSize.fWidth, globalSize.fHeight, globalSize.fDepth);
        MTLSize threadsPerThreadgroup =
                MTLSizeMake(localSize.fWidth, localSize.fHeight, localSize.fDepth);
        [(*fCommandEncoder) dispatchThreadgroups:threadgroupCount
                           threadsPerThreadgroup:threadsPerThreadgroup];
    }

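    // Like dispatchThreadgroups(), but the threadgroup counts are read from `indirectBuffer`
    // at `offset` when the command executes on the GPU.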
    void dispatchThreadgroupsWithIndirectBuffer(id<MTLBuffer> indirectBuffer,
                                                NSUInteger offset,
                                                const WorkgroupSize& localSize) {
        MTLSize threadsPerThreadgroup =
                MTLSizeMake(localSize.fWidth, localSize.fHeight, localSize.fDepth);
        [(*fCommandEncoder) dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
                                              indirectBufferOffset:offset
                                             threadsPerThreadgroup:threadsPerThreadgroup];
    }

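    // Ends encoding; no further commands can be encoded with this encoder afterwards.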
    void endEncoding() { [(*fCommandEncoder) endEncoding]; }

private:
    inline static constexpr int kMaxExpectedBuffers = 16;
    inline static constexpr int kMaxExpectedTextures = 16;

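    // Initializes the shadowed binding state to nil to match a freshly created encoder,
    // which starts with no bindings.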
    MtlComputeCommandEncoder(const SharedContext* sharedContext,
                             sk_cfp<id<MTLComputeCommandEncoder>> encoder)
            : Resource(sharedContext,
                       Ownership::kOwned,
                       /*gpuMemorySize=*/0)
            , fCommandEncoder(std::move(encoder)) {
        for (int i = 0; i < kMaxExpectedBuffers; i++) {
            fBuffers[i] = nil;
        }
        for (int i = 0; i < kMaxExpectedTextures; i++) {
            fTextures[i] = nil;
            fSamplers[i] = nil;
        }
    }

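    // Releases the wrapped MTLComputeCommandEncoder when this Resource's GPU data is freed.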
    void freeGpuData() override { fCommandEncoder.reset(); }

    sk_cfp<id<MTLComputeCommandEncoder>> fCommandEncoder;

    id<MTLComputePipelineState> fCurrentComputePipelineState = nil;

    id<MTLBuffer> fBuffers[kMaxExpectedBuffers];
    NSUInteger    fBufferOffsets[kMaxExpectedBuffers];

    id<MTLTexture>      fTextures[kMaxExpectedTextures];
    id<MTLSamplerState> fSamplers[kMaxExpectedTextures];
};

}  // namespace skgpu::graphite

#endif  // skgpu_graphite_MtlComputeCommandEncoder_DEFINED