1 /*
2 * Copyright 2023 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/compute/ComputeStep.h"
9
10 #include "include/private/base/SkAssert.h"
11
12 #include <atomic>
13 #include <unordered_set>
14
15 namespace skgpu::graphite {
16 namespace {
17
// Returns a process-wide, monotonically increasing ID used to uniquely
// identify each ComputeStep instance.
static uint32_t next_id() {
    static std::atomic<uint32_t> gNextID{0};
    // Overflow is not a practical concern since a Context isn't expected to
    // create anywhere near that many ComputeSteps. Even if the counter wraps
    // around to 0, that ComputeStep will not be in the same Context as the
    // original 0.
    return gNextID.fetch_add(1, std::memory_order_relaxed);
}
25
26 } // namespace
27
ComputeStep(std::string_view name,WorkgroupSize localDispatchSize,SkSpan<const ResourceDesc> resources,SkSpan<const WorkgroupBufferDesc> workgroupBuffers,Flags baseFlags)28 ComputeStep::ComputeStep(std::string_view name,
29 WorkgroupSize localDispatchSize,
30 SkSpan<const ResourceDesc> resources,
31 SkSpan<const WorkgroupBufferDesc> workgroupBuffers,
32 Flags baseFlags)
33 : fUniqueID(next_id())
34 , fFlags(baseFlags)
35 , fName(name)
36 , fResources(resources.data(), resources.size())
37 , fWorkgroupBuffers(workgroupBuffers.data(), workgroupBuffers.size())
38 , fLocalDispatchSize(localDispatchSize) {
39 #ifdef SK_DEBUG
40 std::unordered_set<int> slots;
41 for (const ResourceDesc& r : fResources) {
42 // Validate that slot assignments within a ComputeStep are unique.
43 if (r.fFlow == DataFlow::kShared) {
44 SkASSERT(r.fSlot > -1);
45 SkASSERT(r.fSlot < kMaxComputeDataFlowSlots);
46 auto [_, inserted] = slots.insert(r.fSlot);
47 SkASSERT(inserted);
48 }
49 }
50 #endif // SK_DEBUG
51 }
52
// Default stub: aborts unconditionally. Subclasses whose resources request a
// CPU-mapped storage buffer must override this to fill in the mapped memory.
void ComputeStep::prepareStorageBuffer(int, const ResourceDesc&, void*, size_t) const {
    SK_ABORT("ComputeSteps that initialize a mapped storage buffer must override "
             "prepareStorageBuffer()");
}
57
// Default stub: aborts unconditionally. Subclasses whose resources request a
// uniform buffer must override this to write the uniform data.
void ComputeStep::prepareUniformBuffer(int, const ResourceDesc&, UniformManager*) const {
    SK_ABORT("ComputeSteps that initialize a uniform buffer must override prepareUniformBuffer()");
}
61
// Default stub: aborts unconditionally. Subclasses must override this unless
// they provide native shader source via nativeShaderSource() instead.
std::string ComputeStep::computeSkSL() const {
    SK_ABORT("ComputeSteps must override computeSkSL() unless they support native shader source");
    // Unreachable; satisfies compilers that don't know SK_ABORT doesn't return.
    return "";
}
66
// Default stub: aborts unconditionally. Subclasses advertising native shader
// source support must override this to return source in the requested format.
ComputeStep::NativeShaderSource ComputeStep::nativeShaderSource(NativeShaderFormat) const {
    SK_ABORT("ComputeSteps that support native shader source must override nativeShaderSource()");
    // Unreachable; satisfies compilers that don't know SK_ABORT doesn't return.
    return {};
}
71
// Default stub: aborts unconditionally. Subclasses whose resources request a
// storage buffer must override this to report the buffer's required byte size.
size_t ComputeStep::calculateBufferSize(int, const ResourceDesc&) const {
    SK_ABORT("ComputeSteps that initialize a storage buffer must override calculateBufferSize()");
    // Unreachable; satisfies compilers that don't know SK_ABORT doesn't return.
    return 0u;
}
76
// Default stub: aborts unconditionally. Subclasses whose resources request a
// texture must override this to report its dimensions and color type.
std::tuple<SkISize, SkColorType> ComputeStep::calculateTextureParameters(
        int, const ResourceDesc&) const {
    SK_ABORT("ComputeSteps that initialize a texture must override calculateTextureParameters()");
    // Unreachable; satisfies compilers that don't know SK_ABORT doesn't return.
    return {SkISize::MakeEmpty(), kUnknown_SkColorType};
}
82
calculateSamplerParameters(int resourceIndex,const ResourceDesc &) const83 SamplerDesc ComputeStep::calculateSamplerParameters(int resourceIndex, const ResourceDesc&) const {
84 SK_ABORT("ComputeSteps that initialize a sampler must override calculateSamplerParameters()");
85 constexpr SkTileMode kTileModes[2] = {SkTileMode::kClamp, SkTileMode::kClamp};
86 return {{}, kTileModes};
87 }
88
// Default stub: aborts unconditionally. Subclasses must override this unless
// the workgroup count is supplied out-of-band at dispatch time.
WorkgroupSize ComputeStep::calculateGlobalDispatchSize() const {
    SK_ABORT("ComputeSteps must override calculateGlobalDispatchSize() unless "
             "the workgroup count is determined out-of-band");
    // Unreachable; satisfies compilers that don't know SK_ABORT doesn't return.
    return WorkgroupSize();
}
94
95 } // namespace skgpu::graphite
96