// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/CommandBufferStateTracker.h"

#include "common/Assert.h"
#include "common/BitSetIterator.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/ComputePassEncoder.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Forward.h"
#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/RenderPipeline.h"

// TODO(dawn:563): None of the error messages in this file include the buffer objects they are
// validating against. It would be nice to improve that, but difficult to do without incurring
// additional tracking costs.

namespace dawn_native {

    namespace {
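        // Returns true only if every unverified buffer binding size is at least as large as
        // the corresponding minimum binding size required by the pipeline layout.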
        bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
                                     const std::vector<uint64_t>& pipelineMinBufferSizes) {
            ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());

            for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
                if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
                    return false;
                }
            }

            return true;
        }
    }  // namespace

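    // Each validation aspect is one piece of state that must be valid before a draw or
    // dispatch may be encoded. Tracking aspects in a bitset lets ValidateOperation() accept
    // the common all-valid case with a single bitwise test.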
    enum ValidationAspect {
        VALIDATION_ASPECT_PIPELINE,
        VALIDATION_ASPECT_BIND_GROUPS,
        VALIDATION_ASPECT_VERTEX_BUFFERS,
        VALIDATION_ASPECT_INDEX_BUFFER,

        VALIDATION_ASPECT_COUNT
    };
    static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects, "");

    static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;

    static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
        1 << VALIDATION_ASPECT_VERTEX_BUFFERS;

    static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
        1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;

    static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
        1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
        1 << VALIDATION_ASPECT_INDEX_BUFFER;

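    // The ValidateCan*() methods below are the per-command validation entry points. A typical
    // encoder sequence looks like this sketch (the calls shown are the ones defined in this
    // file; the surrounding encoder code is illustrative):
    //     tracker.SetRenderPipeline(pipeline);
    //     tracker.SetBindGroup(BindGroupIndex(0), group, 0, nullptr);
    //     tracker.SetVertexBuffer(VertexBufferSlot(0), bufferSize);
    //     DAWN_TRY(tracker.ValidateCanDraw());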
    MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
        return ValidateOperation(kDispatchAspects);
    }

    MaybeError CommandBufferStateTracker::ValidateCanDraw() {
        return ValidateOperation(kDrawAspects);
    }

    MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
        return ValidateOperation(kDrawIndexedAspects);
    }

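    // Checks that every vertex buffer slot used at vertex step mode is large enough for the
    // requested vertex range. A buffer with an arrayStride of 0 only needs to fit its
    // attributes once; otherwise it must cover (firstVertex + vertexCount) strides.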
    MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
        uint32_t vertexCount,
        uint32_t firstVertex) {
        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
            vertexBufferSlotsUsedAsVertexBuffer =
                lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();

        for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
            const VertexBufferInfo& vertexBuffer =
                lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
            uint64_t arrayStride = vertexBuffer.arrayStride;
            uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];

            if (arrayStride == 0) {
                DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
                                "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
                                "is smaller than the required size for all attributes (%u).",
                                bufferSize, static_cast<uint8_t>(usedSlotVertex),
                                vertexBuffer.usedBytesInStride);
            } else {
                uint64_t requiredSize =
                    (static_cast<uint64_t>(firstVertex) + vertexCount) * arrayStride;
                // firstVertex and vertexCount are in uint32_t, and arrayStride must not
                // be larger than kMaxVertexBufferArrayStride, which is currently 2048. So by
                // doing the checks in uint64_t we avoid overflows.
                DAWN_INVALID_IF(
                    requiredSize > bufferSize,
                    "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than the "
                    "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
                    firstVertex, vertexCount, requiredSize, bufferSize,
                    static_cast<uint8_t>(usedSlotVertex), arrayStride);
            }
        }

        return {};
    }

    MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
        uint32_t instanceCount,
        uint32_t firstInstance) {
        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
            vertexBufferSlotsUsedAsInstanceBuffer =
                lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();

        for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
            const VertexBufferInfo& vertexBuffer =
                lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
            uint64_t arrayStride = vertexBuffer.arrayStride;
            uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
            if (arrayStride == 0) {
                DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
                                "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
                                "is smaller than the required size for all attributes (%u).",
                                bufferSize, static_cast<uint8_t>(usedSlotInstance),
                                vertexBuffer.usedBytesInStride);
            } else {
                uint64_t requiredSize =
                    (static_cast<uint64_t>(firstInstance) + instanceCount) * arrayStride;
                // firstInstance and instanceCount are in uint32_t, and arrayStride must
                // not be larger than kMaxVertexBufferArrayStride, which is currently 2048.
                // So by doing the checks in uint64_t we avoid overflows.
                DAWN_INVALID_IF(
                    requiredSize > bufferSize,
                    "Instance range (first: %u, count: %u) requires a larger buffer (%u) than the "
                    "bound buffer size (%u) of the vertex buffer at slot %u with stride (%u).",
                    firstInstance, instanceCount, requiredSize, bufferSize,
                    static_cast<uint8_t>(usedSlotInstance), arrayStride);
            }
        }

        return {};
    }

    MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
                                                                     uint32_t firstIndex) {
        // Validate the range of the index buffer.
        // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
        // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing the
        // checks in uint64_t we avoid overflows.
        DAWN_INVALID_IF(
            (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
                mIndexBufferSize,
            "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
            "(%u).",
            firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
        return {};
    }

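    // Shared validation entry point. Aspects whose bits are already set are trusted; missing
    // lazy aspects are recomputed first so that state which became valid since it was last
    // invalidated does not produce spurious errors.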
    MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
        // Fast path: all required aspects are already valid.
        ValidationAspects missingAspects = requiredAspects & ~mAspects;
        if (missingAspects.none()) {
            return {};
        }

        // Generate an error immediately if a non-lazy aspect is missing, as computing lazy
        // aspects requires the pipeline to be set.
        DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));

        RecomputeLazyAspects(missingAspects);

        DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));

        return {};
    }

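    // Re-checks the lazily validated aspects (bind groups, vertex buffers, index buffer)
    // against the currently bound pipeline and sets the bit for each aspect that is now
    // valid. A pipeline must already be set.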
    void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
        ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
        ASSERT((aspects & ~kLazyAspects).none());

        if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
            bool matches = true;

            for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
                if (mBindgroups[i] == nullptr ||
                    mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
                    !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
                                             (*mMinBufferSizes)[i])) {
                    matches = false;
                    break;
                }
            }

            if (matches) {
                mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
            }
        }

        if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

            const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
                lastRenderPipeline->GetVertexBufferSlotsUsed();
            if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
                mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
            }
        }

        if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
            if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
                mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
                mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
            }
        }
    }

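    // Produces a detailed validation error for the first missing aspect. This must be kept in
    // sync with RecomputeLazyAspects(): any state that fails there must be diagnosed here,
    // otherwise the UNREACHABLE() fallbacks below would be hit.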
    MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
        if (!aspects.any()) {
            return {};
        }

        DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");

        if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
            DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");

            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
            wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();

            if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
                DAWN_INVALID_IF(
                    pipelineIndexFormat == wgpu::IndexFormat::Undefined,
                    "%s has a strip primitive topology (%s) but a strip index format of %s, which "
                    "prevents it from being used for indexed draw calls.",
                    lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
                    pipelineIndexFormat);

                DAWN_INVALID_IF(
                    mIndexFormat != pipelineIndexFormat,
                    "Strip index format (%s) of %s does not match index buffer format (%s).",
                    pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
            }

            // The chunk of code above should be similar to the one in |RecomputeLazyAspects|:
            // it returns the first invalid state found. We shouldn't be able to reach this line
            // because for the aspect to be invalid one of the above conditions must have failed
            // earlier. If this is reached, make sure lazy aspects and the error checks above
            // stay consistent.
            UNREACHABLE();
            return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
        }

        // TODO(dawn:563): Indicate which slots were not set.
        DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
                        "Vertex buffer slots required by %s were not set.", GetRenderPipeline());

        if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
            for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
                ASSERT(HasPipeline());

                DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
                                static_cast<uint32_t>(i));

                BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
                BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();

                DAWN_INVALID_IF(
                    requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
                        currentBGL->GetPipelineCompatibilityToken() !=
                            requiredBGL->GetPipelineCompatibilityToken(),
                    "The current pipeline (%s) was created with a default layout, and is not "
                    "compatible with the %s at index %u which uses a %s that was not created by "
                    "the pipeline. Either use the bind group layout returned by calling "
                    "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
                    "provide an explicit pipeline layout when creating the pipeline.",
                    mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
                    static_cast<uint32_t>(i));

                DAWN_INVALID_IF(
                    requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
                        currentBGL->GetPipelineCompatibilityToken() !=
                            PipelineCompatibilityToken(0),
                    "%s at index %u uses a %s which was created as part of the default layout for "
                    "a different pipeline than the current one (%s), and as a result is not "
                    "compatible. Use an explicit bind group layout when creating bind groups and "
                    "an explicit pipeline layout when creating pipelines to share bind groups "
                    "between pipelines.",
                    mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);

                DAWN_INVALID_IF(
                    mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
                    "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
                    "group %s at index %u.",
                    requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
                    static_cast<uint32_t>(i));

                // TODO(dawn:563): Report the binding sizes and which ones are failing.
                DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
                                                         (*mMinBufferSizes)[i]),
                                "Binding sizes are too small for bind group %s at index %u.",
                                mBindgroups[i], static_cast<uint32_t>(i));
            }

            // The chunk of code above should be similar to the one in |RecomputeLazyAspects|:
            // it returns the first invalid state found. We shouldn't be able to reach this line
            // because for the aspect to be invalid one of the above conditions must have failed
            // earlier. If this is reached, make sure lazy aspects and the error checks above
            // stay consistent.
            UNREACHABLE();
            return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
        }

        UNREACHABLE();
    }

    void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
        SetPipelineCommon(pipeline);
    }

    void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
        SetPipelineCommon(pipeline);
    }

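    // Records the bind group and its dynamic offsets, then clears the bind group aspect so
    // it is re-validated on the next draw or dispatch.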
    void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
                                                 BindGroupBase* bindgroup,
                                                 uint32_t dynamicOffsetCount,
                                                 const uint32_t* dynamicOffsets) {
        mBindgroups[index] = bindgroup;
        mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
        mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
    }

    void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
        mIndexBufferSet = true;
        mIndexFormat = format;
        mIndexBufferSize = size;
    }

    void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
        mVertexBufferSlotsUsed.set(slot);
        mVertexBufferSizes[slot] = size;
    }

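    // Common bookkeeping for SetComputePipeline() and SetRenderPipeline().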
    void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
        mLastPipeline = pipeline;
        mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
        mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;

        mAspects.set(VALIDATION_ASPECT_PIPELINE);

        // Reset lazy aspects so they get recomputed on the next operation.
        mAspects &= ~kLazyAspects;
    }

    BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
        return mBindgroups[index];
    }

    const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
        BindGroupIndex index) const {
        return mDynamicOffsets[index];
    }

    bool CommandBufferStateTracker::HasPipeline() const {
        return mLastPipeline != nullptr;
    }

    RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
        ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
        return static_cast<RenderPipelineBase*>(mLastPipeline);
    }

    ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
        ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
        return static_cast<ComputePipelineBase*>(mLastPipeline);
    }

    PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
        return mLastPipelineLayout;
    }

    wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
        return mIndexFormat;
    }

    uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
        return mIndexBufferSize;
    }

}  // namespace dawn_native