// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/Queue.h"

#include "common/Constants.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/CopyTextureForBrowserHelper.h"
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ExternalTexture.h"
#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderPassEncoder.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/Texture.h"
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"

#include <cstring>

namespace dawn_native {

    namespace {

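        // Copies texture data into a staging allocation row by row, layer by layer, or
        // with a single memcpy, depending on how the source and destination strides line up.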
        void CopyTextureData(uint8_t* dstPointer,
                             const uint8_t* srcPointer,
                             uint32_t depth,
                             uint32_t rowsPerImage,
                             uint64_t imageAdditionalStride,
                             uint32_t actualBytesPerRow,
                             uint32_t dstBytesPerRow,
                             uint32_t srcBytesPerRow) {
            bool copyWholeLayer =
                actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
            bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;

            if (!copyWholeLayer) {  // copy row by row
                for (uint32_t d = 0; d < depth; ++d) {
                    for (uint32_t h = 0; h < rowsPerImage; ++h) {
                        memcpy(dstPointer, srcPointer, actualBytesPerRow);
                        dstPointer += dstBytesPerRow;
                        srcPointer += srcBytesPerRow;
                    }
                    srcPointer += imageAdditionalStride;
                }
            } else {
                uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
                if (!copyWholeData) {  // copy layer by layer
                    for (uint32_t d = 0; d < depth; ++d) {
                        memcpy(dstPointer, srcPointer, layerSize);
                        dstPointer += layerSize;
                        srcPointer += layerSize + imageAdditionalStride;
                    }
                } else {  // do a single copy
                    memcpy(dstPointer, srcPointer, layerSize * depth);
                }
            }
        }

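        // Stages the caller's texture data in an upload buffer whose offset and
        // bytes-per-row satisfy the device's alignment requirements, re-packing the
        // rows when the source and destination layouts differ.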
        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
            DeviceBase* device,
            const void* data,
            uint32_t alignedBytesPerRow,
            uint32_t optimallyAlignedBytesPerRow,
            uint32_t alignedRowsPerImage,
            const TextureDataLayout& dataLayout,
            bool hasDepthOrStencil,
            const TexelBlockInfo& blockInfo,
            const Extent3D& writeSizePixel) {
            uint64_t newDataSizeBytes;
            DAWN_TRY_ASSIGN(
                newDataSizeBytes,
                ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
                                           alignedRowsPerImage));

            uint64_t optimalOffsetAlignment =
                device->GetOptimalBufferToTextureCopyOffsetAlignment();
            ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
            ASSERT(IsPowerOfTwo(blockInfo.byteSize));
            // The offset must be aligned to both optimalOffsetAlignment and blockInfo.byteSize.
            // Since both are powers of two, aligning to the larger of the two suffices.
            uint64_t offsetAlignment =
                std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));

            // For depth-stencil textures, the buffer offset must be a multiple of 4, as
            // required by both the WebGPU and Vulkan specs.
            if (hasDepthOrStencil) {
                constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
                offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
            }

            UploadHandle uploadHandle;
            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
                                              newDataSizeBytes, device->GetPendingCommandSerial(),
                                              offsetAlignment));
            ASSERT(uploadHandle.mappedBuffer != nullptr);

            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
            srcPointer += dataLayout.offset;

            uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
            if (dataRowsPerImage == 0) {
                dataRowsPerImage = writeSizePixel.height / blockInfo.height;
            }

            ASSERT(dataRowsPerImage >= alignedRowsPerImage);
            uint64_t imageAdditionalStride =
                dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);

            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
                            alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);

            return uploadHandle;
        }

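        // Tracks an OnSubmittedWorkDone callback and fires it with Success once the
        // associated serial completes, or with DeviceLost if the device is lost first.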
        struct SubmittedWorkDone : QueueBase::TaskInFlight {
            SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
                : mCallback(callback), mUserdata(userdata) {
            }
            void Finish() override {
                ASSERT(mCallback != nullptr);
                mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
                mCallback = nullptr;
            }
            void HandleDeviceLoss() override {
                ASSERT(mCallback != nullptr);
                mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
                mCallback = nullptr;
            }
            ~SubmittedWorkDone() override = default;

          private:
            WGPUQueueWorkDoneCallback mCallback = nullptr;
            void* mUserdata;
        };

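        // The queue backing error objects. Submissions to it fail validation before
        // reaching the backend, so SubmitImpl should never be called.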
        class ErrorQueue : public QueueBase {
          public:
            ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
            }

          private:
            MaybeError SubmitImpl(uint32_t commandCount,
                                  CommandBufferBase* const* commands) override {
                UNREACHABLE();
            }
        };
    }  // namespace

    // QueueBase

    QueueBase::TaskInFlight::~TaskInFlight() {
    }

    QueueBase::QueueBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
    }

    QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
        : ApiObjectBase(device, tag) {
    }

    QueueBase::~QueueBase() {
        ASSERT(mTasksInFlight.Empty());
    }

    void QueueBase::DestroyImpl() {
    }

    // static
    QueueBase* QueueBase::MakeError(DeviceBase* device) {
        return new ErrorQueue(device);
    }

    ObjectType QueueBase::GetType() const {
        return ObjectType::Queue;
    }

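    // Command buffers are single-use: they are destroyed as soon as they are submitted.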
    void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
        SubmitInternal(commandCount, commands);

        for (uint32_t i = 0; i < commandCount; ++i) {
            commands[i]->Destroy();
        }
    }

    void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
                                           WGPUQueueWorkDoneCallback callback,
                                           void* userdata) {
        // The error status depends on the type of error, so we let the validation
        // function choose it.
        WGPUQueueWorkDoneStatus status;
        if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
            callback(status, userdata);
            return;
        }

        std::unique_ptr<SubmittedWorkDone> task =
            std::make_unique<SubmittedWorkDone>(callback, userdata);

        // Technically we only need to wait for previously submitted work, but
        // OnSubmittedWorkDone is also used to make sure ALL queue work is finished in
        // tests, so we also wait for pending commands. (This is non-observable outside
        // of tests, so it's ok to deviate a bit from the spec.)
        TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
    }

    void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
        mTasksInFlight.Enqueue(std::move(task), serial);
        GetDevice()->AddFutureSerial(serial);
    }

    void QueueBase::Tick(ExecutionSerial finishedSerial) {
        // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
        // then the device will be ticked, which in turn ticks the queue, causing reentrance here.
        // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
        // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
        // callbacks.
        std::vector<std::unique_ptr<TaskInFlight>> tasks;
        for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
            tasks.push_back(std::move(task));
        }
        mTasksInFlight.ClearUpTo(finishedSerial);

        for (auto& task : tasks) {
            task->Finish();
        }
    }

    void QueueBase::HandleDeviceLoss() {
        for (auto& task : mTasksInFlight.IterateAll()) {
            task->HandleDeviceLoss();
        }
        mTasksInFlight.Clear();
    }

    void QueueBase::APIWriteBuffer(BufferBase* buffer,
                                   uint64_t bufferOffset,
                                   const void* data,
                                   size_t size) {
        GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
    }

    MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
                                      uint64_t bufferOffset,
                                      const void* data,
                                      size_t size) {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));
        DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
        DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
        return WriteBufferImpl(buffer, bufferOffset, data, size);
    }

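    // The data is staged in a dynamic upload allocation, then copied into the
    // destination buffer when the pending commands execute.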
    MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
                                          uint64_t bufferOffset,
                                          const void* data,
                                          size_t size) {
        if (size == 0) {
            return {};
        }

        DeviceBase* device = GetDevice();

        UploadHandle uploadHandle;
        DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
                                          size, device->GetPendingCommandSerial(),
                                          kCopyBufferToBufferOffsetAlignment));
        ASSERT(uploadHandle.mappedBuffer != nullptr);

        memcpy(uploadHandle.mappedBuffer, data, size);

        device->AddFutureSerial(device->GetPendingCommandSerial());

        return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
                                               buffer, bufferOffset, size);
    }

    void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
                                    const void* data,
                                    size_t dataSize,
                                    const TextureDataLayout* dataLayout,
                                    const Extent3D* writeSize) {
        GetDevice()->ConsumedError(
            WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
    }

    MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
                                               const void* data,
                                               size_t dataSize,
                                               const TextureDataLayout& dataLayout,
                                               const Extent3D* writeSize) {
        DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));

        if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
            return {};
        }

        const TexelBlockInfo& blockInfo =
            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
        TextureDataLayout layout = dataLayout;
        ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
        return WriteTextureImpl(*destination, data, layout, *writeSize);
    }

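    // Re-packs the source data into a staging allocation using the device's optimal
    // bytes-per-row alignment, then records a staging-to-texture copy.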
    MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
                                           const void* data,
                                           const TextureDataLayout& dataLayout,
                                           const Extent3D& writeSizePixel) {
        const Format& format = destination.texture->GetFormat();
        const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;

        // We are only copying the part of the data that will appear in the texture.
        // Note that validating the texture copy range ensures that writeSizePixel.width and
        // writeSizePixel.height are multiples of blockWidth and blockHeight respectively.
        ASSERT(writeSizePixel.width % blockInfo.width == 0);
        ASSERT(writeSizePixel.height % blockInfo.height == 0);
        uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
        uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;

        uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
        uint32_t optimallyAlignedBytesPerRow =
            Align(alignedBytesPerRow, optimalBytesPerRowAlignment);

        UploadHandle uploadHandle;
        DAWN_TRY_ASSIGN(uploadHandle,
                        UploadTextureDataAligningBytesPerRowAndOffset(
                            GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
                            alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
                            writeSizePixel));

        TextureDataLayout passDataLayout = dataLayout;
        passDataLayout.offset = uploadHandle.startOffset;
        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
        passDataLayout.rowsPerImage = alignedRowsPerImage;

        TextureCopy textureCopy;
        textureCopy.texture = destination.texture;
        textureCopy.mipLevel = destination.mipLevel;
        textureCopy.origin = destination.origin;
        textureCopy.aspect = ConvertAspect(format, destination.aspect);

        DeviceBase* device = GetDevice();

        device->AddFutureSerial(device->GetPendingCommandSerial());

        return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
                                                &textureCopy, writeSizePixel);
    }

    void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
                                             const ImageCopyTexture* destination,
                                             const Extent3D* copySize,
                                             const CopyTextureForBrowserOptions* options) {
        GetDevice()->ConsumedError(
            CopyTextureForBrowserInternal(source, destination, copySize, options));
    }

    MaybeError QueueBase::CopyTextureForBrowserInternal(
        const ImageCopyTexture* source,
        const ImageCopyTexture* destination,
        const Extent3D* copySize,
        const CopyTextureForBrowserOptions* options) {
        if (GetDevice()->IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(
                ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
                "validating CopyTextureForBrowser from %s to %s", source->texture,
                destination->texture);
        }

        return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
    }

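    // Checks each command buffer, and every buffer, texture, external texture, and
    // query set referenced by its passes, for validity on this queue before submit.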
    MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
                                         CommandBufferBase* const* commands) const {
        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
        DAWN_TRY(GetDevice()->ValidateObject(this));

        for (uint32_t i = 0; i < commandCount; ++i) {
            DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
            DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());

            const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();

            for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
                for (const BufferBase* buffer : scope.buffers) {
                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
                }

                for (const TextureBase* texture : scope.textures) {
                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
                }

                for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
                }
            }

            for (const ComputePassResourceUsage& pass : usages.computePasses) {
                for (const BufferBase* buffer : pass.referencedBuffers) {
                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
                }
                for (const TextureBase* texture : pass.referencedTextures) {
                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
                }
                for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
                }
            }

            for (const BufferBase* buffer : usages.topLevelBuffers) {
                DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
            }
            for (const TextureBase* texture : usages.topLevelTextures) {
                DAWN_TRY(texture->ValidateCanUseInSubmitNow());
            }
            for (const QuerySetBase* querySet : usages.usedQuerySets) {
                DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
            }
        }

        return {};
    }

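    // On failure, *status is set to the status matching the error: DeviceLost if the
    // device is gone, Error for any other validation failure.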
    MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
                                                      WGPUQueueWorkDoneStatus* status) const {
        *status = WGPUQueueWorkDoneStatus_DeviceLost;
        DAWN_TRY(GetDevice()->ValidateIsAlive());

        *status = WGPUQueueWorkDoneStatus_Error;
        DAWN_TRY(GetDevice()->ValidateObject(this));

        DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);

        return {};
    }

    MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
                                               size_t dataSize,
                                               const TextureDataLayout& dataLayout,
                                               const Extent3D* writeSize) const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));
        DAWN_TRY(GetDevice()->ValidateObject(destination->texture));

        DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));

        DAWN_INVALID_IF(dataLayout.offset > dataSize,
                        "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
                        dataSize);

        DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
                        "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
                        destination->texture, wgpu::TextureUsage::CopyDst);

        DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
                        "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
                        destination->texture);

        DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
        // We validate texture copy range before validating linear texture data,
        // because in the latter we divide copyExtent.width by blockWidth and
        // copyExtent.height by blockHeight while the divisibility conditions are
        // checked in validating texture copy range.
        DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));

        const TexelBlockInfo& blockInfo =
            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;

        DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));

        DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());

        return {};
    }

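    // Shared submit path: checks that the device is alive, runs validation when
    // enabled, and forwards to the backend's SubmitImpl, consuming any errors.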
    void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
        DeviceBase* device = GetDevice();
        if (device->ConsumedError(device->ValidateIsAlive())) {
            // If the device is lost, don't let any commands be submitted.
            return;
        }

        TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
        if (device->IsValidationEnabled() &&
            device->ConsumedError(ValidateSubmit(commandCount, commands))) {
            return;
        }
        ASSERT(!IsError());

        if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
            return;
        }
    }

}  // namespace dawn_native