// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "dawn_native/Device.h"

#include "common/Log.h"
#include "dawn_native/Adapter.h"
#include "dawn_native/AsyncTask.h"
#include "dawn_native/AttachmentState.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CompilationMessages.h"
#include "dawn_native/CreatePipelineAsyncTask.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/ErrorInjector.h"
#include "dawn_native/ErrorScope.h"
#include "dawn_native/ExternalTexture.h"
#include "dawn_native/Instance.h"
#include "dawn_native/InternalPipelineStore.h"
#include "dawn_native/ObjectType_autogen.h"
#include "dawn_native/PersistentCache.h"
#include "dawn_native/QuerySet.h"
#include "dawn_native/Queue.h"
#include "dawn_native/RenderBundleEncoder.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/Sampler.h"
#include "dawn_native/Surface.h"
#include "dawn_native/SwapChain.h"
#include "dawn_native/Texture.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
#include "utils/WGPUHelpers.h"

#include <array>
#include <mutex>
#include <unordered_set>

namespace dawn_native {

    // DeviceBase sub-structures

    // The caches are unordered_sets of pointers with special hash and compare functions
    // to compare the value of the objects, instead of the pointers.
    template <typename Object>
    using ContentLessObjectCache =
        std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
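
    // For reference, each cached Object is expected to provide content-based functors along
    // these lines (a shape sketch, not the actual Dawn declarations):
    //
    //     struct HashFunc { size_t operator()(const Object* obj) const; };
    //     struct EqualityFunc { bool operator()(const Object* a, const Object* b) const; };
    //
    // both of which operate on object contents rather than pointer identity.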

    struct DeviceBase::Caches {
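        // All cached objects must have been removed through the matching Uncache* method
        // before the device is destroyed; these asserts catch cached references that would
        // otherwise dangle.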
        ~Caches() {
            ASSERT(attachmentStates.empty());
            ASSERT(bindGroupLayouts.empty());
            ASSERT(computePipelines.empty());
            ASSERT(pipelineLayouts.empty());
            ASSERT(renderPipelines.empty());
            ASSERT(samplers.empty());
            ASSERT(shaderModules.empty());
        }

        ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
        ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
        ContentLessObjectCache<ComputePipelineBase> computePipelines;
        ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
        ContentLessObjectCache<RenderPipelineBase> renderPipelines;
        ContentLessObjectCache<SamplerBase> samplers;
        ContentLessObjectCache<ShaderModuleBase> shaderModules;
    };

    struct DeviceBase::DeprecationWarnings {
        std::unordered_set<std::string> emitted;
        size_t count = 0;
    };

    namespace {
        struct LoggingCallbackTask : CallbackTask {
          public:
            LoggingCallbackTask() = delete;
            LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
                                WGPULoggingType loggingType,
                                const char* message,
                                void* userdata)
                : mCallback(loggingCallback),
                  mLoggingType(loggingType),
                  mMessage(message),
                  mUserdata(userdata) {
                // Since Finish() will be called at an uncertain point in the future, by which
                // time the message may already have been disposed, we must keep a local copy
                // in the CallbackTask.
            }

            void Finish() override {
                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
            }

            void HandleShutDown() override {
                // Do the logging anyway
                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
            }

            void HandleDeviceLoss() override {
                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
            }

          private:
            // Because all deferred callback tasks are triggered before the registered callback
            // is modified or the device shuts down, the callback function and userdata pointer
            // stored in a task are guaranteed to be valid when it is triggered.
            wgpu::LoggingCallback mCallback;
            WGPULoggingType mLoggingType;
            std::string mMessage;
            void* mUserdata;
        };

        ResultOrError<Ref<PipelineLayoutBase>>
        ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
            DeviceBase* device,
            const ComputePipelineDescriptor& descriptor,
            ComputePipelineDescriptor* outDescriptor) {
            Ref<PipelineLayoutBase> layoutRef;
            *outDescriptor = descriptor;

            if (outDescriptor->layout == nullptr) {
                DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
                                               device, {{
                                                           SingleShaderStage::Compute,
                                                           outDescriptor->compute.module,
                                                           outDescriptor->compute.entryPoint,
                                                           outDescriptor->compute.constantCount,
                                                           outDescriptor->compute.constants,
                                                       }}));
                outDescriptor->layout = layoutRef.Get();
            }

            return layoutRef;
        }

        ResultOrError<Ref<PipelineLayoutBase>>
        ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
            DeviceBase* device,
            const RenderPipelineDescriptor& descriptor,
            RenderPipelineDescriptor* outDescriptor) {
            Ref<PipelineLayoutBase> layoutRef;
            *outDescriptor = descriptor;

            if (descriptor.layout == nullptr) {
                // The Ref keeps the pipeline layout alive until the end of the function, where
                // the pipeline takes another reference.
                DAWN_TRY_ASSIGN(layoutRef,
                                PipelineLayoutBase::CreateDefault(
                                    device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
                outDescriptor->layout = layoutRef.Get();
            }

            return layoutRef;
        }

    }  // anonymous namespace

    // DeviceBase

    DeviceBase::DeviceBase(AdapterBase* adapter, const DawnDeviceDescriptor* descriptor)
        : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
        if (descriptor != nullptr) {
            ApplyToggleOverrides(descriptor);
            ApplyFeatures(descriptor);
        }

        if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
            mLimits.v1 = ReifyDefaultLimits(FromAPI(descriptor->requiredLimits)->limits);
        } else {
            GetDefaultLimits(&mLimits.v1);
        }

        mFormatTable = BuildFormatTable(this);
        SetDefaultToggles();
    }

    DeviceBase::DeviceBase() : mState(State::Alive) {
        mCaches = std::make_unique<DeviceBase::Caches>();
    }

    DeviceBase::~DeviceBase() {
        // We need to explicitly release the Queue before we complete the destructor so that the
        // Queue does not get destroyed after the Device.
        mQueue = nullptr;
    }

    MaybeError DeviceBase::Initialize(QueueBase* defaultQueue) {
        mQueue = AcquireRef(defaultQueue);

#if defined(DAWN_ENABLE_ASSERTS)
        mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
            static bool calledOnce = false;
            if (!calledOnce) {
                calledOnce = true;
                dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
                                      "probably not intended. If you really want to ignore errors "
                                      "and suppress this message, set the callback to null.";
            }
        };

        mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
            static bool calledOnce = false;
            if (!calledOnce) {
                calledOnce = true;
                dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
                                      "intended. If you really want to ignore device lost "
                                      "and suppress this message, set the callback to null.";
            }
        };
#endif  // DAWN_ENABLE_ASSERTS

        mCaches = std::make_unique<DeviceBase::Caches>();
        mErrorScopeStack = std::make_unique<ErrorScopeStack>();
        mDynamicUploader = std::make_unique<DynamicUploader>(this);
        mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
        mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
        mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
        mPersistentCache = std::make_unique<PersistentCache>(this);

        ASSERT(GetPlatform() != nullptr);
        mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
        mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());

        // From this point on, the backend can make reentrant calls, so the device is marked as
        // alive.
        mState = State::Alive;

        DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());

        // If a dummy fragment shader module is needed, initialize it.
        if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
            // The empty fragment shader, used as a workaround for vertex-only render pipelines.
            constexpr char kEmptyFragmentShader[] = R"(
                [[stage(fragment)]] fn fs_empty_main() {}
            )";
            ShaderModuleDescriptor descriptor;
            ShaderModuleWGSLDescriptor wgslDesc;
            wgslDesc.source = kEmptyFragmentShader;
            descriptor.nextInChain = &wgslDesc;

            DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
                            CreateShaderModule(&descriptor));
        }

        return {};
    }

    void DeviceBase::DestroyObjects() {
        // List of object types in reverse "dependency" order so we can iterate and delete the
        // objects safely starting at leaf objects. We define dependent here such that if B has
        // a ref to A, then B depends on A. We therefore try to destroy B before destroying A. Note
        // that this only considers the immediate frontend dependencies, while backend objects could
        // add complications and extra dependencies.
        //
        // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
        // since AttachmentStates are cached by the device, objects that hold references to
        // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
        // can destroy the frontend cache.

        // clang-format off
        static constexpr std::array<ObjectType, 19> kObjectTypeDependencyOrder = {
            ObjectType::ComputePassEncoder,
            ObjectType::RenderPassEncoder,
            ObjectType::RenderBundleEncoder,
            ObjectType::RenderBundle,
            ObjectType::CommandEncoder,
            ObjectType::CommandBuffer,
            ObjectType::RenderPipeline,
            ObjectType::ComputePipeline,
            ObjectType::PipelineLayout,
            ObjectType::SwapChain,
            ObjectType::BindGroup,
            ObjectType::BindGroupLayout,
            ObjectType::ShaderModule,
            ObjectType::ExternalTexture,
            ObjectType::TextureView,
            ObjectType::Texture,
            ObjectType::QuerySet,
            ObjectType::Sampler,
            ObjectType::Buffer,
        };
        // clang-format on

        // We first move all objects out from the tracking list into a separate list so that we can
        // avoid locking the same mutex twice. We can then iterate across the separate list to call
        // the actual destroy function.
        LinkedList<ApiObjectBase> objects;
        for (ObjectType type : kObjectTypeDependencyOrder) {
            ApiObjectList& objList = mObjectLists[type];
            const std::lock_guard<std::mutex> lock(objList.mutex);
            objList.objects.MoveInto(&objects);
        }
        for (LinkNode<ApiObjectBase>* node : objects) {
            node->value()->Destroy();
        }
    }
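
    // Destroy() tears the device down: it flushes outstanding callbacks, waits for the GPU to
    // go idle, destroys frontend objects in dependency order, and only then destroys the
    // backend device via DestroyImpl().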

    void DeviceBase::Destroy() {
        // Skip if we are already destroyed.
        if (mState == State::Destroyed) {
            return;
        }

        // Skip handling device facilities if they haven't even been created (or their creation
        // failed).
        if (mState != State::BeingCreated) {
            // The device is being destroyed so it will be lost, call the application callback.
            if (mDeviceLostCallback != nullptr) {
                mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
                                    mDeviceLostUserdata);
                mDeviceLostCallback = nullptr;
            }

            // Call all the callbacks immediately as the device is about to shut down.
            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
            mAsyncTaskManager->WaitAllPendingTasks();
            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
                callbackTask->HandleShutDown();
            }
        }

        // Disconnect the device, depending on which state we are currently in.
        switch (mState) {
            case State::BeingCreated:
                // The GPU timeline was never started so we don't have to wait.
                break;

            case State::Alive:
                // Alive is the only state which can have GPU work happening. Wait for all of it to
                // complete before proceeding with destruction.
                // Ignore errors so that we can continue with destruction.
                IgnoreErrors(WaitForIdleForDestruction());
                AssumeCommandsComplete();
                break;

            case State::BeingDisconnected:
                // Getting disconnected is a transient state happening in a single API call so there
                // is always an external reference keeping the Device alive, which means the
                // destructor cannot run while BeingDisconnected.
                UNREACHABLE();
                break;

            case State::Disconnected:
                break;

            case State::Destroyed:
                // If we are already destroyed we should've skipped this work entirely.
                UNREACHABLE();
                break;
        }
        ASSERT(mCompletedSerial == mLastSubmittedSerial);
        ASSERT(mFutureSerial <= mCompletedSerial);

        if (mState != State::BeingCreated) {
            // The GPU timeline is finished.
            // Finish destroying all objects owned by the device and tick the queue-related tasks
            // since they should be complete. This must be done before DestroyImpl() as it may
            // relinquish resources that will be freed by backends in the DestroyImpl() call.
            DestroyObjects();
            mQueue->Tick(GetCompletedCommandSerial());
            // Call TickImpl one last time to clean up resources.
            // Ignore errors so that we can continue with destruction.
            IgnoreErrors(TickImpl());
        }

        // At this point GPU operations are always finished, so we are in the disconnected state.
        // Note that currently this state change is required because some of the backend
        // implementations of DestroyImpl check that we are disconnected before doing work.
        mState = State::Disconnected;

        mDynamicUploader = nullptr;
        mCallbackTaskManager = nullptr;
        mAsyncTaskManager = nullptr;
        mPersistentCache = nullptr;
        mEmptyBindGroupLayout = nullptr;
        mInternalPipelineStore = nullptr;

        AssumeCommandsComplete();

        // Now that the GPU timeline is empty, destroy the backend device.
        DestroyImpl();

        mCaches = nullptr;
        mState = State::Destroyed;
    }

    void DeviceBase::APIDestroy() {
        Destroy();
    }
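
    // HandleError funnels all internal errors through one place: device-lost and internal
    // errors disconnect the device, while validation and out-of-memory errors are offered to
    // the error scope stack and, failing that, to the uncaptured error callback.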

    void DeviceBase::HandleError(InternalErrorType type, const char* message) {
        if (type == InternalErrorType::DeviceLost) {
            mState = State::Disconnected;

            // If the ErrorInjector is enabled, then the device loss might be fake and the device
            // may still be executing commands. Force a wait for idle in this case, with State
            // being Disconnected so we can detect this case in WaitForIdleForDestruction.
            if (ErrorInjectorEnabled()) {
                IgnoreErrors(WaitForIdleForDestruction());
            }

            // A real device loss happened. Set the state to disconnected as the device cannot be
            // used. Also tag all commands as completed since the device stopped running.
            AssumeCommandsComplete();
        } else if (type == InternalErrorType::Internal) {
            // If we receive an internal error, assume the backend can't recover and proceed with
            // device destruction. We first wait for all previous commands to be completed so that
            // backend objects can be freed immediately, before handling the loss.

            // Move away from the Alive state so that the application cannot use this device
            // anymore.
            // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
            // threads in a multithreaded scenario?
            mState = State::BeingDisconnected;

            // Ignore errors so that we can continue with destruction.
            // Assume all commands are complete after WaitForIdleForDestruction (because they were).
            IgnoreErrors(WaitForIdleForDestruction());
            IgnoreErrors(TickImpl());
            AssumeCommandsComplete();
            ASSERT(mFutureSerial <= mCompletedSerial);
            mState = State::Disconnected;

            // Now everything is as if the device was lost.
            type = InternalErrorType::DeviceLost;
        }

        if (type == InternalErrorType::DeviceLost) {
            // The device was lost, call the application callback.
            if (mDeviceLostCallback != nullptr) {
                mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
                mDeviceLostCallback = nullptr;
            }

            mQueue->HandleDeviceLoss();

            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
            mAsyncTaskManager->WaitAllPendingTasks();
            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
                callbackTask->HandleDeviceLoss();
            }

            // Still forward device loss errors to the error scopes so they all reject.
            mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
        } else {
            // Pass the error to the error scope stack and call the uncaptured error callback
            // if it isn't handled. DeviceLost is not handled here because it should be
            // handled by the lost callback.
            bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
            if (!captured && mUncapturedErrorCallback != nullptr) {
                mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
                                         mUncapturedErrorUserdata);
            }
        }
    }

    void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
        ASSERT(error != nullptr);
        HandleError(error->GetType(), error->GetFormattedMessage().c_str());
    }

    void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
        // The registered callback function and userdata pointer are stored and used by deferred
        // callback tasks, and after setting a different callback (especially in the case of
        // resetting) the resources pointed to by those pointers may be freed. Flush all deferred
        // callback tasks to guarantee the previous callback is never used after this call.
        if (IsLost()) {
            return;
        }
        FlushCallbackTaskQueue();
        mLoggingCallback = callback;
        mLoggingUserdata = userdata;
    }

    void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
        // The registered callback function and userdata pointer are stored and used by deferred
        // callback tasks, and after setting a different callback (especially in the case of
        // resetting) the resources pointed to by those pointers may be freed. Flush all deferred
        // callback tasks to guarantee the previous callback is never used after this call.
        if (IsLost()) {
            return;
        }
        FlushCallbackTaskQueue();
        mUncapturedErrorCallback = callback;
        mUncapturedErrorUserdata = userdata;
    }

    void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
        // The registered callback function and userdata pointer are stored and used by deferred
        // callback tasks, and after setting a different callback (especially in the case of
        // resetting) the resources pointed to by those pointers may be freed. Flush all deferred
        // callback tasks to guarantee the previous callback is never used after this call.
        if (IsLost()) {
            return;
        }
        FlushCallbackTaskQueue();
        mDeviceLostCallback = callback;
        mDeviceLostUserdata = userdata;
    }

    void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
        if (ConsumedError(ValidateErrorFilter(filter))) {
            return;
        }
        mErrorScopeStack->Push(filter);
    }

    bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
        if (mErrorScopeStack->Empty()) {
            return false;
        }
        ErrorScope scope = mErrorScopeStack->Pop();
        if (callback != nullptr) {
            callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
                     userdata);
        }

        return true;
    }
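
    // Example usage of error scopes through the C++ wrapper (a sketch; assumes the standard
    // webgpu_cpp.h bindings that route to APIPushErrorScope / APIPopErrorScope):
    //
    //     device.PushErrorScope(wgpu::ErrorFilter::Validation);
    //     // ... commands that might raise a validation error ...
    //     device.PopErrorScope(
    //         [](WGPUErrorType type, char const* message, void*) {
    //             // type is WGPUErrorType_NoError when nothing was captured
    //         },
    //         nullptr);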

    PersistentCache* DeviceBase::GetPersistentCache() {
        ASSERT(mPersistentCache.get() != nullptr);
        return mPersistentCache.get();
    }

    MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
        ASSERT(object != nullptr);
        DAWN_INVALID_IF(object->GetDevice() != this,
                        "%s is associated with %s, and cannot be used with %s.", object,
                        object->GetDevice(), this);

        // TODO(dawn:563): Preserve labels for error objects.
        DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);

        return {};
    }

    MaybeError DeviceBase::ValidateIsAlive() const {
        DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
        return {};
    }

    void DeviceBase::APILoseForTesting() {
        if (mState != State::Alive) {
            return;
        }

        HandleError(InternalErrorType::Internal, "Device lost for testing");
    }

    DeviceBase::State DeviceBase::GetState() const {
        return mState;
    }

    bool DeviceBase::IsLost() const {
        ASSERT(mState != State::BeingCreated);
        return mState != State::Alive;
    }

    void DeviceBase::TrackObject(ApiObjectBase* object) {
        ApiObjectList& objectList = mObjectLists[object->GetType()];
        std::lock_guard<std::mutex> lock(objectList.mutex);
        object->InsertBefore(objectList.objects.head());
    }

    std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
        return &mObjectLists[type].mutex;
    }

    AdapterBase* DeviceBase::GetAdapter() const {
        return mAdapter;
    }

    dawn_platform::Platform* DeviceBase::GetPlatform() const {
        return GetAdapter()->GetInstance()->GetPlatform();
    }

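    // Serial bookkeeping: mLastSubmittedSerial counts submitted command batches,
    // mCompletedSerial tracks how far the GPU has actually progressed, and mFutureSerial is
    // the largest serial any tracker has asked the device to eventually reach.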
    ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
        return mCompletedSerial;
    }

    ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
        return mLastSubmittedSerial;
    }

    ExecutionSerial DeviceBase::GetFutureSerial() const {
        return mFutureSerial;
    }

    InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
        return mInternalPipelineStore.get();
    }

    void DeviceBase::IncrementLastSubmittedCommandSerial() {
        mLastSubmittedSerial++;
    }

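    // Fast-forward both the submitted and completed serials past all outstanding work,
    // including any future serial registered through AddFutureSerial, so that every pending
    // callback is considered ready to fire.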
    void DeviceBase::AssumeCommandsComplete() {
        ExecutionSerial maxSerial =
            ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
        mLastSubmittedSerial = maxSerial;
        mCompletedSerial = maxSerial;
    }

    bool DeviceBase::IsDeviceIdle() {
        if (mAsyncTaskManager->HasPendingTasks()) {
            return false;
        }

        ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
        if (mCompletedSerial == maxSerial) {
            return true;
        }
        return false;
    }

    ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
        return mLastSubmittedSerial + ExecutionSerial(1);
    }

    void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
        if (serial > mFutureSerial) {
            mFutureSerial = serial;
        }
    }

    MaybeError DeviceBase::CheckPassedSerials() {
        ExecutionSerial completedSerial;
        DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());

        ASSERT(completedSerial <= mLastSubmittedSerial);
        // completedSerial should not be less than mCompletedSerial unless it is 0.
        // It can be 0 when there are no fences to check.
        ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));

        if (completedSerial > mCompletedSerial) {
            mCompletedSerial = completedSerial;
        }

        return {};
    }

    ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
        size_t index = ComputeFormatIndex(format);
        DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);

        const Format* internalFormat = &mFormatTable[index];
        DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);

        return internalFormat;
    }

    const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
        size_t index = ComputeFormatIndex(format);
        ASSERT(index < mFormatTable.size());
        ASSERT(mFormatTable[index].isSupported);
        return mFormatTable[index];
    }

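    // The GetOrCreate* helpers below share one pattern: build an untracked "blueprint"
    // object, hash its contents, and look it up in the matching ContentLessObjectCache; only
    // on a cache miss is the backend object actually created and inserted.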
    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
        const BindGroupLayoutDescriptor* descriptor,
        PipelineCompatibilityToken pipelineCompatibilityToken) {
        BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
                                      ApiObjectBase::kUntrackedByDevice);

        const size_t blueprintHash = blueprint.ComputeContentHash();
        blueprint.SetContentHash(blueprintHash);

        Ref<BindGroupLayoutBase> result;
        auto iter = mCaches->bindGroupLayouts.find(&blueprint);
        if (iter != mCaches->bindGroupLayouts.end()) {
            result = *iter;
        } else {
            DAWN_TRY_ASSIGN(result,
                            CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
            result->SetIsCachedReference();
            result->SetContentHash(blueprintHash);
            mCaches->bindGroupLayouts.insert(result.Get());
        }

        return std::move(result);
    }

    void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
        ASSERT(removedCount == 1);
    }

    // Private function used at initialization
    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
        BindGroupLayoutDescriptor desc = {};
        desc.entryCount = 0;
        desc.entries = nullptr;

        return GetOrCreateBindGroupLayout(&desc);
    }

    BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
        ASSERT(mEmptyBindGroupLayout != nullptr);
        return mEmptyBindGroupLayout.Get();
    }

    Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
        ComputePipelineBase* uninitializedComputePipeline) {
        Ref<ComputePipelineBase> cachedPipeline;
        auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
        if (iter != mCaches->computePipelines.end()) {
            cachedPipeline = *iter;
        }

        return cachedPipeline;
    }

    Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
        RenderPipelineBase* uninitializedRenderPipeline) {
        Ref<RenderPipelineBase> cachedPipeline;
        auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
        if (iter != mCaches->renderPipelines.end()) {
            cachedPipeline = *iter;
        }
        return cachedPipeline;
    }

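    // Unlike the GetOrCreate* helpers, pipelines may be built asynchronously before being
    // inserted here; if an equivalent pipeline raced into the cache first, the freshly built
    // one is dropped in favor of the cached entry.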
    Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
        Ref<ComputePipelineBase> computePipeline) {
        auto insertion = mCaches->computePipelines.insert(computePipeline.Get());
        if (insertion.second) {
            computePipeline->SetIsCachedReference();
            return computePipeline;
        } else {
            return *(insertion.first);
        }
    }

    Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
        Ref<RenderPipelineBase> renderPipeline) {
        auto insertion = mCaches->renderPipelines.insert(renderPipeline.Get());
        if (insertion.second) {
            renderPipeline->SetIsCachedReference();
            return renderPipeline;
        } else {
            return *(insertion.first);
        }
    }

    void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->computePipelines.erase(obj);
        ASSERT(removedCount == 1);
    }

    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
        const PipelineLayoutDescriptor* descriptor) {
        PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);

        const size_t blueprintHash = blueprint.ComputeContentHash();
        blueprint.SetContentHash(blueprintHash);

        Ref<PipelineLayoutBase> result;
        auto iter = mCaches->pipelineLayouts.find(&blueprint);
        if (iter != mCaches->pipelineLayouts.end()) {
            result = *iter;
        } else {
            DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
            result->SetIsCachedReference();
            result->SetContentHash(blueprintHash);
            mCaches->pipelineLayouts.insert(result.Get());
        }

        return std::move(result);
    }

    void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->pipelineLayouts.erase(obj);
        ASSERT(removedCount == 1);
    }

    void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->renderPipelines.erase(obj);
        ASSERT(removedCount == 1);
    }

    ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
        const SamplerDescriptor* descriptor) {
        SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);

        const size_t blueprintHash = blueprint.ComputeContentHash();
        blueprint.SetContentHash(blueprintHash);

        Ref<SamplerBase> result;
        auto iter = mCaches->samplers.find(&blueprint);
        if (iter != mCaches->samplers.end()) {
            result = *iter;
        } else {
            DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
            result->SetIsCachedReference();
            result->SetContentHash(blueprintHash);
            mCaches->samplers.insert(result.Get());
        }

        return std::move(result);
    }

    void DeviceBase::UncacheSampler(SamplerBase* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->samplers.erase(obj);
        ASSERT(removedCount == 1);
    }

    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
        const ShaderModuleDescriptor* descriptor,
        ShaderModuleParseResult* parseResult,
        OwnedCompilationMessages* compilationMessages) {
        ASSERT(parseResult != nullptr);

        ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);

        const size_t blueprintHash = blueprint.ComputeContentHash();
        blueprint.SetContentHash(blueprintHash);

        Ref<ShaderModuleBase> result;
        auto iter = mCaches->shaderModules.find(&blueprint);
        if (iter != mCaches->shaderModules.end()) {
            result = *iter;
        } else {
            if (!parseResult->HasParsedShader()) {
                // We skip the parse on creation if validation isn't enabled, which lets us do a
                // quick lookup in the cache without validating and parsing. We need the parsed
                // module now, so call validate. Most of |ValidateShaderModuleDescriptor| is
                // parsing, but we can consider splitting it if additional validation is added.
                ASSERT(!IsValidationEnabled());
                DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
                                                        compilationMessages));
            }
            DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
            result->SetIsCachedReference();
            result->SetContentHash(blueprintHash);
            mCaches->shaderModules.insert(result.Get());
        }

        return std::move(result);
    }

    void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->shaderModules.erase(obj);
        ASSERT(removedCount == 1);
    }

    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
        AttachmentStateBlueprint* blueprint) {
        auto iter = mCaches->attachmentStates.find(blueprint);
        if (iter != mCaches->attachmentStates.end()) {
            return static_cast<AttachmentState*>(*iter);
        }

        Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
        attachmentState->SetIsCachedReference();
        attachmentState->SetContentHash(attachmentState->ComputeContentHash());
        mCaches->attachmentStates.insert(attachmentState.Get());
        return attachmentState;
    }

    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
        const RenderBundleEncoderDescriptor* descriptor) {
        AttachmentStateBlueprint blueprint(descriptor);
        return GetOrCreateAttachmentState(&blueprint);
    }

    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
        const RenderPipelineDescriptor* descriptor) {
        AttachmentStateBlueprint blueprint(descriptor);
        return GetOrCreateAttachmentState(&blueprint);
    }

    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
        const RenderPassDescriptor* descriptor) {
        AttachmentStateBlueprint blueprint(descriptor);
        return GetOrCreateAttachmentState(&blueprint);
    }

    void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
        ASSERT(obj->IsCachedReference());
        size_t removedCount = mCaches->attachmentStates.erase(obj);
        ASSERT(removedCount == 1);
    }

    // Object creation API methods

    BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
        Ref<BindGroupBase> result;
        if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
                          this, descriptor)) {
            return BindGroupBase::MakeError(this);
        }
        return result.Detach();
    }
    BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
        const BindGroupLayoutDescriptor* descriptor) {
        Ref<BindGroupLayoutBase> result;
        if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
                          "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
            return BindGroupLayoutBase::MakeError(this);
        }
        return result.Detach();
    }
    BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
        Ref<BufferBase> result = nullptr;
        if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
                          descriptor)) {
            ASSERT(result == nullptr);
            return BufferBase::MakeError(this, descriptor);
        }
        return result.Detach();
    }
    CommandEncoder* DeviceBase::APICreateCommandEncoder(
        const CommandEncoderDescriptor* descriptor) {
        const CommandEncoderDescriptor defaultDescriptor = {};
        if (descriptor == nullptr) {
            descriptor = &defaultDescriptor;
        }
        return new CommandEncoder(this, descriptor);
    }
    ComputePipelineBase* DeviceBase::APICreateComputePipeline(
        const ComputePipelineDescriptor* descriptor) {
        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
                     utils::GetLabelForTrace(descriptor->label));

        Ref<ComputePipelineBase> result;
        if (ConsumedError(CreateComputePipeline(descriptor), &result,
                          "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
            return ComputePipelineBase::MakeError(this);
        }
        return result.Detach();
    }
    void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
                                                   WGPUCreateComputePipelineAsyncCallback callback,
                                                   void* userdata) {
        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
                     utils::GetLabelForTrace(descriptor->label));

        MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);

        // Call the callback directly when a validation error has been found during front-end
        // validation. If there is no error, then CreateComputePipelineAsync will call the
        // callback.
        if (maybeResult.IsError()) {
            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
                     userdata);
        }
    }
    PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
        const PipelineLayoutDescriptor* descriptor) {
        Ref<PipelineLayoutBase> result;
        if (ConsumedError(CreatePipelineLayout(descriptor), &result,
                          "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
            return PipelineLayoutBase::MakeError(this);
        }
        return result.Detach();
    }
    QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
        Ref<QuerySetBase> result;
        if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
                          this, descriptor)) {
            return QuerySetBase::MakeError(this);
        }
        return result.Detach();
    }
    SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
        Ref<SamplerBase> result;
        if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
                          descriptor)) {
            return SamplerBase::MakeError(this);
        }
        return result.Detach();
    }
    void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
                                                  WGPUCreateRenderPipelineAsyncCallback callback,
                                                  void* userdata) {
        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
                     utils::GetLabelForTrace(descriptor->label));
        // TODO(dawn:563): Add validation error context.
        MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);

        // Call the callback directly when a validation error has been found during front-end
        // validation. If there is no error, then CreateRenderPipelineAsync will call the
        // callback.
        if (maybeResult.IsError()) {
            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
                     userdata);
        }
    }
    RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
        const RenderBundleEncoderDescriptor* descriptor) {
        Ref<RenderBundleEncoder> result;
        if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
                          "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
            return RenderBundleEncoder::MakeError(this);
        }
        return result.Detach();
    }
    RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
        const RenderPipelineDescriptor* descriptor) {
        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
                     utils::GetLabelForTrace(descriptor->label));

        Ref<RenderPipelineBase> result;
        if (ConsumedError(CreateRenderPipeline(descriptor), &result,
                          "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
            return RenderPipelineBase::MakeError(this);
        }
        return result.Detach();
    }
    ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
                     utils::GetLabelForTrace(descriptor->label));

        Ref<ShaderModuleBase> result;
        std::unique_ptr<OwnedCompilationMessages> compilationMessages(
            std::make_unique<OwnedCompilationMessages>());
        if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
                          "calling %s.CreateShaderModule(%s).", this, descriptor)) {
            DAWN_ASSERT(result == nullptr);
            result = ShaderModuleBase::MakeError(this);
        }
        // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
        // after all other operations are finished successfully.
        result->InjectCompilationMessages(std::move(compilationMessages));

        return result.Detach();
    }
    SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
                                                  const SwapChainDescriptor* descriptor) {
        Ref<SwapChainBase> result;
        if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
                          "calling %s.CreateSwapChain(%s).", this, descriptor)) {
            return SwapChainBase::MakeError(this);
        }
        return result.Detach();
    }
    TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
        Ref<TextureBase> result;
        if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
                          descriptor)) {
            return TextureBase::MakeError(this);
        }
        return result.Detach();
    }

    // For Dawn Wire

    BufferBase* DeviceBase::APICreateErrorBuffer() {
        BufferDescriptor desc = {};
        return BufferBase::MakeError(this, &desc);
    }

    // Other Device API methods

    // Returns true if future ticking is needed.
    bool DeviceBase::APITick() {
        if (ConsumedError(Tick())) {
            return false;
        }
        return !IsDeviceIdle();
    }
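
    // A sketch of an embedder's tick loop, assuming the C entry point of this vintage where
    // wgpuDeviceTick reports whether more ticking is needed:
    //
    //     while (wgpuDeviceTick(device)) {
    //         // pump platform events / yield until GPU work and callbacks drain
    //     }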

    MaybeError DeviceBase::Tick() {
        DAWN_TRY(ValidateIsAlive());

        // To avoid ticking too often, we only tick when:
        // 1. the last submitted serial has moved beyond the completed serial, or
        // 2. the completed serial has not yet reached the future serial set by the trackers.
        if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
            DAWN_TRY(CheckPassedSerials());
            DAWN_TRY(TickImpl());

            // If there is no GPU work in flight, we need to move the serials forward so that
            // CPU operations waiting on GPU completion can know they don't have to wait.
            // AssumeCommandsComplete will assign the max serial we must tick to in order to
            // fire the awaiting callbacks.
            if (mCompletedSerial == mLastSubmittedSerial) {
                AssumeCommandsComplete();
            }

            // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
            // tick the dynamic uploader before the backend resource allocators. This would allow
            // reclaiming resources one tick earlier.
            mDynamicUploader->Deallocate(mCompletedSerial);
            mQueue->Tick(mCompletedSerial);
        }

        // We have to check callback tasks in every Tick because they are not tied to any
        // global serial.
        FlushCallbackTaskQueue();

        return {};
    }

    QueueBase* DeviceBase::APIGetQueue() {
        // Backends provided the primary queue during initialization.
        ASSERT(mQueue != nullptr);

        // Returns a new reference to the queue.
        mQueue->Reference();
        return mQueue.Get();
    }

    ExternalTextureBase* DeviceBase::APICreateExternalTexture(
        const ExternalTextureDescriptor* descriptor) {
        Ref<ExternalTextureBase> result = nullptr;
        if (ConsumedError(CreateExternalTexture(descriptor), &result,
                          "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
            return ExternalTextureBase::MakeError(this);
        }

        return result.Detach();
    }
1142 
ApplyFeatures(const DawnDeviceDescriptor * deviceDescriptor)1143     void DeviceBase::ApplyFeatures(const DawnDeviceDescriptor* deviceDescriptor) {
1144         ASSERT(deviceDescriptor);
1145         ASSERT(GetAdapter()->SupportsAllRequestedFeatures(deviceDescriptor->requiredFeatures));
1146 
1147         mEnabledFeatures = GetAdapter()->GetInstance()->FeatureNamesToFeaturesSet(
1148             deviceDescriptor->requiredFeatures);
1149     }
1150 
GetEnabledFeatures() const1151     std::vector<const char*> DeviceBase::GetEnabledFeatures() const {
1152         return mEnabledFeatures.GetEnabledFeatureNames();
1153     }
1154 
IsFeatureEnabled(Feature feature) const1155     bool DeviceBase::IsFeatureEnabled(Feature feature) const {
1156         return mEnabledFeatures.IsEnabled(feature);
1157     }
1158 
IsValidationEnabled() const1159     bool DeviceBase::IsValidationEnabled() const {
1160         return !IsToggleEnabled(Toggle::SkipValidation);
1161     }
1162 
IsRobustnessEnabled() const1163     bool DeviceBase::IsRobustnessEnabled() const {
1164         return !IsToggleEnabled(Toggle::DisableRobustness);
1165     }
1166 
GetLazyClearCountForTesting()1167     size_t DeviceBase::GetLazyClearCountForTesting() {
1168         return mLazyClearCountForTesting;
1169     }
1170 
IncrementLazyClearCountForTesting()1171     void DeviceBase::IncrementLazyClearCountForTesting() {
1172         ++mLazyClearCountForTesting;
1173     }
1174 
GetDeprecationWarningCountForTesting()1175     size_t DeviceBase::GetDeprecationWarningCountForTesting() {
1176         return mDeprecationWarnings->count;
1177     }
1178 
EmitDeprecationWarning(const char * warning)1179     void DeviceBase::EmitDeprecationWarning(const char* warning) {
1180         mDeprecationWarnings->count++;
1181         if (mDeprecationWarnings->emitted.insert(warning).second) {
1182             dawn::WarningLog() << warning;
1183         }
1184     }
1185 
    void DeviceBase::EmitLog(const char* message) {
        this->EmitLog(WGPULoggingType_Info, message);
    }

    void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
        if (mLoggingCallback != nullptr) {
            // Use the thread-safe CallbackTaskManager routine to dispatch the message instead
            // of invoking the callback directly.
            std::unique_ptr<LoggingCallbackTask> callbackTask =
                std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
                                                      mLoggingUserdata);
            mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
        }
    }
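
    // Illustrative usage sketch (not part of this file): these messages reach the
    // application through the Dawn-specific logging callback, delivered from the
    // callback task queue when the device is ticked.
    //
    //     void OnDeviceLog(WGPULoggingType type, const char* message, void* userdata) {
    //         fprintf(stderr, "[dawn log] %s\n", message);
    //     }
    //     ...
    //     wgpuDeviceSetLoggingCallback(device, OnDeviceLog, nullptr);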

    bool DeviceBase::APIGetLimits(SupportedLimits* limits) {
        ASSERT(limits != nullptr);
        if (limits->nextInChain != nullptr) {
            return false;
        }
        limits->limits = mLimits.v1;
        return true;
    }
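
    // Illustrative usage sketch (not part of this file): querying the device limits via
    // the generated C++ API. Note a non-null nextInChain makes the query return false.
    //
    //     wgpu::SupportedLimits supported = {};
    //     if (device.GetLimits(&supported)) {
    //         uint32_t maxBindGroups = supported.limits.maxBindGroups;
    //     }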

    void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
        if (ConsumedError(ValidateErrorType(type))) {
            return;
        }

        // This method should only be used to make error scopes reject. For DeviceLost there is
        // the LoseForTesting function that can be used instead.
        if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
            HandleError(InternalErrorType::Validation,
                        "Invalid injected error, must be Validation or OutOfMemory");
            return;
        }

        HandleError(FromWGPUErrorType(type), message);
    }
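
    // Illustrative usage sketch (not part of this file): in tests, InjectError is paired
    // with an error scope so the injected error rejects that scope.
    //
    //     device.PushErrorScope(wgpu::ErrorFilter::Validation);
    //     device.InjectError(wgpu::ErrorType::Validation, "synthetic test error");
    //     device.PopErrorScope(
    //         [](WGPUErrorType type, const char* message, void*) {
    //             // Expect type == WGPUErrorType_Validation with the injected message.
    //         },
    //         nullptr);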

    QueueBase* DeviceBase::GetQueue() const {
        return mQueue.Get();
    }

    // Implementation details of object creation

    ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
        const BindGroupDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
                             "validating %s against %s", descriptor, descriptor->layout);
        }
        return CreateBindGroupImpl(descriptor);
    }

    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
        const BindGroupLayoutDescriptor* descriptor,
        bool allowInternalBinding) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(
                ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
                "validating %s", descriptor);
        }
        return GetOrCreateBindGroupLayout(descriptor);
    }

    ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
                             descriptor);
        }

        Ref<BufferBase> buffer;
        DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));

        if (descriptor->mappedAtCreation) {
            DAWN_TRY(buffer->MapAtCreation());
        }

        return std::move(buffer);
    }
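
    // Illustrative usage sketch (not part of this file): a buffer created with
    // mappedAtCreation == true is CPU-writable immediately, with no MapAsync round trip.
    //
    //     wgpu::BufferDescriptor desc = {};
    //     desc.size = sizeof(uint32_t);
    //     desc.usage = wgpu::BufferUsage::CopySrc;
    //     desc.mappedAtCreation = true;
    //     wgpu::Buffer buffer = device.CreateBuffer(&desc);
    //     uint32_t data = 42;
    //     memcpy(buffer.GetMappedRange(), &data, sizeof(data));
    //     buffer.Unmap();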

    ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
        const ComputePipelineDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
        }

        // Ref will keep the pipeline layout alive until the end of the function where
        // the pipeline will take another reference.
        Ref<PipelineLayoutBase> layoutRef;
        ComputePipelineDescriptor appliedDescriptor;
        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
                                       this, *descriptor, &appliedDescriptor));

        Ref<ComputePipelineBase> uninitializedComputePipeline =
            CreateUninitializedComputePipelineImpl(&appliedDescriptor);
        Ref<ComputePipelineBase> cachedComputePipeline =
            GetCachedComputePipeline(uninitializedComputePipeline.Get());
        if (cachedComputePipeline.Get() != nullptr) {
            return cachedComputePipeline;
        }

        DAWN_TRY(uninitializedComputePipeline->Initialize());
        return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
    }

    MaybeError DeviceBase::CreateComputePipelineAsync(
        const ComputePipelineDescriptor* descriptor,
        WGPUCreateComputePipelineAsyncCallback callback,
        void* userdata) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
        }

        Ref<PipelineLayoutBase> layoutRef;
        ComputePipelineDescriptor appliedDescriptor;
        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
                                       this, *descriptor, &appliedDescriptor));

        Ref<ComputePipelineBase> uninitializedComputePipeline =
            CreateUninitializedComputePipelineImpl(&appliedDescriptor);

        // Call the callback directly when we can get a cached compute pipeline object.
        Ref<ComputePipelineBase> cachedComputePipeline =
            GetCachedComputePipeline(uninitializedComputePipeline.Get());
        if (cachedComputePipeline.Get() != nullptr) {
            callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
                     "", userdata);
        } else {
            // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
            // where the pipeline object may be initialized asynchronously and the result will be
            // saved to mCreatePipelineAsyncTracker.
            InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
                                               userdata);
        }

        return {};
    }
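
    // Illustrative usage sketch (not part of this file): the async entry point through the
    // generated C++ API. On a cache hit the callback fires synchronously, as above.
    //
    //     device.CreateComputePipelineAsync(
    //         &pipelineDesc,
    //         [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
    //            const char* message, void* userdata) {
    //             if (status == WGPUCreatePipelineAsyncStatus_Success) {
    //                 // The callback receives ownership of `pipeline`.
    //             }
    //         },
    //         nullptr);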

    // This function is overridden with an async version on backends that support
    // initializing compute pipelines asynchronously.
    void DeviceBase::InitializeComputePipelineAsyncImpl(
        Ref<ComputePipelineBase> computePipeline,
        WGPUCreateComputePipelineAsyncCallback callback,
        void* userdata) {
        Ref<ComputePipelineBase> result;
        std::string errorMessage;

        MaybeError maybeError = computePipeline->Initialize();
        if (maybeError.IsError()) {
            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
            errorMessage = error->GetMessage();
        } else {
            result = AddOrGetCachedComputePipeline(std::move(computePipeline));
        }

        std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
            std::make_unique<CreateComputePipelineAsyncCallbackTask>(
                std::move(result), errorMessage, callback, userdata);
        mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
    }

    // This function is overridden with an async version on backends that support
    // initializing render pipelines asynchronously.
    void DeviceBase::InitializeRenderPipelineAsyncImpl(
        Ref<RenderPipelineBase> renderPipeline,
        WGPUCreateRenderPipelineAsyncCallback callback,
        void* userdata) {
        Ref<RenderPipelineBase> result;
        std::string errorMessage;

        MaybeError maybeError = renderPipeline->Initialize();
        if (maybeError.IsError()) {
            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
            errorMessage = error->GetMessage();
        } else {
            result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
        }

        std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
            std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
                                                                    callback, userdata);
        mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
    }

    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
        const PipelineLayoutDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
        }
        return GetOrCreatePipelineLayout(descriptor);
    }

    ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTexture(
        const ExternalTextureDescriptor* descriptor) {
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
                             descriptor);
        }

        return ExternalTextureBase::Create(this, descriptor);
    }

    ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
        const QuerySetDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
                             descriptor);
        }
        return CreateQuerySetImpl(descriptor);
    }

    ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
        const RenderBundleEncoderDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
        }
        return RenderBundleEncoder::Create(this, descriptor);
    }

    ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
        const RenderPipelineDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
        }

        // Ref will keep the pipeline layout alive until the end of the function where
        // the pipeline will take another reference.
        Ref<PipelineLayoutBase> layoutRef;
        RenderPipelineDescriptor appliedDescriptor;
        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
                                       this, *descriptor, &appliedDescriptor));

        Ref<RenderPipelineBase> uninitializedRenderPipeline =
            CreateUninitializedRenderPipelineImpl(&appliedDescriptor);

        Ref<RenderPipelineBase> cachedRenderPipeline =
            GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
        if (cachedRenderPipeline != nullptr) {
            return cachedRenderPipeline;
        }

        DAWN_TRY(uninitializedRenderPipeline->Initialize());
        return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
    }

    MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
                                                     WGPUCreateRenderPipelineAsyncCallback callback,
                                                     void* userdata) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
        }

        // Ref will keep the pipeline layout alive until the end of the function where
        // the pipeline will take another reference.
        Ref<PipelineLayoutBase> layoutRef;
        RenderPipelineDescriptor appliedDescriptor;
        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
                                       this, *descriptor, &appliedDescriptor));

        Ref<RenderPipelineBase> uninitializedRenderPipeline =
            CreateUninitializedRenderPipelineImpl(&appliedDescriptor);

        // Call the callback directly when we can get a cached render pipeline object.
        Ref<RenderPipelineBase> cachedRenderPipeline =
            GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
        if (cachedRenderPipeline != nullptr) {
            callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
                     "", userdata);
        } else {
            // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
            // where the pipeline object may be initialized asynchronously and the result will be
            // saved to mCreatePipelineAsyncTracker.
            InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
                                              userdata);
        }

        return {};
    }

    ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
        const SamplerDescriptor defaultDescriptor = {};
        DAWN_TRY(ValidateIsAlive());
        descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
                             descriptor);
        }
        return GetOrCreateSampler(descriptor);
    }
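
    // Illustrative usage sketch (not part of this file): a null descriptor falls back to
    // the default SamplerDescriptor, and GetOrCreateSampler deduplicates equivalent
    // samplers, so the two calls below should share one frontend sampler object.
    //
    //     wgpu::Sampler a = device.CreateSampler();
    //     wgpu::Sampler b = device.CreateSampler();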

    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
        const ShaderModuleDescriptor* descriptor,
        OwnedCompilationMessages* compilationMessages) {
        DAWN_TRY(ValidateIsAlive());

        // CreateShaderModule can be called from inside dawn_native. In that case the error is
        // handled directly in Dawn and no compilationMessages are held in the shader module.
        // This is fine as long as dawn_native doesn't use the compilationMessages of these
        // internal shader modules.
        ShaderModuleParseResult parseResult;

        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(
                ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
                "validating %s", descriptor);
        }

        return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
    }

    ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
        Surface* surface,
        const SwapChainDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
                             "validating %s", descriptor);
        }

        // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
        if (surface == nullptr) {
            return CreateSwapChainImpl(descriptor);
        } else {
            ASSERT(descriptor->implementation == 0);

            NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
            ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
                CreateSwapChainImpl(surface, previousSwapChain, descriptor);

            if (previousSwapChain != nullptr) {
                previousSwapChain->DetachFromSurface();
            }

            Ref<NewSwapChainBase> newSwapChain;
            DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));

            newSwapChain->SetIsAttached();
            surface->SetAttachedSwapChain(newSwapChain.Get());
            return newSwapChain;
        }
    }
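
    // Illustrative usage sketch (not part of this file): with surface-based swapchains,
    // creating a new swapchain (e.g. after a window resize) detaches and replaces the one
    // previously attached to the surface. The width/height values here are hypothetical.
    //
    //     wgpu::SwapChainDescriptor desc = {};
    //     desc.usage = wgpu::TextureUsage::RenderAttachment;
    //     desc.format = wgpu::TextureFormat::BGRA8Unorm;
    //     desc.width = newWidth;
    //     desc.height = newHeight;
    //     desc.presentMode = wgpu::PresentMode::Fifo;
    //     wgpu::SwapChain swapChain = device.CreateSwapChain(surface, &desc);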

    ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
                             descriptor);
        }
        return CreateTextureImpl(descriptor);
    }

    ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
        TextureBase* texture,
        const TextureViewDescriptor* descriptor) {
        DAWN_TRY(ValidateIsAlive());
        DAWN_TRY(ValidateObject(texture));
        TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
        if (IsValidationEnabled()) {
            DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
                             "validating %s against %s.", &desc, texture);
        }
        return CreateTextureViewImpl(texture, &desc);
    }

    // Other implementation details

    DynamicUploader* DeviceBase::GetDynamicUploader() const {
        return mDynamicUploader.get();
    }

    // The Toggle device facility

    std::vector<const char*> DeviceBase::GetTogglesUsed() const {
        return mEnabledToggles.GetContainedToggleNames();
    }

    bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
        return mEnabledToggles.Has(toggle);
    }

    void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
        if (!mOverridenToggles.Has(toggle)) {
            mEnabledToggles.Set(toggle, isEnabled);
        }
    }

    void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
        // Warn when the forced value stomps on a value the toggle was overridden to;
        // the original condition was negated, contradicting the warning message.
        if (mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
            dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
                               << isEnabled << " when it was overridden to be " << !isEnabled;
        }
        mEnabledToggles.Set(toggle, isEnabled);
    }

    void DeviceBase::SetDefaultToggles() {
        SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
        SetToggle(Toggle::DisallowUnsafeAPIs, true);
    }

    void DeviceBase::ApplyToggleOverrides(const DawnDeviceDescriptor* deviceDescriptor) {
        ASSERT(deviceDescriptor);

        for (const char* toggleName : deviceDescriptor->forceEnabledToggles) {
            Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(toggleName);
            if (toggle != Toggle::InvalidEnum) {
                mEnabledToggles.Set(toggle, true);
                mOverridenToggles.Set(toggle, true);
            }
        }
        for (const char* toggleName : deviceDescriptor->forceDisabledToggles) {
            Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(toggleName);
            if (toggle != Toggle::InvalidEnum) {
                mEnabledToggles.Set(toggle, false);
                mOverridenToggles.Set(toggle, true);
            }
        }
    }
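
    // Illustrative usage sketch (not part of this file): user-facing toggle overrides are
    // requested through the device descriptor at creation time and recorded in
    // mOverridenToggles, so later SetToggle() calls cannot silently change them.
    //
    //     dawn_native::DawnDeviceDescriptor deviceDesc;
    //     deviceDesc.forceEnabledToggles.push_back("skip_validation");
    //     deviceDesc.forceDisabledToggles.push_back("lazy_clear_resource_on_first_use");
    //     WGPUDevice device = adapter.CreateDevice(&deviceDesc);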

    void DeviceBase::FlushCallbackTaskQueue() {
        if (!mCallbackTaskManager->IsEmpty()) {
            // If a user calls Queue::Submit inside a callback, the device will be ticked,
            // which in turn ticks the tracker, causing reentrance and deadlock here. To prevent
            // such a reentrant call, we remove all the callback tasks from mCallbackTaskManager,
            // update mCallbackTaskManager, then call all the callbacks.
            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
                callbackTask->Finish();
            }
        }
    }

    const CombinedLimits& DeviceBase::GetLimits() const {
        return mLimits;
    }

    AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
        return mAsyncTaskManager.get();
    }

    CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
        return mCallbackTaskManager.get();
    }

    dawn_platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
        return mWorkerTaskPool.get();
    }

    void DeviceBase::AddComputePipelineAsyncCallbackTask(
        Ref<ComputePipelineBase> pipeline,
        std::string errorMessage,
        WGPUCreateComputePipelineAsyncCallback callback,
        void* userdata) {
        // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
        // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
        struct CreateComputePipelineAsyncWaitableCallbackTask final
            : CreateComputePipelineAsyncCallbackTask {
            using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
            void Finish() final {
                // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
                // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
                // thread-safe.
                if (mPipeline.Get() != nullptr) {
                    mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
                }

                CreateComputePipelineAsyncCallbackTask::Finish();
            }
        };

        mCallbackTaskManager->AddCallbackTask(
            std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
                std::move(pipeline), errorMessage, callback, userdata));
    }

    void DeviceBase::AddRenderPipelineAsyncCallbackTask(
        Ref<RenderPipelineBase> pipeline,
        std::string errorMessage,
        WGPUCreateRenderPipelineAsyncCallback callback,
        void* userdata) {
        // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
        // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
        struct CreateRenderPipelineAsyncWaitableCallbackTask final
            : CreateRenderPipelineAsyncCallbackTask {
            using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;

            void Finish() final {
                // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
                // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
                // thread-safe.
                if (mPipeline.Get() != nullptr) {
                    mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
                }

                CreateRenderPipelineAsyncCallbackTask::Finish();
            }
        };

        mCallbackTaskManager->AddCallbackTask(
            std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
                std::move(pipeline), errorMessage, callback, userdata));
    }

    PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
        return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
    }

    const std::string& DeviceBase::GetLabel() const {
        return mLabel;
    }

    void DeviceBase::APISetLabel(const char* label) {
        mLabel = label;
        SetLabelImpl();
    }

    void DeviceBase::SetLabelImpl() {
    }

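    // The base implementation returns false; a backend may override this to request that
    // the indirect dispatch parameters be duplicated, e.g. to make the number of
    // workgroups visible to the shader when the platform has no equivalent builtin.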
    bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
        ComputePipelineBase* computePipeline) const {
        return false;
    }

}  // namespace dawn_native