1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/gpu/graphite/dawn/DawnUtils.h"
9 #include "src/gpu/graphite/dawn/DawnGraphiteUtilsPriv.h"
10
11 #include "include/gpu/ShaderErrorHandler.h"
12 #include "include/gpu/graphite/Context.h"
13 #include "include/gpu/graphite/dawn/DawnBackendContext.h"
14 #include "src/gpu/graphite/ContextPriv.h"
15 #include "src/gpu/graphite/dawn/DawnQueueManager.h"
16 #include "src/gpu/graphite/dawn/DawnSharedContext.h"
17
18 namespace skgpu::graphite {
19
20 namespace ContextFactory {
MakeDawn(const DawnBackendContext & backendContext,const ContextOptions & options)21 std::unique_ptr<Context> MakeDawn(const DawnBackendContext& backendContext,
22 const ContextOptions& options) {
23 sk_sp<SharedContext> sharedContext = DawnSharedContext::Make(backendContext, options);
24 if (!sharedContext) {
25 return nullptr;
26 }
27
28 auto queueManager =
29 std::make_unique<DawnQueueManager>(backendContext.fQueue, sharedContext.get());
30 if (!queueManager) {
31 return nullptr;
32 }
33
34 auto context = ContextCtorAccessor::MakeContext(std::move(sharedContext),
35 std::move(queueManager),
36 options);
37 SkASSERT(context);
38 return context;
39 }
40 } // namespace ContextFactory
41
DawnFormatIsDepthOrStencil(wgpu::TextureFormat format)42 bool DawnFormatIsDepthOrStencil(wgpu::TextureFormat format) {
43 switch (format) {
44 case wgpu::TextureFormat::Stencil8: [[fallthrough]];
45 case wgpu::TextureFormat::Depth16Unorm:
46 case wgpu::TextureFormat::Depth32Float:
47 case wgpu::TextureFormat::Depth24PlusStencil8:
48 case wgpu::TextureFormat::Depth32FloatStencil8:
49 return true;
50 default:
51 return false;
52 }
53 }
54
DawnFormatIsDepth(wgpu::TextureFormat format)55 bool DawnFormatIsDepth(wgpu::TextureFormat format) {
56 switch (format) {
57 case wgpu::TextureFormat::Depth16Unorm: [[fallthrough]];
58 case wgpu::TextureFormat::Depth32Float:
59 case wgpu::TextureFormat::Depth24PlusStencil8:
60 case wgpu::TextureFormat::Depth32FloatStencil8:
61 return true;
62 default:
63 return false;
64 }
65 }
66
DawnFormatIsStencil(wgpu::TextureFormat format)67 bool DawnFormatIsStencil(wgpu::TextureFormat format) {
68 switch (format) {
69 case wgpu::TextureFormat::Stencil8: [[fallthrough]];
70 case wgpu::TextureFormat::Depth24PlusStencil8:
71 case wgpu::TextureFormat::Depth32FloatStencil8:
72 return true;
73 default:
74 return false;
75 }
76 }
77
DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask)78 wgpu::TextureFormat DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask) {
79 // TODO: Decide if we want to change this to always return a combined depth and stencil format
80 // to allow more sharing of depth stencil allocations.
81 if (mask == DepthStencilFlags::kDepth) {
82 // If needed for workarounds or performance, Depth32Float is also available but requires 2x
83 // the amount of memory.
84 return wgpu::TextureFormat::Depth16Unorm;
85 } else if (mask == DepthStencilFlags::kStencil) {
86 return wgpu::TextureFormat::Stencil8;
87 } else if (mask == DepthStencilFlags::kDepthStencil) {
88 return wgpu::TextureFormat::Depth24PlusStencil8;
89 }
90 SkASSERT(false);
91 return wgpu::TextureFormat::Undefined;
92 }
93
// Checks the compilation status of `module` and, on hard error, reports the
// compiler messages (with `shaderText`) to `errorHandler`. Returns true when
// the module has no error-severity compilation messages. The wait path uses
// sharedContext's instance; on the pre-3.1.51 emsdk path that parameter is
// unused, hence [[maybe_unused]].
static bool check_shader_module([[maybe_unused]] const DawnSharedContext* sharedContext,
                                wgpu::ShaderModule* module,
                                const char* shaderText,
                                ShaderErrorHandler* errorHandler) {
    // Prior to emsdk 3.1.51 wgpu::ShaderModule::GetCompilationInfo is unimplemented.
#if defined(__EMSCRIPTEN__) && \
    ((__EMSCRIPTEN_major__ < 3 || \
     (__EMSCRIPTEN_major__ == 3 && __EMSCRIPTEN_minor__ < 1) || \
     (__EMSCRIPTEN_major__ == 3 && __EMSCRIPTEN_minor__ == 1 && __EMSCRIPTEN_tiny__ < 51)))
    // Cannot query compilation info on this emsdk; optimistically assume success.
    return true;
#else
    // C-style callback target plus the state it needs. The callback runs
    // synchronously before this function returns (either inside
    // GetCompilationInfo on the emscripten path, or under WaitAny below), so
    // stack storage for the handler is safe.
    struct Handler {
        static void Fn(WGPUCompilationInfoRequestStatus status,
                       const WGPUCompilationInfo* info,
                       void* userdata) {
            Handler* self = reinterpret_cast<Handler*>(userdata);
            SkASSERT(status == WGPUCompilationInfoRequestStatus_Success);

            // Walk the message list and check for hard errors.
            self->fSuccess = true;
            for (size_t index = 0; index < info->messageCount; ++index) {
                const WGPUCompilationMessage& entry = info->messages[index];
                if (entry.type == WGPUCompilationMessageType_Error) {
                    self->fSuccess = false;
                    break;
                }
            }

            // If we found a hard error, report the compilation messages to the error handler.
            if (!self->fSuccess) {
                std::string errors;
                for (size_t index = 0; index < info->messageCount; ++index) {
                    const WGPUCompilationMessage& entry = info->messages[index];
                    errors += "line " +
                              std::to_string(entry.lineNum) + ':' +
                              std::to_string(entry.linePos) + ' ' +
                              entry.message + '\n';
                }
                self->fErrorHandler->compileError(
                        self->fShaderText, errors.c_str(), /*shaderWasCached=*/false);
            }
        }

        const char* fShaderText;       // shader source, passed through to the error handler
        ShaderErrorHandler* fErrorHandler;
        bool fSuccess = false;         // set by Fn; false until the callback runs
    };

    Handler handler;
    handler.fShaderText = shaderText;
    handler.fErrorHandler = errorHandler;
#if defined(__EMSCRIPTEN__)
    // Deprecated function.
    module->GetCompilationInfo(&Handler::Fn, &handler);
#else
    // New API: future-based GetCompilationInfo. The lambda adapts the C++
    // callback types back to the C types Handler::Fn expects.
    wgpu::FutureWaitInfo waitInfo{};
    waitInfo.future = module->GetCompilationInfo(
            wgpu::CallbackMode::WaitAnyOnly,
            [handlerPtr = &handler](wgpu::CompilationInfoRequestStatus status,
                                    const wgpu::CompilationInfo* info) {
                Handler::Fn(static_cast<WGPUCompilationInfoRequestStatus>(status),
                            reinterpret_cast<const WGPUCompilationInfo*>(info),
                            handlerPtr);
            });

    // Block until the compilation-info future resolves (no timeout) so that
    // handler.fSuccess is valid when we return.
    const auto& instance = static_cast<const DawnSharedContext*>(sharedContext)
                                   ->device()
                                   .GetAdapter()
                                   .GetInstance();
    [[maybe_unused]] auto status =
            instance.WaitAny(1, &waitInfo, /*timeoutNS=*/std::numeric_limits<uint64_t>::max());
    SkASSERT(status == wgpu::WaitStatus::Success);
#endif  // defined(__EMSCRIPTEN__)

    return handler.fSuccess;
#endif
}
172
DawnCompileWGSLShaderModule(const DawnSharedContext * sharedContext,const char * label,const std::string & wgsl,wgpu::ShaderModule * module,ShaderErrorHandler * errorHandler)173 bool DawnCompileWGSLShaderModule(const DawnSharedContext* sharedContext,
174 const char* label,
175 const std::string& wgsl,
176 wgpu::ShaderModule* module,
177 ShaderErrorHandler* errorHandler) {
178 wgpu::ShaderModuleWGSLDescriptor wgslDesc;
179 wgslDesc.code = wgsl.c_str();
180
181 wgpu::ShaderModuleDescriptor desc;
182 desc.nextInChain = &wgslDesc;
183 if (sharedContext->caps()->setBackendLabels()) {
184 desc.label = label;
185 }
186
187 *module = sharedContext->device().CreateShaderModule(&desc);
188
189 return check_shader_module(sharedContext, module, wgsl.c_str(), errorHandler);
190 }
191
192 } // namespace skgpu::graphite
193