1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/dawn/DawnGraphiteUtils.h"
9
10 #include "include/gpu/ShaderErrorHandler.h"
11 #include "include/gpu/graphite/Context.h"
12 #include "include/gpu/graphite/dawn/DawnBackendContext.h"
13 #include "src/gpu/graphite/ContextPriv.h"
14 #include "src/gpu/graphite/TextureFormat.h"
15 #include "src/gpu/graphite/dawn/DawnQueueManager.h"
16 #include "src/gpu/graphite/dawn/DawnSharedContext.h"
17
18 namespace skgpu::graphite {
19
20 namespace ContextFactory {
MakeDawn(const DawnBackendContext & backendContext,const ContextOptions & options)21 std::unique_ptr<Context> MakeDawn(const DawnBackendContext& backendContext,
22 const ContextOptions& options) {
23 sk_sp<SharedContext> sharedContext = DawnSharedContext::Make(backendContext, options);
24 if (!sharedContext) {
25 return nullptr;
26 }
27
28 auto queueManager =
29 std::make_unique<DawnQueueManager>(backendContext.fQueue, sharedContext.get());
30 if (!queueManager) {
31 return nullptr;
32 }
33
34 auto context = ContextCtorAccessor::MakeContext(std::move(sharedContext),
35 std::move(queueManager),
36 options);
37 SkASSERT(context);
38 return context;
39 }
40 } // namespace ContextFactory
41
DawnFormatToCompressionType(wgpu::TextureFormat format)42 SkTextureCompressionType DawnFormatToCompressionType(wgpu::TextureFormat format) {
43 switch (format) {
44 case wgpu::TextureFormat::ETC2RGB8Unorm: return SkTextureCompressionType::kETC2_RGB8_UNORM;
45 case wgpu::TextureFormat::BC1RGBAUnorm: return SkTextureCompressionType::kBC1_RGBA8_UNORM;
46 default: return SkTextureCompressionType::kNone;
47 }
48 }
49
DawnFormatIsDepthOrStencil(wgpu::TextureFormat format)50 bool DawnFormatIsDepthOrStencil(wgpu::TextureFormat format) {
51 switch (format) {
52 case wgpu::TextureFormat::Stencil8: [[fallthrough]];
53 case wgpu::TextureFormat::Depth16Unorm:
54 case wgpu::TextureFormat::Depth32Float:
55 case wgpu::TextureFormat::Depth24PlusStencil8:
56 case wgpu::TextureFormat::Depth32FloatStencil8:
57 return true;
58 default:
59 return false;
60 }
61
62 SkUNREACHABLE;
63 }
64
DawnFormatIsDepth(wgpu::TextureFormat format)65 bool DawnFormatIsDepth(wgpu::TextureFormat format) {
66 switch (format) {
67 case wgpu::TextureFormat::Depth16Unorm: [[fallthrough]];
68 case wgpu::TextureFormat::Depth32Float:
69 case wgpu::TextureFormat::Depth24PlusStencil8:
70 case wgpu::TextureFormat::Depth32FloatStencil8:
71 return true;
72 default:
73 return false;
74 }
75
76 SkUNREACHABLE;
77 }
78
DawnFormatIsStencil(wgpu::TextureFormat format)79 bool DawnFormatIsStencil(wgpu::TextureFormat format) {
80 switch (format) {
81 case wgpu::TextureFormat::Stencil8: [[fallthrough]];
82 case wgpu::TextureFormat::Depth24PlusStencil8:
83 case wgpu::TextureFormat::Depth32FloatStencil8:
84 return true;
85 default:
86 return false;
87 }
88
89 SkUNREACHABLE;
90 }
91
DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask)92 wgpu::TextureFormat DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask) {
93 // TODO: Decide if we want to change this to always return a combined depth and stencil format
94 // to allow more sharing of depth stencil allocations.
95 if (mask == DepthStencilFlags::kDepth) {
96 // If needed for workarounds or performance, Depth32Float is also available but requires 2x
97 // the amount of memory.
98 return wgpu::TextureFormat::Depth16Unorm;
99 } else if (mask == DepthStencilFlags::kStencil) {
100 return wgpu::TextureFormat::Stencil8;
101 } else if (mask == DepthStencilFlags::kDepthStencil) {
102 return wgpu::TextureFormat::Depth24PlusStencil8;
103 }
104 SkASSERT(false);
105 return wgpu::TextureFormat::Undefined;
106 }
107
// Maps a wgpu::TextureFormat to the backend-agnostic graphite TextureFormat.
// Formats with no graphite equivalent map to kUnsupported. The multi-planar
// and External cases are Dawn-native extensions not available under
// Emscripten/WebGPU, hence the #if guard.
TextureFormat DawnFormatToTextureFormat(wgpu::TextureFormat format) {
    switch (format) {
        // Color formats.
        case wgpu::TextureFormat::R8Unorm:             return TextureFormat::kR8;
        case wgpu::TextureFormat::R16Float:            return TextureFormat::kR16F;
        case wgpu::TextureFormat::R32Float:            return TextureFormat::kR32F;
        case wgpu::TextureFormat::RG8Unorm:            return TextureFormat::kRG8;
        case wgpu::TextureFormat::RG16Float:           return TextureFormat::kRG16F;
        case wgpu::TextureFormat::RG32Float:           return TextureFormat::kRG32F;
        case wgpu::TextureFormat::RGBA8Unorm:          return TextureFormat::kRGBA8;
        case wgpu::TextureFormat::RGBA16Float:         return TextureFormat::kRGBA16F;
        case wgpu::TextureFormat::RGBA32Float:         return TextureFormat::kRGBA32F;
        case wgpu::TextureFormat::RGB10A2Unorm:        return TextureFormat::kRGB10_A2;
        case wgpu::TextureFormat::RGBA8UnormSrgb:      return TextureFormat::kRGBA8_sRGB;
        case wgpu::TextureFormat::BGRA8Unorm:          return TextureFormat::kBGRA8;
        case wgpu::TextureFormat::BGRA8UnormSrgb:      return TextureFormat::kBGRA8_sRGB;
        // Compressed formats.
        case wgpu::TextureFormat::ETC2RGB8Unorm:       return TextureFormat::kRGB8_ETC2;
        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:   return TextureFormat::kRGB8_ETC2_sRGB;
        case wgpu::TextureFormat::BC1RGBAUnorm:        return TextureFormat::kRGBA8_BC1;
        case wgpu::TextureFormat::BC1RGBAUnormSrgb:    return TextureFormat::kRGBA8_BC1_sRGB;
        // Depth/stencil formats.
        case wgpu::TextureFormat::Stencil8:            return TextureFormat::kS8;
        case wgpu::TextureFormat::Depth16Unorm:        return TextureFormat::kD16;
        case wgpu::TextureFormat::Depth32Float:        return TextureFormat::kD32F;
        case wgpu::TextureFormat::Depth24PlusStencil8: return TextureFormat::kD24_S8;
        case wgpu::TextureFormat::Depth32FloatStencil8: return TextureFormat::kD32F_S8;
#if !defined(__EMSCRIPTEN__)
        // Dawn-native only: 16-bit normalized, multi-planar YUV, and external formats.
        case wgpu::TextureFormat::R16Unorm:            return TextureFormat::kR16;
        case wgpu::TextureFormat::RG16Unorm:           return TextureFormat::kRG16;
        case wgpu::TextureFormat::RGBA16Unorm:         return TextureFormat::kRGBA16;
        case wgpu::TextureFormat::R8BG8Biplanar420Unorm: return TextureFormat::kYUV8_P2_420;
        case wgpu::TextureFormat::R10X6BG10X6Biplanar420Unorm:
            return TextureFormat::kYUV10x6_P2_420;
        case wgpu::TextureFormat::External:            return TextureFormat::kExternal;
#endif
        default:                                       return TextureFormat::kUnsupported;
    }
}
144
// Queries Dawn for the compilation info of a freshly created shader module and,
// if any error-severity messages are present, reports them (with line/column
// positions and the full shader text) through the ShaderErrorHandler.
// Returns true when the module compiled without hard errors.
static bool check_shader_module([[maybe_unused]] const DawnSharedContext* sharedContext,
                                wgpu::ShaderModule* module,
                                const char* shaderText,
                                ShaderErrorHandler* errorHandler) {
    // Prior to emsdk 3.1.51 wgpu::ShaderModule::GetCompilationInfo is unimplemented.
#if defined(__EMSCRIPTEN__) && \
    ((__EMSCRIPTEN_major__ < 3 || \
      (__EMSCRIPTEN_major__ == 3 && __EMSCRIPTEN_minor__ < 1) || \
      (__EMSCRIPTEN_major__ == 3 && __EMSCRIPTEN_minor__ == 1 && __EMSCRIPTEN_tiny__ < 51)))
    // No way to query compilation info on old emsdk: optimistically report success.
    return true;
#else
    // C-style callback target plus the state it needs; passed via `userdata`.
    struct Handler {
        static void Fn(WGPUCompilationInfoRequestStatus status,
                       const WGPUCompilationInfo* info,
                       void* userdata) {
            Handler* self = reinterpret_cast<Handler*>(userdata);
            SkASSERT(status == WGPUCompilationInfoRequestStatus_Success);

            // Walk the message list and check for hard errors.
            self->fSuccess = true;
            for (size_t index = 0; index < info->messageCount; ++index) {
                const WGPUCompilationMessage& entry = info->messages[index];
                if (entry.type == WGPUCompilationMessageType_Error) {
                    self->fSuccess = false;
                    break;
                }
            }

            // If we found a hard error, report the compilation messages to the error handler.
            if (!self->fSuccess) {
                std::string errors;
                for (size_t index = 0; index < info->messageCount; ++index) {
                    const WGPUCompilationMessage& entry = info->messages[index];
#if defined(WGPU_BREAKING_CHANGE_STRING_VIEW_OUTPUT_STRUCTS)
                    // Newer Dawn: message is a (data, length) string view, not NUL-terminated.
                    std::string messageString(entry.message.data, entry.message.length);
#else  // defined(WGPU_BREAKING_CHANGE_STRING_VIEW_OUTPUT_STRUCTS)
                    std::string messageString(entry.message);
#endif  // defined(WGPU_BREAKING_CHANGE_STRING_VIEW_OUTPUT_STRUCTS)
                    errors += "line " + std::to_string(entry.lineNum) + ':' +
                              std::to_string(entry.linePos) + ' ' + messageString + '\n';
                }
                self->fErrorHandler->compileError(
                        self->fShaderText, errors.c_str(), /*shaderWasCached=*/false);
            }
        }

        const char* fShaderText;             // full WGSL source, for error reporting
        ShaderErrorHandler* fErrorHandler;   // sink for compile errors
        bool fSuccess = false;               // set by Fn once info is received
    };

    Handler handler;
    handler.fShaderText = shaderText;
    handler.fErrorHandler = errorHandler;
#if defined(__EMSCRIPTEN__)
    // Deprecated function.
    module->GetCompilationInfo(&Handler::Fn, &handler);
#else
    // New API.
    // Capturing &handler is safe: WaitAny below blocks until the callback has run.
    wgpu::FutureWaitInfo waitInfo{};
    waitInfo.future = module->GetCompilationInfo(
            wgpu::CallbackMode::WaitAnyOnly,
            [handlerPtr = &handler](wgpu::CompilationInfoRequestStatus status,
                                    const wgpu::CompilationInfo* info) {
                Handler::Fn(static_cast<WGPUCompilationInfoRequestStatus>(status),
                            reinterpret_cast<const WGPUCompilationInfo*>(info),
                            handlerPtr);
            });

    // Block (with no timeout) until the compilation-info future resolves.
    const auto& instance = static_cast<const DawnSharedContext*>(sharedContext)
                                   ->device()
                                   .GetAdapter()
                                   .GetInstance();
    [[maybe_unused]] auto status =
            instance.WaitAny(1, &waitInfo, /*timeoutNS=*/std::numeric_limits<uint64_t>::max());
    SkASSERT(status == wgpu::WaitStatus::Success);
#endif  // defined(__EMSCRIPTEN__)

    return handler.fSuccess;
#endif
}
226
// Compiles a WGSL string into *module on the shared context's device and
// validates the result via check_shader_module, which reports any compile
// errors through errorHandler. Returns true on a clean compile.
bool DawnCompileWGSLShaderModule(const DawnSharedContext* sharedContext,
                                 const char* label,
                                 const std::string& wgsl,
                                 wgpu::ShaderModule* module,
                                 ShaderErrorHandler* errorHandler) {
    // The WGSL source descriptor type was renamed; Emscripten still ships the
    // older ShaderModuleWGSLDescriptor spelling.
#if defined(__EMSCRIPTEN__)
    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
#else
    wgpu::ShaderSourceWGSL wgslDesc;
#endif
    // `wgsl` outlives this call, so pointing at its c_str() is safe for the
    // duration of CreateShaderModule.
    wgslDesc.code = wgsl.c_str();

    wgpu::ShaderModuleDescriptor desc;
    desc.nextInChain = &wgslDesc;
    if (sharedContext->caps()->setBackendLabels()) {
        desc.label = label;
    }

    *module = sharedContext->device().CreateShaderModule(&desc);

    return check_shader_module(sharedContext, module, wgsl.c_str(), errorHandler);
}
249
250 #if !defined(__EMSCRIPTEN__)
251
namespace {

// Bit-packing layout for squeezing the non-format fields of a
// wgpu::YCbCrVkDescriptor into a single uint32_t
// (ImmutableSamplerInfo::fNonFormatYcbcrConversionInfo). Each field gets a
// fixed-width slot; the k*Shift constants assign the slots contiguously from
// bit 0 upward, and the k*Mask constants select a field in its shifted
// position. DawnDescriptorToImmutableSamplerInfo packs with these and
// DawnDescriptorFromImmutableSamplerInfo unpacks, so widths/offsets must stay
// in sync between the two.

// Field widths (in bits).
static constexpr int kUsesExternalFormatBits = 1;
static constexpr int kYcbcrModelBits = 3;
static constexpr int kYcbcrRangeBits = 1;
static constexpr int kXChromaOffsetBits = 1;
static constexpr int kYChromaOffsetBits = 1;
// wgpu::FilterMode contains Undefined/Nearest/Linear entries (Linear is 2).
static constexpr int kChromaFilterBits = 2;
static constexpr int kForceExplicitReconBits = 1;
static constexpr int kComponentBits = 3;

// Field offsets: each field starts where the previous one ends.
static constexpr int kUsesExternalFormatShift = 0;
static constexpr int kYcbcrModelShift = kUsesExternalFormatShift + kUsesExternalFormatBits;
static constexpr int kYcbcrRangeShift = kYcbcrModelShift + kYcbcrModelBits;
static constexpr int kXChromaOffsetShift = kYcbcrRangeShift + kYcbcrRangeBits;
static constexpr int kYChromaOffsetShift = kXChromaOffsetShift + kXChromaOffsetBits;
static constexpr int kChromaFilterShift = kYChromaOffsetShift + kYChromaOffsetBits;
static constexpr int kForceExplicitReconShift = kChromaFilterShift + kChromaFilterBits;
static constexpr int kComponentRShift = kForceExplicitReconShift + kForceExplicitReconBits;
static constexpr int kComponentGShift = kComponentRShift + kComponentBits;
static constexpr int kComponentBShift = kComponentGShift + kComponentBits;
static constexpr int kComponentAShift = kComponentBShift + kComponentBits;

// Masks selecting each field in its shifted (in-place) position.
static constexpr uint32_t kUseExternalFormatMask =
        ((1 << kUsesExternalFormatBits) - 1) << kUsesExternalFormatShift;
static constexpr uint32_t kYcbcrModelMask =
        ((1 << kYcbcrModelBits) - 1) << kYcbcrModelShift;
static constexpr uint32_t kYcbcrRangeMask =
        ((1 << kYcbcrRangeBits) - 1) << kYcbcrRangeShift;
static constexpr uint32_t kXChromaOffsetMask =
        ((1 << kXChromaOffsetBits) - 1) << kXChromaOffsetShift;
static constexpr uint32_t kYChromaOffsetMask =
        ((1 << kYChromaOffsetBits) - 1) << kYChromaOffsetShift;
static constexpr uint32_t kChromaFilterMask =
        ((1 << kChromaFilterBits) - 1) << kChromaFilterShift;
static constexpr uint32_t kForceExplicitReconMask =
        ((1 << kForceExplicitReconBits) - 1) << kForceExplicitReconShift;
static constexpr uint32_t kComponentRMask = ((1 << kComponentBits) - 1) << kComponentRShift;
static constexpr uint32_t kComponentBMask = ((1 << kComponentBits) - 1) << kComponentBShift;
static constexpr uint32_t kComponentGMask = ((1 << kComponentBits) - 1) << kComponentGShift;
static constexpr uint32_t kComponentAMask = ((1 << kComponentBits) - 1) << kComponentAShift;

} // anonymous namespace
296
DawnDescriptorsAreEquivalent(const wgpu::YCbCrVkDescriptor & desc1,const wgpu::YCbCrVkDescriptor & desc2)297 bool DawnDescriptorsAreEquivalent(const wgpu::YCbCrVkDescriptor& desc1,
298 const wgpu::YCbCrVkDescriptor& desc2) {
299 return desc1.vkFormat == desc2.vkFormat &&
300 desc1.vkYCbCrModel == desc2.vkYCbCrModel &&
301 desc1.vkYCbCrRange == desc2.vkYCbCrRange &&
302 desc1.vkComponentSwizzleRed == desc2.vkComponentSwizzleRed &&
303 desc1.vkComponentSwizzleGreen == desc2.vkComponentSwizzleGreen &&
304 desc1.vkComponentSwizzleBlue == desc2.vkComponentSwizzleBlue &&
305 desc1.vkComponentSwizzleAlpha == desc2.vkComponentSwizzleAlpha &&
306 desc1.vkXChromaOffset == desc2.vkXChromaOffset &&
307 desc1.vkYChromaOffset == desc2.vkYChromaOffset &&
308 desc1.vkChromaFilter == desc2.vkChromaFilter &&
309 desc1.forceExplicitReconstruction == desc2.forceExplicitReconstruction &&
310 desc1.externalFormat == desc2.externalFormat;
311 }
312
DawnDescriptorIsValid(const wgpu::YCbCrVkDescriptor & desc)313 bool DawnDescriptorIsValid(const wgpu::YCbCrVkDescriptor& desc) {
314 static const wgpu::YCbCrVkDescriptor kDefaultYcbcrDescriptor = {};
315 return !DawnDescriptorsAreEquivalent(desc, kDefaultYcbcrDescriptor);
316 }
317
DawnDescriptorUsesExternalFormat(const wgpu::YCbCrVkDescriptor & desc)318 bool DawnDescriptorUsesExternalFormat(const wgpu::YCbCrVkDescriptor& desc) {
319 SkASSERT(desc.externalFormat != 0 || desc.vkFormat != 0);
320 return desc.externalFormat != 0;
321 }
322
// Packs a wgpu::YCbCrVkDescriptor into an ImmutableSamplerInfo: the format
// (or external format handle) goes in fFormat, and every other field is
// bit-packed into fNonFormatYcbcrConversionInfo using the k*Shift/k*Bits
// layout defined above. Inverse of DawnDescriptorFromImmutableSamplerInfo.
ImmutableSamplerInfo DawnDescriptorToImmutableSamplerInfo(const wgpu::YCbCrVkDescriptor& desc) {
    // The packed layout must fit in the 32-bit field.
    static_assert(kComponentAShift + kComponentBits <= 32);
    // Each value must fit its allotted bit width, or packing would silently
    // corrupt neighboring fields.
    SkASSERT(desc.vkYCbCrModel < (1u << kYcbcrModelBits ));
    SkASSERT(desc.vkYCbCrRange < (1u << kYcbcrRangeBits ));
    SkASSERT(desc.vkXChromaOffset < (1u << kXChromaOffsetBits ));
    SkASSERT(desc.vkYChromaOffset < (1u << kYChromaOffsetBits ));
    SkASSERT(static_cast<uint32_t>(desc.vkChromaFilter) < (1u << kChromaFilterBits ));
    SkASSERT(desc.vkComponentSwizzleRed < (1u << kComponentBits ));
    SkASSERT(desc.vkComponentSwizzleGreen < (1u << kComponentBits ));
    SkASSERT(desc.vkComponentSwizzleBlue < (1u << kComponentBits ));
    SkASSERT(desc.vkComponentSwizzleAlpha < (1u << kComponentBits ));
    SkASSERT(static_cast<uint32_t>(desc.forceExplicitReconstruction)
             < (1u << kForceExplicitReconBits));

    const bool usesExternalFormat = DawnDescriptorUsesExternalFormat(desc);

    ImmutableSamplerInfo info;
    // OR each field into its slot; the asserts above guarantee no overlap.
    info.fNonFormatYcbcrConversionInfo =
            (((uint32_t)(usesExternalFormat ) << kUsesExternalFormatShift) |
             ((uint32_t)(desc.vkYCbCrModel ) << kYcbcrModelShift ) |
             ((uint32_t)(desc.vkYCbCrRange ) << kYcbcrRangeShift ) |
             ((uint32_t)(desc.vkXChromaOffset ) << kXChromaOffsetShift ) |
             ((uint32_t)(desc.vkYChromaOffset ) << kYChromaOffsetShift ) |
             ((uint32_t)(desc.vkChromaFilter ) << kChromaFilterShift ) |
             ((uint32_t)(desc.forceExplicitReconstruction ) << kForceExplicitReconShift) |
             ((uint32_t)(desc.vkComponentSwizzleRed ) << kComponentRShift ) |
             ((uint32_t)(desc.vkComponentSwizzleGreen ) << kComponentGShift ) |
             ((uint32_t)(desc.vkComponentSwizzleBlue ) << kComponentBShift ) |
             ((uint32_t)(desc.vkComponentSwizzleAlpha ) << kComponentAShift ));
    // fFormat carries whichever of the two format identifiers is in use.
    info.fFormat = usesExternalFormat ? desc.externalFormat : desc.vkFormat;
    return info;
}
355
// Rebuilds a wgpu::YCbCrVkDescriptor from a packed ImmutableSamplerInfo.
// Inverse of DawnDescriptorToImmutableSamplerInfo: each field is extracted
// from fNonFormatYcbcrConversionInfo via its mask/shift, and fFormat is
// interpreted as either vkFormat or externalFormat depending on the
// uses-external-format bit.
wgpu::YCbCrVkDescriptor DawnDescriptorFromImmutableSamplerInfo(ImmutableSamplerInfo info) {
    const uint32_t nonFormatInfo = info.fNonFormatYcbcrConversionInfo;

    wgpu::YCbCrVkDescriptor desc;
    // NOTE(review): this shifts first and then applies the already-shifted
    // mask, unlike the (value & mask) >> shift pattern below. It is correct
    // only because kUsesExternalFormatShift is 0 (mask == 0x1) — worth
    // normalizing if the layout ever changes.
    const bool usesExternalFormat =
            (nonFormatInfo >> kUsesExternalFormatShift) & kUseExternalFormatMask;
    if (usesExternalFormat) {
        desc.vkFormat = 0;
        desc.externalFormat = info.fFormat;
    } else {
        desc.vkFormat = (uint32_t) info.fFormat;
        desc.externalFormat = 0;
    }

    // Unpack the remaining fields: mask selects the slot, shift right-aligns it.
    desc.vkYCbCrModel = (nonFormatInfo & kYcbcrModelMask) >> kYcbcrModelShift;
    desc.vkYCbCrRange = (nonFormatInfo & kYcbcrRangeMask) >> kYcbcrRangeShift;
    desc.vkComponentSwizzleRed = (nonFormatInfo & kComponentRMask) >> kComponentRShift;
    desc.vkComponentSwizzleGreen = (nonFormatInfo & kComponentGMask) >> kComponentGShift;
    desc.vkComponentSwizzleBlue = (nonFormatInfo & kComponentBMask) >> kComponentBShift;
    desc.vkComponentSwizzleAlpha = (nonFormatInfo & kComponentAMask) >> kComponentAShift;
    desc.vkXChromaOffset = (nonFormatInfo & kXChromaOffsetMask) >> kXChromaOffsetShift;
    desc.vkYChromaOffset = (nonFormatInfo & kYChromaOffsetMask) >> kYChromaOffsetShift;
    desc.vkChromaFilter = static_cast<wgpu::FilterMode>(
            (nonFormatInfo & kChromaFilterMask) >> kChromaFilterShift);
    desc.forceExplicitReconstruction =
            (nonFormatInfo & kForceExplicitReconMask) >> kForceExplicitReconShift;
    return desc;
}
384
385 #endif // !defined(__EMSCRIPTEN__)
386
387 } // namespace skgpu::graphite
388