/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_VulkanUtilsPriv_DEFINED
#define skgpu_VulkanUtilsPriv_DEFINED

#include "include/core/SkColor.h"   // SkColorChannelFlag
#include "include/core/SkTypes.h"   // SkASSERT, SkUNREACHABLE
#include "include/gpu/vk/VulkanTypes.h"

namespace skgpu {

/**
 * Returns the SkColorChannelFlags present in the format, or 0 for depth/stencil
 * and unrecognized formats.
 */
static constexpr uint32_t VkFormatChannels(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_R8G8B8A8_UNORM:           return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R8_UNORM:                 return kRed_SkColorChannelFlag;
        case VK_FORMAT_B8G8R8A8_UNORM:           return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:      return kRGB_SkColorChannelFlags;
        case VK_FORMAT_R16G16B16A16_SFLOAT:      return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R16_SFLOAT:               return kRed_SkColorChannelFlag;
        case VK_FORMAT_R8G8B8_UNORM:             return kRGB_SkColorChannelFlags;
        case VK_FORMAT_R8G8_UNORM:               return kRG_SkColorChannelFlags;
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:    return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:    return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R8G8B8A8_SRGB:            return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:  return kRGB_SkColorChannelFlags;
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:      return kRGB_SkColorChannelFlags;
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:     return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R16_UNORM:                return kRed_SkColorChannelFlag;
        case VK_FORMAT_R16G16_UNORM:             return kRG_SkColorChannelFlags;
        case VK_FORMAT_R16G16B16A16_UNORM:       return kRGBA_SkColorChannelFlags;
        case VK_FORMAT_R16G16_SFLOAT:            return kRG_SkColorChannelFlags;
        case VK_FORMAT_S8_UINT:                  return 0;
        case VK_FORMAT_D24_UNORM_S8_UINT:        return 0;
        case VK_FORMAT_D32_SFLOAT_S8_UINT:       return 0;
        default:                                 return 0;
    }
}

/**
 * Returns the size in bytes of one block of the format: one texel for
 * uncompressed formats, one compression block for compressed formats.
 * Returns 0 for unrecognized formats.
 */
static constexpr size_t VkFormatBytesPerBlock(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_R8G8B8A8_UNORM:            return 4;
        case VK_FORMAT_R8_UNORM:                  return 1;
        case VK_FORMAT_B8G8R8A8_UNORM:            return 4;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:       return 2;
        case VK_FORMAT_R16G16B16A16_SFLOAT:       return 8;
        case VK_FORMAT_R16_SFLOAT:                return 2;
        case VK_FORMAT_R8G8B8_UNORM:              return 3;
        case VK_FORMAT_R8G8_UNORM:                return 2;
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32:  return 4;
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32:  return 4;
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:     return 2;
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:     return 2;
        case VK_FORMAT_R8G8B8A8_SRGB:             return 4;
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:   return 8;
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:       return 8;
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:      return 8;
        case VK_FORMAT_R16_UNORM:                 return 2;
        case VK_FORMAT_R16G16_UNORM:              return 4;
        case VK_FORMAT_R16G16B16A16_UNORM:        return 8;
        case VK_FORMAT_R16G16_SFLOAT:             return 4;
        // Currently we are just overestimating this value for use in gpu size
        // calculations, even though the actual size is probably less. We should
        // instead treat planar formats similarly to compressed textures, which go
        // through their own special query for calculating size.
        case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM: return 3;
        case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:  return 3;
        case VK_FORMAT_S8_UINT:                   return 1;
        case VK_FORMAT_D24_UNORM_S8_UINT:         return 4;
        case VK_FORMAT_D32_SFLOAT_S8_UINT:        return 8;

        default:                                  return 0;
    }
}
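// Illustrative compile-time checks (not part of the original header). Because
// the helpers above are constexpr, their per-format values can be verified
// directly with static_assert. Note the deliberate overestimate for the planar
// 4:2:0 formats: 3 bytes per pixel rather than the true ~1.5.
static_assert(VkFormatChannels(VK_FORMAT_R8G8_UNORM) == kRG_SkColorChannelFlags, "");
static_assert(VkFormatChannels(VK_FORMAT_S8_UINT) == 0, "");
static_assert(VkFormatBytesPerBlock(VK_FORMAT_R16G16B16A16_SFLOAT) == 8, "");
static_assert(VkFormatBytesPerBlock(VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM) == 3, "");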
/**
 * Returns the number of bits in the format's stencil aspect, or 0 if it has none.
 */
static constexpr int VkFormatStencilBits(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return 8;
        case VK_FORMAT_D24_UNORM_S8_UINT:
            return 8;
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return 8;
        default:
            return 0;
    }
}

/**
 * Returns true if sampling the format requires a VkSamplerYcbcrConversion.
 */
static constexpr bool VkFormatNeedsYcbcrSampler(VkFormat format) {
    return format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM ||
           format == VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
}

/**
 * Maps a sample count to the matching VkSampleCountFlagBits; returns false if
 * the count has no exact Vulkan equivalent.
 */
static constexpr bool SampleCountToVkSampleCount(uint32_t samples,
                                                 VkSampleCountFlagBits* vkSamples) {
    SkASSERT(samples >= 1);
    switch (samples) {
        case 1:
            *vkSamples = VK_SAMPLE_COUNT_1_BIT;
            return true;
        case 2:
            *vkSamples = VK_SAMPLE_COUNT_2_BIT;
            return true;
        case 4:
            *vkSamples = VK_SAMPLE_COUNT_4_BIT;
            return true;
        case 8:
            *vkSamples = VK_SAMPLE_COUNT_8_BIT;
            return true;
        case 16:
            *vkSamples = VK_SAMPLE_COUNT_16_BIT;
            return true;
        default:
            return false;
    }
}

/**
 * Returns true if the format is compressed.
 */
static constexpr bool VkFormatIsCompressed(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
            return true;
        default:
            return false;
    }
    SkUNREACHABLE;
}
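// Illustrative compile-time checks (not part of the original header):
static_assert(VkFormatStencilBits(VK_FORMAT_D32_SFLOAT_S8_UINT) == 8, "");
static_assert(VkFormatNeedsYcbcrSampler(VK_FORMAT_G8_B8R8_2PLANE_420_UNORM), "");
static_assert(!VkFormatIsCompressed(VK_FORMAT_R8G8B8A8_UNORM), "");
//
// A hypothetical runtime use of SampleCountToVkSampleCount, which reports
// failure for unsupported sample counts instead of asserting:
//
//     VkSampleCountFlagBits vkSamples;
//     if (!SampleCountToVkSampleCount(requestedSampleCount, &vkSamples)) {
//         vkSamples = VK_SAMPLE_COUNT_1_BIT;  // fall back to no multisampling
//     }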
#if defined(SK_DEBUG) || GR_TEST_UTILS
/**
 * Returns a human-readable name for the format, for debug and test output.
 */
static constexpr const char* VkFormatToStr(VkFormat vkFormat) {
    switch (vkFormat) {
        case VK_FORMAT_R8G8B8A8_UNORM:           return "R8G8B8A8_UNORM";
        case VK_FORMAT_R8_UNORM:                 return "R8_UNORM";
        case VK_FORMAT_B8G8R8A8_UNORM:           return "B8G8R8A8_UNORM";
        case VK_FORMAT_R5G6B5_UNORM_PACK16:      return "R5G6B5_UNORM_PACK16";
        case VK_FORMAT_R16G16B16A16_SFLOAT:      return "R16G16B16A16_SFLOAT";
        case VK_FORMAT_R16_SFLOAT:               return "R16_SFLOAT";
        case VK_FORMAT_R8G8B8_UNORM:             return "R8G8B8_UNORM";
        case VK_FORMAT_R8G8_UNORM:               return "R8G8_UNORM";
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return "A2B10G10R10_UNORM_PACK32";
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return "A2R10G10B10_UNORM_PACK32";
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:    return "B4G4R4A4_UNORM_PACK16";
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:    return "R4G4B4A4_UNORM_PACK16";
        case VK_FORMAT_R32G32B32A32_SFLOAT:      return "R32G32B32A32_SFLOAT";
        case VK_FORMAT_R8G8B8A8_SRGB:            return "R8G8B8A8_SRGB";
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:  return "ETC2_R8G8B8_UNORM_BLOCK";
        case VK_FORMAT_BC1_RGB_UNORM_BLOCK:      return "BC1_RGB_UNORM_BLOCK";
        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:     return "BC1_RGBA_UNORM_BLOCK";
        case VK_FORMAT_R16_UNORM:                return "R16_UNORM";
        case VK_FORMAT_R16G16_UNORM:             return "R16G16_UNORM";
        case VK_FORMAT_R16G16B16A16_UNORM:       return "R16G16B16A16_UNORM";
        case VK_FORMAT_R16G16_SFLOAT:            return "R16G16_SFLOAT";
        case VK_FORMAT_S8_UINT:                  return "S8_UINT";
        case VK_FORMAT_D24_UNORM_S8_UINT:        return "D24_UNORM_S8_UINT";
        case VK_FORMAT_D32_SFLOAT_S8_UINT:       return "D32_SFLOAT_S8_UINT";

        default:                                 return "Unknown";
    }
}
#endif // defined(SK_DEBUG) || GR_TEST_UTILS

} // namespace skgpu

#endif // skgpu_VulkanUtilsPriv_DEFINED