1 /*
2 * Copyright © 2022 Collabora, LTD
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "vk_pipeline.h"
25
26 #include "vk_log.h"
27 #include "vk_nir.h"
28 #include "vk_shader_module.h"
29 #include "vk_util.h"
30
31 #include "nir_serialize.h"
32
33 #include "util/mesa-sha1.h"
34
35 bool
vk_pipeline_shader_stage_is_null(const VkPipelineShaderStageCreateInfo * info)36 vk_pipeline_shader_stage_is_null(const VkPipelineShaderStageCreateInfo *info)
37 {
38 if (info->module != VK_NULL_HANDLE)
39 return false;
40
41 vk_foreach_struct_const(ext, info->pNext) {
42 if (ext->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO ||
43 ext->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT)
44 return false;
45 }
46
47 return true;
48 }
49
50 static uint32_t
get_required_subgroup_size(const VkPipelineShaderStageCreateInfo * info)51 get_required_subgroup_size(const VkPipelineShaderStageCreateInfo *info)
52 {
53 const VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *rss_info =
54 vk_find_struct_const(info->pNext,
55 PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO);
56 return rss_info != NULL ? rss_info->requiredSubgroupSize : 0;
57 }
58
/**
 * Produce a NIR shader for one pipeline shader stage.
 *
 * Two paths exist:
 *  - If the vk_shader_module already holds pre-built NIR (internal/meta
 *    shaders), that NIR is cloned into mem_ctx and returned directly.
 *  - Otherwise the SPIR-V is located (either in the module or in a
 *    VkShaderModuleCreateInfo chained onto info->pNext) and translated
 *    with vk_spirv_to_nir().
 *
 * On success, stores the shader (allocated from mem_ctx) in *nir_out and
 * returns VK_SUCCESS.  Returns VK_ERROR_OUT_OF_HOST_MEMORY if the clone
 * fails, or VK_ERROR_UNKNOWN if no SPIR-V source can be found or the
 * translation fails.
 */
VkResult
vk_pipeline_shader_stage_to_nir(struct vk_device *device,
                                const VkPipelineShaderStageCreateInfo *info,
                                const struct spirv_to_nir_options *spirv_options,
                                const struct nir_shader_compiler_options *nir_options,
                                void *mem_ctx, nir_shader **nir_out)
{
   VK_FROM_HANDLE(vk_shader_module, module, info->module);
   const gl_shader_stage stage = vk_to_mesa_shader_stage(info->stage);

   assert(info->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO);

   if (module != NULL && module->nir != NULL) {
      /* Pre-built NIR path: sanity-check that the cached shader matches the
       * stage and entrypoint the caller asked for before handing it out.
       */
      assert(module->nir->info.stage == stage);
      assert(exec_list_length(&module->nir->functions) == 1);
      ASSERTED const char *nir_name =
         nir_shader_get_entrypoint(module->nir)->function->name;
      assert(strcmp(nir_name, info->pName) == 0);

      nir_validate_shader(module->nir, "internal shader");

      /* Clone so the caller may mutate the result without corrupting the
       * module's cached copy.
       */
      nir_shader *clone = nir_shader_clone(mem_ctx, module->nir);
      if (clone == NULL)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

      /* Pre-built NIR may have been created without compiler options; pin
       * it to the driver's options now.  It must not already carry a
       * conflicting set.
       */
      assert(clone->options == NULL || clone->options == nir_options);
      clone->options = nir_options;

      *nir_out = clone;
      return VK_SUCCESS;
   }

   /* SPIR-V path: the code lives either in the shader module or in a
    * VkShaderModuleCreateInfo chained directly onto the stage info
    * (as allowed by VK_EXT_graphics_pipeline_library / maintenance5).
    */
   const uint32_t *spirv_data;
   uint32_t spirv_size;
   if (module != NULL) {
      spirv_data = (uint32_t *)module->data;
      spirv_size = module->size;
   } else {
      const VkShaderModuleCreateInfo *minfo =
         vk_find_struct_const(info->pNext, SHADER_MODULE_CREATE_INFO);
      if (unlikely(minfo == NULL)) {
         return vk_errorf(device, VK_ERROR_UNKNOWN,
                          "No shader module provided");
      }
      spirv_data = minfo->pCode;
      spirv_size = minfo->codeSize;
   }

   /* Resolve the subgroup-size policy for this stage, in priority order:
    * an explicit required size, then varying (forced by flag or implied by
    * SPIR-V >= 1.6), then full subgroups, then the API constant.
    */
   enum gl_subgroup_size subgroup_size;
   uint32_t req_subgroup_size = get_required_subgroup_size(info);
   if (req_subgroup_size > 0) {
      assert(util_is_power_of_two_nonzero(req_subgroup_size));
      assert(req_subgroup_size >= 8 && req_subgroup_size <= 128);
      subgroup_size = req_subgroup_size;
   } else if (info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT ||
              vk_spirv_version(spirv_data, spirv_size) >= 0x10600) {
      /* Starting with SPIR-V 1.6, varying subgroup size is the default */
      subgroup_size = SUBGROUP_SIZE_VARYING;
   } else if (info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT) {
      /* The full-subgroups flag is only valid for compute-like stages. */
      assert(stage == MESA_SHADER_COMPUTE);
      subgroup_size = SUBGROUP_SIZE_FULL_SUBGROUPS;
   } else {
      subgroup_size = SUBGROUP_SIZE_API_CONSTANT;
   }

   nir_shader *nir = vk_spirv_to_nir(device, spirv_data, spirv_size, stage,
                                     info->pName, subgroup_size,
                                     info->pSpecializationInfo,
                                     spirv_options, nir_options, mem_ctx);
   if (nir == NULL)
      return vk_errorf(device, VK_ERROR_UNKNOWN, "spirv_to_nir failed");

   *nir_out = nir;

   return VK_SUCCESS;
}
135
/**
 * Compute a SHA-1 over everything that affects the compiled output of one
 * shader stage, for use as a pipeline-cache key.
 *
 * NOTE: the exact sequence of bytes fed into the hash is the contract here.
 * Reordering or resizing any update would silently invalidate existing
 * on-disk caches, so modify with care.
 *
 * Writes SHA1_DIGEST_LENGTH bytes into stage_sha1.
 */
void
vk_pipeline_hash_shader_stage(const VkPipelineShaderStageCreateInfo *info,
                              unsigned char *stage_sha1)
{
   VK_FROM_HANDLE(vk_shader_module, module, info->module);

   if (module && module->nir) {
      /* Internal NIR module: serialize and hash the NIR shader.
       * We don't need to hash other info fields since they should match the
       * NIR data.
       */
      assert(module->nir->info.stage == vk_to_mesa_shader_stage(info->stage));
      ASSERTED nir_function_impl *entrypoint = nir_shader_get_entrypoint(module->nir);
      assert(strcmp(entrypoint->function->name, info->pName) == 0);
      assert(info->pSpecializationInfo == NULL);

      struct blob blob;

      /* Non-debug serialization (second arg false) so the hash is stable
       * regardless of debug-info availability.
       */
      blob_init(&blob);
      nir_serialize(&blob, module->nir, false);
      assert(!blob.out_of_memory);
      _mesa_sha1_compute(blob.data, blob.size, stage_sha1);
      blob_finish(&blob);
      return;
   }

   /* SPIR-V path: the code may come from the module, an inline
    * VkShaderModuleCreateInfo, or a module identifier
    * (VK_EXT_shader_module_identifier).
    */
   const VkShaderModuleCreateInfo *minfo =
      vk_find_struct_const(info->pNext, SHADER_MODULE_CREATE_INFO);
   const VkPipelineShaderStageModuleIdentifierCreateInfoEXT *iinfo =
      vk_find_struct_const(info->pNext, PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT);

   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);

   _mesa_sha1_update(&ctx, &info->flags, sizeof(info->flags));

   /* info->stage must be exactly one stage bit. */
   assert(util_bitcount(info->stage) == 1);
   _mesa_sha1_update(&ctx, &info->stage, sizeof(info->stage));

   if (module) {
      /* The module's precomputed SHA-1 stands in for its SPIR-V bytes. */
      _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
   } else if (minfo) {
      /* Hash the inline SPIR-V down to a digest first so the stream mixed
       * into ctx has the same size as the module->sha1 case.
       */
      unsigned char spirv_sha1[SHA1_DIGEST_LENGTH];

      _mesa_sha1_compute(minfo->pCode, minfo->codeSize, spirv_sha1);
      _mesa_sha1_update(&ctx, spirv_sha1, sizeof(spirv_sha1));
   } else {
      /* It is legal to pass in arbitrary identifiers as long as they don't exceed
       * the limit. Shaders with bogus identifiers are more or less guaranteed to fail. */
      assert(iinfo);
      assert(iinfo->identifierSize <= VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT);
      _mesa_sha1_update(&ctx, iinfo->pIdentifier, iinfo->identifierSize);
   }

   /* Entrypoint name (not NUL-terminated in the hash). */
   _mesa_sha1_update(&ctx, info->pName, strlen(info->pName));

   if (info->pSpecializationInfo) {
      /* Both the map entries and the raw constant data feed the hash. */
      _mesa_sha1_update(&ctx, info->pSpecializationInfo->pMapEntries,
                        info->pSpecializationInfo->mapEntryCount *
                        sizeof(*info->pSpecializationInfo->pMapEntries));
      _mesa_sha1_update(&ctx, info->pSpecializationInfo->pData,
                        info->pSpecializationInfo->dataSize);
   }

   /* 0 when no required-subgroup-size struct is chained. */
   uint32_t req_subgroup_size = get_required_subgroup_size(info);
   _mesa_sha1_update(&ctx, &req_subgroup_size, sizeof(req_subgroup_size));

   _mesa_sha1_final(&ctx, stage_sha1);
}
206