1 /*
2 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
3 * SPDX-License-Identifier: MIT
4 */
5 #include "nvk_pipeline.h"
6
7 #include "nvk_device.h"
8 #include "nvk_entrypoints.h"
9
10 #include "vk_pipeline_cache.h"
11
12 struct nvk_pipeline *
nvk_pipeline_zalloc(struct nvk_device * dev,enum nvk_pipeline_type type,size_t size,const VkAllocationCallbacks * pAllocator)13 nvk_pipeline_zalloc(struct nvk_device *dev,
14 enum nvk_pipeline_type type, size_t size,
15 const VkAllocationCallbacks *pAllocator)
16 {
17 struct nvk_pipeline *pipeline;
18
19 assert(size >= sizeof(*pipeline));
20 pipeline = vk_object_zalloc(&dev->vk, pAllocator, size,
21 VK_OBJECT_TYPE_PIPELINE);
22 if (pipeline == NULL)
23 return NULL;
24
25 pipeline->type = type;
26
27 return pipeline;
28 }
29
30 void
nvk_pipeline_free(struct nvk_device * dev,struct nvk_pipeline * pipeline,const VkAllocationCallbacks * pAllocator)31 nvk_pipeline_free(struct nvk_device *dev,
32 struct nvk_pipeline *pipeline,
33 const VkAllocationCallbacks *pAllocator)
34 {
35 for (uint32_t s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
36 if (pipeline->shaders[s] != NULL)
37 vk_pipeline_cache_object_unref(&dev->vk, &pipeline->shaders[s]->base);
38 }
39
40 vk_object_free(&dev->vk, pAllocator, pipeline);
41 }
42
43 VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateGraphicsPipelines(VkDevice _device,VkPipelineCache pipelineCache,uint32_t createInfoCount,const VkGraphicsPipelineCreateInfo * pCreateInfos,const VkAllocationCallbacks * pAllocator,VkPipeline * pPipelines)44 nvk_CreateGraphicsPipelines(VkDevice _device,
45 VkPipelineCache pipelineCache,
46 uint32_t createInfoCount,
47 const VkGraphicsPipelineCreateInfo *pCreateInfos,
48 const VkAllocationCallbacks *pAllocator,
49 VkPipeline *pPipelines)
50 {
51 VK_FROM_HANDLE(nvk_device, dev, _device);
52 VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
53 VkResult result = VK_SUCCESS;
54
55 unsigned i = 0;
56 for (; i < createInfoCount; i++) {
57 VkResult r = nvk_graphics_pipeline_create(dev, cache, &pCreateInfos[i],
58 pAllocator, &pPipelines[i]);
59 if (r == VK_SUCCESS)
60 continue;
61
62 result = r;
63 pPipelines[i] = VK_NULL_HANDLE;
64 if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
65 break;
66 }
67
68 for (; i < createInfoCount; i++)
69 pPipelines[i] = VK_NULL_HANDLE;
70
71 return result;
72 }
73
74 VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateComputePipelines(VkDevice _device,VkPipelineCache pipelineCache,uint32_t createInfoCount,const VkComputePipelineCreateInfo * pCreateInfos,const VkAllocationCallbacks * pAllocator,VkPipeline * pPipelines)75 nvk_CreateComputePipelines(VkDevice _device,
76 VkPipelineCache pipelineCache,
77 uint32_t createInfoCount,
78 const VkComputePipelineCreateInfo *pCreateInfos,
79 const VkAllocationCallbacks *pAllocator,
80 VkPipeline *pPipelines)
81 {
82 VK_FROM_HANDLE(nvk_device, dev, _device);
83 VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
84 VkResult result = VK_SUCCESS;
85
86 unsigned i = 0;
87 for (; i < createInfoCount; i++) {
88 VkResult r = nvk_compute_pipeline_create(dev, cache, &pCreateInfos[i],
89 pAllocator, &pPipelines[i]);
90 if (r == VK_SUCCESS)
91 continue;
92
93 result = r;
94 pPipelines[i] = VK_NULL_HANDLE;
95 if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
96 break;
97 }
98
99 for (; i < createInfoCount; i++)
100 pPipelines[i] = VK_NULL_HANDLE;
101
102 return result;
103 }
104
105 VKAPI_ATTR void VKAPI_CALL
nvk_DestroyPipeline(VkDevice _device,VkPipeline _pipeline,const VkAllocationCallbacks * pAllocator)106 nvk_DestroyPipeline(VkDevice _device, VkPipeline _pipeline,
107 const VkAllocationCallbacks *pAllocator)
108 {
109 VK_FROM_HANDLE(nvk_device, dev, _device);
110 VK_FROM_HANDLE(nvk_pipeline, pipeline, _pipeline);
111
112 if (!pipeline)
113 return;
114
115 nvk_pipeline_free(dev, pipeline, pAllocator);
116 }
117
/* Formats into a fixed-size char-array member (e.g. props->name), zeroing
 * the whole field first.  Asserts that the formatted string is non-empty
 * and was not truncated; `i` is marked UNUSED so release (NDEBUG) builds
 * don't warn when the assert compiles away.  Uses a GNU statement
 * expression, so `field` must be an actual array (sizeof needs its size).
 */
#define WRITE_STR(field, ...) ({ \
   memset(field, 0, sizeof(field)); \
   UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__); \
   assert(i > 0 && i < sizeof(field)); \
})
123
124 VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetPipelineExecutablePropertiesKHR(VkDevice device,const VkPipelineInfoKHR * pPipelineInfo,uint32_t * pExecutableCount,VkPipelineExecutablePropertiesKHR * pProperties)125 nvk_GetPipelineExecutablePropertiesKHR(
126 VkDevice device,
127 const VkPipelineInfoKHR *pPipelineInfo,
128 uint32_t *pExecutableCount,
129 VkPipelineExecutablePropertiesKHR *pProperties)
130 {
131 VK_FROM_HANDLE(nvk_pipeline, pipeline, pPipelineInfo->pipeline);
132 VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutablePropertiesKHR, out,
133 pProperties, pExecutableCount);
134
135 for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
136 const struct nvk_shader *shader = pipeline->shaders[stage];
137 if (!shader || shader->code_size == 0)
138 continue;
139
140 vk_outarray_append_typed(VkPipelineExecutablePropertiesKHR, &out, props) {
141 props->stages = mesa_to_vk_shader_stage(stage);
142 props->subgroupSize = 32;
143 WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
144 WRITE_STR(props->description, "%s shader",
145 _mesa_shader_stage_to_string(stage));
146 }
147 }
148
149 return vk_outarray_status(&out);
150 }
151
152 static struct nvk_shader *
shader_for_exe_idx(struct nvk_pipeline * pipeline,uint32_t idx)153 shader_for_exe_idx(struct nvk_pipeline *pipeline, uint32_t idx)
154 {
155 for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
156 const struct nvk_shader *shader = pipeline->shaders[stage];
157 if (!shader || shader->code_size == 0)
158 continue;
159
160 if (idx == 0)
161 return pipeline->shaders[stage];
162
163 idx--;
164 }
165
166 return NULL;
167 }
168
169 VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetPipelineExecutableStatisticsKHR(VkDevice device,const VkPipelineExecutableInfoKHR * pExecutableInfo,uint32_t * pStatisticCount,VkPipelineExecutableStatisticKHR * pStatistics)170 nvk_GetPipelineExecutableStatisticsKHR(
171 VkDevice device,
172 const VkPipelineExecutableInfoKHR *pExecutableInfo,
173 uint32_t *pStatisticCount,
174 VkPipelineExecutableStatisticKHR *pStatistics)
175 {
176 VK_FROM_HANDLE(nvk_pipeline, pipeline, pExecutableInfo->pipeline);
177 VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableStatisticKHR, out,
178 pStatistics, pStatisticCount);
179
180 struct nvk_shader *shader =
181 shader_for_exe_idx(pipeline, pExecutableInfo->executableIndex);
182
183 vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
184 WRITE_STR(stat->name, "Code Size");
185 WRITE_STR(stat->description,
186 "Size of the compiled shader binary, in bytes");
187 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
188 stat->value.u64 = shader->code_size;
189 }
190
191 vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
192 WRITE_STR(stat->name, "Number of GPRs");
193 WRITE_STR(stat->description, "Number of GPRs used by this pipeline");
194 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
195 stat->value.u64 = shader->info.num_gprs;
196 }
197
198 vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
199 WRITE_STR(stat->name, "SLM Size");
200 WRITE_STR(stat->description,
201 "Size of shader local (scratch) memory, in bytes");
202 stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
203 stat->value.u64 = shader->info.slm_size;
204 }
205
206 return vk_outarray_status(&out);
207 }
208
209 static bool
write_ir_text(VkPipelineExecutableInternalRepresentationKHR * ir,const char * data)210 write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
211 const char *data)
212 {
213 ir->isText = VK_TRUE;
214
215 size_t data_len = strlen(data) + 1;
216
217 if (ir->pData == NULL) {
218 ir->dataSize = data_len;
219 return true;
220 }
221
222 strncpy(ir->pData, data, ir->dataSize);
223 if (ir->dataSize < data_len)
224 return false;
225
226 ir->dataSize = data_len;
227 return true;
228 }
229
230 VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetPipelineExecutableInternalRepresentationsKHR(VkDevice device,const VkPipelineExecutableInfoKHR * pExecutableInfo,uint32_t * pInternalRepresentationCount,VkPipelineExecutableInternalRepresentationKHR * pInternalRepresentations)231 nvk_GetPipelineExecutableInternalRepresentationsKHR(
232 VkDevice device,
233 const VkPipelineExecutableInfoKHR *pExecutableInfo,
234 uint32_t *pInternalRepresentationCount,
235 VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
236 {
237 VK_FROM_HANDLE(nvk_pipeline, pipeline, pExecutableInfo->pipeline);
238 VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableInternalRepresentationKHR, out,
239 pInternalRepresentations,
240 pInternalRepresentationCount);
241 bool incomplete_text = false;
242
243 struct nvk_shader *shader =
244 shader_for_exe_idx(pipeline, pExecutableInfo->executableIndex);
245
246 if (shader->nak != NULL && shader->nak->asm_str != NULL) {
247 vk_outarray_append_typed(VkPipelineExecutableInternalRepresentationKHR, &out, ir) {
248 WRITE_STR(ir->name, "NAK assembly");
249 WRITE_STR(ir->description, "NAK assembly");
250 if (!write_ir_text(ir, shader->nak->asm_str))
251 incomplete_text = true;
252 }
253 }
254
255 return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
256 }
257