/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_cs.h"
#include "radv_shader.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"
#include "vk_util.h"

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"

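/* Note on the *_4bit masks below: they pack one nibble per color target
 * (MRT), i.e. bits 4*i..4*i+3 describe target i, with bits 0-2 covering
 * R/G/B and bit 3 covering A (see the 0x7u/0x8u chanmasks used by the
 * commutativity checks further down).
 */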
struct radv_blend_state {
	uint32_t blend_enable_4bit;
	uint32_t need_src_alpha;

	uint32_t cb_color_control;
	uint32_t cb_target_mask;
	uint32_t cb_target_enabled_4bit;
	uint32_t sx_mrt_blend_opt[8];
	uint32_t cb_blend_control[8];

	uint32_t spi_shader_col_format;
	uint32_t col_format_is_int8;
	uint32_t col_format_is_int10;
	uint32_t cb_shader_mask;
	uint32_t db_alpha_to_mask;

	uint32_t commutative_4bit;

	bool single_cb_enable;
	bool mrt0_is_dual_src;
};

struct radv_dsa_order_invariance {
	/* Whether the final result in Z/S buffers is guaranteed to be
	 * invariant under changes to the order in which fragments arrive.
	 */
	bool zs;

	/* Whether the set of fragments that pass the combined Z/S test is
	 * guaranteed to be invariant under changes to the order in which
	 * fragments arrive.
	 */
	bool pass_set;
};

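/* Helpers that return NULL when a pipeline sub-state is ignored per the
 * Vulkan spec (e.g. pMultisampleState when rasterization is discarded, or
 * pTessellationState when no tessellation stages are present), so callers
 * can simply test the returned pointer.
 */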
static const VkPipelineMultisampleStateCreateInfo *
radv_pipeline_get_multisample_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return pCreateInfo->pMultisampleState;
	return NULL;
}

static const VkPipelineTessellationStateCreateInfo *
radv_pipeline_get_tessellation_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
		if (pCreateInfo->pStages[i].stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ||
		    pCreateInfo->pStages[i].stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
			return pCreateInfo->pTessellationState;
		}
	}
	return NULL;
}

static const VkPipelineDepthStencilStateCreateInfo *
radv_pipeline_get_depth_stencil_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;

	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->depth_stencil_attachment)
		return pCreateInfo->pDepthStencilState;
	return NULL;
}

static const VkPipelineColorBlendStateCreateInfo *
radv_pipeline_get_color_blend_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;

	if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
	    subpass->has_color_att)
		return pCreateInfo->pColorBlendState;
	return NULL;
}

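/* Whether a pipeline uses NGG is a property of its last pre-rasterization
 * shader stage: GS if present, otherwise tess eval, otherwise vertex.
 */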
bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;
	return variant->info.is_ngg;
}

bool radv_pipeline_has_ngg_passthrough(const struct radv_pipeline *pipeline)
{
	assert(radv_pipeline_has_ngg(pipeline));

	struct radv_shader_variant *variant = NULL;
	if (pipeline->shaders[MESA_SHADER_GEOMETRY])
		variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
	else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
		variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
	else if (pipeline->shaders[MESA_SHADER_VERTEX])
		variant = pipeline->shaders[MESA_SHADER_VERTEX];
	else
		return false;
	return variant->info.is_ngg_passthrough;
}

bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline)
{
	if (!radv_pipeline_has_gs(pipeline))
		return false;

	/* The GS copy shader is required if the pipeline has GS on GFX6-GFX9.
	 * On GFX10, it might be required in rare cases if it's not possible to
	 * enable NGG.
	 */
	if (radv_pipeline_has_ngg(pipeline))
		return false;

	assert(pipeline->gs_copy_shader);
	return true;
}

static void
radv_pipeline_destroy(struct radv_device *device,
                      struct radv_pipeline *pipeline,
                      const VkAllocationCallbacks* allocator)
{
	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i)
		if (pipeline->shaders[i])
			radv_shader_variant_destroy(device, pipeline->shaders[i]);

	if (pipeline->gs_copy_shader)
		radv_shader_variant_destroy(device, pipeline->gs_copy_shader);

	if (pipeline->cs.buf)
		free(pipeline->cs.buf);

	vk_object_base_finish(&pipeline->base);
	vk_free2(&device->vk.alloc, allocator, pipeline);
}

void radv_DestroyPipeline(
	VkDevice                                    _device,
	VkPipeline                                  _pipeline,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	if (!_pipeline)
		return;

	radv_pipeline_destroy(device, pipeline, pAllocator);
}

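/* Global options that change the generated shader code must be folded into
 * the key used for shader hashing (and thus the shader caches); this
 * collects them as RADV_HASH_SHADER_* flags.
 */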
static uint32_t get_hash_flags(const struct radv_device *device)
{
	uint32_t hash_flags = 0;

	if (device->instance->debug_flags & RADV_DEBUG_NO_NGG)
		hash_flags |= RADV_HASH_SHADER_NO_NGG;
	if (device->physical_device->cs_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_CS_WAVE32;
	if (device->physical_device->ps_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_PS_WAVE32;
	if (device->physical_device->ge_wave_size == 32)
		hash_flags |= RADV_HASH_SHADER_GE_WAVE32;
	if (device->physical_device->use_llvm)
		hash_flags |= RADV_HASH_SHADER_LLVM;
	if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
		hash_flags |= RADV_HASH_SHADER_DISCARD_TO_DEMOTE;
	if (device->instance->enable_mrt_output_nan_fixup)
		hash_flags |= RADV_HASH_SHADER_MRT_NAN_FIXUP;
	if (device->instance->debug_flags & RADV_DEBUG_INVARIANT_GEOM)
		hash_flags |= RADV_HASH_SHADER_INVARIANT_GEOM;
	return hash_flags;
}

static void
radv_pipeline_init_scratch(const struct radv_device *device,
                           struct radv_pipeline *pipeline)
{
	unsigned scratch_bytes_per_wave = 0;
	unsigned max_waves = 0;
	unsigned min_waves = 1;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (pipeline->shaders[i] &&
		    pipeline->shaders[i]->config.scratch_bytes_per_wave) {
			unsigned max_stage_waves = device->scratch_waves;

			scratch_bytes_per_wave = MAX2(scratch_bytes_per_wave,
			                              pipeline->shaders[i]->config.scratch_bytes_per_wave);

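			/* Cap the wave count by how many waves can hold this
			 * stage's VGPRs: 4 SIMDs per CU with 256 VGPRs each,
			 * e.g. a shader using 64 VGPRs allows 256/64 = 4
			 * waves per SIMD, i.e. 16 waves per CU.
			 */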
			max_stage_waves = MIN2(max_stage_waves,
			          4 * device->physical_device->rad_info.num_good_compute_units *
			          (256 / pipeline->shaders[i]->config.num_vgprs));
			max_waves = MAX2(max_waves, max_stage_waves);
		}
	}

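	/* A compute workgroup must have enough waves resident to cover the
	 * whole group; the divisor of 64 here corresponds to a wave64 wave.
	 */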
	if (pipeline->shaders[MESA_SHADER_COMPUTE]) {
		unsigned group_size = pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[0] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[1] *
		                      pipeline->shaders[MESA_SHADER_COMPUTE]->info.cs.block_size[2];
		min_waves = MAX2(min_waves, round_up_u32(group_size, 64));
	}

	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
	pipeline->max_waves = max_waves;
}

static uint32_t si_translate_blend_logic_op(VkLogicOp op)
{
	switch (op) {
	case VK_LOGIC_OP_CLEAR:
		return V_028808_ROP3_CLEAR;
	case VK_LOGIC_OP_AND:
		return V_028808_ROP3_AND;
	case VK_LOGIC_OP_AND_REVERSE:
		return V_028808_ROP3_AND_REVERSE;
	case VK_LOGIC_OP_COPY:
		return V_028808_ROP3_COPY;
	case VK_LOGIC_OP_AND_INVERTED:
		return V_028808_ROP3_AND_INVERTED;
	case VK_LOGIC_OP_NO_OP:
		return V_028808_ROP3_NO_OP;
	case VK_LOGIC_OP_XOR:
		return V_028808_ROP3_XOR;
	case VK_LOGIC_OP_OR:
		return V_028808_ROP3_OR;
	case VK_LOGIC_OP_NOR:
		return V_028808_ROP3_NOR;
	case VK_LOGIC_OP_EQUIVALENT:
		return V_028808_ROP3_EQUIVALENT;
	case VK_LOGIC_OP_INVERT:
		return V_028808_ROP3_INVERT;
	case VK_LOGIC_OP_OR_REVERSE:
		return V_028808_ROP3_OR_REVERSE;
	case VK_LOGIC_OP_COPY_INVERTED:
		return V_028808_ROP3_COPY_INVERTED;
	case VK_LOGIC_OP_OR_INVERTED:
		return V_028808_ROP3_OR_INVERTED;
	case VK_LOGIC_OP_NAND:
		return V_028808_ROP3_NAND;
	case VK_LOGIC_OP_SET:
		return V_028808_ROP3_SET;
	default:
		unreachable("Unhandled logic op");
	}
}


static uint32_t si_translate_blend_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028780_COMB_DST_PLUS_SRC;
	case VK_BLEND_OP_SUBTRACT:
		return V_028780_COMB_SRC_MINUS_DST;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028780_COMB_DST_MINUS_SRC;
	case VK_BLEND_OP_MIN:
		return V_028780_COMB_MIN_DST_SRC;
	case VK_BLEND_OP_MAX:
		return V_028780_COMB_MAX_DST_SRC;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_factor(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028780_BLEND_ZERO;
	case VK_BLEND_FACTOR_ONE:
		return V_028780_BLEND_ONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return V_028780_BLEND_SRC_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return V_028780_BLEND_ONE_MINUS_SRC_COLOR;
	case VK_BLEND_FACTOR_DST_COLOR:
		return V_028780_BLEND_DST_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
		return V_028780_BLEND_ONE_MINUS_DST_COLOR;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028780_BLEND_SRC_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028780_BLEND_ONE_MINUS_SRC_ALPHA;
	case VK_BLEND_FACTOR_DST_ALPHA:
		return V_028780_BLEND_DST_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
		return V_028780_BLEND_ONE_MINUS_DST_ALPHA;
	case VK_BLEND_FACTOR_CONSTANT_COLOR:
		return V_028780_BLEND_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR;
	case VK_BLEND_FACTOR_CONSTANT_ALPHA:
		return V_028780_BLEND_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
		return V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return V_028780_BLEND_SRC_ALPHA_SATURATE;
	case VK_BLEND_FACTOR_SRC1_COLOR:
		return V_028780_BLEND_SRC1_COLOR;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
		return V_028780_BLEND_INV_SRC1_COLOR;
	case VK_BLEND_FACTOR_SRC1_ALPHA:
		return V_028780_BLEND_SRC1_ALPHA;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return V_028780_BLEND_INV_SRC1_ALPHA;
	default:
		return 0;
	}
}

static uint32_t si_translate_blend_opt_function(VkBlendOp op)
{
	switch (op) {
	case VK_BLEND_OP_ADD:
		return V_028760_OPT_COMB_ADD;
	case VK_BLEND_OP_SUBTRACT:
		return V_028760_OPT_COMB_SUBTRACT;
	case VK_BLEND_OP_REVERSE_SUBTRACT:
		return V_028760_OPT_COMB_REVSUBTRACT;
	case VK_BLEND_OP_MIN:
		return V_028760_OPT_COMB_MIN;
	case VK_BLEND_OP_MAX:
		return V_028760_OPT_COMB_MAX;
	default:
		return V_028760_OPT_COMB_BLEND_DISABLED;
	}
}

static uint32_t si_translate_blend_opt_factor(VkBlendFactor factor, bool is_alpha)
{
	switch (factor) {
	case VK_BLEND_FACTOR_ZERO:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_ALL;
	case VK_BLEND_FACTOR_ONE:
		return V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE;
	case VK_BLEND_FACTOR_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0
				: V_028760_BLEND_OPT_PRESERVE_C1_IGNORE_C0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1
				: V_028760_BLEND_OPT_PRESERVE_C0_IGNORE_C1;
	case VK_BLEND_FACTOR_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A1_IGNORE_A0;
	case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
		return V_028760_BLEND_OPT_PRESERVE_A0_IGNORE_A1;
	case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
		return is_alpha ? V_028760_BLEND_OPT_PRESERVE_ALL_IGNORE_NONE
				: V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;
	default:
		return V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
	}
}

/**
 * Get rid of DST in the blend factors by commuting the operands:
 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
 */
static void si_blend_remove_dst(unsigned *func, unsigned *src_factor,
				unsigned *dst_factor, unsigned expected_dst,
				unsigned replacement_src)
{
	if (*src_factor == expected_dst &&
	    *dst_factor == VK_BLEND_FACTOR_ZERO) {
		*src_factor = VK_BLEND_FACTOR_ZERO;
		*dst_factor = replacement_src;

		/* Commuting the operands requires reversing subtractions. */
		if (*func == VK_BLEND_OP_SUBTRACT)
			*func = VK_BLEND_OP_REVERSE_SUBTRACT;
		else if (*func == VK_BLEND_OP_REVERSE_SUBTRACT)
			*func = VK_BLEND_OP_SUBTRACT;
	}
}

static bool si_blend_factor_uses_dst(unsigned factor)
{
	return factor == VK_BLEND_FACTOR_DST_COLOR ||
		factor == VK_BLEND_FACTOR_DST_ALPHA ||
		factor == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
		factor == VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA ||
		factor == VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
}

static bool is_dual_src(VkBlendFactor factor)
{
	switch (factor) {
	case VK_BLEND_FACTOR_SRC1_COLOR:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
	case VK_BLEND_FACTOR_SRC1_ALPHA:
	case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
		return true;
	default:
		return false;
	}
}

static unsigned radv_choose_spi_color_format(VkFormat vk_format,
					     bool blend_enable,
					     bool blend_need_alpha)
{
	const struct vk_format_description *desc = vk_format_description(vk_format);
	struct ac_spi_color_formats formats = {0};
	unsigned format, ntype, swap;

	format = radv_translate_colorformat(vk_format);
	ntype = radv_translate_color_numformat(vk_format, desc,
					       vk_format_get_first_non_void_channel(vk_format));
	swap = radv_translate_colorswap(vk_format, false);

	ac_choose_spi_color_formats(format, swap, ntype, false, &formats);

	if (blend_enable && blend_need_alpha)
		return formats.blend_alpha;
	else if (blend_need_alpha)
		return formats.alpha;
	else if (blend_enable)
		return formats.blend;
	else
		return formats.normal;
}

static bool
format_is_int8(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);
	int channel = vk_format_get_first_non_void_channel(format);

	return channel >= 0 && desc->channel[channel].pure_integer &&
	       desc->channel[channel].size == 8;
}

static bool
format_is_int10(VkFormat format)
{
	const struct vk_format_description *desc = vk_format_description(format);

	if (desc->nr_channels != 4)
		return false;
	for (unsigned i = 0; i < 4; i++) {
		if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
			return true;
	}
	return false;
}

static void
radv_pipeline_compute_spi_color_formats(const struct radv_pipeline *pipeline,
					const VkGraphicsPipelineCreateInfo *pCreateInfo,
					struct radv_blend_state *blend)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	unsigned col_format = 0, is_int8 = 0, is_int10 = 0;
	unsigned num_targets;

	for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
		unsigned cf;

		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED ||
		    !(blend->cb_target_mask & (0xfu << (i * 4)))) {
			cf = V_028714_SPI_SHADER_ZERO;
		} else {
			struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
			bool blend_enable =
				blend->blend_enable_4bit & (0xfu << (i * 4));

			cf = radv_choose_spi_color_format(attachment->format,
			                                  blend_enable,
			                                  blend->need_src_alpha & (1 << i));

			if (format_is_int8(attachment->format))
				is_int8 |= 1 << i;
			if (format_is_int10(attachment->format))
				is_int10 |= 1 << i;
		}

		col_format |= cf << (4 * i);
	}

	if (!(col_format & 0xf) && blend->need_src_alpha & (1 << 0)) {
		/* When a subpass doesn't have any color attachments, write the
		 * alpha channel of MRT0 when alpha coverage is enabled because
		 * the depth attachment needs it.
		 */
		col_format |= V_028714_SPI_SHADER_32_AR;
	}

	/* If the i-th target format is set, all previous target formats must
	 * be non-zero to avoid hangs.
	 */
	num_targets = (util_last_bit(col_format) + 3) / 4;
	for (unsigned i = 0; i < num_targets; i++) {
		if (!(col_format & (0xfu << (i * 4)))) {
			col_format |= V_028714_SPI_SHADER_32_R << (i * 4);
		}
	}

	/* The output for dual source blending should have the same format as
	 * the first output.
	 */
	if (blend->mrt0_is_dual_src) {
		assert(!(col_format >> 4));
		col_format |= (col_format & 0xf) << 4;
	}

	blend->spi_shader_col_format = col_format;
	blend->col_format_is_int8 = is_int8;
	blend->col_format_is_int10 = is_int10;
}

/*
 * Ordered so that for each i,
 * radv_format_meta_fs_key(radv_fs_key_format_exemplars[i]) == i.
 */
const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS] = {
	VK_FORMAT_R32_SFLOAT,
	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UNORM,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_A2R10G10B10_UINT_PACK32,
	VK_FORMAT_A2R10G10B10_SINT_PACK32,
};

unsigned radv_format_meta_fs_key(VkFormat format)
{
	unsigned col_format = radv_choose_spi_color_format(format, false, false);

	assert(col_format != V_028714_SPI_SHADER_32_AR);
	if (col_format >= V_028714_SPI_SHADER_32_AR)
		--col_format; /* Skip V_028714_SPI_SHADER_32_AR since there is no such VkFormat */

	--col_format; /* Skip V_028714_SPI_SHADER_ZERO */
	bool is_int8 = format_is_int8(format);
	bool is_int10 = format_is_int10(format);

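	/* The exemplar table above is ordered by SPI color format. The int8
	 * and int10 variants share an SPI format with their 16-bit exemplar,
	 * so they are disambiguated by an offset of 3 (int8) or 5 (int10),
	 * e.g. R16G16B16A16_UINT is index 5 and R8G8B8A8_UINT is 5 + 3 = 8.
	 */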
	return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}

static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
			       VkBlendOp op, VkBlendFactor src,
			       VkBlendFactor dst, unsigned chanmask)
{
	/* Src factor is allowed when it does not depend on Dst. */
	static const uint32_t src_allowed =
		(1u << VK_BLEND_FACTOR_ONE) |
		(1u << VK_BLEND_FACTOR_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC_ALPHA_SATURATE) |
		(1u << VK_BLEND_FACTOR_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_SRC1_ALPHA) |
		(1u << VK_BLEND_FACTOR_ZERO) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) |
		(1u << VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);

	if (dst == VK_BLEND_FACTOR_ONE &&
	    (src_allowed & (1u << src))) {
		/* Addition is commutative, but floating point addition isn't
		 * associative: subtle changes can be introduced via different
		 * rounding. Be conservative, only enable for min and max.
		 */
		if (op == VK_BLEND_OP_MAX || op == VK_BLEND_OP_MIN)
			blend->commutative_4bit |= chanmask;
	}
}

static struct radv_blend_state
radv_pipeline_init_blend_state(const struct radv_pipeline *pipeline,
			       const VkGraphicsPipelineCreateInfo *pCreateInfo,
			       const struct radv_graphics_pipeline_create_info *extra)
{
	const VkPipelineColorBlendStateCreateInfo *vkblend = radv_pipeline_get_color_blend_state(pCreateInfo);
	const VkPipelineMultisampleStateCreateInfo *vkms = radv_pipeline_get_multisample_state(pCreateInfo);
	struct radv_blend_state blend = {0};
	unsigned mode = V_028808_CB_NORMAL;
	int i;

	if (extra && extra->custom_blend_mode) {
		blend.single_cb_enable = true;
		mode = extra->custom_blend_mode;
	}

	blend.cb_color_control = 0;
	if (vkblend) {
		if (vkblend->logicOpEnable)
			blend.cb_color_control |= S_028808_ROP3(si_translate_blend_logic_op(vkblend->logicOp));
		else
			blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);
	}

	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
		S_028B70_OFFSET_ROUND(1);

	if (vkms && vkms->alphaToCoverageEnable) {
		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
		blend.need_src_alpha |= 0x1;
	}

	blend.cb_target_mask = 0;
	if (vkblend) {
		for (i = 0; i < vkblend->attachmentCount; i++) {
			const VkPipelineColorBlendAttachmentState *att = &vkblend->pAttachments[i];
			unsigned blend_cntl = 0;
			unsigned srcRGB_opt, dstRGB_opt, srcA_opt, dstA_opt;
			VkBlendOp eqRGB = att->colorBlendOp;
			VkBlendFactor srcRGB = att->srcColorBlendFactor;
			VkBlendFactor dstRGB = att->dstColorBlendFactor;
			VkBlendOp eqA = att->alphaBlendOp;
			VkBlendFactor srcA = att->srcAlphaBlendFactor;
			VkBlendFactor dstA = att->dstAlphaBlendFactor;

			blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);

			if (!att->colorWriteMask)
				continue;

			/* Ignore other blend targets if dual-source blending
			 * is enabled to prevent wrong behaviour.
			 */
			if (blend.mrt0_is_dual_src)
				continue;

			blend.cb_target_mask |= (unsigned)att->colorWriteMask << (4 * i);
			blend.cb_target_enabled_4bit |= 0xfu << (4 * i);
			if (!att->blendEnable) {
				blend.cb_blend_control[i] = blend_cntl;
				continue;
			}

			if (is_dual_src(srcRGB) || is_dual_src(dstRGB) || is_dual_src(srcA) || is_dual_src(dstA))
				if (i == 0)
					blend.mrt0_is_dual_src = true;

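			/* MIN/MAX ignore the blend factors (both in Vulkan
			 * and in the hardware combiner), so normalize them
			 * to ONE.
			 */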
			if (eqRGB == VK_BLEND_OP_MIN || eqRGB == VK_BLEND_OP_MAX) {
				srcRGB = VK_BLEND_FACTOR_ONE;
				dstRGB = VK_BLEND_FACTOR_ONE;
			}
			if (eqA == VK_BLEND_OP_MIN || eqA == VK_BLEND_OP_MAX) {
				srcA = VK_BLEND_FACTOR_ONE;
				dstA = VK_BLEND_FACTOR_ONE;
			}

			radv_blend_check_commutativity(&blend, eqRGB, srcRGB, dstRGB,
						       0x7u << (4 * i));
			radv_blend_check_commutativity(&blend, eqA, srcA, dstA,
						       0x8u << (4 * i));

			/* Blending optimizations for RB+.
			 * These transformations don't change the behavior.
			 *
			 * First, get rid of DST in the blend factors:
			 *    func(src * DST, dst * 0) ---> func(src * 0, dst * SRC)
			 */
			si_blend_remove_dst(&eqRGB, &srcRGB, &dstRGB,
					    VK_BLEND_FACTOR_DST_COLOR,
					    VK_BLEND_FACTOR_SRC_COLOR);

			si_blend_remove_dst(&eqA, &srcA, &dstA,
					    VK_BLEND_FACTOR_DST_COLOR,
					    VK_BLEND_FACTOR_SRC_COLOR);

			si_blend_remove_dst(&eqA, &srcA, &dstA,
					    VK_BLEND_FACTOR_DST_ALPHA,
					    VK_BLEND_FACTOR_SRC_ALPHA);

			/* Look up the ideal settings from tables. */
			srcRGB_opt = si_translate_blend_opt_factor(srcRGB, false);
			dstRGB_opt = si_translate_blend_opt_factor(dstRGB, false);
			srcA_opt = si_translate_blend_opt_factor(srcA, true);
			dstA_opt = si_translate_blend_opt_factor(dstA, true);

			/* Handle interdependencies. */
			if (si_blend_factor_uses_dst(srcRGB))
				dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;
			if (si_blend_factor_uses_dst(srcA))
				dstA_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_NONE;

			if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE &&
			    (dstRGB == VK_BLEND_FACTOR_ZERO ||
			     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			     dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE))
				dstRGB_opt = V_028760_BLEND_OPT_PRESERVE_NONE_IGNORE_A0;

			/* Set the final value. */
			blend.sx_mrt_blend_opt[i] =
				S_028760_COLOR_SRC_OPT(srcRGB_opt) |
				S_028760_COLOR_DST_OPT(dstRGB_opt) |
				S_028760_COLOR_COMB_FCN(si_translate_blend_opt_function(eqRGB)) |
				S_028760_ALPHA_SRC_OPT(srcA_opt) |
				S_028760_ALPHA_DST_OPT(dstA_opt) |
				S_028760_ALPHA_COMB_FCN(si_translate_blend_opt_function(eqA));
			blend_cntl |= S_028780_ENABLE(1);

			blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
			blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(srcRGB));
			blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(dstRGB));
			if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
				blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
				blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
				blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(srcA));
				blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(dstA));
			}
			blend.cb_blend_control[i] = blend_cntl;

			blend.blend_enable_4bit |= 0xfu << (i * 4);

			if (srcRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA ||
			    srcRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
			    dstRGB == VK_BLEND_FACTOR_SRC_ALPHA_SATURATE ||
			    srcRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ||
			    dstRGB == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
				blend.need_src_alpha |= 1 << i;
		}
		for (i = vkblend->attachmentCount; i < 8; i++) {
			blend.cb_blend_control[i] = 0;
			blend.sx_mrt_blend_opt[i] = S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED) | S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
		}
	}

	if (pipeline->device->physical_device->rad_info.has_rbplus) {
		/* Disable RB+ blend optimizations for dual source blending. */
		if (blend.mrt0_is_dual_src) {
			for (i = 0; i < 8; i++) {
				blend.sx_mrt_blend_opt[i] =
					S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
					S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
			}
		}

		/* RB+ doesn't work with dual source blending, logic op and
		 * RESOLVE.
		 */
		if (blend.mrt0_is_dual_src ||
		    (vkblend && vkblend->logicOpEnable) ||
		    mode == V_028808_CB_RESOLVE)
			blend.cb_color_control |= S_028808_DISABLE_DUAL_QUAD(1);
	}

	if (blend.cb_target_mask)
		blend.cb_color_control |= S_028808_MODE(mode);
	else
		blend.cb_color_control |= S_028808_MODE(V_028808_CB_DISABLE);

	radv_pipeline_compute_spi_color_formats(pipeline, pCreateInfo, &blend);
	return blend;
}

static uint32_t si_translate_fill(VkPolygonMode func)
{
	switch (func) {
	case VK_POLYGON_MODE_FILL:
		return V_028814_X_DRAW_TRIANGLES;
	case VK_POLYGON_MODE_LINE:
		return V_028814_X_DRAW_LINES;
	case VK_POLYGON_MODE_POINT:
		return V_028814_X_DRAW_POINTS;
	default:
		assert(0);
		return V_028814_X_DRAW_POINTS;
	}
}

static uint8_t radv_pipeline_get_ps_iter_samples(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = pCreateInfo->pMultisampleState;
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
	uint32_t ps_iter_samples = 1;
	uint32_t num_samples;

	/* From the Vulkan 1.1.129 spec, 26.7. Sample Shading:
	 *
	 * "If the VK_AMD_mixed_attachment_samples extension is enabled and the
	 *  subpass uses color attachments, totalSamples is the number of
	 *  samples of the color attachments. Otherwise, totalSamples is the
	 *  value of VkPipelineMultisampleStateCreateInfo::rasterizationSamples
	 *  specified at pipeline creation time."
	 */
	if (subpass->has_color_att) {
		num_samples = subpass->color_sample_count;
	} else {
		num_samples = vkms->rasterizationSamples;
	}

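	/* E.g. minSampleShading = 0.3 with 8x MSAA gives ceil(2.4) = 3,
	 * rounded up to the next power of two, i.e. 4 iterated samples.
	 */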
	if (vkms->sampleShadingEnable) {
		ps_iter_samples = ceilf(vkms->minSampleShading * num_samples);
		ps_iter_samples = util_next_power_of_two(ps_iter_samples);
	}
	return ps_iter_samples;
}

static bool
radv_is_depth_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->depthTestEnable &&
	       pCreateInfo->depthWriteEnable &&
	       pCreateInfo->depthCompareOp != VK_COMPARE_OP_NEVER;
}

static bool
radv_writes_stencil(const VkStencilOpState *state)
{
	return state->writeMask &&
	       (state->failOp != VK_STENCIL_OP_KEEP ||
		state->passOp != VK_STENCIL_OP_KEEP ||
		state->depthFailOp != VK_STENCIL_OP_KEEP);
}

static bool
radv_is_stencil_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return pCreateInfo->stencilTestEnable &&
	       (radv_writes_stencil(&pCreateInfo->front) ||
		radv_writes_stencil(&pCreateInfo->back));
}

static bool
radv_is_ds_write_enabled(const VkPipelineDepthStencilStateCreateInfo *pCreateInfo)
{
	return radv_is_depth_write_enabled(pCreateInfo) ||
	       radv_is_stencil_write_enabled(pCreateInfo);
}

static bool
radv_order_invariant_stencil_op(VkStencilOp op)
{
	/* REPLACE is normally order invariant, except when the stencil
	 * reference value is written by the fragment shader. Tracking this
	 * interaction does not seem worth the effort, so be conservative.
	 */
	return op != VK_STENCIL_OP_INCREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_DECREMENT_AND_CLAMP &&
	       op != VK_STENCIL_OP_REPLACE;
}

static bool
radv_order_invariant_stencil_state(const VkStencilOpState *state)
{
	/* Compute whether, assuming Z writes are disabled, this stencil state
	 * is order invariant in the sense that the set of passing fragments as
	 * well as the final stencil buffer result does not depend on the order
	 * of fragments.
	 */
	return !state->writeMask ||
	       /* The following assumes that Z writes are disabled. */
	       (state->compareOp == VK_COMPARE_OP_ALWAYS &&
		radv_order_invariant_stencil_op(state->passOp) &&
		radv_order_invariant_stencil_op(state->depthFailOp)) ||
	       (state->compareOp == VK_COMPARE_OP_NEVER &&
		radv_order_invariant_stencil_op(state->failOp));
}

static bool
radv_is_state_dynamic(const VkGraphicsPipelineCreateInfo *pCreateInfo,
		      VkDynamicState state)
{
	if (pCreateInfo->pDynamicState) {
		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
		for (uint32_t i = 0; i < count; i++) {
			if (pCreateInfo->pDynamicState->pDynamicStates[i] == state)
				return true;
		}
	}

	return false;
}

static bool
radv_pipeline_has_dynamic_ds_states(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	VkDynamicState ds_states[] = {
		VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
		VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
		VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
		VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
		VK_DYNAMIC_STATE_STENCIL_OP_EXT,
	};

	for (uint32_t i = 0; i < ARRAY_SIZE(ds_states); i++) {
		if (radv_is_state_dynamic(pCreateInfo, ds_states[i]))
			return true;
	}

	return false;
}

static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
				const struct radv_blend_state *blend,
				const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
	const VkPipelineDepthStencilStateCreateInfo *vkds = radv_pipeline_get_depth_stencil_state(pCreateInfo);
	const VkPipelineColorBlendStateCreateInfo *vkblend = radv_pipeline_get_color_blend_state(pCreateInfo);
	unsigned colormask = blend->cb_target_enabled_4bit;

	if (!pipeline->device->physical_device->out_of_order_rast_allowed)
		return false;

	/* Be conservative if a logic operation is enabled with color buffers. */
	if (colormask && vkblend && vkblend->logicOpEnable)
		return false;

	/* Be conservative if an extended dynamic depth/stencil state is
	 * enabled because the driver can't update out-of-order rasterization
	 * dynamically.
	 */
	if (radv_pipeline_has_dynamic_ds_states(pCreateInfo))
		return false;

	/* Default depth/stencil invariance when no attachment is bound. */
	struct radv_dsa_order_invariance dsa_order_invariant = {
		.zs = true, .pass_set = true
	};

	if (vkds) {
		struct radv_render_pass_attachment *attachment =
			pass->attachments + subpass->depth_stencil_attachment->attachment;
		bool has_stencil = vk_format_is_stencil(attachment->format);
		struct radv_dsa_order_invariance order_invariance[2];
		struct radv_shader_variant *ps =
			pipeline->shaders[MESA_SHADER_FRAGMENT];

		/* Compute depth/stencil order invariance in order to know if
		 * it's safe to enable out-of-order.
		 */
		bool zfunc_is_ordered =
			vkds->depthCompareOp == VK_COMPARE_OP_NEVER ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS ||
			vkds->depthCompareOp == VK_COMPARE_OP_LESS_OR_EQUAL ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER ||
			vkds->depthCompareOp == VK_COMPARE_OP_GREATER_OR_EQUAL;

		bool nozwrite_and_order_invariant_stencil =
			!radv_is_ds_write_enabled(vkds) ||
			(!radv_is_depth_write_enabled(vkds) &&
			 radv_order_invariant_stencil_state(&vkds->front) &&
			 radv_order_invariant_stencil_state(&vkds->back));

		order_invariance[1].zs =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 zfunc_is_ordered);
		order_invariance[0].zs =
			!radv_is_depth_write_enabled(vkds) || zfunc_is_ordered;

		order_invariance[1].pass_set =
			nozwrite_and_order_invariant_stencil ||
			(!radv_is_stencil_write_enabled(vkds) &&
			 (vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			  vkds->depthCompareOp == VK_COMPARE_OP_NEVER));
		order_invariance[0].pass_set =
			!radv_is_depth_write_enabled(vkds) ||
			(vkds->depthCompareOp == VK_COMPARE_OP_ALWAYS ||
			 vkds->depthCompareOp == VK_COMPARE_OP_NEVER);

		dsa_order_invariant = order_invariance[has_stencil];
		if (!dsa_order_invariant.zs)
			return false;

		/* The set of PS invocations is always order invariant,
		 * except when early Z/S tests are requested.
		 */
		if (ps &&
		    ps->info.ps.writes_memory &&
		    ps->info.ps.early_fragment_test &&
		    !dsa_order_invariant.pass_set)
			return false;

		/* Determine if out-of-order rasterization should be disabled
		 * when occlusion queries are used.
		 */
		pipeline->graphics.disable_out_of_order_rast_for_occlusion =
			!dsa_order_invariant.pass_set;
	}

	/* No color buffers are enabled for writing. */
	if (!colormask)
		return true;

	unsigned blendmask = colormask & blend->blend_enable_4bit;

	if (blendmask) {
		/* Only commutative blending. */
		if (blendmask & ~blend->commutative_4bit)
			return false;

		if (!dsa_order_invariant.pass_set)
			return false;
	}

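	/* Color targets written without blending overwrite the destination,
	 * so the result depends on which fragment arrives last; every
	 * enabled target must therefore use (commutative) blending.
	 */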
	if (colormask & ~blendmask)
		return false;

	return true;
}

static const VkConservativeRasterizationModeEXT
radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *pCreateInfo)
{
	const VkPipelineRasterizationConservativeStateCreateInfoEXT *conservative_raster =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT);

	if (!conservative_raster)
		return VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT;
	return conservative_raster->conservativeRasterizationMode;
}

static void
radv_pipeline_init_multisample_state(struct radv_pipeline *pipeline,
				     const struct radv_blend_state *blend,
				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	const VkPipelineMultisampleStateCreateInfo *vkms = radv_pipeline_get_multisample_state(pCreateInfo);
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	unsigned num_tile_pipes = pipeline->device->physical_device->rad_info.num_tile_pipes;
	const VkConservativeRasterizationModeEXT mode =
		radv_get_conservative_raster_mode(pCreateInfo->pRasterizationState);
	bool out_of_order_rast = false;
	int ps_iter_samples = 1;
	uint32_t mask = 0xffff;

	if (vkms) {
		ms->num_samples = vkms->rasterizationSamples;

		/* From the Vulkan 1.1.129 spec, 26.7. Sample Shading:
		 *
		 * "Sample shading is enabled for a graphics pipeline:
		 *
		 * - If the interface of the fragment shader entry point of the
		 *   graphics pipeline includes an input variable decorated
		 *   with SampleId or SamplePosition. In this case
		 *   minSampleShadingFactor takes the value 1.0.
		 * - Else if the sampleShadingEnable member of the
		 *   VkPipelineMultisampleStateCreateInfo structure specified
		 *   when creating the graphics pipeline is set to VK_TRUE. In
		 *   this case minSampleShadingFactor takes the value of
		 *   VkPipelineMultisampleStateCreateInfo::minSampleShading.
		 *
		 * Otherwise, sample shading is considered disabled."
		 */
		if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.force_persample) {
			ps_iter_samples = ms->num_samples;
		} else {
			ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo);
		}
	} else {
		ms->num_samples = 1;
	}

	const struct VkPipelineRasterizationStateRasterizationOrderAMD *raster_order =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext, PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD);
	if (raster_order && raster_order->rasterizationOrder == VK_RASTERIZATION_ORDER_RELAXED_AMD) {
		/* Out-of-order rasterization is explicitly enabled by the
		 * application.
		 */
		out_of_order_rast = true;
	} else {
		/* Determine if the driver can enable out-of-order
		 * rasterization internally.
		 */
		out_of_order_rast =
			radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
	}

	ms->pa_sc_aa_config = 0;
	ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
		      S_028804_INCOHERENT_EQAA_READS(1) |
		      S_028804_INTERPOLATE_COMP_Z(1) |
		      S_028804_STATIC_ANCHOR_ASSOCIATIONS(1);

	/* Adjust MSAA state if conservative rasterization is enabled. */
	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
		ms->pa_sc_aa_config |= S_028BE0_AA_MASK_CENTROID_DTMN(1);

		ms->db_eqaa |= S_028804_ENABLE_POSTZ_OVERRASTERIZATION(1) |
			       S_028804_OVERRASTERIZATION_AMOUNT(4);
	}

	ms->pa_sc_mode_cntl_1 =
		S_028A4C_WALK_FENCE_ENABLE(1) | //TODO linear dst fixes
		S_028A4C_WALK_FENCE_SIZE(num_tile_pipes == 2 ? 2 : 3) |
		S_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(out_of_order_rast) |
		S_028A4C_OUT_OF_ORDER_WATER_MARK(0x7) |
		/* always 1: */
		S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1) |
		S_028A4C_SUPERTILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_TILE_WALK_ORDER_ENABLE(1) |
		S_028A4C_MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE(1) |
		S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		S_028A4C_FORCE_EOV_REZ_ENABLE(1);
	ms->pa_sc_mode_cntl_0 = S_028A48_ALTERNATE_RBS_PER_TILE(pipeline->device->physical_device->rad_info.chip_class >= GFX9) |
	                        S_028A48_VPORT_SCISSOR_ENABLE(1);

	const VkPipelineRasterizationLineStateCreateInfoEXT *rast_line =
		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				     PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
	if (rast_line) {
		ms->pa_sc_mode_cntl_0 |= S_028A48_LINE_STIPPLE_ENABLE(rast_line->stippledLineEnable);
		if (rast_line->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT) {
			/* From the Vulkan spec 1.1.129:
			 *
			 * "When VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT lines
			 *  are being rasterized, sample locations may all be
			 *  treated as being at the pixel center (this may
			 *  affect attribute and depth interpolation)."
			 */
			ms->num_samples = 1;
		}
	}

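	/* The sample-count register fields below (MAX_ANCHOR_SAMPLES,
	 * PS_ITER_SAMPLES, MSAA_NUM_SAMPLES, ...) are all encoded as the
	 * log2 of the sample count.
	 */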
	if (ms->num_samples > 1) {
		RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
		struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
		uint32_t z_samples = subpass->depth_stencil_attachment ? subpass->depth_sample_count : ms->num_samples;
		unsigned log_samples = util_logbase2(ms->num_samples);
		unsigned log_z_samples = util_logbase2(z_samples);
		unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
		ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
		ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_z_samples) |
			S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
			S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
			S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
		ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
			S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
			S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples) | /* CM_R_028BE0_PA_SC_AA_CONFIG */
			S_028BE0_COVERED_CENTROID_IS_CENTER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3);
		ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
		if (ps_iter_samples > 1)
			pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
	}

	if (vkms && vkms->pSampleMask) {
		mask = vkms->pSampleMask[0] & 0xffff;
	}

	ms->pa_sc_aa_mask[0] = mask | (mask << 16);
	ms->pa_sc_aa_mask[1] = mask | (mask << 16);
}

static bool
radv_prim_can_use_guardband(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return false;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return true;
	default:
		unreachable("unhandled primitive type");
	}
}

static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
	switch (gl_prim) {
	case 0: /* GL_POINTS */
		return V_028A6C_POINTLIST;
	case 1: /* GL_LINES */
	case 3: /* GL_LINE_STRIP */
	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
	case 0x8E7A: /* GL_ISOLINES */
		return V_028A6C_LINESTRIP;

	case 4: /* GL_TRIANGLES */
	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
	case 5: /* GL_TRIANGLE_STRIP */
	case 7: /* GL_QUADS */
		return V_028A6C_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static uint32_t
si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
{
	switch (topology) {
	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
		return V_028A6C_POINTLIST;
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
		return V_028A6C_LINESTRIP;
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
		return V_028A6C_TRISTRIP;
	default:
		assert(0);
		return 0;
	}
}

static unsigned radv_dynamic_state_mask(VkDynamicState state)
{
	switch (state) {
	case VK_DYNAMIC_STATE_VIEWPORT:
	case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
		return RADV_DYNAMIC_VIEWPORT;
	case VK_DYNAMIC_STATE_SCISSOR:
	case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
		return RADV_DYNAMIC_SCISSOR;
	case VK_DYNAMIC_STATE_LINE_WIDTH:
		return RADV_DYNAMIC_LINE_WIDTH;
	case VK_DYNAMIC_STATE_DEPTH_BIAS:
		return RADV_DYNAMIC_DEPTH_BIAS;
	case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
		return RADV_DYNAMIC_BLEND_CONSTANTS;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
		return RADV_DYNAMIC_DEPTH_BOUNDS;
	case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
		return RADV_DYNAMIC_STENCIL_COMPARE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
		return RADV_DYNAMIC_STENCIL_WRITE_MASK;
	case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
		return RADV_DYNAMIC_STENCIL_REFERENCE;
	case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT:
		return RADV_DYNAMIC_DISCARD_RECTANGLE;
	case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
		return RADV_DYNAMIC_SAMPLE_LOCATIONS;
	case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
		return RADV_DYNAMIC_LINE_STIPPLE;
	case VK_DYNAMIC_STATE_CULL_MODE_EXT:
		return RADV_DYNAMIC_CULL_MODE;
	case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
		return RADV_DYNAMIC_FRONT_FACE;
	case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
		return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY;
	case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
		return RADV_DYNAMIC_DEPTH_TEST_ENABLE;
	case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
		return RADV_DYNAMIC_DEPTH_WRITE_ENABLE;
	case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
		return RADV_DYNAMIC_DEPTH_COMPARE_OP;
	case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
		return RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
	case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
		return RADV_DYNAMIC_STENCIL_TEST_ENABLE;
	case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
		return RADV_DYNAMIC_STENCIL_OP;
	case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
		return RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
	default:
		unreachable("Unhandled dynamic state");
	}
}

static uint32_t radv_pipeline_needed_dynamic_state(const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
	uint32_t states = RADV_DYNAMIC_ALL;

	/* If rasterization is disabled we do not care about any of the
	 * dynamic states, since they are all rasterization related only,
	 * except primitive topology and vertex binding stride.
	 */
	if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
		return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY |
		       RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;

	if (!pCreateInfo->pRasterizationState->depthBiasEnable)
		states &= ~RADV_DYNAMIC_DEPTH_BIAS;

	if (!pCreateInfo->pDepthStencilState ||
	    (!pCreateInfo->pDepthStencilState->depthBoundsTestEnable &&
	     !radv_is_state_dynamic(pCreateInfo, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT)))
		states &= ~RADV_DYNAMIC_DEPTH_BOUNDS;

	if (!pCreateInfo->pDepthStencilState ||
	    (!pCreateInfo->pDepthStencilState->stencilTestEnable &&
	     !radv_is_state_dynamic(pCreateInfo, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT)))
		states &= ~(RADV_DYNAMIC_STENCIL_COMPARE_MASK |
		            RADV_DYNAMIC_STENCIL_WRITE_MASK |
		            RADV_DYNAMIC_STENCIL_REFERENCE);

	if (!vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_DISCARD_RECTANGLE;

	if (!pCreateInfo->pMultisampleState ||
	    !vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
				  PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_SAMPLE_LOCATIONS;

	if (!pCreateInfo->pRasterizationState ||
	    !vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
				  PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT))
		states &= ~RADV_DYNAMIC_LINE_STIPPLE;

	/* TODO: blend constants & line width. */

	return states;
}

1385 static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline * pipeline)1386 radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline)
1387 {
1388 	struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
1389 	const struct radv_device *device = pipeline->device;
1390 
1391 	if (radv_pipeline_has_tess(pipeline))
1392 		ia_multi_vgt_param.primgroup_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
1393 	else if (radv_pipeline_has_gs(pipeline))
1394 		ia_multi_vgt_param.primgroup_size = 64;
1395 	else
1396 		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */
1397 
1398 	/* GS requirement. */
1399 	ia_multi_vgt_param.partial_es_wave = false;
1400 	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
1401 		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
1402 			ia_multi_vgt_param.partial_es_wave = true;
1403 
1404 	ia_multi_vgt_param.ia_switch_on_eoi = false;
1405 	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input)
1406 		ia_multi_vgt_param.ia_switch_on_eoi = true;
1407 	if (radv_pipeline_has_gs(pipeline) &&
1408 	    pipeline->shaders[MESA_SHADER_GEOMETRY]->info.uses_prim_id)
1409 		ia_multi_vgt_param.ia_switch_on_eoi = true;
1410 	if (radv_pipeline_has_tess(pipeline)) {
1411 		/* SWITCH_ON_EOI must be set if PrimID is used. */
1412 		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
1413 		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
1414 			ia_multi_vgt_param.ia_switch_on_eoi = true;
1415 	}
1416 
1417 	ia_multi_vgt_param.partial_vs_wave = false;
1418 	if (radv_pipeline_has_tess(pipeline)) {
1419 		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
1420 		if ((device->physical_device->rad_info.family == CHIP_TAHITI ||
1421 		     device->physical_device->rad_info.family == CHIP_PITCAIRN ||
1422 		     device->physical_device->rad_info.family == CHIP_BONAIRE) &&
1423 		    radv_pipeline_has_gs(pipeline))
1424 			ia_multi_vgt_param.partial_vs_wave = true;
1425 		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
1426 		if (device->physical_device->rad_info.has_distributed_tess) {
1427 			if (radv_pipeline_has_gs(pipeline)) {
1428 				if (device->physical_device->rad_info.chip_class <= GFX8)
1429 					ia_multi_vgt_param.partial_es_wave = true;
1430 			} else {
1431 				ia_multi_vgt_param.partial_vs_wave = true;
1432 			}
1433 		}
1434 	}
1435 
1436 	if (radv_pipeline_has_gs(pipeline)) {
1437 		/* On these chips there is the possibility of a hang if the
1438 		 * pipeline uses a GS and partial_vs_wave is not set.
1439 		 *
1440 		 * This mostly does not hit 4-SE chips, as those typically set
1441 		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
1442 		 * with GS due to another workaround.
1443 		 *
1444 		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
1445 		 */
1446 		if (device->physical_device->rad_info.family == CHIP_TONGA ||
1447 		    device->physical_device->rad_info.family == CHIP_FIJI ||
1448 		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
1449 		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
1450 		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
1451 		    device->physical_device->rad_info.family == CHIP_VEGAM) {
1452 			ia_multi_vgt_param.partial_vs_wave = true;
1453 		}
1454 	}
1455 
1456 	ia_multi_vgt_param.base =
1457 		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
1458 		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
1459 		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
1460 		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
1461 		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);
1462 
1463 	return ia_multi_vgt_param;
1464 }
1465 
1466 static void
1467 radv_pipeline_init_input_assembly_state(struct radv_pipeline *pipeline,
1468 					const VkGraphicsPipelineCreateInfo *pCreateInfo,
1469 					const struct radv_graphics_pipeline_create_info *extra)
1470 {
1471 	const VkPipelineInputAssemblyStateCreateInfo *ia_state = pCreateInfo->pInputAssemblyState;
1472 	struct radv_shader_variant *tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];
1473 	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
1474 
1475 	pipeline->graphics.prim_restart_enable = !!ia_state->primitiveRestartEnable;
1476 	pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(ia_state->topology);
1477 
1478 	if (radv_pipeline_has_gs(pipeline)) {
1479 		if (si_conv_gl_prim_to_gs_out(gs->info.gs.output_prim) == V_028A6C_TRISTRIP)
1480 			pipeline->graphics.can_use_guardband = true;
1481 	} else if (radv_pipeline_has_tess(pipeline)) {
1482 		if (!tes->info.tes.point_mode &&
1483 		    si_conv_gl_prim_to_gs_out(tes->info.tes.primitive_mode) == V_028A6C_TRISTRIP)
1484 			pipeline->graphics.can_use_guardband = true;
1485 	}
1486 
1487 	if (extra && extra->use_rectlist) {
1488 		pipeline->graphics.can_use_guardband = true;
1489 	}
1490 
1491 	pipeline->graphics.ia_multi_vgt_param =
1492 		radv_compute_ia_multi_vgt_param_helpers(pipeline);
1493 }
1494 
1495 static void
1496 radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
1497 				 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1498 				 const struct radv_graphics_pipeline_create_info *extra)
1499 {
1500 	uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
1501 	uint32_t states = needed_states;
1502 	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
1503 	struct radv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
1504 
1505 	pipeline->dynamic_state = default_dynamic_state;
1506 	pipeline->graphics.needed_dynamic_state = needed_states;
1507 
1508 	if (pCreateInfo->pDynamicState) {
1509 		/* Remove all of the states that are marked as dynamic */
1510 		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1511 		for (uint32_t s = 0; s < count; s++)
1512 			states &= ~radv_dynamic_state_mask(pCreateInfo->pDynamicState->pDynamicStates[s]);
1513 	}
1514 
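	/* From here on, "states" holds only the states baked into the
	 * pipeline; "needed_states" still includes the ones the app marked
	 * dynamic.
	 */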
1515 	struct radv_dynamic_state *dynamic = &pipeline->dynamic_state;
1516 
1517 	if (needed_states & RADV_DYNAMIC_VIEWPORT) {
1518 		assert(pCreateInfo->pViewportState);
1519 
1520 		dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1521 		if (states & RADV_DYNAMIC_VIEWPORT) {
1522 			typed_memcpy(dynamic->viewport.viewports,
1523 			             pCreateInfo->pViewportState->pViewports,
1524 			             pCreateInfo->pViewportState->viewportCount);
1525 		}
1526 	}
1527 
1528 	if (needed_states & RADV_DYNAMIC_SCISSOR) {
1529 		dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1530 		if (states & RADV_DYNAMIC_SCISSOR) {
1531 			typed_memcpy(dynamic->scissor.scissors,
1532 			             pCreateInfo->pViewportState->pScissors,
1533 			             pCreateInfo->pViewportState->scissorCount);
1534 		}
1535 	}
1536 
1537 	if (states & RADV_DYNAMIC_LINE_WIDTH) {
1538 		assert(pCreateInfo->pRasterizationState);
1539 		dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1540 	}
1541 
1542 	if (states & RADV_DYNAMIC_DEPTH_BIAS) {
1543 		assert(pCreateInfo->pRasterizationState);
1544 		dynamic->depth_bias.bias =
1545 			pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1546 		dynamic->depth_bias.clamp =
1547 			pCreateInfo->pRasterizationState->depthBiasClamp;
1548 		dynamic->depth_bias.slope =
1549 			pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1550 	}
1551 
1552 	/* Section 9.2 of the Vulkan 1.0.15 spec says:
1553 	 *
1554 	 *    pColorBlendState is [...] NULL if the pipeline has rasterization
1555 	 *    disabled or if the subpass of the render pass the pipeline is
1556 	 *    created against does not use any color attachments.
1557 	 */
1558 	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
1559 		assert(pCreateInfo->pColorBlendState);
1560 		typed_memcpy(dynamic->blend_constants,
1561 			     pCreateInfo->pColorBlendState->blendConstants, 4);
1562 	}
1563 
1564 	if (states & RADV_DYNAMIC_CULL_MODE) {
1565 		dynamic->cull_mode =
1566 			pCreateInfo->pRasterizationState->cullMode;
1567 	}
1568 
1569 	if (states & RADV_DYNAMIC_FRONT_FACE) {
1570 		dynamic->front_face =
1571 			pCreateInfo->pRasterizationState->frontFace;
1572 	}
1573 
1574 	if (states & RADV_DYNAMIC_PRIMITIVE_TOPOLOGY) {
1575 		dynamic->primitive_topology =
1576 			si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
1577 		if (extra && extra->use_rectlist) {
1578 			dynamic->primitive_topology = V_008958_DI_PT_RECTLIST;
1579 		}
1580 	}
1581 
1582 	/* If there is no depthstencil attachment, then don't read
1583 	 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1584 	 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1585 	 * no need to override the depthstencil defaults in
1586 	 * radv_pipeline::dynamic_state when there is no depthstencil attachment.
1587 	 *
1588 	 * Section 9.2 of the Vulkan 1.0.15 spec says:
1589 	 *
1590 	 *    pDepthStencilState is [...] NULL if the pipeline has rasterization
1591 	 *    disabled or if the subpass of the render pass the pipeline is created
1592 	 *    against does not use a depth/stencil attachment.
1593 	 */
1594 	if (needed_states && subpass->depth_stencil_attachment) {
1595 		assert(pCreateInfo->pDepthStencilState);
1596 
1597 		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
1598 			dynamic->depth_bounds.min =
1599 				pCreateInfo->pDepthStencilState->minDepthBounds;
1600 			dynamic->depth_bounds.max =
1601 				pCreateInfo->pDepthStencilState->maxDepthBounds;
1602 		}
1603 
1604 		if (states & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
1605 			dynamic->stencil_compare_mask.front =
1606 				pCreateInfo->pDepthStencilState->front.compareMask;
1607 			dynamic->stencil_compare_mask.back =
1608 				pCreateInfo->pDepthStencilState->back.compareMask;
1609 		}
1610 
1611 		if (states & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
1612 			dynamic->stencil_write_mask.front =
1613 				pCreateInfo->pDepthStencilState->front.writeMask;
1614 			dynamic->stencil_write_mask.back =
1615 				pCreateInfo->pDepthStencilState->back.writeMask;
1616 		}
1617 
1618 		if (states & RADV_DYNAMIC_STENCIL_REFERENCE) {
1619 			dynamic->stencil_reference.front =
1620 				pCreateInfo->pDepthStencilState->front.reference;
1621 			dynamic->stencil_reference.back =
1622 				pCreateInfo->pDepthStencilState->back.reference;
1623 		}
1624 
1625 		if (states & RADV_DYNAMIC_DEPTH_TEST_ENABLE) {
1626 			dynamic->depth_test_enable =
1627 				pCreateInfo->pDepthStencilState->depthTestEnable;
1628 		}
1629 
1630 		if (states & RADV_DYNAMIC_DEPTH_WRITE_ENABLE) {
1631 			dynamic->depth_write_enable =
1632 				pCreateInfo->pDepthStencilState->depthWriteEnable;
1633 		}
1634 
1635 		if (states & RADV_DYNAMIC_DEPTH_COMPARE_OP) {
1636 			dynamic->depth_compare_op =
1637 				pCreateInfo->pDepthStencilState->depthCompareOp;
1638 		}
1639 
1640 		if (states & RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
1641 			dynamic->depth_bounds_test_enable =
1642 				pCreateInfo->pDepthStencilState->depthBoundsTestEnable;
1643 		}
1644 
1645 		if (states & RADV_DYNAMIC_STENCIL_TEST_ENABLE) {
1646 			dynamic->stencil_test_enable =
1647 				pCreateInfo->pDepthStencilState->stencilTestEnable;
1648 		}
1649 
1650 		if (states & RADV_DYNAMIC_STENCIL_OP) {
1651 			dynamic->stencil_op.front.compare_op =
1652 				pCreateInfo->pDepthStencilState->front.compareOp;
1653 			dynamic->stencil_op.front.fail_op =
1654 				pCreateInfo->pDepthStencilState->front.failOp;
1655 			dynamic->stencil_op.front.pass_op =
1656 				pCreateInfo->pDepthStencilState->front.passOp;
1657 			dynamic->stencil_op.front.depth_fail_op =
1658 				pCreateInfo->pDepthStencilState->front.depthFailOp;
1659 
1660 			dynamic->stencil_op.back.compare_op =
1661 				pCreateInfo->pDepthStencilState->back.compareOp;
1662 			dynamic->stencil_op.back.fail_op =
1663 				pCreateInfo->pDepthStencilState->back.failOp;
1664 			dynamic->stencil_op.back.pass_op =
1665 				pCreateInfo->pDepthStencilState->back.passOp;
1666 			dynamic->stencil_op.back.depth_fail_op =
1667 				pCreateInfo->pDepthStencilState->back.depthFailOp;
1668 		}
1669 	}
1670 
1671 	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
1672 			vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
1673 	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
1674 		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
1675 		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
1676 			typed_memcpy(dynamic->discard_rectangle.rectangles,
1677 			             discard_rectangle_info->pDiscardRectangles,
1678 			             discard_rectangle_info->discardRectangleCount);
1679 		}
1680 	}
1681 
1682 	if (needed_states & RADV_DYNAMIC_SAMPLE_LOCATIONS) {
1683 		const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_info =
1684 			vk_find_struct_const(pCreateInfo->pMultisampleState->pNext,
1685 					     PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT);
1686 		/* If sampleLocationsEnable is VK_FALSE, the default sample
1687 		 * locations are used and the values specified in
1688 		 * sampleLocationsInfo are ignored.
1689 		 */
1690 		if (sample_location_info->sampleLocationsEnable) {
1691 			const VkSampleLocationsInfoEXT *pSampleLocationsInfo =
1692 				&sample_location_info->sampleLocationsInfo;
1693 
1694 			assert(pSampleLocationsInfo->sampleLocationsCount <= MAX_SAMPLE_LOCATIONS);
1695 
1696 			dynamic->sample_location.per_pixel = pSampleLocationsInfo->sampleLocationsPerPixel;
1697 			dynamic->sample_location.grid_size = pSampleLocationsInfo->sampleLocationGridSize;
1698 			dynamic->sample_location.count = pSampleLocationsInfo->sampleLocationsCount;
1699 			typed_memcpy(&dynamic->sample_location.locations[0],
1700 				     pSampleLocationsInfo->pSampleLocations,
1701 				     pSampleLocationsInfo->sampleLocationsCount);
1702 		}
1703 	}
1704 
1705 	const VkPipelineRasterizationLineStateCreateInfoEXT *rast_line_info =
1706 		vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1707 				     PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
1708 	if (needed_states & RADV_DYNAMIC_LINE_STIPPLE) {
1709 		dynamic->line_stipple.factor = rast_line_info->lineStippleFactor;
1710 		dynamic->line_stipple.pattern = rast_line_info->lineStipplePattern;
1711 	}
1712 
1713 	if (!(states & RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE))
1714 		pipeline->graphics.uses_dynamic_stride = true;
1715 
1716 	pipeline->dynamic_state.mask = states;
1717 }
1718 
1719 static void
1720 radv_pipeline_init_raster_state(struct radv_pipeline *pipeline,
1721 				const VkGraphicsPipelineCreateInfo *pCreateInfo)
1722 {
1723 	const VkPipelineRasterizationStateCreateInfo *raster_info =
1724 		pCreateInfo->pRasterizationState;
1725 
1726 	pipeline->graphics.pa_su_sc_mode_cntl =
1727 		S_028814_FACE(raster_info->frontFace) |
1728 		S_028814_CULL_FRONT(!!(raster_info->cullMode & VK_CULL_MODE_FRONT_BIT)) |
1729 		S_028814_CULL_BACK(!!(raster_info->cullMode & VK_CULL_MODE_BACK_BIT)) |
1730 		S_028814_POLY_MODE(raster_info->polygonMode != VK_POLYGON_MODE_FILL) |
1731 		S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(raster_info->polygonMode)) |
1732 		S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(raster_info->polygonMode)) |
1733 		S_028814_POLY_OFFSET_FRONT_ENABLE(raster_info->depthBiasEnable ? 1 : 0) |
1734 		S_028814_POLY_OFFSET_BACK_ENABLE(raster_info->depthBiasEnable ? 1 : 0) |
1735 		S_028814_POLY_OFFSET_PARA_ENABLE(raster_info->depthBiasEnable ? 1 : 0);
1736 
1737 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
1738 		/* It should also be set if PERPENDICULAR_ENDCAP_ENA is set. */
1739 		pipeline->graphics.pa_su_sc_mode_cntl |=
1740 			S_028814_KEEP_TOGETHER_ENABLE(raster_info->polygonMode != VK_POLYGON_MODE_FILL);
1741 	}
1742 }
1743 
1744 static void
1745 radv_pipeline_init_depth_stencil_state(struct radv_pipeline *pipeline,
1746 				       const VkGraphicsPipelineCreateInfo *pCreateInfo)
1747 {
1748 	const VkPipelineDepthStencilStateCreateInfo *ds_info
1749 		= radv_pipeline_get_depth_stencil_state(pCreateInfo);
1750 	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
1751 	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
1752 	struct radv_render_pass_attachment *attachment = NULL;
1753 	uint32_t db_depth_control = 0;
1754 
1755 	if (subpass->depth_stencil_attachment)
1756 		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
1757 
1758 	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
1759 	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);
1760 
1761 	if (ds_info) {
1762 		if (has_depth_attachment) {
1763 			db_depth_control = S_028800_Z_ENABLE(ds_info->depthTestEnable ? 1 : 0) |
1764 			                   S_028800_Z_WRITE_ENABLE(ds_info->depthWriteEnable ? 1 : 0) |
1765 			                   S_028800_ZFUNC(ds_info->depthCompareOp) |
1766 			                   S_028800_DEPTH_BOUNDS_ENABLE(ds_info->depthBoundsTestEnable ? 1 : 0);
1767 		}
1768 
1769 		if (has_stencil_attachment && ds_info->stencilTestEnable) {
1770 			db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
1771 			db_depth_control |= S_028800_STENCILFUNC(ds_info->front.compareOp);
1772 			db_depth_control |= S_028800_STENCILFUNC_BF(ds_info->back.compareOp);
1773 		}
1774 	}
1775 
1776 	pipeline->graphics.db_depth_control = db_depth_control;
1777 }
1778 
1779 static void
1780 gfx9_get_gs_info(const struct radv_pipeline_key *key,
1781                  const struct radv_pipeline *pipeline,
1782 		 nir_shader **nir,
1783 		 struct radv_shader_info *infos,
1784 		 struct gfx9_gs_info *out)
1785 {
1786 	struct radv_shader_info *gs_info = &infos[MESA_SHADER_GEOMETRY];
1787 	struct radv_es_output_info *es_info;
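	/* On GFX9+ the ES stage is merged into the GS, so the ES output info
	 * lives in the GS shader info; on older chips it stays with the
	 * VS/TES info.
	 */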
1788 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
1789 		es_info = nir[MESA_SHADER_TESS_CTRL] ? &gs_info->tes.es_info : &gs_info->vs.es_info;
1790 	else
1791 		es_info = nir[MESA_SHADER_TESS_CTRL] ?
1792                        &infos[MESA_SHADER_TESS_EVAL].tes.es_info :
1793                        &infos[MESA_SHADER_VERTEX].vs.es_info;
1794 
1795 	unsigned gs_num_invocations = MAX2(gs_info->gs.invocations, 1);
1796 	bool uses_adjacency;
1797 	switch(key->topology) {
1798 	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
1799 	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
1800 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
1801 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
1802 		uses_adjacency = true;
1803 		break;
1804 	default:
1805 		uses_adjacency = false;
1806 		break;
1807 	}
1808 
1809 	/* All these are in dwords: */
1810 	/* We can't allow using the whole LDS, because GS waves compete with
1811 	 * other shader stages for LDS space. */
1812 	const unsigned max_lds_size = 8 * 1024;
1813 	const unsigned esgs_itemsize = es_info->esgs_itemsize / 4;
1814 	unsigned esgs_lds_size;
1815 
1816 	/* All these are per subgroup: */
1817 	const unsigned max_out_prims = 32 * 1024;
1818 	const unsigned max_es_verts = 255;
1819 	const unsigned ideal_gs_prims = 64;
1820 	unsigned max_gs_prims, gs_prims;
1821 	unsigned min_es_verts, es_verts, worst_case_es_verts;
1822 
1823 	if (uses_adjacency || gs_num_invocations > 1)
1824 		max_gs_prims = 127 / gs_num_invocations;
1825 	else
1826 		max_gs_prims = 255;
1827 
1828 	/* MAX_PRIMS_PER_SUBGROUP = gs_prims * max_vert_out * gs_invocations.
1829 	 * Make sure we don't go over the maximum value.
1830 	 */
1831 	if (gs_info->gs.vertices_out > 0) {
1832 		max_gs_prims = MIN2(max_gs_prims,
1833 				    max_out_prims /
1834 				    (gs_info->gs.vertices_out * gs_num_invocations));
1835 	}
1836 	assert(max_gs_prims > 0);
1837 
1838 	/* If the primitive has adjacency, halve the number of vertices
1839 	 * that will be reused in multiple primitives.
1840 	 */
1841 	min_es_verts = gs_info->gs.vertices_in / (uses_adjacency ? 2 : 1);
1842 
1843 	gs_prims = MIN2(ideal_gs_prims, max_gs_prims);
1844 	worst_case_es_verts = MIN2(min_es_verts * gs_prims, max_es_verts);
1845 
1846 	/* Compute ESGS LDS size based on the worst case number of ES vertices
1847 	 * needed to create the target number of GS prims per subgroup.
1848 	 */
1849 	esgs_lds_size = esgs_itemsize * worst_case_es_verts;
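	/* Worked example (triangles, no adjacency, 1 GS invocation): with a
	 * 4-dword ESGS item, min_es_verts = 3 and gs_prims = 64, so
	 * worst_case_es_verts = 192 and esgs_lds_size = 768 dwords, well
	 * under the 8K-dword limit.
	 */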
1850 
1851 	/* If total LDS usage is too big, scale down the number of GS prims
1852 	 * per subgroup until the ESGS data fits.
1853 	 */
1854 	if (esgs_lds_size > max_lds_size) {
1855 		/* Our target GS Prims Per Subgroup was too large. Calculate
1856 		 * the maximum number of GS Prims Per Subgroup that will fit
1857 		 * into LDS, capped by the maximum that the hardware can support.
1858 		 */
1859 		gs_prims = MIN2((max_lds_size / (esgs_itemsize * min_es_verts)),
1860 				max_gs_prims);
1861 		assert(gs_prims > 0);
1862 		worst_case_es_verts = MIN2(min_es_verts * gs_prims,
1863 					   max_es_verts);
1864 
1865 		esgs_lds_size = esgs_itemsize * worst_case_es_verts;
1866 		assert(esgs_lds_size <= max_lds_size);
1867 	}
1868 
1869 	/* Now calculate remaining ESGS information. */
1870 	if (esgs_lds_size)
1871 		es_verts = MIN2(esgs_lds_size / esgs_itemsize, max_es_verts);
1872 	else
1873 		es_verts = max_es_verts;
1874 
1875 	/* Vertices for adjacency primitives are not always reused, so restore
1876 	 * it for ES_VERTS_PER_SUBGRP.
1877 	 */
1878 	min_es_verts = gs_info->gs.vertices_in;
1879 
1880 	/* For normal primitives, the VGT only checks if they are past the ES
1881 	 * verts per subgroup after allocating a full GS primitive and if they
1882 	 * are, kick off a new subgroup.  But if those additional ES verts are
1883 	 * unique (e.g. not reused) we need to make sure there is enough LDS
1884 	 * unique (i.e. not reused) we need to make sure there is enough LDS
1885 	 */
1886 	es_verts -= min_es_verts - 1;
1887 
1888 	uint32_t es_verts_per_subgroup = es_verts;
1889 	uint32_t gs_prims_per_subgroup = gs_prims;
1890 	uint32_t gs_inst_prims_in_subgroup = gs_prims * gs_num_invocations;
1891 	uint32_t max_prims_per_subgroup = gs_inst_prims_in_subgroup * gs_info->gs.vertices_out;
1892 	out->lds_size = align(esgs_lds_size, 128) / 128;
1893 	out->vgt_gs_onchip_cntl = S_028A44_ES_VERTS_PER_SUBGRP(es_verts_per_subgroup) |
1894 	                        S_028A44_GS_PRIMS_PER_SUBGRP(gs_prims_per_subgroup) |
1895 	                        S_028A44_GS_INST_PRIMS_IN_SUBGRP(gs_inst_prims_in_subgroup);
1896 	out->vgt_gs_max_prims_per_subgroup = S_028A94_MAX_PRIMS_PER_SUBGROUP(max_prims_per_subgroup);
1897 	out->vgt_esgs_ring_itemsize  = esgs_itemsize;
1898 	assert(max_prims_per_subgroup <= max_out_prims);
1899 }
1900 
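/* A subgroup holding max_esverts vertices fits at most
 * 1 + (max_esverts - min_verts_per_prim) primitives: the first primitive
 * needs min_verts_per_prim vertices and, even with perfect reuse, each
 * following one needs at least one new vertex. Adjacency primitives can
 * reuse only half as many vertices.
 */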
1901 static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
1902 				     unsigned min_verts_per_prim, bool use_adjacency)
1903 {
1904 	unsigned max_reuse = max_esverts - min_verts_per_prim;
1905 	if (use_adjacency)
1906 		max_reuse /= 2;
1907 	*max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
1908 }
1909 
1910 static unsigned
1911 radv_get_num_input_vertices(nir_shader **nir)
1912 {
1913 	if (nir[MESA_SHADER_GEOMETRY]) {
1914 		nir_shader *gs = nir[MESA_SHADER_GEOMETRY];
1915 
1916 		return gs->info.gs.vertices_in;
1917 	}
1918 
1919 	if (nir[MESA_SHADER_TESS_CTRL]) {
1920 		nir_shader *tes = nir[MESA_SHADER_TESS_EVAL];
1921 
1922 		if (tes->info.tess.point_mode)
1923 			return 1;
1924 		if (tes->info.tess.primitive_mode == GL_ISOLINES)
1925 			return 2;
1926 		return 3;
1927 	}
1928 
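	/* Without a GS or tessellation, a primitive has at most 3 vertices. */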
1929 	return 3;
1930 }
1931 
1932 static void
1933 gfx10_get_ngg_info(const struct radv_pipeline_key *key,
1934 		   struct radv_pipeline *pipeline,
1935 		   nir_shader **nir,
1936 		   struct radv_shader_info *infos,
1937 		   struct gfx10_ngg_info *ngg)
1938 {
1939 	struct radv_shader_info *gs_info = &infos[MESA_SHADER_GEOMETRY];
1940 	struct radv_es_output_info *es_info =
1941 		nir[MESA_SHADER_TESS_CTRL] ? &gs_info->tes.es_info : &gs_info->vs.es_info;
1942 	unsigned gs_type = nir[MESA_SHADER_GEOMETRY] ? MESA_SHADER_GEOMETRY : MESA_SHADER_VERTEX;
1943 	unsigned max_verts_per_prim = radv_get_num_input_vertices(nir);
1944 	unsigned min_verts_per_prim =
1945 		gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
1946 	unsigned gs_num_invocations = nir[MESA_SHADER_GEOMETRY] ? MAX2(gs_info->gs.invocations, 1) : 1;
1947 	bool uses_adjacency;
1948 	switch(key->topology) {
1949 	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
1950 	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
1951 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
1952 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
1953 		uses_adjacency = true;
1954 		break;
1955 	default:
1956 		uses_adjacency = false;
1957 		break;
1958 	}
1959 
1960 	/* All these are in dwords: */
1961 	/* We can't allow using the whole LDS, because GS waves compete with
1962 	 * other shader stages for LDS space.
1963 	 *
1964 	 * TODO: We should really take the shader's internal LDS use into
1965 	 *       account. The linker will fail if the size is greater than
1966 	 *       8K dwords.
1967 	 */
1968 	const unsigned max_lds_size = 8 * 1024 - 768;
1969 	const unsigned target_lds_size = max_lds_size;
1970 	unsigned esvert_lds_size = 0;
1971 	unsigned gsprim_lds_size = 0;
1972 
1973 	/* All these are per subgroup: */
1974 	const unsigned min_esverts = pipeline->device->physical_device->rad_info.chip_class >= GFX10_3 ? 29 : 24;
1975 	bool max_vert_out_per_gs_instance = false;
1976 	unsigned max_esverts_base = 256;
1977 	unsigned max_gsprims_base = 128; /* default prim group size clamp */
1978 
1979 	/* Hardware has the following non-natural restrictions on the value
1980 	 * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
1981 	 * the draw:
1982 	 *  - at most 252 for any line input primitive type
1983 	 *  - at most 251 for any quad input primitive type
1984 	 *  - at most 251 for triangle strips with adjacency (this happens to
1985 	 *    be the natural limit for triangle *lists* with adjacency)
1986 	 */
1987 	max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
1988 
1989 	if (gs_type == MESA_SHADER_GEOMETRY) {
1990 		unsigned max_out_verts_per_gsprim =
1991 			gs_info->gs.vertices_out * gs_num_invocations;
1992 
1993 		if (max_out_verts_per_gsprim <= 256) {
1994 			if (max_out_verts_per_gsprim) {
1995 				max_gsprims_base = MIN2(max_gsprims_base,
1996 							256 / max_out_verts_per_gsprim);
1997 			}
1998 		} else {
1999 			/* Use special multi-cycling mode in which each GS
2000 			 * instance gets its own subgroup. Does not work with
2001 			 * tessellation. */
2002 			max_vert_out_per_gs_instance = true;
2003 			max_gsprims_base = 1;
2004 			max_out_verts_per_gsprim = gs_info->gs.vertices_out;
2005 		}
2006 
2007 		esvert_lds_size = es_info->esgs_itemsize / 4;
2008 		gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
2009 	} else {
2010 		/* VS and TES. */
2011 		/* LDS size for passing data from GS to ES. */
2012 		struct radv_streamout_info *so_info = nir[MESA_SHADER_TESS_CTRL]
2013 			? &infos[MESA_SHADER_TESS_EVAL].so
2014 			: &infos[MESA_SHADER_VERTEX].so;
2015 
2016 		if (so_info->num_outputs)
2017 			esvert_lds_size = 4 * so_info->num_outputs + 1;
2018 
2019 		/* GS stores Primitive IDs (one DWORD) into LDS at the address
2020 		 * corresponding to the ES thread of the provoking vertex. All
2021 		 * ES threads load and export PrimitiveID for their thread.
2022 		 */
2023 		if (!nir[MESA_SHADER_TESS_CTRL] &&
2024 		    infos[MESA_SHADER_VERTEX].vs.outinfo.export_prim_id)
2025 			esvert_lds_size = MAX2(esvert_lds_size, 1);
2026 	}
2027 
2028 	unsigned max_gsprims = max_gsprims_base;
2029 	unsigned max_esverts = max_esverts_base;
2030 
2031 	if (esvert_lds_size)
2032 		max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
2033 	if (gsprim_lds_size)
2034 		max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
2035 
2036 	max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2037 	clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
2038 	assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2039 
2040 	if (esvert_lds_size || gsprim_lds_size) {
2041 		/* Now that we have a rough proportionality between esverts
2042 		 * and gsprims based on the primitive type, scale both of them
2043 		 * down simultaneously based on required LDS space.
2044 		 *
2045 		 * We could be smarter about this if we knew how much vertex
2046 		 * reuse to expect.
2047 		 */
2048 		unsigned lds_total = max_esverts * esvert_lds_size +
2049 				     max_gsprims * gsprim_lds_size;
2050 		if (lds_total > target_lds_size) {
2051 			max_esverts = max_esverts * target_lds_size / lds_total;
2052 			max_gsprims = max_gsprims * target_lds_size / lds_total;
2053 
2054 			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2055 			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
2056 						 min_verts_per_prim, uses_adjacency);
2057 			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2058 		}
2059 	}
2060 
2061 	/* Round up towards full wave sizes for better ALU utilization. */
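	/* The two limits depend on each other, so iterate until both values
	 * reach a fixed point.
	 */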
2062 	if (!max_vert_out_per_gs_instance) {
2063 		unsigned orig_max_esverts;
2064 		unsigned orig_max_gsprims;
2065 		unsigned wavesize;
2066 
2067 		if (gs_type == MESA_SHADER_GEOMETRY) {
2068 			wavesize = gs_info->wave_size;
2069 		} else {
2070 			wavesize = nir[MESA_SHADER_TESS_CTRL]
2071 				? infos[MESA_SHADER_TESS_EVAL].wave_size
2072 				: infos[MESA_SHADER_VERTEX].wave_size;
2073 		}
2074 
2075 		do {
2076 			orig_max_esverts = max_esverts;
2077 			orig_max_gsprims = max_gsprims;
2078 
2079 			max_esverts = align(max_esverts, wavesize);
2080 			max_esverts = MIN2(max_esverts, max_esverts_base);
2081 			if (esvert_lds_size)
2082 				max_esverts = MIN2(max_esverts,
2083 						   (max_lds_size - max_gsprims * gsprim_lds_size) /
2084 						   esvert_lds_size);
2085 			max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2086 			/* Hardware restriction: minimum value of max_esverts */
2087 			max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
2088 
2089 			max_gsprims = align(max_gsprims, wavesize);
2090 			max_gsprims = MIN2(max_gsprims, max_gsprims_base);
2091 			if (gsprim_lds_size) {
2092 				/* Don't count unusable vertices to the LDS
2093 				 * size. Those are vertices above the maximum
2094 				 * number of vertices that can occur in the
2095 				 * workgroup, which is e.g. max_gsprims * 3
2096 				 * for triangles.
2097 				 */
2098 				unsigned usable_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
2099 				max_gsprims =
2100 					MIN2(max_gsprims, (max_lds_size - usable_esverts * esvert_lds_size) / gsprim_lds_size);
2101 			}
2102 			clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
2103 						 min_verts_per_prim, uses_adjacency);
2104 			assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
2105 		} while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
2106 
2107 		/* Verify the restriction. */
2108 		assert(max_esverts >= min_esverts - 1 + max_verts_per_prim);
2109 	} else {
2110 		/* Hardware restriction: minimum value of max_esverts */
2111 		max_esverts = MAX2(max_esverts, min_esverts - 1 + max_verts_per_prim);
2112 	}
2113 
2114 	unsigned max_out_vertices =
2115 		max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
2116 		gs_type == MESA_SHADER_GEOMETRY ?
2117 		max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
2118 		max_esverts;
2119 	assert(max_out_vertices <= 256);
2120 
2121 	unsigned prim_amp_factor = 1;
2122 	if (gs_type == MESA_SHADER_GEOMETRY) {
2123 		/* Number of output primitives per GS input primitive after
2124 		 * GS instancing. */
2125 		prim_amp_factor = gs_info->gs.vertices_out;
2126 	}
2127 
2128 	/* The GE only checks against the maximum number of ES verts after
2129 	 * allocating a full GS primitive. So we need to ensure that whenever
2130 	 * this check passes, there is enough space for a full primitive without
2131 	 * vertex reuse.
2132 	 */
2133 	ngg->hw_max_esverts = max_esverts - max_verts_per_prim + 1;
2134 	ngg->max_gsprims = max_gsprims;
2135 	ngg->max_out_verts = max_out_vertices;
2136 	ngg->prim_amp_factor = prim_amp_factor;
2137 	ngg->max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
2138 	ngg->ngg_emit_size = max_gsprims * gsprim_lds_size;
2139 
2140 	/* Don't count unusable vertices. */
2141 	ngg->esgs_ring_size =
2142 		MIN2(max_esverts, max_gsprims * max_verts_per_prim) * esvert_lds_size * 4;
2143 
2144 	if (gs_type == MESA_SHADER_GEOMETRY) {
2145 		ngg->vgt_esgs_ring_itemsize = es_info->esgs_itemsize / 4;
2146 	} else {
2147 		ngg->vgt_esgs_ring_itemsize = 1;
2148 	}
2149 
2150 	pipeline->graphics.esgs_ring_size = ngg->esgs_ring_size;
2151 
2152 	assert(ngg->hw_max_esverts >= min_esverts); /* HW limitation */
2153 }
2154 
2155 static void
2156 radv_pipeline_init_gs_ring_state(struct radv_pipeline *pipeline,
2157 				 const struct gfx9_gs_info *gs)
2158 {
2159 	struct radv_device *device = pipeline->device;
2160 	unsigned num_se = device->physical_device->rad_info.max_se;
2161 	unsigned wave_size = 64;
2162 	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
2163 	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
2164 	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
2165 	 */
2166 	unsigned gs_vertex_reuse =
2167 		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
2168 	unsigned alignment = 256 * num_se;
2169 	/* The maximum size is 63.999 MB per SE. */
2170 	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
2171 	struct radv_shader_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
2172 
2173 	/* Calculate the minimum size. */
2174 	unsigned min_esgs_ring_size = align(gs->vgt_esgs_ring_itemsize * 4 * gs_vertex_reuse *
2175 					    wave_size, alignment);
2176 	/* These are recommended sizes, not minimum sizes. */
2177 	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
2178 		gs->vgt_esgs_ring_itemsize * 4 * gs_info->gs.vertices_in;
2179 	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
2180 		gs_info->gs.max_gsvs_emit_size;
2181 
2182 	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
2183 	esgs_ring_size = align(esgs_ring_size, alignment);
2184 	gsvs_ring_size = align(gsvs_ring_size, alignment);
2185 
2186 	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
2187 		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
2188 
2189 	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
2190 }
2191 
2192 struct radv_shader_variant *
2193 radv_get_shader(const struct radv_pipeline *pipeline,
2194 		gl_shader_stage stage)
2195 {
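	/* On GFX9+, VS and TES can be merged into the TCS or GS stage, so
	 * fall back to the merged variant when the stage has no shader of
	 * its own.
	 */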
2196 	if (stage == MESA_SHADER_VERTEX) {
2197 		if (pipeline->shaders[MESA_SHADER_VERTEX])
2198 			return pipeline->shaders[MESA_SHADER_VERTEX];
2199 		if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
2200 			return pipeline->shaders[MESA_SHADER_TESS_CTRL];
2201 		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
2202 			return pipeline->shaders[MESA_SHADER_GEOMETRY];
2203 	} else if (stage == MESA_SHADER_TESS_EVAL) {
2204 		if (!radv_pipeline_has_tess(pipeline))
2205 			return NULL;
2206 		if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
2207 			return pipeline->shaders[MESA_SHADER_TESS_EVAL];
2208 		if (pipeline->shaders[MESA_SHADER_GEOMETRY])
2209 			return pipeline->shaders[MESA_SHADER_GEOMETRY];
2210 	}
2211 	return pipeline->shaders[stage];
2212 }
2213 
2214 static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
2215 {
2216 	if (radv_pipeline_has_gs(pipeline)) {
2217 		if (radv_pipeline_has_ngg(pipeline))
2218 			return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.vs.outinfo;
2219 		else
2220 			return &pipeline->gs_copy_shader->info.vs.outinfo;
2221 	} else if (radv_pipeline_has_tess(pipeline))
2222 		return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.outinfo;
2223 	else
2224 		return &pipeline->shaders[MESA_SHADER_VERTEX]->info.vs.outinfo;
2225 }
2226 
2227 static void
2228 radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders,
2229 		  bool optimize_conservatively)
2230 {
2231 	nir_shader* ordered_shaders[MESA_SHADER_STAGES];
2232 	int shader_count = 0;
2233 
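	/* Gather the active stages in reverse pipeline order (last stage
	 * first).
	 */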
2234 	if(shaders[MESA_SHADER_FRAGMENT]) {
2235 		ordered_shaders[shader_count++] = shaders[MESA_SHADER_FRAGMENT];
2236 	}
2237 	if(shaders[MESA_SHADER_GEOMETRY]) {
2238 		ordered_shaders[shader_count++] = shaders[MESA_SHADER_GEOMETRY];
2239 	}
2240 	if(shaders[MESA_SHADER_TESS_EVAL]) {
2241 		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_EVAL];
2242 	}
2243 	if(shaders[MESA_SHADER_TESS_CTRL]) {
2244 		ordered_shaders[shader_count++] = shaders[MESA_SHADER_TESS_CTRL];
2245 	}
2246 	if(shaders[MESA_SHADER_VERTEX]) {
2247 		ordered_shaders[shader_count++] = shaders[MESA_SHADER_VERTEX];
2248 	}
2249 	if(shaders[MESA_SHADER_COMPUTE]) {
2250 		ordered_shaders[shader_count++] = shaders[MESA_SHADER_COMPUTE];
2251 	}
2252 
2253 	if (!optimize_conservatively && shader_count > 1) {
2254 		unsigned first = ordered_shaders[shader_count - 1]->info.stage;
2255 		unsigned last = ordered_shaders[0]->info.stage;
2256 
2257 		if (ordered_shaders[0]->info.stage == MESA_SHADER_FRAGMENT &&
2258 		    ordered_shaders[1]->info.has_transform_feedback_varyings)
2259 			nir_link_xfb_varyings(ordered_shaders[1], ordered_shaders[0]);
2260 
2261 		for (int i = 1; i < shader_count; ++i) {
2262 			nir_lower_io_arrays_to_elements(ordered_shaders[i],
2263 							ordered_shaders[i - 1]);
2264 		}
2265 
2266 		for (int i = 0; i < shader_count; ++i)  {
2267 			nir_variable_mode mask = 0;
2268 
2269 			if (ordered_shaders[i]->info.stage != first)
2270 				mask = mask | nir_var_shader_in;
2271 
2272 			if (ordered_shaders[i]->info.stage != last)
2273 				mask = mask | nir_var_shader_out;
2274 
2275 			if (nir_lower_io_to_scalar_early(ordered_shaders[i], mask)) {
2276 				/* Optimize the new vector code and then remove dead vars */
2277 				nir_copy_prop(ordered_shaders[i]);
2278 				nir_opt_shrink_vectors(ordered_shaders[i]);
2279 
2280 				if (ordered_shaders[i]->info.stage != last) {
2281 					/* Optimize swizzled movs of load_const for
2282 					 * nir_link_opt_varyings's constant propagation.
2283 					 */
2284 					nir_opt_constant_folding(ordered_shaders[i]);
2285 					/* For nir_link_opt_varyings's duplicate input opt. */
2286 					nir_opt_cse(ordered_shaders[i]);
2287 				}
2288 
2289 				/* Run copy-propagation to help remove dead
2290 				 * output variables (some shaders have useless
2291 				 * copies to/from an output), so compaction
2292 				 * later will be more effective.
2293 				 *
2294 				 * This will have been done earlier but it might
2295 				 * not have worked because the outputs were vector.
2296 				 */
2297 				if (ordered_shaders[i]->info.stage == MESA_SHADER_TESS_CTRL)
2298 					nir_opt_copy_prop_vars(ordered_shaders[i]);
2299 
2300 				nir_opt_dce(ordered_shaders[i]);
2301 				nir_remove_dead_variables(ordered_shaders[i],
2302 							  nir_var_function_temp | nir_var_shader_in | nir_var_shader_out, NULL);
2303 			}
2304 		}
2305 	}
2306 
2307 	for (int i = 1; !optimize_conservatively && (i < shader_count); ++i)  {
2308 		if (nir_link_opt_varyings(ordered_shaders[i], ordered_shaders[i - 1])) {
2309 			nir_opt_constant_folding(ordered_shaders[i - 1]);
2310 			nir_opt_algebraic(ordered_shaders[i - 1]);
2311 			nir_opt_dce(ordered_shaders[i - 1]);
2312 		}
2313 
2314 		nir_remove_dead_variables(ordered_shaders[i],
2315 					  nir_var_shader_out, NULL);
2316 		nir_remove_dead_variables(ordered_shaders[i - 1],
2317 					  nir_var_shader_in, NULL);
2318 
2319 		bool progress = nir_remove_unused_varyings(ordered_shaders[i],
2320 							   ordered_shaders[i - 1]);
2321 
2322 		nir_compact_varyings(ordered_shaders[i],
2323 				     ordered_shaders[i - 1], true);
2324 
2325 		if (progress) {
2326 			if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
2327 				ac_lower_indirect_derefs(ordered_shaders[i],
2328 				                         pipeline->device->physical_device->rad_info.chip_class);
2329 				/* remove dead writes, which can remove input loads */
2330 				nir_lower_vars_to_ssa(ordered_shaders[i]);
2331 				nir_opt_dce(ordered_shaders[i]);
2332 			}
2333 
2334 			if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
2335 				ac_lower_indirect_derefs(ordered_shaders[i - 1],
2336 				                         pipeline->device->physical_device->rad_info.chip_class);
2337 			}
2338 		}
2339 	}
2340 }
2341 
2342 static void
2343 radv_set_driver_locations(struct radv_pipeline *pipeline, nir_shader **shaders,
2344                               struct radv_shader_info infos[MESA_SHADER_STAGES])
2345 {
2346 	if (shaders[MESA_SHADER_FRAGMENT]) {
2347 		nir_foreach_shader_out_variable(var, shaders[MESA_SHADER_FRAGMENT])
2348 		{
2349 			var->data.driver_location = var->data.location + var->data.index;
2350 		}
2351 	}
2352 
2353 	if (!shaders[MESA_SHADER_VERTEX])
2354 		return;
2355 
2356 	bool has_tess = shaders[MESA_SHADER_TESS_CTRL];
2357 	bool has_gs = shaders[MESA_SHADER_GEOMETRY];
2358 	unsigned vs_info_idx = MESA_SHADER_VERTEX;
2359 	unsigned tes_info_idx = MESA_SHADER_TESS_EVAL;
2360 	unsigned last_vtg_stage = MESA_SHADER_VERTEX;
2361 
2362 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
2363 		/* These are merged into the next stage */
2364 		vs_info_idx = has_tess ? MESA_SHADER_TESS_CTRL : MESA_SHADER_GEOMETRY;
2365 		tes_info_idx = has_gs ? MESA_SHADER_GEOMETRY : MESA_SHADER_TESS_EVAL;
2366 	}
2367 
2368 	nir_foreach_shader_in_variable(var, shaders[MESA_SHADER_VERTEX]) {
2369 		var->data.driver_location = var->data.location;
2370 	}
2371 
2372 	if (has_tess) {
2373 		nir_linked_io_var_info vs2tcs =
2374 			nir_assign_linked_io_var_locations(shaders[MESA_SHADER_VERTEX], shaders[MESA_SHADER_TESS_CTRL]);
2375 		nir_linked_io_var_info tcs2tes =
2376 			nir_assign_linked_io_var_locations(shaders[MESA_SHADER_TESS_CTRL], shaders[MESA_SHADER_TESS_EVAL]);
2377 
2378 		infos[vs_info_idx].vs.num_linked_outputs = vs2tcs.num_linked_io_vars;
2379 		infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_inputs = vs2tcs.num_linked_io_vars;
2380 		infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_outputs = tcs2tes.num_linked_io_vars;
2381 		infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_patch_outputs = tcs2tes.num_linked_patch_io_vars;
2382 		infos[tes_info_idx].tes.num_linked_inputs = tcs2tes.num_linked_io_vars;
2383 		infos[tes_info_idx].tes.num_linked_patch_inputs = tcs2tes.num_linked_patch_io_vars;
2384 
2385 		if (has_gs) {
2386 			nir_linked_io_var_info tes2gs =
2387 				nir_assign_linked_io_var_locations(shaders[MESA_SHADER_TESS_EVAL], shaders[MESA_SHADER_GEOMETRY]);
2388 
2389 			infos[tes_info_idx].tes.num_linked_outputs = tes2gs.num_linked_io_vars;
2390 			infos[MESA_SHADER_GEOMETRY].gs.num_linked_inputs = tes2gs.num_linked_io_vars;
2391 			last_vtg_stage = MESA_SHADER_GEOMETRY;
2392 		} else {
2393 			last_vtg_stage = MESA_SHADER_TESS_EVAL;
2394 		}
2395 	} else if (has_gs) {
2396 		nir_linked_io_var_info vs2gs =
2397 			nir_assign_linked_io_var_locations(shaders[MESA_SHADER_VERTEX], shaders[MESA_SHADER_GEOMETRY]);
2398 
2399 		infos[vs_info_idx].vs.num_linked_outputs = vs2gs.num_linked_io_vars;
2400 		infos[MESA_SHADER_GEOMETRY].gs.num_linked_inputs = vs2gs.num_linked_io_vars;
2401 		last_vtg_stage = MESA_SHADER_GEOMETRY;
2402 	}
2403 
2404 	nir_foreach_shader_out_variable(var, shaders[last_vtg_stage]) {
2405 		var->data.driver_location = var->data.location;
2406 	}
2407 }
2408 
2409 static uint32_t
2410 radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
2411 		       uint32_t attrib_binding)
2412 {
2413 	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
2414 		const VkVertexInputBindingDescription *input_binding =
2415 			&input_state->pVertexBindingDescriptions[i];
2416 
2417 		if (input_binding->binding == attrib_binding)
2418 			return input_binding->stride;
2419 	}
2420 
2421 	return 0;
2422 }
2423 
2424 static struct radv_pipeline_key
2425 radv_generate_graphics_pipeline_key(const struct radv_pipeline *pipeline,
2426                                     const VkGraphicsPipelineCreateInfo *pCreateInfo,
2427                                     const struct radv_blend_state *blend)
2428 {
2429 	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
2430 	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
2431 	const VkPipelineVertexInputStateCreateInfo *input_state =
2432 	                                         pCreateInfo->pVertexInputState;
2433 	const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
2434 		vk_find_struct_const(input_state->pNext, PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
2435 	bool uses_dynamic_stride = false;
2436 
2437 	struct radv_pipeline_key key;
2438 	memset(&key, 0, sizeof(key));
2439 
2440 	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
2441 		key.optimisations_disabled = 1;
2442 
2443 	key.has_multiview_view_index = !!subpass->view_mask;
2444 
2445 	uint32_t binding_input_rate = 0;
2446 	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
2447 	for (unsigned i = 0; i < input_state->vertexBindingDescriptionCount; ++i) {
2448 		if (input_state->pVertexBindingDescriptions[i].inputRate) {
2449 			unsigned binding = input_state->pVertexBindingDescriptions[i].binding;
2450 			binding_input_rate |= 1u << binding;
2451 			instance_rate_divisors[binding] = 1;
2452 		}
2453 	}
2454 	if (divisor_state) {
2455 		for (unsigned i = 0; i < divisor_state->vertexBindingDivisorCount; ++i) {
2456 			instance_rate_divisors[divisor_state->pVertexBindingDivisors[i].binding] =
2457 				divisor_state->pVertexBindingDivisors[i].divisor;
2458 		}
2459 	}
2460 
2461 	if (pCreateInfo->pDynamicState) {
2462 		uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
2463 		for (uint32_t i = 0; i < count; i++) {
2464 			if (pCreateInfo->pDynamicState->pDynamicStates[i] == VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) {
2465 				uses_dynamic_stride = true;
2466 				break;
2467 			}
2468 		}
2469 	}
2470 
2471 	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
2472 		const VkVertexInputAttributeDescription *desc =
2473 			&input_state->pVertexAttributeDescriptions[i];
2474 		const struct vk_format_description *format_desc;
2475 		unsigned location = desc->location;
2476 		unsigned binding = desc->binding;
2477 		unsigned num_format, data_format;
2478 		int first_non_void;
2479 
2480 		if (binding_input_rate & (1u << binding)) {
2481 			key.instance_rate_inputs |= 1u << location;
2482 			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
2483 		}
2484 
2485 		format_desc = vk_format_description(desc->format);
2486 		first_non_void = vk_format_get_first_non_void_channel(desc->format);
2487 
2488 		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
2489 		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);
2490 
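		/* Pack the 4-bit buffer data format in the low nibble and the
		 * numeric format in the high nibble.
		 */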
2491 		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
2492 		key.vertex_attribute_bindings[location] = desc->binding;
2493 		key.vertex_attribute_offsets[location] = desc->offset;
2494 
2495 		if (!uses_dynamic_stride) {
2496 			/* From the Vulkan spec 1.2.157:
2497 			 *
2498 			 * "If the bound pipeline state object was created
2499 			 *  with the
2500 			 *  VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT
2501 			 *  dynamic state enabled then pStrides[i] specifies
2502 			 *  the distance in bytes between two consecutive
2503 			 *  elements within the corresponding buffer. In this
2504 			 *  case the VkVertexInputBindingDescription::stride
2505 			 *  state from the pipeline state object is ignored."
2506 			 *
2507 			 * Make sure the vertex attribute stride is zero to
2508 			 * avoid computing a wrong offset if it's initialized
2509 			 * to something else than zero.
2510 			 */
2511 			key.vertex_attribute_strides[location] =
2512 				radv_get_attrib_stride(input_state, desc->binding);
2513 		}
2514 
2515 		enum ac_fetch_format adjust = AC_FETCH_FORMAT_NONE;
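		/* GFX8 and older (except Stoney) cannot correctly fetch the
		 * signed alpha channel of 2_10_10_10 vertex formats, so record
		 * a fix-up for the shader to apply after the fetch.
		 */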
2516 		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
2517 		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
2518 			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
2519 			switch(format) {
2520 			case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
2521 			case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
2522 				adjust = AC_FETCH_FORMAT_SNORM;
2523 				break;
2524 			case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
2525 			case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
2526 				adjust = AC_FETCH_FORMAT_SSCALED;
2527 				break;
2528 			case VK_FORMAT_A2R10G10B10_SINT_PACK32:
2529 			case VK_FORMAT_A2B10G10R10_SINT_PACK32:
2530 				adjust = AC_FETCH_FORMAT_SINT;
2531 				break;
2532 			default:
2533 				break;
2534 			}
2535 		}
2536 		key.vertex_alpha_adjust[location] = adjust;
2537 
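		/* BGRA-style formats are fetched as RGBA and need their
		 * components re-swizzled in the shader ("post shuffle").
		 */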
2538 		switch (desc->format) {
2539 		case VK_FORMAT_B8G8R8A8_UNORM:
2540 		case VK_FORMAT_B8G8R8A8_SNORM:
2541 		case VK_FORMAT_B8G8R8A8_USCALED:
2542 		case VK_FORMAT_B8G8R8A8_SSCALED:
2543 		case VK_FORMAT_B8G8R8A8_UINT:
2544 		case VK_FORMAT_B8G8R8A8_SINT:
2545 		case VK_FORMAT_B8G8R8A8_SRGB:
2546 		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
2547 		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
2548 		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
2549 		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
2550 		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
2551 		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
2552 			key.vertex_post_shuffle |= 1 << location;
2553 			break;
2554 		default:
2555 			break;
2556 		}
2557 	}
2558 
2559 	const VkPipelineTessellationStateCreateInfo *tess =
2560 		radv_pipeline_get_tessellation_state(pCreateInfo);
2561 	if (tess)
2562 		key.tess_input_vertices = tess->patchControlPoints;
2563 
2564 	const VkPipelineMultisampleStateCreateInfo *vkms =
2565 		radv_pipeline_get_multisample_state(pCreateInfo);
2566 	if (vkms && vkms->rasterizationSamples > 1) {
2567 		uint32_t num_samples = vkms->rasterizationSamples;
2568 		uint32_t ps_iter_samples = radv_pipeline_get_ps_iter_samples(pCreateInfo);
2569 		key.num_samples = num_samples;
2570 		key.log2_ps_iter_samples = util_logbase2(ps_iter_samples);
2571 	}
2572 
2573 	key.col_format = blend->spi_shader_col_format;
2574 	key.is_dual_src = blend->mrt0_is_dual_src;
2575 	if (pipeline->device->physical_device->rad_info.chip_class < GFX8) {
2576 		key.is_int8 = blend->col_format_is_int8;
2577 		key.is_int10 = blend->col_format_is_int10;
2578 	}
2579 
2580 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10)
2581 		key.topology = pCreateInfo->pInputAssemblyState->topology;
2582 
2583 	return key;
2584 }
2585 
2586 static bool
2587 radv_nir_stage_uses_xfb(const nir_shader *nir)
2588 {
2589 	nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
2590 	bool uses_xfb = !!xfb;
2591 
2592 	ralloc_free(xfb);
2593 	return uses_xfb;
2594 }
2595 
2596 static void
2597 radv_fill_shader_keys(struct radv_device *device,
2598 		      struct radv_shader_variant_key *keys,
2599                       const struct radv_pipeline_key *key,
2600                       nir_shader **nir)
2601 {
2602 	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
2603 	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
2604 	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
2605 		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
2606 		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
2607 		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
2608 		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
2609 		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
2610 		keys[MESA_SHADER_VERTEX].vs.alpha_adjust[i] = key->vertex_alpha_adjust[i];
2611 	}
2612 	keys[MESA_SHADER_VERTEX].vs.outprim = si_conv_prim_to_gs_out(key->topology);
2613 
2614 	if (nir[MESA_SHADER_TESS_CTRL]) {
2615 		keys[MESA_SHADER_VERTEX].vs_common_out.as_ls = true;
2616 		keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
2617 		keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;
2618 
2619 		keys[MESA_SHADER_TESS_CTRL].tcs.tes_reads_tess_factors = !!(nir[MESA_SHADER_TESS_EVAL]->info.inputs_read & (VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER));
2620 	}
2621 
2622 	if (nir[MESA_SHADER_GEOMETRY]) {
2623 		if (nir[MESA_SHADER_TESS_CTRL])
2624 			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_es = true;
2625 		else
2626 			keys[MESA_SHADER_VERTEX].vs_common_out.as_es = true;
2627 	}
2628 
2629 	if (device->physical_device->use_ngg) {
2630 		if (nir[MESA_SHADER_TESS_CTRL]) {
2631 			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = true;
2632 		} else {
2633 			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = true;
2634 		}
2635 
2636 		if (nir[MESA_SHADER_TESS_CTRL] &&
2637 		    nir[MESA_SHADER_GEOMETRY] &&
2638 		    nir[MESA_SHADER_GEOMETRY]->info.gs.invocations *
2639 		    nir[MESA_SHADER_GEOMETRY]->info.gs.vertices_out > 256) {
2640 			/* Fallback to the legacy path if tessellation is
2641 			 * enabled with extreme geometry because
2642 			 * EN_MAX_VERT_OUT_PER_GS_INSTANCE doesn't work and it
2643 			 * might hang.
2644 			 */
2645 			keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
2646 		}
2647 
2648 		gl_shader_stage last_xfb_stage = MESA_SHADER_VERTEX;
2649 
2650 		for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
2651 			if (nir[i])
2652 				last_xfb_stage = i;
2653 		}
2654 
2655 		bool uses_xfb = nir[last_xfb_stage] &&
2656 				radv_nir_stage_uses_xfb(nir[last_xfb_stage]);
2657 
2658 		if (!device->physical_device->use_ngg_streamout && uses_xfb) {
2659 			if (nir[MESA_SHADER_TESS_CTRL])
2660 				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
2661 			else
2662 				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
2663 		}
2664 
2665 		/* Determine if the pipeline is eligible for the NGG passthrough
2666 		 * mode. It can't be enabled for geometry shaders, for NGG
2667 		 * streamout or for vertex shaders that export the primitive ID
2668 		 * (this is checked later because we don't have the info here.)
2669 		 */
2670 		if (!nir[MESA_SHADER_GEOMETRY] && !uses_xfb) {
2671 			if (nir[MESA_SHADER_TESS_CTRL] &&
2672 			    keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg) {
2673 				keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg_passthrough = true;
2674 			} else if (nir[MESA_SHADER_VERTEX] &&
2675 				   keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg) {
2676 				keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg_passthrough = true;
2677 			}
2678 		}
2679 	}
2680 
2681 	for(int i = 0; i < MESA_SHADER_STAGES; ++i)
2682 		keys[i].has_multiview_view_index = key->has_multiview_view_index;
2683 
2684 	keys[MESA_SHADER_FRAGMENT].fs.col_format = key->col_format;
2685 	keys[MESA_SHADER_FRAGMENT].fs.is_int8 = key->is_int8;
2686 	keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
2687 	keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
2688 	keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;
2689 	keys[MESA_SHADER_FRAGMENT].fs.is_dual_src = key->is_dual_src;
2690 
2691 	if (nir[MESA_SHADER_COMPUTE]) {
2692 		keys[MESA_SHADER_COMPUTE].cs.subgroup_size = key->compute_subgroup_size;
2693 	}
2694 }
2695 
2696 static uint8_t
2697 radv_get_wave_size(struct radv_device *device,
2698 		   const VkPipelineShaderStageCreateInfo *pStage,
2699 		   gl_shader_stage stage,
2700 		   const struct radv_shader_variant_key *key)
2701 {
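	/* The legacy (non-NGG) GS path always runs at Wave64; every other
	 * stage uses the per-stage wave size selected at device creation.
	 */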
2702 	if (stage == MESA_SHADER_GEOMETRY && !key->vs_common_out.as_ngg)
2703 		return 64;
2704 	else if (stage == MESA_SHADER_COMPUTE) {
2705 		if (key->cs.subgroup_size) {
2706 			/* Return the required subgroup size if specified. */
2707 			return key->cs.subgroup_size;
2708 		}
2709 		return device->physical_device->cs_wave_size;
2710 	}
2711 	else if (stage == MESA_SHADER_FRAGMENT)
2712 		return device->physical_device->ps_wave_size;
2713 	else
2714 		return device->physical_device->ge_wave_size;
2715 }
2716 
2717 static uint8_t
2718 radv_get_ballot_bit_size(struct radv_device *device,
2719 			 const VkPipelineShaderStageCreateInfo *pStage,
2720 			 gl_shader_stage stage,
2721 			 const struct radv_shader_variant_key *key)
2722 {
2723 	if (stage == MESA_SHADER_COMPUTE && key->cs.subgroup_size)
2724 		return key->cs.subgroup_size;
2725 	return 64;
2726 }
2727 
2728 static void
2729 radv_fill_shader_info(struct radv_pipeline *pipeline,
2730 		      const VkPipelineShaderStageCreateInfo **pStages,
2731 		      struct radv_shader_variant_key *keys,
2732                       struct radv_shader_info *infos,
2733                       nir_shader **nir)
2734 {
2735 	unsigned active_stages = 0;
2736 	unsigned filled_stages = 0;
2737 
2738 	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
2739 		if (nir[i])
2740 			active_stages |= (1 << i);
2741 	}
2742 
2743 	if (nir[MESA_SHADER_FRAGMENT]) {
2744 		radv_nir_shader_info_init(&infos[MESA_SHADER_FRAGMENT]);
2745 		radv_nir_shader_info_pass(nir[MESA_SHADER_FRAGMENT],
2746 					  pipeline->layout,
2747 					  &keys[MESA_SHADER_FRAGMENT],
2748 					  &infos[MESA_SHADER_FRAGMENT]);
2749 
2750 		/* TODO: These are no longer used as keys; we should refactor this. */
2751 		keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
2752 		        infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
2753 		keys[MESA_SHADER_VERTEX].vs_common_out.export_layer_id =
2754 		        infos[MESA_SHADER_FRAGMENT].ps.layer_input;
2755 		keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
2756 		        !!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
2757 		keys[MESA_SHADER_VERTEX].vs_common_out.export_viewport_index =
2758 		        infos[MESA_SHADER_FRAGMENT].ps.viewport_index_input;
2759 		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
2760 		        infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
2761 		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
2762 		        infos[MESA_SHADER_FRAGMENT].ps.layer_input;
2763 		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
2764 		        !!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
2765 		keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_viewport_index =
2766 		        infos[MESA_SHADER_FRAGMENT].ps.viewport_index_input;
2767 
2768 		/* NGG passthrough mode can't be enabled for vertex shaders
2769 		 * that export the primitive ID.
2770 		 *
2771 		 * TODO: I should really refactor the keys logic.
2772 		 */
2773 		if (nir[MESA_SHADER_VERTEX] &&
2774 		    keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id) {
2775 			keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg_passthrough = false;
2776 		}
2777 
2778 		filled_stages |= (1 << MESA_SHADER_FRAGMENT);
2779 	}
2780 
2781 	if (nir[MESA_SHADER_TESS_CTRL]) {
2782 		infos[MESA_SHADER_TESS_CTRL].tcs.tes_inputs_read =
2783 			nir[MESA_SHADER_TESS_EVAL]->info.inputs_read;
2784 		infos[MESA_SHADER_TESS_CTRL].tcs.tes_patch_inputs_read =
2785 			nir[MESA_SHADER_TESS_EVAL]->info.patch_inputs_read;
2786 	}
2787 
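	/* On GFX9+ the VS and TCS are merged into a single HW stage (HS), so
	 * run the info pass over both NIR shaders and accumulate the result
	 * into the TCS info.
	 */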
2788 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
2789 	    nir[MESA_SHADER_TESS_CTRL]) {
2790 		struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
2791 		struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
2792 		key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;
2793 
2794 		radv_nir_shader_info_init(&infos[MESA_SHADER_TESS_CTRL]);
2795 
2796 		for (int i = 0; i < 2; i++) {
2797 			radv_nir_shader_info_pass(combined_nir[i],
2798 						  pipeline->layout, &key,
2799 						  &infos[MESA_SHADER_TESS_CTRL]);
2800 		}
2801 
2802 		keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
2803 			infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
2804 
2805 		filled_stages |= (1 << MESA_SHADER_VERTEX);
2806 		filled_stages |= (1 << MESA_SHADER_TESS_CTRL);
2807 	}
2808 
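	/* Likewise, on GFX9+ the ES stage (VS or TES) is merged with the GS. */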
2809 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
2810 	    nir[MESA_SHADER_GEOMETRY]) {
2811 		gl_shader_stage pre_stage = nir[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
2812 		struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};
2813 
2814 		radv_nir_shader_info_init(&infos[MESA_SHADER_GEOMETRY]);
2815 
2816 		for (int i = 0; i < 2; i++) {
2817 			radv_nir_shader_info_pass(combined_nir[i],
2818 						  pipeline->layout,
2819 						  &keys[pre_stage],
2820 						  &infos[MESA_SHADER_GEOMETRY]);
2821 		}
2822 
2823 		filled_stages |= (1 << pre_stage);
2824 		filled_stages |= (1 << MESA_SHADER_GEOMETRY);
2825 	}
2826 
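	/* Clear the stages handled above; whatever remains gets a standalone
	 * info pass, one stage at a time.
	 */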
2827 	active_stages ^= filled_stages;
2828 	while (active_stages) {
2829 		int i = u_bit_scan(&active_stages);
2830 
2831 		if (i == MESA_SHADER_TESS_EVAL) {
2832 			keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
2833 				infos[MESA_SHADER_TESS_CTRL].tcs.num_patches;
2834 		}
2835 
2836 		radv_nir_shader_info_init(&infos[i]);
2837 		radv_nir_shader_info_pass(nir[i], pipeline->layout,
2838 					  &keys[i], &infos[i]);
2839 	}
2840 
2841 	for (int i = 0; i < MESA_SHADER_STAGES; i++) {
2842 		if (nir[i]) {
2843 			infos[i].wave_size =
2844 				radv_get_wave_size(pipeline->device, pStages[i],
2845 						   i, &keys[i]);
2846 			infos[i].ballot_bit_size =
2847 				radv_get_ballot_bit_size(pipeline->device,
2848 							 pStages[i], i,
2849 							 &keys[i]);
2850 		}
2851 	}
2852 }
2853 
2854 static void
2855 merge_tess_info(struct shader_info *tes_info,
2856                 const struct shader_info *tcs_info)
2857 {
2858 	/* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
2859 	 *
2860 	 *    "PointMode. Controls generation of points rather than triangles
2861 	 *     or lines. This functionality defaults to disabled, and is
2862 	 *     enabled if either shader stage includes the execution mode."
2863 	 *
2864 	 * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
2865 	 * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
2866 	 * and OutputVertices, it says:
2867 	 *
2868 	 *    "One mode must be set in at least one of the tessellation
2869 	 *     shader stages."
2870 	 *
2871 	 * So, the fields can be set in either the TCS or TES, but they must
2872 	 * agree if set in both.  Our backend looks at TES, so bitwise-or in
2873 	 * the values from the TCS.
2874 	 */
2875 	assert(tcs_info->tess.tcs_vertices_out == 0 ||
2876 	       tes_info->tess.tcs_vertices_out == 0 ||
2877 	       tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
2878 	tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
2879 
2880 	assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
2881 	       tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
2882 	       tcs_info->tess.spacing == tes_info->tess.spacing);
2883 	tes_info->tess.spacing |= tcs_info->tess.spacing;
2884 
2885 	assert(tcs_info->tess.primitive_mode == 0 ||
2886 	       tes_info->tess.primitive_mode == 0 ||
2887 	       tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
2888 	tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
2889 	tes_info->tess.ccw |= tcs_info->tess.ccw;
2890 	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
2891 }
2892 
2893 static
2894 void radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
2895 {
2896 	if (!ext)
2897 		return;
2898 
2899 	if (ext->pPipelineCreationFeedback) {
2900 		ext->pPipelineCreationFeedback->flags = 0;
2901 		ext->pPipelineCreationFeedback->duration = 0;
2902 	}
2903 
2904 	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
2905 		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
2906 		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
2907 	}
2908 }
2909 
2910 static
2911 void radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
2912 {
2913 	if (!feedback)
2914 		return;
2915 
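	/* The duration accumulates as (stop - start): subtract the current
	 * time here and add it back in radv_stop_feedback().
	 */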
2916 	feedback->duration -= radv_get_current_time();
2917 	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
2918 }
2919 
2920 static
2921 void radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
2922 {
2923 	if (!feedback)
2924 		return;
2925 
2926 	feedback->duration += radv_get_current_time();
2927 	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
2928 	                   (cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
2929 }
2930 
2931 static bool
2932 mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
2933                        unsigned bit_size,
2934                        unsigned num_components,
2935                        nir_intrinsic_instr *low, nir_intrinsic_instr *high)
2936 {
2937 	if (num_components > 4)
2938 		return false;
2939 
2940 	/* >128 bit loads are split except with SMEM */
2941 	if (bit_size * num_components > 128)
2942 		return false;
2943 
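	/* Derive the effective alignment from the lowest set bit of the
	 * offset, e.g. align_mul=16 with align_offset=4 means the access is
	 * only guaranteed to be 4-byte aligned (1 << (ffs(4) - 1) == 4).
	 */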
2944 	uint32_t align;
2945 	if (align_offset)
2946 		align = 1 << (ffs(align_offset) - 1);
2947 	else
2948 		align = align_mul;
2949 
2950 	switch (low->intrinsic) {
2951 	case nir_intrinsic_load_global:
2952 	case nir_intrinsic_store_global:
2953 	case nir_intrinsic_store_ssbo:
2954 	case nir_intrinsic_load_ssbo:
2955 	case nir_intrinsic_load_ubo:
2956 	case nir_intrinsic_load_push_constant:
2957 		return align % (bit_size == 8 ? 2 : 4) == 0;
2958 	case nir_intrinsic_load_deref:
2959 	case nir_intrinsic_store_deref:
2960 		assert(nir_deref_mode_is(nir_src_as_deref(low->src[0]),
2961 		                         nir_var_mem_shared));
2962 		/* fallthrough */
2963 	case nir_intrinsic_load_shared:
2964 	case nir_intrinsic_store_shared:
2965 		if (bit_size * num_components > 64) /* 96 and 128 bit loads require 128 bit alignment and are split otherwise */
2966 			return align % 16 == 0;
2967 		else
2968 			return align % (bit_size == 8 ? 2 : 4) == 0;
2969 	default:
2970 		return false;
2971 	}
2972 	return false;
2973 }
2974 
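/* Callback for nir_lower_bit_size(): returns the bit size an ALU
 * instruction should be lowered to, or 0 to leave it unchanged. The
 * 8/16-bit cases below are widened to 32 bits (presumably where native
 * sub-dword support is missing or slow on the listed chips).
 */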
2975 static unsigned
2976 lower_bit_size_callback(const nir_instr *instr, void *_)
2977 {
2978 	struct radv_device *device = _;
2979 	enum chip_class chip = device->physical_device->rad_info.chip_class;
2980 
2981 	if (instr->type != nir_instr_type_alu)
2982 		return 0;
2983 	nir_alu_instr *alu = nir_instr_as_alu(instr);
2984 
2985 	if (alu->dest.dest.ssa.bit_size & (8 | 16)) {
2986 		unsigned bit_size = alu->dest.dest.ssa.bit_size;
2987 		switch (alu->op) {
2988 		case nir_op_iabs:
2989 		case nir_op_bitfield_select:
2990 		case nir_op_udiv:
2991 		case nir_op_idiv:
2992 		case nir_op_umod:
2993 		case nir_op_imod:
2994 		case nir_op_imul_high:
2995 		case nir_op_umul_high:
2996 		case nir_op_ineg:
2997 		case nir_op_irem:
2998 		case nir_op_isign:
2999 			return 32;
3000 		case nir_op_imax:
3001 		case nir_op_umax:
3002 		case nir_op_imin:
3003 		case nir_op_umin:
3004 		case nir_op_ishr:
3005 		case nir_op_ushr:
3006 		case nir_op_ishl:
3007 		case nir_op_uadd_sat:
3008 			return (bit_size == 8 ||
3009 			        !(chip >= GFX8 && nir_dest_is_divergent(alu->dest.dest))) ? 32 : 0;
3010 		default:
3011 			return 0;
3012 		}
3013 	}
3014 
3015 	if (nir_src_bit_size(alu->src[0].src) & (8 | 16)) {
3016 		unsigned bit_size = nir_src_bit_size(alu->src[0].src);
3017 		switch (alu->op) {
3018 		case nir_op_bit_count:
3019 		case nir_op_find_lsb:
3020 		case nir_op_ufind_msb:
3021 		case nir_op_i2b1:
3022 			return 32;
3023 		case nir_op_ilt:
3024 		case nir_op_ige:
3025 		case nir_op_ieq:
3026 		case nir_op_ine:
3027 		case nir_op_ult:
3028 		case nir_op_uge:
3029 			return (bit_size == 8 ||
3030 			        !(chip >= GFX8 && nir_dest_is_divergent(alu->dest.dest))) ? 32 : 0;
3031 		default:
3032 			return 0;
3033 		}
3034 	}
3035 
3036 	return 0;
3037 }
3038 
3039 VkResult radv_create_shaders(struct radv_pipeline *pipeline,
3040                              struct radv_device *device,
3041                              struct radv_pipeline_cache *cache,
3042                              const struct radv_pipeline_key *key,
3043                              const VkPipelineShaderStageCreateInfo **pStages,
3044                              const VkPipelineCreateFlags flags,
3045                              VkPipelineCreationFeedbackEXT *pipeline_feedback,
3046                              VkPipelineCreationFeedbackEXT **stage_feedbacks)
3047 {
3048 	struct radv_shader_module fs_m = {0};
3049 	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
3050 	nir_shader *nir[MESA_SHADER_STAGES] = {0};
3051 	struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
3052 	struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
3053 	struct radv_shader_info infos[MESA_SHADER_STAGES] = {0};
3054 	unsigned char hash[20], gs_copy_hash[20];
3055 	bool keep_executable_info = (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || device->keep_shader_info;
3056 	bool keep_statistic_info = (flags & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR) ||
3057 	                           (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) ||
3058 	                           device->keep_shader_info;
3059 	bool disable_optimizations = flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
3060 
3061 	radv_start_feedback(pipeline_feedback);
3062 
3063 	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
3064 		if (pStages[i]) {
3065 			modules[i] = radv_shader_module_from_handle(pStages[i]->module);
3066 			if (modules[i]->nir)
3067 				_mesa_sha1_compute(modules[i]->nir->info.name,
3068 				                   strlen(modules[i]->nir->info.name),
3069 				                   modules[i]->sha1);
3070 
3071 			pipeline->active_stages |= mesa_to_vk_shader_stage(i);
3072 		}
3073 	}
3074 
3075 	radv_hash_shaders(hash, pStages, pipeline->layout, key, get_hash_flags(device));
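	/* The GS copy shader is cached under a key derived from the pipeline
	 * hash: flipping one byte below yields a distinct but reproducible
	 * cache entry for it.
	 */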
3076 	memcpy(gs_copy_hash, hash, 20);
3077 	gs_copy_hash[0] ^= 1;
3078 
3079 	bool found_in_application_cache = true;
3080 	if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info && !keep_statistic_info) {
3081 		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
3082 		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
3083 		                                                &found_in_application_cache);
3084 		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
3085 	}
3086 
3087 	if (!keep_executable_info && !keep_statistic_info &&
3088 	    radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
3089 	                                                    &found_in_application_cache) &&
3090 	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
3091 		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
3092 		return VK_SUCCESS;
3093 	}
3094 
3095 	if (flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT) {
3096 		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
3097 		return VK_PIPELINE_COMPILE_REQUIRED_EXT;
3098 	}
3099 
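	/* Graphics pipelines created without a fragment shader get a no-op
	 * placeholder FS, so that the rest of the compile path can assume
	 * one exists.
	 */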
3100 	if (!modules[MESA_SHADER_FRAGMENT] && !modules[MESA_SHADER_COMPUTE]) {
3101 		nir_builder fs_b;
3102 		nir_builder_init_simple_shader(&fs_b, NULL, MESA_SHADER_FRAGMENT, NULL);
3103 		fs_b.shader->info.name = ralloc_strdup(fs_b.shader, "noop_fs");
3104 		fs_m.nir = fs_b.shader;
3105 		modules[MESA_SHADER_FRAGMENT] = &fs_m;
3106 	}
3107 
3108 	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
3109 		const VkPipelineShaderStageCreateInfo *stage = pStages[i];
3110 		unsigned subgroup_size = 64, ballot_bit_size = 64;
3111 
3112 		if (!modules[i])
3113 			continue;
3114 
3115 		radv_start_feedback(stage_feedbacks[i]);
3116 
3117 		if (key->compute_subgroup_size) {
3118 			/* Only compute shaders currently support requiring a
3119 			 * specific subgroup size.
3120 			 */
3121 			assert(i == MESA_SHADER_COMPUTE);
3122 			subgroup_size = key->compute_subgroup_size;
3123 			ballot_bit_size = key->compute_subgroup_size;
3124 		}
3125 
3126 		nir[i] = radv_shader_compile_to_nir(device, modules[i],
3127 						    stage ? stage->pName : "main", i,
3128 						    stage ? stage->pSpecializationInfo : NULL,
3129 						    flags, pipeline->layout,
3130 						    subgroup_size, ballot_bit_size);
3131 
3132 		/* We don't want to alter the meta shaders' IR directly, so
3133 		 * clone it first.
3134 		 */
3135 		if (nir[i]->info.name) {
3136 			nir[i] = nir_shader_clone(NULL, nir[i]);
3137 		}
3138 
3139 		radv_stop_feedback(stage_feedbacks[i], false);
3140 	}
3141 
3142 	if (nir[MESA_SHADER_TESS_CTRL]) {
3143 		nir_lower_patch_vertices(nir[MESA_SHADER_TESS_EVAL], nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
3144 		merge_tess_info(&nir[MESA_SHADER_TESS_EVAL]->info, &nir[MESA_SHADER_TESS_CTRL]->info);
3145 	}
3146 
3147 	bool optimize_conservatively = flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
3148 
3149 	radv_link_shaders(pipeline, nir, optimize_conservatively);
3150 
3151 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
3152 		if (nir[i]) {
3153 			radv_start_feedback(stage_feedbacks[i]);
3154 			radv_optimize_nir(nir[i], optimize_conservatively, false);
3155 			radv_stop_feedback(stage_feedbacks[i], false);
3156 		}
3157 	}
3158 
3159 	radv_set_driver_locations(pipeline, nir, infos);
3160 
3161 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
3162 		if (nir[i]) {
3163 			radv_start_feedback(stage_feedbacks[i]);
3164 
3165 			if (!radv_use_llvm_for_stage(device, i)) {
3166 				NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
3167 				           nir_lower_non_uniform_ubo_access |
3168 				           nir_lower_non_uniform_ssbo_access |
3169 				           nir_lower_non_uniform_texture_access |
3170 				           nir_lower_non_uniform_image_access);
3171 			}
3172 			NIR_PASS_V(nir[i], nir_lower_memory_model);
3173 
3174 			bool lower_to_scalar = false;
3175 			bool lower_pack = false;
3176 			nir_variable_mode robust_modes = (nir_variable_mode)0;
3177 
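			/* With robustBufferAccess enabled, tell the vectorizer
			 * which modes it must not combine in ways that could
			 * change out-of-bounds behavior.
			 */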
3178 			if (device->robust_buffer_access) {
3179 				robust_modes = nir_var_mem_ubo |
3180 					       nir_var_mem_ssbo |
3181 					       nir_var_mem_global |
3182 					       nir_var_mem_push_const;
3183 			}
3184 
3185 			if (nir_opt_load_store_vectorize(nir[i],
3186 							 nir_var_mem_ssbo | nir_var_mem_ubo |
3187 							 nir_var_mem_push_const | nir_var_mem_shared |
3188 							 nir_var_mem_global,
3189 							 mem_vectorize_callback, robust_modes)) {
3190 				lower_to_scalar = true;
3191 				lower_pack = true;
3192 			}
3193 
3194 			/* Gather shader info again, since information such as outputs_read can be out-of-date. */
3195 			nir_shader_gather_info(nir[i], nir_shader_get_entrypoint(nir[i]));
3196 
3197 			radv_lower_io(device, nir[i]);
3198 
3199 			lower_to_scalar |= nir_opt_shrink_vectors(nir[i]);
3200 
3201 			if (lower_to_scalar)
3202 				nir_lower_alu_to_scalar(nir[i], NULL, NULL);
3203 			if (lower_pack)
3204 				nir_lower_pack(nir[i]);
3205 
3206 			/* lower ALU operations */
3207 			/* TODO: Some 64-bit tests crash inside LLVM. */
3208 			if (!radv_use_llvm_for_stage(device, i))
3209 				nir_lower_int64(nir[i]);
3210 
3211 			/* TODO: Implement nir_op_uadd_sat with LLVM. */
3212 			if (!radv_use_llvm_for_stage(device, i))
3213 				nir_opt_idiv_const(nir[i], 32);
3214 			nir_lower_idiv(nir[i], nir_lower_idiv_precise);
3215 
3216 			/* optimize the lowered ALU operations */
3217 			bool more_algebraic = true;
3218 			while (more_algebraic) {
3219 				more_algebraic = false;
3220 				NIR_PASS_V(nir[i], nir_copy_prop);
3221 				NIR_PASS_V(nir[i], nir_opt_dce);
3222 				NIR_PASS_V(nir[i], nir_opt_constant_folding);
3223 				NIR_PASS(more_algebraic, nir[i], nir_opt_algebraic);
3224 			}
3225 
3226 			/* Do late algebraic optimization to turn add(a,
3227 			 * neg(b)) back into subs, then the mandatory cleanup
3228 			 * after algebraic.  Note that it may produce fnegs,
3229 			 * and if so then we need to keep running to squash
3230 			 * fneg(fneg(a)).
3231 			 */
3232 			bool more_late_algebraic = true;
3233 			while (more_late_algebraic) {
3234 				more_late_algebraic = false;
3235 				NIR_PASS(more_late_algebraic, nir[i], nir_opt_algebraic_late);
3236 				NIR_PASS_V(nir[i], nir_opt_constant_folding);
3237 				NIR_PASS_V(nir[i], nir_copy_prop);
3238 				NIR_PASS_V(nir[i], nir_opt_dce);
3239 				NIR_PASS_V(nir[i], nir_opt_cse);
3240 			}
3241 
3242 			if (nir[i]->info.bit_sizes_int & (8 | 16)) {
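				/* lower_bit_size_callback() queries divergence,
				 * which requires LCSSA form and an up-to-date
				 * divergence analysis.
				 */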
3243 				if (device->physical_device->rad_info.chip_class >= GFX8) {
3244 					nir_convert_to_lcssa(nir[i], true, true);
3245 					nir_divergence_analysis(nir[i]);
3246 				}
3247 
3248 				if (nir_lower_bit_size(nir[i], lower_bit_size_callback, device)) {
3249 					nir_lower_idiv(nir[i], nir_lower_idiv_precise);
3250 					nir_opt_constant_folding(nir[i]);
3251 					nir_opt_dce(nir[i]);
3252 				}
3253 
3254 				if (device->physical_device->rad_info.chip_class >= GFX8)
3255 					nir_opt_remove_phis(nir[i]); /* cleanup LCSSA phis */
3256 			}
3257 
3258 			/* cleanup passes */
3259 			nir_lower_load_const_to_scalar(nir[i]);
3260 			nir_move_options move_opts = (nir_move_options)(
3261 				nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
3262 				nir_move_comparisons | nir_move_copies);
3263 			nir_opt_sink(nir[i], move_opts);
3264 			nir_opt_move(nir[i], move_opts);
3265 
3266 			radv_stop_feedback(stage_feedbacks[i], false);
3267 		}
3268 	}
3269 
3270 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
3271 		if (radv_can_dump_shader(device, modules[i], false))
3272 			nir_print_shader(nir[i], stderr);
3273 	}
3274 
3275 	radv_fill_shader_keys(device, keys, key, nir);
3276 
3277 	radv_fill_shader_info(pipeline, pStages, keys, infos, nir);
3278 
3279 	if ((nir[MESA_SHADER_VERTEX] &&
3280 	     keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg) ||
3281 	    (nir[MESA_SHADER_TESS_EVAL] &&
3282 	     keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg)) {
3283 		struct gfx10_ngg_info *ngg_info;
3284 
3285 		if (nir[MESA_SHADER_GEOMETRY])
3286 			ngg_info = &infos[MESA_SHADER_GEOMETRY].ngg_info;
3287 		else if (nir[MESA_SHADER_TESS_CTRL])
3288 			ngg_info = &infos[MESA_SHADER_TESS_EVAL].ngg_info;
3289 		else
3290 			ngg_info = &infos[MESA_SHADER_VERTEX].ngg_info;
3291 
3292 		gfx10_get_ngg_info(key, pipeline, nir, infos, ngg_info);
3293 	} else if (nir[MESA_SHADER_GEOMETRY]) {
3294 		struct gfx9_gs_info *gs_info =
3295 			&infos[MESA_SHADER_GEOMETRY].gs_ring_info;
3296 
3297 		gfx9_get_gs_info(key, pipeline, nir, infos, gs_info);
3298 	}
3299 
3300 	if (modules[MESA_SHADER_GEOMETRY]) {
3301 		struct radv_shader_binary *gs_copy_binary = NULL;
3302 		if (!pipeline->gs_copy_shader &&
3303 		    !radv_pipeline_has_ngg(pipeline)) {
3304 			struct radv_shader_info info = {0};
3305 			struct radv_shader_variant_key key = {0};
3306 
3307 			key.has_multiview_view_index =
3308 				keys[MESA_SHADER_GEOMETRY].has_multiview_view_index;
3309 
3310 			radv_nir_shader_info_pass(nir[MESA_SHADER_GEOMETRY],
3311 						  pipeline->layout, &key,
3312 						  &info);
3313 			info.wave_size = 64; /* Wave32 not supported. */
3314 			info.ballot_bit_size = 64;
3315 
3316 			pipeline->gs_copy_shader = radv_create_gs_copy_shader(
3317 					device, nir[MESA_SHADER_GEOMETRY], &info,
3318 					&gs_copy_binary, keep_executable_info, keep_statistic_info,
3319 					keys[MESA_SHADER_GEOMETRY].has_multiview_view_index,
3320 					disable_optimizations);
3321 		}
3322 
3323 		if (!keep_executable_info && !keep_statistic_info && pipeline->gs_copy_shader) {
3324 			struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
3325 			struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
3326 
3327 			binaries[MESA_SHADER_GEOMETRY] = gs_copy_binary;
3328 			variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;
3329 
3330 			radv_pipeline_cache_insert_shaders(device, cache,
3331 							   gs_copy_hash,
3332 							   variants,
3333 							   binaries);
3334 		}
3335 		free(gs_copy_binary);
3336 	}
3337 
3338 	if (nir[MESA_SHADER_FRAGMENT]) {
3339 		if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
3340 			radv_start_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT]);
3341 
3342 			pipeline->shaders[MESA_SHADER_FRAGMENT] =
3343 			       radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
3344 			                                  pipeline->layout, keys + MESA_SHADER_FRAGMENT,
3345 							  infos + MESA_SHADER_FRAGMENT,
3346 			                                  keep_executable_info, keep_statistic_info,
3347 							  disable_optimizations,
3348 							  &binaries[MESA_SHADER_FRAGMENT]);
3349 
3350 			radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
3351 		}
3352 	}
3353 
3354 	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_TESS_CTRL]) {
3355 		if (!pipeline->shaders[MESA_SHADER_TESS_CTRL]) {
3356 			struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
3357 			struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
3358 			key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;
3359 
3360 			radv_start_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL]);
3361 
3362 			pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
3363 			                                                                      pipeline->layout,
3364 			                                                                      &key, &infos[MESA_SHADER_TESS_CTRL], keep_executable_info,
3365 			                                                                      keep_statistic_info,
3366 											      disable_optimizations,
3367 											      &binaries[MESA_SHADER_TESS_CTRL]);
3368 
3369 			radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
3370 		}
3371 		modules[MESA_SHADER_VERTEX] = NULL;
3372 		keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
3373 	}
3374 
3375 	if (device->physical_device->rad_info.chip_class >= GFX9 && modules[MESA_SHADER_GEOMETRY]) {
3376 		gl_shader_stage pre_stage = modules[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
3377 		if (!pipeline->shaders[MESA_SHADER_GEOMETRY]) {
3378 			struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};
3379 
3380 			radv_start_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY]);
3381 
3382 			pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
3383 			                                                                     pipeline->layout,
3384 			                                                                     &keys[pre_stage], &infos[MESA_SHADER_GEOMETRY], keep_executable_info,
3385 			                                                                     keep_statistic_info,
3386 											     disable_optimizations,
3387 											     &binaries[MESA_SHADER_GEOMETRY]);
3388 
3389 			radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
3390 		}
3391 		modules[pre_stage] = NULL;
3392 	}
3393 
3394 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
3395 		if (modules[i] && !pipeline->shaders[i]) {
3396 			if (i == MESA_SHADER_TESS_EVAL) {
3397 				keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
3398 			}
3399 
3400 			radv_start_feedback(stage_feedbacks[i]);
3401 
3402 			pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
3403 									  pipeline->layout,
3404 									  keys + i, infos + i, keep_executable_info,
3405 									  keep_statistic_info,
3406 									  disable_optimizations,
3407 									  &binaries[i]);
3408 
3409 			radv_stop_feedback(stage_feedbacks[i], false);
3410 		}
3411 	}
3412 
3413 	if (!keep_executable_info && !keep_statistic_info) {
3414 		radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
3415 						   binaries);
3416 	}
3417 
3418 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
3419 		free(binaries[i]);
3420 		if (nir[i]) {
3421 			ralloc_free(nir[i]);
3422 
3423 			if (radv_can_dump_shader_stats(device, modules[i])) {
3424 				radv_dump_shader_stats(device, pipeline, i, stderr);
3425 			}
3426 		}
3427 	}
3428 
3429 	if (fs_m.nir)
3430 		ralloc_free(fs_m.nir);
3431 
3432 	radv_stop_feedback(pipeline_feedback, false);
3433 	return VK_SUCCESS;
3434 }
3435 
3436 static uint32_t
3437 radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
3438 				   gl_shader_stage stage, enum chip_class chip_class)
3439 {
3440 	bool has_gs = radv_pipeline_has_gs(pipeline);
3441 	bool has_tess = radv_pipeline_has_tess(pipeline);
3442 	bool has_ngg = radv_pipeline_has_ngg(pipeline);
3443 
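	/* On GFX9+ some stages are merged (VS+TCS -> HS, VS/TES+GS -> GS), so
	 * a stage's user SGPRs may live at the register offset of the merged
	 * HW stage it runs as.
	 */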
3444 	switch (stage) {
3445 	case MESA_SHADER_FRAGMENT:
3446 		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
3447 	case MESA_SHADER_VERTEX:
3448 		if (has_tess) {
3449 			if (chip_class >= GFX10) {
3450 				return R_00B430_SPI_SHADER_USER_DATA_HS_0;
3451 			} else if (chip_class == GFX9) {
3452 				return R_00B430_SPI_SHADER_USER_DATA_LS_0;
3453 			} else {
3454 				return R_00B530_SPI_SHADER_USER_DATA_LS_0;
3455 			}
3456 
3457 		}
3458 
3459 		if (has_gs) {
3460 			if (chip_class >= GFX10) {
3461 				return R_00B230_SPI_SHADER_USER_DATA_GS_0;
3462 			} else {
3463 				return R_00B330_SPI_SHADER_USER_DATA_ES_0;
3464 			}
3465 		}
3466 
3467 		if (has_ngg)
3468 			return R_00B230_SPI_SHADER_USER_DATA_GS_0;
3469 
3470 		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
3471 	case MESA_SHADER_GEOMETRY:
3472 		return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
3473 		                            R_00B230_SPI_SHADER_USER_DATA_GS_0;
3474 	case MESA_SHADER_COMPUTE:
3475 		return R_00B900_COMPUTE_USER_DATA_0;
3476 	case MESA_SHADER_TESS_CTRL:
3477 		return chip_class == GFX9 ? R_00B430_SPI_SHADER_USER_DATA_LS_0 :
3478 		                            R_00B430_SPI_SHADER_USER_DATA_HS_0;
3479 	case MESA_SHADER_TESS_EVAL:
3480 		if (has_gs) {
3481 			return chip_class >= GFX10 ? R_00B230_SPI_SHADER_USER_DATA_GS_0 :
3482 						     R_00B330_SPI_SHADER_USER_DATA_ES_0;
3483 		} else if (has_ngg) {
3484 			return R_00B230_SPI_SHADER_USER_DATA_GS_0;
3485 		} else {
3486 			return R_00B130_SPI_SHADER_USER_DATA_VS_0;
3487 		}
3488 	default:
3489 		unreachable("unknown shader");
3490 	}
3491 }
3492 
3493 struct radv_bin_size_entry {
3494 	unsigned bpp;
3495 	VkExtent2D extent;
3496 };
3497 
3498 static VkExtent2D
3499 radv_gfx9_compute_bin_size(const struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
3500 {
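	/* Tables indexed by [log2(RBs per SE)][log2(SEs)]: each entry pairs a
	 * minimum bytes-per-pixel threshold with the bin extent to use, and
	 * ends with a UINT_MAX sentinel.
	 */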
3501 	static const struct radv_bin_size_entry color_size_table[][3][9] = {
3502 		{
3503 			/* One RB / SE */
3504 			{
3505 				/* One shader engine */
3506 				{        0, {128,  128}},
3507 				{        1, { 64,  128}},
3508 				{        2, { 32,  128}},
3509 				{        3, { 16,  128}},
3510 				{       17, {  0,    0}},
3511 				{ UINT_MAX, {  0,    0}},
3512 			},
3513 			{
3514 				/* Two shader engines */
3515 				{        0, {128,  128}},
3516 				{        2, { 64,  128}},
3517 				{        3, { 32,  128}},
3518 				{        5, { 16,  128}},
3519 				{       17, {  0,    0}},
3520 				{ UINT_MAX, {  0,    0}},
3521 			},
3522 			{
3523 				/* Four shader engines */
3524 				{        0, {128,  128}},
3525 				{        3, { 64,  128}},
3526 				{        5, { 16,  128}},
3527 				{       17, {  0,    0}},
3528 				{ UINT_MAX, {  0,    0}},
3529 			},
3530 		},
3531 		{
3532 			/* Two RB / SE */
3533 			{
3534 				/* One shader engine */
3535 				{        0, {128,  128}},
3536 				{        2, { 64,  128}},
3537 				{        3, { 32,  128}},
3538 				{        5, { 16,  128}},
3539 				{       33, {  0,    0}},
3540 				{ UINT_MAX, {  0,    0}},
3541 			},
3542 			{
3543 				/* Two shader engines */
3544 				{        0, {128,  128}},
3545 				{        3, { 64,  128}},
3546 				{        5, { 32,  128}},
3547 				{        9, { 16,  128}},
3548 				{       33, {  0,    0}},
3549 				{ UINT_MAX, {  0,    0}},
3550 			},
3551 			{
3552 				/* Four shader engines */
3553 				{        0, {256,  256}},
3554 				{        2, {128,  256}},
3555 				{        3, {128,  128}},
3556 				{        5, { 64,  128}},
3557 				{        9, { 16,  128}},
3558 				{       33, {  0,    0}},
3559 				{ UINT_MAX, {  0,    0}},
3560 			},
3561 		},
3562 		{
3563 			/* Four RB / SE */
3564 			{
3565 				/* One shader engine */
3566 				{        0, {128,  256}},
3567 				{        2, {128,  128}},
3568 				{        3, { 64,  128}},
3569 				{        5, { 32,  128}},
3570 				{        9, { 16,  128}},
3571 				{       33, {  0,    0}},
3572 				{ UINT_MAX, {  0,    0}},
3573 			},
3574 			{
3575 				/* Two shader engines */
3576 				{        0, {256,  256}},
3577 				{        2, {128,  256}},
3578 				{        3, {128,  128}},
3579 				{        5, { 64,  128}},
3580 				{        9, { 32,  128}},
3581 				{       17, { 16,  128}},
3582 				{       33, {  0,    0}},
3583 				{ UINT_MAX, {  0,    0}},
3584 			},
3585 			{
3586 				/* Four shader engines */
3587 				{        0, {256,  512}},
3588 				{        2, {256,  256}},
3589 				{        3, {128,  256}},
3590 				{        5, {128,  128}},
3591 				{        9, { 64,  128}},
3592 				{       17, { 16,  128}},
3593 				{       33, {  0,    0}},
3594 				{ UINT_MAX, {  0,    0}},
3595 			},
3596 		},
3597 	};
3598 	static const struct radv_bin_size_entry ds_size_table[][3][9] = {
3599 		{
3600 			// One RB / SE
3601 			{
3602 				// One shader engine
3603 				{        0, {128,  256}},
3604 				{        2, {128,  128}},
3605 				{        4, { 64,  128}},
3606 				{        7, { 32,  128}},
3607 				{       13, { 16,  128}},
3608 				{       49, {  0,    0}},
3609 				{ UINT_MAX, {  0,    0}},
3610 			},
3611 			{
3612 				// Two shader engines
3613 				{        0, {256,  256}},
3614 				{        2, {128,  256}},
3615 				{        4, {128,  128}},
3616 				{        7, { 64,  128}},
3617 				{       13, { 32,  128}},
3618 				{       25, { 16,  128}},
3619 				{       49, {  0,    0}},
3620 				{ UINT_MAX, {  0,    0}},
3621 			},
3622 			{
3623 				// Four shader engines
3624 				{        0, {256,  512}},
3625 				{        2, {256,  256}},
3626 				{        4, {128,  256}},
3627 				{        7, {128,  128}},
3628 				{       13, { 64,  128}},
3629 				{       25, { 16,  128}},
3630 				{       49, {  0,    0}},
3631 				{ UINT_MAX, {  0,    0}},
3632 			},
3633 		},
3634 		{
3635 			// Two RB / SE
3636 			{
3637 				// One shader engine
3638 				{        0, {256,  256}},
3639 				{        2, {128,  256}},
3640 				{        4, {128,  128}},
3641 				{        7, { 64,  128}},
3642 				{       13, { 32,  128}},
3643 				{       25, { 16,  128}},
3644 				{       97, {  0,    0}},
3645 				{ UINT_MAX, {  0,    0}},
3646 			},
3647 			{
3648 				// Two shader engines
3649 				{        0, {256,  512}},
3650 				{        2, {256,  256}},
3651 				{        4, {128,  256}},
3652 				{        7, {128,  128}},
3653 				{       13, { 64,  128}},
3654 				{       25, { 32,  128}},
3655 				{       49, { 16,  128}},
3656 				{       97, {  0,    0}},
3657 				{ UINT_MAX, {  0,    0}},
3658 			},
3659 			{
3660 				// Four shader engines
3661 				{        0, {512,  512}},
3662 				{        2, {256,  512}},
3663 				{        4, {256,  256}},
3664 				{        7, {128,  256}},
3665 				{       13, {128,  128}},
3666 				{       25, { 64,  128}},
3667 				{       49, { 16,  128}},
3668 				{       97, {  0,    0}},
3669 				{ UINT_MAX, {  0,    0}},
3670 			},
3671 		},
3672 		{
3673 			// Four RB / SE
3674 			{
3675 				// One shader engine
3676 				{        0, {256,  512}},
3677 				{        2, {256,  256}},
3678 				{        4, {128,  256}},
3679 				{        7, {128,  128}},
3680 				{       13, { 64,  128}},
3681 				{       25, { 32,  128}},
3682 				{       49, { 16,  128}},
3683 				{ UINT_MAX, {  0,    0}},
3684 			},
3685 			{
3686 				// Two shader engines
3687 				{        0, {512,  512}},
3688 				{        2, {256,  512}},
3689 				{        4, {256,  256}},
3690 				{        7, {128,  256}},
3691 				{       13, {128,  128}},
3692 				{       25, { 64,  128}},
3693 				{       49, { 32,  128}},
3694 				{       97, { 16,  128}},
3695 				{ UINT_MAX, {  0,    0}},
3696 			},
3697 			{
3698 				// Four shader engines
3699 				{        0, {512,  512}},
3700 				{        4, {256,  512}},
3701 				{        7, {256,  256}},
3702 				{       13, {128,  256}},
3703 				{       25, {128,  128}},
3704 				{       49, { 64,  128}},
3705 				{       97, { 16,  128}},
3706 				{ UINT_MAX, {  0,    0}},
3707 			},
3708 		},
3709 	};
3710 
3711 	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
3712 	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
3713 	VkExtent2D extent = {512, 512};
3714 
3715 	unsigned log_num_rb_per_se =
3716 	    util_logbase2_ceil(pipeline->device->physical_device->rad_info.num_render_backends /
3717 	                       pipeline->device->physical_device->rad_info.max_se);
3718 	unsigned log_num_se = util_logbase2_ceil(pipeline->device->physical_device->rad_info.max_se);
3719 
3720 	unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
3721 	unsigned ps_iter_samples = 1u << G_028804_PS_ITER_SAMPLES(pipeline->graphics.ms.db_eqaa);
3722 	unsigned effective_samples = total_samples;
3723 	unsigned color_bytes_per_pixel = 0;
3724 
3725 	const VkPipelineColorBlendStateCreateInfo *vkblend =
3726 		radv_pipeline_get_color_blend_state(pCreateInfo);
3727 	if (vkblend) {
3728 		for (unsigned i = 0; i < subpass->color_count; i++) {
3729 			if (!vkblend->pAttachments[i].colorWriteMask)
3730 				continue;
3731 
3732 			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
3733 				continue;
3734 
3735 			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
3736 			color_bytes_per_pixel += vk_format_get_blocksize(format);
3737 		}
3738 
3739 		/* MSAA images typically don't use all samples all the time. */
3740 		if (effective_samples >= 2 && ps_iter_samples <= 1)
3741 			effective_samples = 2;
3742 		color_bytes_per_pixel *= effective_samples;
3743 	}
3744 
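	/* Walk the sorted table: advance while the next entry's threshold
	 * still fits our bytes-per-pixel, ending on the best match.
	 */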
3745 	const struct radv_bin_size_entry *color_entry = color_size_table[log_num_rb_per_se][log_num_se];
3746 	while (color_entry[1].bpp <= color_bytes_per_pixel)
3747 		++color_entry;
3748 
3749 	extent = color_entry->extent;
3750 
3751 	if (subpass->depth_stencil_attachment) {
3752 		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
3753 
3754 		/* Coefficients taken from AMDVLK */
3755 		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
3756 		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
3757 		unsigned ds_bytes_per_pixel = 4 * (depth_coeff + stencil_coeff) * total_samples;
3758 
3759 		const struct radv_bin_size_entry *ds_entry = ds_size_table[log_num_rb_per_se][log_num_se];
3760 		while (ds_entry[1].bpp <= ds_bytes_per_pixel)
3761 			++ds_entry;
3762 
3763 		if (ds_entry->extent.width * ds_entry->extent.height < extent.width * extent.height)
3764 			extent = ds_entry->extent;
3765 	}
3766 
3767 	return extent;
3768 }
3769 
3770 static VkExtent2D
3771 radv_gfx10_compute_bin_size(const struct radv_pipeline *pipeline, const VkGraphicsPipelineCreateInfo *pCreateInfo)
3772 {
3773 	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
3774 	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
3775 	VkExtent2D extent = {512, 512};
3776 
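	/* These constants appear to model the DB/CB/FMASK cache tag sizes and
	 * counts; the bin is sized so the working set fits in the tags
	 * (the values seem to follow AMDVLK).
	 */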
3777 	const unsigned db_tag_size = 64;
3778 	const unsigned db_tag_count = 312;
3779 	const unsigned color_tag_size = 1024;
3780 	const unsigned color_tag_count = 31;
3781 	const unsigned fmask_tag_size = 256;
3782 	const unsigned fmask_tag_count = 44;
3783 
3784 	const unsigned rb_count = pipeline->device->physical_device->rad_info.num_render_backends;
3785 	const unsigned pipe_count = MAX2(rb_count, pipeline->device->physical_device->rad_info.num_sdp_interfaces);
3786 
3787 	const unsigned db_tag_part = (db_tag_count * rb_count / pipe_count) * db_tag_size * pipe_count;
3788 	const unsigned color_tag_part = (color_tag_count * rb_count / pipe_count) * color_tag_size * pipe_count;
3789 	const unsigned fmask_tag_part = (fmask_tag_count * rb_count / pipe_count) * fmask_tag_size * pipe_count;
3790 
3791 	const unsigned total_samples = 1u << G_028BE0_MSAA_NUM_SAMPLES(pipeline->graphics.ms.pa_sc_aa_config);
3792 	const unsigned samples_log = util_logbase2_ceil(total_samples);
3793 
3794 	unsigned color_bytes_per_pixel = 0;
3795 	unsigned fmask_bytes_per_pixel = 0;
3796 
3797 	const VkPipelineColorBlendStateCreateInfo *vkblend =
3798 		radv_pipeline_get_color_blend_state(pCreateInfo);
3799 	if (vkblend) {
3800 		for (unsigned i = 0; i < subpass->color_count; i++) {
3801 			if (!vkblend->pAttachments[i].colorWriteMask)
3802 				continue;
3803 
3804 			if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
3805 				continue;
3806 
3807 			VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
3808 			color_bytes_per_pixel += vk_format_get_blocksize(format);
3809 
3810 			if (total_samples > 1) {
3811 				assert(samples_log <= 3);
3812 				const unsigned fmask_array[] = {0, 1, 1, 4};
3813 				fmask_bytes_per_pixel += fmask_array[samples_log];
3814 			}
3815 		}
3816 
3817 		color_bytes_per_pixel *= total_samples;
3818 	}
3819 	color_bytes_per_pixel = MAX2(color_bytes_per_pixel, 1);
3820 
3821 	const unsigned color_pixel_count_log = util_logbase2(color_tag_part / color_bytes_per_pixel);
3822 	extent.width = 1ull << ((color_pixel_count_log + 1) / 2);
3823 	extent.height = 1ull << (color_pixel_count_log / 2);
3824 
3825 	if (fmask_bytes_per_pixel) {
3826 		const unsigned fmask_pixel_count_log = util_logbase2(fmask_tag_part / fmask_bytes_per_pixel);
3827 
3828 		const VkExtent2D fmask_extent = (VkExtent2D){
3829 			.width = 1ull << ((fmask_pixel_count_log + 1) / 2),
3830 			.height = 1ull << (fmask_pixel_count_log / 2)
3831 		};
3832 
3833 		if (fmask_extent.width * fmask_extent.height < extent.width * extent.height)
3834 		    extent = fmask_extent;
3835 	}
3836 
3837 	if (subpass->depth_stencil_attachment) {
3838 		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
3839 
3840 		/* Coefficients taken from AMDVLK */
3841 		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
3842 		unsigned stencil_coeff = vk_format_is_stencil(attachment->format) ? 1 : 0;
3843 		unsigned db_bytes_per_pixel = (depth_coeff + stencil_coeff) * total_samples;
3844 
3845 		const unsigned db_pixel_count_log = util_logbase2(db_tag_part / db_bytes_per_pixel);
3846 
3847 		const VkExtent2D db_extent = (VkExtent2D){
3848 			.width = 1ull << ((db_pixel_count_log + 1) / 2),
3849 			.height = 1ull << (db_pixel_count_log / 2)
3850 		};
3851 
3852 		if (db_extent.width * db_extent.height < extent.width * extent.height)
3853 		    extent = db_extent;
3854 	}
3855 
3856 	extent.width = MAX2(extent.width, 128);
3857 	extent.height = MAX2(extent.height, 64);
3858 
3859 	return extent;
3860 }
3861 
3862 static void
3863 radv_pipeline_init_disabled_binning_state(struct radv_pipeline *pipeline,
3864 					  const VkGraphicsPipelineCreateInfo *pCreateInfo)
3865 {
3866 	uint32_t pa_sc_binner_cntl_0 =
3867 	                S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_LEGACY_SC) |
3868 	                S_028C44_DISABLE_START_OF_PRIM(1);
3869 	uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);
3870 
3871 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
3872 		RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
3873 		struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
3874 		const VkPipelineColorBlendStateCreateInfo *vkblend =
3875 			radv_pipeline_get_color_blend_state(pCreateInfo);
3876 		unsigned min_bytes_per_pixel = 0;
3877 
3878 		if (vkblend) {
3879 			for (unsigned i = 0; i < subpass->color_count; i++) {
3880 				if (!vkblend->pAttachments[i].colorWriteMask)
3881 					continue;
3882 
3883 				if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
3884 					continue;
3885 
3886 				VkFormat format = pass->attachments[subpass->color_attachments[i].attachment].format;
3887 				unsigned bytes = vk_format_get_blocksize(format);
3888 				if (!min_bytes_per_pixel || bytes < min_bytes_per_pixel)
3889 					min_bytes_per_pixel = bytes;
3890 			}
3891 		}
3892 
3893 		pa_sc_binner_cntl_0 =
3894 			S_028C44_BINNING_MODE(V_028C44_DISABLE_BINNING_USE_NEW_SC) |
3895 			S_028C44_BIN_SIZE_X(0) |
3896 			S_028C44_BIN_SIZE_Y(0) |
3897 			S_028C44_BIN_SIZE_X_EXTEND(2) | /* 128 */
3898 			S_028C44_BIN_SIZE_Y_EXTEND(min_bytes_per_pixel <= 4 ? 2 : 1) | /* 128 or 64 */
3899 			S_028C44_DISABLE_START_OF_PRIM(1);
3900 	}
3901 
3902 	pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
3903 	pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
3904 }
3905 
3906 struct radv_binning_settings
3907 radv_get_binning_settings(const struct radv_physical_device *pdev)
3908 {
3909 	struct radv_binning_settings settings;
3910 	if (pdev->rad_info.has_dedicated_vram) {
3911 		if (pdev->rad_info.num_render_backends > 4) {
3912 			settings.context_states_per_bin = 1;
3913 			settings.persistent_states_per_bin = 1;
3914 		} else {
3915 			settings.context_states_per_bin = 3;
3916 			settings.persistent_states_per_bin = 8;
3917 		}
3918 		settings.fpovs_per_batch = 63;
3919 	} else {
3920 		/* The context states are affected by the scissor bug. */
3921 		settings.context_states_per_bin = 6;
3922 		/* 32 causes hangs for RAVEN. */
3923 		settings.persistent_states_per_bin = 16;
3924 		settings.fpovs_per_batch = 63;
3925 	}
3926 
3927 	if (pdev->rad_info.has_gfx9_scissor_bug)
3928 		settings.context_states_per_bin = 1;
3929 
3930 	return settings;
3931 }
3932 
3933 static void
3934 radv_pipeline_init_binning_state(struct radv_pipeline *pipeline,
3935 				 const VkGraphicsPipelineCreateInfo *pCreateInfo,
3936 				 const struct radv_blend_state *blend)
3937 {
3938 	if (pipeline->device->physical_device->rad_info.chip_class < GFX9)
3939 		return;
3940 
3941 	VkExtent2D bin_size;
3942 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
3943 		bin_size = radv_gfx10_compute_bin_size(pipeline, pCreateInfo);
3944 	} else if (pipeline->device->physical_device->rad_info.chip_class == GFX9) {
3945 		bin_size = radv_gfx9_compute_bin_size(pipeline, pCreateInfo);
3946 	} else
3947 		unreachable("Unhandled generation for binning bin size calculation");
3948 
3949 	if (pipeline->device->pbb_allowed && bin_size.width && bin_size.height) {
3950 		struct radv_binning_settings settings =
3951 			radv_get_binning_settings(pipeline->device->physical_device);
3952 
3953 		bool disable_start_of_prim = true;
3954 		uint32_t db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_FORCE_OFF);
3955 
3956 		const struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
3957 
3958 		if (pipeline->device->dfsm_allowed && ps &&
3959 		    !ps->info.ps.can_discard &&
3960 		    !ps->info.ps.writes_memory &&
3961 		    blend->cb_target_enabled_4bit) {
3962 			db_dfsm_control = S_028060_PUNCHOUT_MODE(V_028060_AUTO);
3963 			disable_start_of_prim = (blend->blend_enable_4bit & blend->cb_target_enabled_4bit) != 0;
3964 		}
3965 
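		/* Per the register layout used here, a bin dimension of 16 is
		 * encoded via the BIN_SIZE_X/Y bits, while 32..512 use the
		 * *_EXTEND fields as log2(size) - 5.
		 */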
3966 		const uint32_t pa_sc_binner_cntl_0 =
3967 	                S_028C44_BINNING_MODE(V_028C44_BINNING_ALLOWED) |
3968 	                S_028C44_BIN_SIZE_X(bin_size.width == 16) |
3969 	                S_028C44_BIN_SIZE_Y(bin_size.height == 16) |
3970 	                S_028C44_BIN_SIZE_X_EXTEND(util_logbase2(MAX2(bin_size.width, 32)) - 5) |
3971 	                S_028C44_BIN_SIZE_Y_EXTEND(util_logbase2(MAX2(bin_size.height, 32)) - 5) |
3972 	                S_028C44_CONTEXT_STATES_PER_BIN(settings.context_states_per_bin - 1) |
3973 	                S_028C44_PERSISTENT_STATES_PER_BIN(settings.persistent_states_per_bin - 1) |
3974 	                S_028C44_DISABLE_START_OF_PRIM(disable_start_of_prim) |
3975 	                S_028C44_FPOVS_PER_BATCH(settings.fpovs_per_batch) |
3976 	                S_028C44_OPTIMAL_BIN_SELECTION(1);
3977 
3978 		pipeline->graphics.binning.pa_sc_binner_cntl_0 = pa_sc_binner_cntl_0;
3979 		pipeline->graphics.binning.db_dfsm_control = db_dfsm_control;
3980 	} else
3981 		radv_pipeline_init_disabled_binning_state(pipeline, pCreateInfo);
3982 }
3983 
3984 
3985 static void
3986 radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
3987                                            const struct radv_pipeline *pipeline,
3988                                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
3989                                            const struct radv_graphics_pipeline_create_info *extra)
3990 {
3991 	const VkPipelineDepthStencilStateCreateInfo *vkds = radv_pipeline_get_depth_stencil_state(pCreateInfo);
3992 	RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
3993 	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
3994 	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
3995 	struct radv_render_pass_attachment *attachment = NULL;
3996 	uint32_t db_render_control = 0, db_render_override2 = 0;
3997 	uint32_t db_render_override = 0;
3998 
3999 	if (subpass->depth_stencil_attachment)
4000 		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
4001 
4002 	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
4003 
4004 	if (vkds && has_depth_attachment) {
4005 		/* from amdvlk: For 4xAA and 8xAA need to decompress on flush for better performance */
4006 		db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);
4007 
4008 		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10_3)
4009 			db_render_override2 |= S_028010_CENTROID_COMPUTATION_MODE(2);
4010 	}
4011 
4012 	if (attachment && extra) {
4013 		db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
4014 		db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);
4015 
4016 		db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->resummarize_enable);
4017 		db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->depth_compress_disable);
4018 		db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->stencil_compress_disable);
4019 		db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
4020 		db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
4021 	}
4022 
4023 	db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
4024 			      S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);
4025 
4026 	if (!pCreateInfo->pRasterizationState->depthClampEnable &&
4027 	    ps->info.ps.writes_z) {
4028 		/* From VK_EXT_depth_range_unrestricted spec:
4029 		 *
4030 		 * "The behavior described in Primitive Clipping still applies.
4031 		 *  If depth clamping is disabled the depth values are still
4032 		 *  clipped to 0 ≤ zc ≤ wc before the viewport transform. If
4033 		 *  depth clamping is enabled the above equation is ignored and
4034 		 *  the depth values are instead clamped to the VkViewport
4035 		 *  minDepth and maxDepth values, which in the case of this
4036 		 *  extension can be outside of the 0.0 to 1.0 range."
4037 		 */
4038 		db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
4039 	}
4040 
4041 	radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
4042 	radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
4043 	radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
4044 }
4045 
4046 static void
4047 radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
4048                                    const struct radv_pipeline *pipeline,
4049                                    const struct radv_blend_state *blend)
4050 {
4051 	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
4052 	radeon_emit_array(ctx_cs, blend->cb_blend_control,
4053 			  8);
4054 	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
4055 	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);
4056 
4057 	if (pipeline->device->physical_device->rad_info.has_rbplus) {
4058 
4059 		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
4060 		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
4061 	}
4062 
4063 	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);
4064 
4065 	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
4066 	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);
4067 }
4068 
4069 static void
4070 radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
4071 				    const struct radv_pipeline *pipeline,
4072                                     const VkGraphicsPipelineCreateInfo *pCreateInfo)
4073 {
4074 	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
4075 	const VkConservativeRasterizationModeEXT mode =
4076 		radv_get_conservative_raster_mode(vkraster);
4077 	uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
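	/* Absent VK_EXT_depth_clip_enable state, depth clipping is implicitly
	 * disabled whenever depth clamping is enabled, matching D3D behavior.
	 */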
4078 	bool depth_clip_disable = vkraster->depthClampEnable;
4079 
4080 	const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
4081 		vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
4082 	if (depth_clip_state) {
4083 		depth_clip_disable = !depth_clip_state->depthClipEnable;
4084 	}
4085 
4086 	radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
4087 	                       S_028810_DX_CLIP_SPACE_DEF(1) | // Vulkan uses DX conventions.
4088 	                       S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
4089 	                       S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
4090 	                       S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
4091 	                       S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));
4092 
4093 	radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL,
4094 			       S_028BDC_DX10_DIAMOND_TEST_ENA(1));
4095 
4096 	/* Conservative rasterization. */
4097 	if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
4098 		pa_sc_conservative_rast = S_028C4C_PREZ_AA_MASK_ENABLE(1) |
4099 					  S_028C4C_POSTZ_AA_MASK_ENABLE(1) |
4100 					  S_028C4C_CENTROID_SAMPLE_OVERRIDE(1);
4101 
4102 		if (mode == VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT) {
4103 			pa_sc_conservative_rast |=
4104 				S_028C4C_OVER_RAST_ENABLE(1) |
4105 				S_028C4C_OVER_RAST_SAMPLE_SELECT(0) |
4106 				S_028C4C_UNDER_RAST_ENABLE(0) |
4107 				S_028C4C_UNDER_RAST_SAMPLE_SELECT(1) |
4108 				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(1);
4109 		} else {
4110 			assert(mode == VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT);
4111 			pa_sc_conservative_rast |=
4112 				S_028C4C_OVER_RAST_ENABLE(0) |
4113 				S_028C4C_OVER_RAST_SAMPLE_SELECT(1) |
4114 				S_028C4C_UNDER_RAST_ENABLE(1) |
4115 				S_028C4C_UNDER_RAST_SAMPLE_SELECT(0) |
4116 				S_028C4C_PBB_UNCERTAINTY_REGION_ENABLE(0);
4117 		}
4118 	}
4119 
4120 	radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
4121 				   pa_sc_conservative_rast);
4122 }
4123 
4124 
4125 static void
4126 radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
4127                                          const struct radv_pipeline *pipeline)
4128 {
4129 	const struct radv_multisample_state *ms = &pipeline->graphics.ms;
4130 
4131 	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
4132 	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
4133 	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);
4134 
4135 	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
4136 	radeon_set_context_reg(ctx_cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
4137 	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
4138 	radeon_set_context_reg(ctx_cs, R_028BE0_PA_SC_AA_CONFIG, ms->pa_sc_aa_config);
4139 
4140 	/* The exclusion bits can be set to improve rasterization efficiency
4141 	 * if no sample lies on the pixel boundary (-8 sample offset). It's
4142 	 * currently always true on GFX7+ because the driver doesn't support 16 samples.
4143 	 */
4144 	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
4145 	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
4146 			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
4147 			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
4148 
4149 	/* GFX9: Flush DFSM when the AA mode changes. */
4150 	if (pipeline->device->dfsm_allowed) {
4151 		radeon_emit(ctx_cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
4152 		radeon_emit(ctx_cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
4153 	}
4154 }
4155 
4156 static void
4157 radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
4158                                    const struct radv_pipeline *pipeline)
4159 {
4160 	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
4161 	const struct radv_shader_variant *vs =
4162 		pipeline->shaders[MESA_SHADER_TESS_EVAL] ?
4163 		pipeline->shaders[MESA_SHADER_TESS_EVAL] :
4164 		pipeline->shaders[MESA_SHADER_VERTEX];
4165 	unsigned vgt_primitiveid_en = 0;
4166 	uint32_t vgt_gs_mode = 0;
4167 
4168 	if (radv_pipeline_has_ngg(pipeline))
4169 		return;
4170 
4171 	if (radv_pipeline_has_gs(pipeline)) {
4172 		const struct radv_shader_variant *gs =
4173 			pipeline->shaders[MESA_SHADER_GEOMETRY];
4174 
4175 		vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
4176 		                             pipeline->device->physical_device->rad_info.chip_class);
4177 	} else if (outinfo->export_prim_id || vs->info.uses_prim_id) {
4178 		vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
4179 		vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
4180 	}
4181 
4182 	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
4183 	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
4184 }
4185 
4186 static void
4187 radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
4188 			     struct radeon_cmdbuf *cs,
4189 			     const struct radv_pipeline *pipeline,
4190 			     const struct radv_shader_variant *shader)
4191 {
4192 	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
4193 
4194 	radeon_set_sh_reg_seq(cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
4195 	radeon_emit(cs, va >> 8);
4196 	radeon_emit(cs, S_00B124_MEM_BASE(va >> 40));
4197 	radeon_emit(cs, shader->config.rsrc1);
4198 	radeon_emit(cs, shader->config.rsrc2);
4199 
4200 	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
4201 	unsigned clip_dist_mask, cull_dist_mask, total_mask;
4202 	clip_dist_mask = outinfo->clip_dist_mask;
4203 	cull_dist_mask = outinfo->cull_dist_mask;
4204 	total_mask = clip_dist_mask | cull_dist_mask;
4205 	bool misc_vec_ena = outinfo->writes_pointsize ||
4206 		outinfo->writes_layer ||
4207 		outinfo->writes_viewport_index;
4208 	unsigned spi_vs_out_config, nparams;
4209 
4210 	/* VS is required to export at least one param. */
4211 	nparams = MAX2(outinfo->param_exports, 1);
4212 	spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
4213 
4214 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
4215 		spi_vs_out_config |= S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0);
4216 	}
4217 
4218 	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG, spi_vs_out_config);
4219 
4220 	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
4221 	                       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
4222 	                       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
4223 	                                                   V_02870C_SPI_SHADER_4COMP :
4224 	                                                   V_02870C_SPI_SHADER_NONE) |
4225 	                       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
4226 	                                                   V_02870C_SPI_SHADER_4COMP :
4227 	                                                   V_02870C_SPI_SHADER_NONE) |
4228 	                       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
4229 	                                                   V_02870C_SPI_SHADER_4COMP :
4230 	                                                   V_02870C_SPI_SHADER_NONE));
4231 
4232 	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
4233 	                       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
4234 	                       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
4235 	                       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
4236 	                       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
4237 	                       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
4238 	                       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
4239 	                       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
4240 			       S_02881C_BYPASS_PRIM_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
4241 			       S_02881C_BYPASS_VTX_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
4242 	                       cull_dist_mask << 8 |
4243 	                       clip_dist_mask);
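	/* Worked example (illustrative values): a shader writing 2 clip
	 * distances and 1 cull distance has clip_dist_mask = 0x3 and
	 * cull_dist_mask = 0x4, so total_mask = 0x7: CCDIST0_VEC_ENA is set,
	 * CCDIST1_VEC_ENA stays clear, and the low bits of the register
	 * receive (0x4 << 8) | 0x3.
	 */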
4244 
4245 	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
4246 		radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
4247 		                       outinfo->writes_viewport_index);
4248 }
4249 
4250 static void
4251 radv_pipeline_generate_hw_es(struct radeon_cmdbuf *cs,
4252 			     const struct radv_pipeline *pipeline,
4253 			     const struct radv_shader_variant *shader)
4254 {
4255 	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
4256 
4257 	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 4);
4258 	radeon_emit(cs, va >> 8);
4259 	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
4260 	radeon_emit(cs, shader->config.rsrc1);
4261 	radeon_emit(cs, shader->config.rsrc2);
4262 }
4263 
4264 static void
4265 radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
4266 			     const struct radv_pipeline *pipeline,
4267 			     const struct radv_shader_variant *shader)
4268 {
4269 	unsigned num_lds_blocks = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_lds_blocks;
4270 	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
4271 	uint32_t rsrc2 = shader->config.rsrc2;
4272 
4273 	radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
4274 	radeon_emit(cs, va >> 8);
4275 	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
4276 
4277 	rsrc2 |= S_00B52C_LDS_SIZE(num_lds_blocks);
4278 	if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
4279 	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
4280 		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);
4281 
4282 	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
4283 	radeon_emit(cs, shader->config.rsrc1);
4284 	radeon_emit(cs, rsrc2);
4285 }
4286 
4287 static void
4288 radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
4289 			      struct radeon_cmdbuf *cs,
4290 			      const struct radv_pipeline *pipeline,
4291 			      const struct radv_shader_variant *shader)
4292 {
4293 	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
4294 	gl_shader_stage es_type =
4295 		radv_pipeline_has_tess(pipeline) ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
4296 	struct radv_shader_variant *es =
4297 		es_type == MESA_SHADER_TESS_EVAL ? pipeline->shaders[MESA_SHADER_TESS_EVAL] : pipeline->shaders[MESA_SHADER_VERTEX];
4298 	const struct gfx10_ngg_info *ngg_state = &shader->info.ngg_info;
4299 
4300 	radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
4301 	radeon_emit(cs, va >> 8);
4302 	radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
4303 	radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
4304 	radeon_emit(cs, shader->config.rsrc1);
4305 	radeon_emit(cs, shader->config.rsrc2);
4306 
4307 	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
4308 	unsigned clip_dist_mask, cull_dist_mask, total_mask;
4309 	clip_dist_mask = outinfo->clip_dist_mask;
4310 	cull_dist_mask = outinfo->cull_dist_mask;
4311 	total_mask = clip_dist_mask | cull_dist_mask;
4312 	bool misc_vec_ena = outinfo->writes_pointsize ||
4313 		outinfo->writes_layer ||
4314 		outinfo->writes_viewport_index;
4315 	bool es_enable_prim_id = outinfo->export_prim_id ||
4316 				 (es && es->info.uses_prim_id);
4317 	bool break_wave_at_eoi = false;
4318 	unsigned ge_cntl;
4319 	unsigned nparams;
4320 
4321 	if (es_type == MESA_SHADER_TESS_EVAL) {
4322 		struct radv_shader_variant *gs =
4323 			pipeline->shaders[MESA_SHADER_GEOMETRY];
4324 
4325 		if (es_enable_prim_id || (gs && gs->info.uses_prim_id))
4326 			break_wave_at_eoi = true;
4327 	}
4328 
4329 	nparams = MAX2(outinfo->param_exports, 1);
4330 	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
4331 	                       S_0286C4_VS_EXPORT_COUNT(nparams - 1) |
4332 			       S_0286C4_NO_PC_EXPORT(outinfo->param_exports == 0));
4333 
4334 	radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
4335 			       S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
4336 	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
4337 	                       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
4338 	                       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
4339 	                                                   V_02870C_SPI_SHADER_4COMP :
4340 	                                                   V_02870C_SPI_SHADER_NONE) |
4341 	                       S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
4342 	                                                   V_02870C_SPI_SHADER_4COMP :
4343 	                                                   V_02870C_SPI_SHADER_NONE) |
4344 	                       S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
4345 	                                                   V_02870C_SPI_SHADER_4COMP :
4346 	                                                   V_02870C_SPI_SHADER_NONE));
4347 
4348 	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
4349 	                       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
4350 	                       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
4351 	                       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
4352 	                       S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
4353 	                       S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
4354 	                       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
4355 	                       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
4356 			       S_02881C_BYPASS_PRIM_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
4357 			       S_02881C_BYPASS_VTX_RATE_COMBINER(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
4358 	                       cull_dist_mask << 8 |
4359 	                       clip_dist_mask);
4360 
4361 	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN,
4362 			       S_028A84_PRIMITIVEID_EN(es_enable_prim_id) |
4363 			       S_028A84_NGG_DISABLE_PROVOK_REUSE(outinfo->export_prim_id));
4364 
4365 	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
4366 			       ngg_state->vgt_esgs_ring_itemsize);
4367 
4368 	/* NGG specific registers. */
4369 	struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
4370 	uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;
4371 
4372 	radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
4373 			       S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
4374 			       S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
4375 			       S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
4376 	radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
4377 			       S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
4378 	radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
4379 			       S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
4380 			       S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
4381 	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
4382 			       S_028B90_CNT(gs_num_invocations) |
4383 			       S_028B90_ENABLE(gs_num_invocations > 1) |
4384 			       S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));
4385 
4386 	/* User edge flags are set by the pos exports. If user edge flags are
4387 	 * not used, we must use hw-generated edge flags and pass them via
4388 	 * the prim export to prevent drawing lines on internal edges of
4389 	 * decomposed primitives (such as quads) with polygon mode = lines.
4390 	 *
4391 	 * TODO: We should combine hw-generated edge flags with user edge
4392 	 *       flags in the shader.
4393 	 */
4394 	radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
4395 			       S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
4396 			                                        !radv_pipeline_has_gs(pipeline)) |
4397 			       /* Vertex reuse depth for NGG (GFX10.3+). */
4398 			       S_028838_VERTEX_REUSE_DEPTH(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3 ? 30 : 0));
4399 
4400 	ge_cntl = S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
4401 		  S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
4402 		  S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi);
4403 
4404 	/* Bug workaround for a possible hang with non-tessellation cases.
4405 	 * Tessellation always sets GE_CNTL.VERT_GRP_SIZE = 0
4406 	 *
4407 	 * Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
4408 	 */
4409 	if (pipeline->device->physical_device->rad_info.chip_class == GFX10 &&
4410 	    !radv_pipeline_has_tess(pipeline) &&
4411 	    ngg_state->hw_max_esverts != 256) {
4412 		ge_cntl &= C_03096C_VERT_GRP_SIZE;
4413 
4414 		if (ngg_state->hw_max_esverts > 5) {
4415 			ge_cntl |= S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts - 5);
4416 		}
4417 	}
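	/* Illustrative numbers for the workaround above (values assumed):
	 * hw_max_esverts = 128 yields VERT_GRP_SIZE = 123, while
	 * hw_max_esverts <= 5 leaves the field cleared to 0.
	 */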
4418 
4419 	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL, ge_cntl);
4420 }
4421 
4422 static void
4423 radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
4424 			     const struct radv_pipeline *pipeline,
4425 			     const struct radv_shader_variant *shader)
4426 {
4427 	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
4428 
4429 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
4430 		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
4431 			radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
4432 			radeon_emit(cs, va >> 8);
4433 			radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
4434 		} else {
4435 			radeon_set_sh_reg_seq(cs, R_00B410_SPI_SHADER_PGM_LO_LS, 2);
4436 			radeon_emit(cs, va >> 8);
4437 			radeon_emit(cs, S_00B414_MEM_BASE(va >> 40));
4438 		}
4439 
4440 		radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
4441 		radeon_emit(cs, shader->config.rsrc1);
4442 		radeon_emit(cs, shader->config.rsrc2);
4443 	} else {
4444 		radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
4445 		radeon_emit(cs, va >> 8);
4446 		radeon_emit(cs, S_00B424_MEM_BASE(va >> 40));
4447 		radeon_emit(cs, shader->config.rsrc1);
4448 		radeon_emit(cs, shader->config.rsrc2);
4449 	}
4450 }
4451 
4452 static void
4453 radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
4454 				     struct radeon_cmdbuf *cs,
4455 				     const struct radv_pipeline *pipeline)
4456 {
4457 	struct radv_shader_variant *vs;
4458 
4459 	/* Skip shaders merged into HS/GS */
4460 	vs = pipeline->shaders[MESA_SHADER_VERTEX];
4461 	if (!vs)
4462 		return;
4463 
4464 	if (vs->info.vs.as_ls)
4465 		radv_pipeline_generate_hw_ls(cs, pipeline, vs);
4466 	else if (vs->info.vs.as_es)
4467 		radv_pipeline_generate_hw_es(cs, pipeline, vs);
4468 	else if (vs->info.is_ngg)
4469 		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs);
4470 	else
4471 		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
4472 }
4473 
4474 static void
4475 radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
4476 				    struct radeon_cmdbuf *cs,
4477 				    const struct radv_pipeline *pipeline)
4478 {
4479 	struct radv_shader_variant *tes, *tcs;
4480 
4481 	tcs = pipeline->shaders[MESA_SHADER_TESS_CTRL];
4482 	tes = pipeline->shaders[MESA_SHADER_TESS_EVAL];
4483 
4484 	if (tes) {
4485 		if (tes->info.is_ngg) {
4486 			radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, tes);
4487 		} else if (tes->info.tes.as_es)
4488 			radv_pipeline_generate_hw_es(cs, pipeline, tes);
4489 		else
4490 			radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
4491 	}
4492 
4493 	radv_pipeline_generate_hw_hs(cs, pipeline, tcs);
4494 
4495 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 &&
4496 	    !radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
4497 		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
4498 		                       S_028A44_ES_VERTS_PER_SUBGRP(250) |
4499 		                       S_028A44_GS_PRIMS_PER_SUBGRP(126) |
4500 		                       S_028A44_GS_INST_PRIMS_IN_SUBGRP(126));
4501 	}
4502 }
4503 
4504 static void
4505 radv_pipeline_generate_tess_state(struct radeon_cmdbuf *ctx_cs,
4506 				  const struct radv_pipeline *pipeline,
4507 				  const VkGraphicsPipelineCreateInfo *pCreateInfo)
4508 {
4509 	struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
4510 	unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;
4511 	unsigned num_tcs_input_cp, num_tcs_output_cp, num_patches;
4512 	unsigned ls_hs_config;
4513 
4514 	num_tcs_input_cp = pCreateInfo->pTessellationState->patchControlPoints;
4515 	num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; /* TCS "vertices out" */
4516 	num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
4517 
4518 	ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
4519 		       S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
4520 		       S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
4521 
4522 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
4523 		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
4524 					   2, ls_hs_config);
4525 	} else {
4526 		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
4527 				       ls_hs_config);
4528 	}
4529 
4530 	switch (tes->info.tes.primitive_mode) {
4531 	case GL_TRIANGLES:
4532 		type = V_028B6C_TESS_TRIANGLE;
4533 		break;
4534 	case GL_QUADS:
4535 		type = V_028B6C_TESS_QUAD;
4536 		break;
4537 	case GL_ISOLINES:
4538 		type = V_028B6C_TESS_ISOLINE;
4539 		break;
4540 	}
4541 
4542 	switch (tes->info.tes.spacing) {
4543 	case TESS_SPACING_EQUAL:
4544 		partitioning = V_028B6C_PART_INTEGER;
4545 		break;
4546 	case TESS_SPACING_FRACTIONAL_ODD:
4547 		partitioning = V_028B6C_PART_FRAC_ODD;
4548 		break;
4549 	case TESS_SPACING_FRACTIONAL_EVEN:
4550 		partitioning = V_028B6C_PART_FRAC_EVEN;
4551 		break;
4552 	default:
4553 		break;
4554 	}
4555 
4556 	bool ccw = tes->info.tes.ccw;
4557 	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
4558 	              vk_find_struct_const(pCreateInfo->pTessellationState,
4559 	                                   PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
4560 
4561 	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
4562 		ccw = !ccw;
4563 
4564 	if (tes->info.tes.point_mode)
4565 		topology = V_028B6C_OUTPUT_POINT;
4566 	else if (tes->info.tes.primitive_mode == GL_ISOLINES)
4567 		topology = V_028B6C_OUTPUT_LINE;
4568 	else if (ccw)
4569 		topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
4570 	else
4571 		topology = V_028B6C_OUTPUT_TRIANGLE_CW;
4572 
4573 	if (pipeline->device->physical_device->rad_info.has_distributed_tess) {
4574 		if (pipeline->device->physical_device->rad_info.family == CHIP_FIJI ||
4575 		    pipeline->device->physical_device->rad_info.family >= CHIP_POLARIS10)
4576 			distribution_mode = V_028B6C_TRAPEZOIDS;
4577 		else
4578 			distribution_mode = V_028B6C_DONUTS;
4579 	} else
4580 		distribution_mode = V_028B6C_NO_DIST;
4581 
4582 	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
4583 			       S_028B6C_TYPE(type) |
4584 			       S_028B6C_PARTITIONING(partitioning) |
4585 			       S_028B6C_TOPOLOGY(topology) |
4586 			       S_028B6C_DISTRIBUTION_MODE(distribution_mode));
4587 }
4588 
4589 static void
4590 radv_pipeline_generate_hw_gs(struct radeon_cmdbuf *ctx_cs,
4591 			     struct radeon_cmdbuf *cs,
4592 			     const struct radv_pipeline *pipeline,
4593 			     const struct radv_shader_variant *gs)
4594 {
4595 	const struct gfx9_gs_info *gs_state = &gs->info.gs_ring_info;
4596 	unsigned gs_max_out_vertices;
4597 	const uint8_t *num_components;
4598 	uint8_t max_stream;
4599 	unsigned offset;
4600 	uint64_t va;
4601 
4602 	gs_max_out_vertices = gs->info.gs.vertices_out;
4603 	max_stream = gs->info.gs.max_stream;
4604 	num_components = gs->info.gs.num_stream_output_components;
4605 
4606 	offset = num_components[0] * gs_max_out_vertices;
4607 
4608 	radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
4609 	radeon_emit(ctx_cs, offset);
4610 	if (max_stream >= 1)
4611 		offset += num_components[1] * gs_max_out_vertices;
4612 	radeon_emit(ctx_cs, offset);
4613 	if (max_stream >= 2)
4614 		offset += num_components[2] * gs_max_out_vertices;
4615 	radeon_emit(ctx_cs, offset);
4616 	if (max_stream >= 3)
4617 		offset += num_components[3] * gs_max_out_vertices;
4618 	radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
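	/* Worked example (values assumed): a GS emitting 4 components on
	 * stream 0 and 2 components on stream 1 with vertices_out = 4 gets
	 * ring offsets 16, 24, 24 and a GSVS ring itemsize of 24 dwords.
	 */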
4619 
4620 	radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
4621 	radeon_emit(ctx_cs, num_components[0]);
4622 	radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
4623 	radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
4624 	radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);
4625 
4626 	uint32_t gs_num_invocations = gs->info.gs.invocations;
4627 	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
4628 			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
4629 			       S_028B90_ENABLE(gs_num_invocations > 0));
4630 
4631 	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
4632 			       gs_state->vgt_esgs_ring_itemsize);
4633 
4634 	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;
4635 
4636 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
4637 		if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
4638 			radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
4639 			radeon_emit(cs, va >> 8);
4640 			radeon_emit(cs, S_00B324_MEM_BASE(va >> 40));
4641 		} else {
4642 			radeon_set_sh_reg_seq(cs, R_00B210_SPI_SHADER_PGM_LO_ES, 2);
4643 			radeon_emit(cs, va >> 8);
4644 			radeon_emit(cs, S_00B214_MEM_BASE(va >> 40));
4645 		}
4646 
4647 		radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
4648 		radeon_emit(cs, gs->config.rsrc1);
4649 		radeon_emit(cs, gs->config.rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));
4650 
4651 		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
4652 		radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
4653 	} else {
4654 		radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
4655 		radeon_emit(cs, va >> 8);
4656 		radeon_emit(cs, S_00B224_MEM_BASE(va >> 40));
4657 		radeon_emit(cs, gs->config.rsrc1);
4658 		radeon_emit(cs, gs->config.rsrc2);
4659 	}
4660 
4661 	radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
4662 }
4663 
4664 static void
4665 radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
4666 				       struct radeon_cmdbuf *cs,
4667 				       const struct radv_pipeline *pipeline)
4668 {
4669 	struct radv_shader_variant *gs;
4670 
4671 	gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
4672 	if (!gs)
4673 		return;
4674 
4675 	if (gs->info.is_ngg)
4676 		radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, gs);
4677 	else
4678 		radv_pipeline_generate_hw_gs(ctx_cs, cs, pipeline, gs);
4679 
4680 	radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT,
4681 			      gs->info.gs.vertices_out);
4682 }
4683 
4684 static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade,
4685 				   bool explicit, bool float16)
4686 {
4687 	uint32_t ps_input_cntl;
4688 	if (offset <= AC_EXP_PARAM_OFFSET_31) {
4689 		ps_input_cntl = S_028644_OFFSET(offset);
4690 		if (flat_shade || explicit)
4691 			ps_input_cntl |= S_028644_FLAT_SHADE(1);
4692 		if (explicit) {
4693 			/* Force parameter cache to be read in passthrough
4694 			 * mode.
4695 			 */
4696 			ps_input_cntl |= S_028644_OFFSET(1 << 5);
4697 		}
4698 		if (float16) {
4699 			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
4700 			                 S_028644_ATTR0_VALID(1);
4701 		}
4702 	} else {
4703 		/* The input is a DEFAULT_VAL constant. */
4704 		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
4705 		       offset <= AC_EXP_PARAM_DEFAULT_VAL_1111);
4706 		offset -= AC_EXP_PARAM_DEFAULT_VAL_0000;
4707 		ps_input_cntl = S_028644_OFFSET(0x20) |
4708 			S_028644_DEFAULT_VAL(offset);
4709 	}
4710 	return ps_input_cntl;
4711 }
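
/* Worked examples for offset_to_ps_input() (hypothetical inputs): a regular
 * parameter at offset 3 with flat shading yields S_028644_OFFSET(3) |
 * S_028644_FLAT_SHADE(1), while AC_EXP_PARAM_DEFAULT_VAL_0001 selects the
 * constant (0, 0, 0, 1) via S_028644_OFFSET(0x20) | S_028644_DEFAULT_VAL(1).
 */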
4712 
4713 static void
4714 radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
4715 				 const struct radv_pipeline *pipeline)
4716 {
4717 	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
4718 	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
4719 	uint32_t ps_input_cntl[32];
4720 
4721 	unsigned ps_offset = 0;
4722 
4723 	if (ps->info.ps.prim_id_input) {
4724 		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
4725 		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
4726 			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
4727 			++ps_offset;
4728 		}
4729 	}
4730 
4731 	if (ps->info.ps.layer_input ||
4732 	    ps->info.needs_multiview_view_index) {
4733 		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
4734 		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
4735 			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
4736 		else
4737 			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false, false);
4738 		++ps_offset;
4739 	}
4740 
4741 	if (ps->info.ps.viewport_index_input) {
4742 		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VIEWPORT];
4743 		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
4744 			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
4745 		else
4746 			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false, false);
4747 		++ps_offset;
4748 	}
4749 
4750 	if (ps->info.ps.has_pcoord) {
4751 		unsigned val;
4752 		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
4753 		ps_input_cntl[ps_offset] = val;
4754 		ps_offset++;
4755 	}
4756 
4757 	if (ps->info.ps.num_input_clips_culls) {
4758 		unsigned vs_offset;
4759 
4760 		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
4761 		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
4762 			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false, false);
4763 			++ps_offset;
4764 		}
4765 
4766 		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
4767 		if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
4768 		    ps->info.ps.num_input_clips_culls > 4) {
4769 			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false, false);
4770 			++ps_offset;
4771 		}
4772 	}
4773 
4774 	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.ps.input_mask; ++i) {
4775 		unsigned vs_offset;
4776 		bool flat_shade;
4777 		bool explicit;
4778 		bool float16;
4779 		if (!(ps->info.ps.input_mask & (1u << i)))
4780 			continue;
4781 
4782 		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VAR0 + i];
4783 		if (vs_offset == AC_EXP_PARAM_UNDEFINED) {
4784 			ps_input_cntl[ps_offset] = S_028644_OFFSET(0x20);
4785 			++ps_offset;
4786 			continue;
4787 		}
4788 
4789 		flat_shade = !!(ps->info.ps.flat_shaded_mask & (1u << ps_offset));
4790 		explicit = !!(ps->info.ps.explicit_shaded_mask & (1u << ps_offset));
4791 		float16 = !!(ps->info.ps.float16_shaded_mask & (1u << ps_offset));
4792 
4793 		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, explicit, float16);
4794 		++ps_offset;
4795 	}
4796 
4797 	if (ps_offset) {
4798 		radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
4799 		for (unsigned i = 0; i < ps_offset; i++) {
4800 			radeon_emit(ctx_cs, ps_input_cntl[i]);
4801 		}
4802 	}
4803 }
4804 
4805 static uint32_t
4806 radv_compute_db_shader_control(const struct radv_device *device,
4807 			       const struct radv_pipeline *pipeline,
4808                                const struct radv_shader_variant *ps)
4809 {
4810 	unsigned conservative_z_export = V_02880C_EXPORT_ANY_Z;
4811 	unsigned z_order;
4812 	if (ps->info.ps.early_fragment_test || !ps->info.ps.writes_memory)
4813 		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
4814 	else
4815 		z_order = V_02880C_LATE_Z;
4816 
4817 	if (ps->info.ps.depth_layout == FRAG_DEPTH_LAYOUT_GREATER)
4818 		conservative_z_export = V_02880C_EXPORT_GREATER_THAN_Z;
4819 	else if (ps->info.ps.depth_layout == FRAG_DEPTH_LAYOUT_LESS)
4820 		conservative_z_export = V_02880C_EXPORT_LESS_THAN_Z;
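	/* For example, a fragment shader declared with layout(depth_greater)
	 * guarantees the Z it writes is never smaller than the interpolated
	 * Z, so EXPORT_GREATER_THAN_Z lets early/hierarchical Z keep
	 * rejecting fragments despite the depth export.
	 */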
4821 
4822 	bool disable_rbplus = device->physical_device->rad_info.has_rbplus &&
4823 	                      !device->physical_device->rad_info.rbplus_allowed;
4824 
4825 	/* Exporting gl_SampleMask shouldn't be needed when MSAA is disabled,
4826 	 * but skipping the export appears to break Project Cars (DXVK). See
4827 	 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
4828 	 */
4829 	bool mask_export_enable = ps->info.ps.writes_sample_mask;
4830 
4831 	return  S_02880C_Z_EXPORT_ENABLE(ps->info.ps.writes_z) |
4832 		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.ps.writes_stencil) |
4833 		S_02880C_KILL_ENABLE(!!ps->info.ps.can_discard) |
4834 		S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
4835 		S_02880C_CONSERVATIVE_Z_EXPORT(conservative_z_export) |
4836 		S_02880C_Z_ORDER(z_order) |
4837 		S_02880C_DEPTH_BEFORE_SHADER(ps->info.ps.early_fragment_test) |
4838 		S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.ps.post_depth_coverage) |
4839 		S_02880C_EXEC_ON_HIER_FAIL(ps->info.ps.writes_memory) |
4840 		S_02880C_EXEC_ON_NOOP(ps->info.ps.writes_memory) |
4841 		S_02880C_DUAL_QUAD_DISABLE(disable_rbplus);
4842 }
4843 
4844 static void
4845 radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
4846 				       struct radeon_cmdbuf *cs,
4847 				       struct radv_pipeline *pipeline)
4848 {
4849 	struct radv_shader_variant *ps;
4850 	uint64_t va;
4851 	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);
4852 
4853 	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
4854 	va = radv_buffer_get_va(ps->bo) + ps->bo_offset;
4855 
4856 	radeon_set_sh_reg_seq(cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
4857 	radeon_emit(cs, va >> 8);
4858 	radeon_emit(cs, S_00B024_MEM_BASE(va >> 40));
4859 	radeon_emit(cs, ps->config.rsrc1);
4860 	radeon_emit(cs, ps->config.rsrc2);
4861 
4862 	radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
4863 	                       radv_compute_db_shader_control(pipeline->device,
4864 							      pipeline, ps));
4865 
4866 	radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
4867 			       ps->config.spi_ps_input_ena);
4868 
4869 	radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
4870 			       ps->config.spi_ps_input_addr);
4871 
4872 	radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
4873 			       S_0286D8_NUM_INTERP(ps->info.ps.num_interp) |
4874 			       S_0286D8_PS_W32_EN(ps->info.wave_size == 32));
4875 
4876 	radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);
4877 
4878 	radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
4879 	                       ac_get_spi_shader_z_format(ps->info.ps.writes_z,
4880 	                                                  ps->info.ps.writes_stencil,
4881 	                                                  ps->info.ps.writes_sample_mask));
4882 
4883 	if (pipeline->device->dfsm_allowed) {
4884 		/* TODO: optimize this? */
4885 		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
4886 		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
4887 	}
4888 }
4889 
4890 static void
4891 radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
4892 					const struct radv_pipeline *pipeline)
4893 {
4894 	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10 ||
4895 	    pipeline->device->physical_device->rad_info.chip_class >= GFX10)
4896 		return;
4897 
4898 	unsigned vtx_reuse_depth = 30;
4899 	if (radv_pipeline_has_tess(pipeline) &&
4900 	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
4901 		vtx_reuse_depth = 14;
4902 	}
4903 	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
4904 	                       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
4905 }
4906 
4907 static void
4908 radv_pipeline_generate_vgt_shader_config(struct radeon_cmdbuf *ctx_cs,
4909 					 const struct radv_pipeline *pipeline)
4910 {
4911 	uint32_t stages = 0;
4912 	if (radv_pipeline_has_tess(pipeline)) {
4913 		stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
4914 			S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);
4915 
4916 		if (radv_pipeline_has_gs(pipeline))
4917 			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
4918 				  S_028B54_GS_EN(1);
4919 		else if (radv_pipeline_has_ngg(pipeline))
4920 			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS);
4921 		else
4922 			stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
4923 	} else if (radv_pipeline_has_gs(pipeline)) {
4924 		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
4925 			S_028B54_GS_EN(1);
4926 	} else if (radv_pipeline_has_ngg(pipeline)) {
4927 		stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL);
4928 	}
4929 
4930 	if (radv_pipeline_has_ngg(pipeline)) {
4931 		stages |= S_028B54_PRIMGEN_EN(1);
4932 		if (pipeline->streamout_shader)
4933 			stages |= S_028B54_NGG_WAVE_ID_EN(1);
4934 		if (radv_pipeline_has_ngg_passthrough(pipeline))
4935 			stages |= S_028B54_PRIMGEN_PASSTHRU_EN(1);
4936 	} else if (radv_pipeline_has_gs(pipeline)) {
4937 		stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
4938 	}
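	/* For instance, a legacy (non-NGG) pipeline with tessellation and a
	 * GS resolves to LS_EN(LS_STAGE_ON) | HS_EN(1) | DYNAMIC_HS(1) |
	 * ES_EN(ES_STAGE_DS) | GS_EN(1) | VS_EN(VS_STAGE_COPY_SHADER): the
	 * GS copy shader runs on the hardware VS stage.
	 */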
4939 
4940 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
4941 		stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
4942 
4943 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
4944 		uint8_t hs_size = 64, gs_size = 64, vs_size = 64;
4945 
4946 		if (radv_pipeline_has_tess(pipeline))
4947 			hs_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.wave_size;
4948 
4949 		if (pipeline->shaders[MESA_SHADER_GEOMETRY]) {
4950 			vs_size = gs_size = pipeline->shaders[MESA_SHADER_GEOMETRY]->info.wave_size;
4951 			if (pipeline->gs_copy_shader)
4952 				vs_size = pipeline->gs_copy_shader->info.wave_size;
4953 		} else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
4954 			vs_size = pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.wave_size;
4955 		else if (pipeline->shaders[MESA_SHADER_VERTEX])
4956 			vs_size = pipeline->shaders[MESA_SHADER_VERTEX]->info.wave_size;
4957 
4958 		if (radv_pipeline_has_ngg(pipeline))
4959 			gs_size = vs_size;
4960 
4961 		/* legacy GS only supports Wave64 */
4962 		stages |= S_028B54_HS_W32_EN(hs_size == 32 ? 1 : 0) |
4963 			  S_028B54_GS_W32_EN(gs_size == 32 ? 1 : 0) |
4964 			  S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
4965 	}
4966 
4967 	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, stages);
4968 }
4969 
4970 static void
4971 radv_pipeline_generate_cliprect_rule(struct radeon_cmdbuf *ctx_cs,
4972 				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
4973 {
4974 	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
4975 			vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
4976 	uint32_t cliprect_rule = 0;
4977 
4978 	if (!discard_rectangle_info) {
4979 		cliprect_rule = 0xffff;
4980 	} else {
4981 		for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
4982 			/* Interpret i as a bitmask of the discard rectangles
4983 			 * that contain the pixel, and set bit i of the rule
4984 			 * if that combination of rectangles should pass the
4985 			 * cliprect test.
4986 			 */
4987 			unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);
4988 
4989 			if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
4990 			    !relevant_subset)
4991 				continue;
4992 
4993 			if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
4994 			    relevant_subset)
4995 				continue;
4996 
4997 			cliprect_rule |= 1u << i;
4998 		}
4999 	}
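	/* Worked example (assuming MAX_DISCARD_RECTANGLES == 4): a single
	 * INCLUSIVE rectangle keeps only the combinations with bit 0 set,
	 * i.e. the odd values of i, so cliprect_rule = 0xaaaa.
	 */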
5000 
5001 	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, cliprect_rule);
5002 }
5003 
5004 static void
5005 gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
5006 				struct radv_pipeline *pipeline)
5007 {
5008 	bool break_wave_at_eoi = false;
5009 	unsigned primgroup_size;
5010 	unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */
5011 
5012 	if (radv_pipeline_has_tess(pipeline)) {
5013 		primgroup_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
5014 	} else if (radv_pipeline_has_gs(pipeline)) {
5015 		const struct gfx9_gs_info *gs_state =
5016 			&pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs_ring_info;
5017 		unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
5018 		primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
5019 	} else {
5020 		primgroup_size = 128; /* recommended without a GS and tess */
5021 	}
5022 
5023 	if (radv_pipeline_has_tess(pipeline)) {
5024 		if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.uses_prim_id ||
5025 		    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.uses_prim_id)
5026 			break_wave_at_eoi = true;
5027 	}
5028 
5029 	radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
5030 			       S_03096C_PRIM_GRP_SIZE(primgroup_size) |
5031 			       S_03096C_VERT_GRP_SIZE(vertgroup_size) |
5032 			       S_03096C_PACKET_TO_ONE_PA(0) /* only needed for line stipple */ |
5033 			       S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
5034 }
5035 
5036 static void
5037 radv_pipeline_generate_vgt_gs_out(struct radeon_cmdbuf *ctx_cs,
5038 				  const struct radv_pipeline *pipeline,
5039 				  const VkGraphicsPipelineCreateInfo *pCreateInfo,
5040 				  const struct radv_graphics_pipeline_create_info *extra)
5041 {
5042 	uint32_t gs_out;
5043 
5044 	if (radv_pipeline_has_gs(pipeline)) {
5045 		gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
5046 	} else if (radv_pipeline_has_tess(pipeline)) {
5047 		if (pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.point_mode) {
5048 			gs_out = V_028A6C_POINTLIST;
5049 		} else {
5050 			gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
5051 		}
5052 	} else {
5053 		gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
5054 	}
5055 
5056 	if (extra && extra->use_rectlist) {
5057 		gs_out = V_028A6C_TRISTRIP;
5058 		if (radv_pipeline_has_ngg(pipeline))
5059 			gs_out = V_028A6C_RECTLIST;
5060 	}
5061 
5062 	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
5063 }
5064 
5065 static void
5066 radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
5067                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
5068                            const struct radv_graphics_pipeline_create_info *extra,
5069                            const struct radv_blend_state *blend)
5070 {
5071 	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
5072 	struct radeon_cmdbuf *cs = &pipeline->cs;
5073 
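	/* Both streams share one allocation: cs (SH registers) gets the
	 * first 64 dwords and ctx_cs (context registers) the following 256.
	 * The sizes are upper bounds checked by the asserts at the end of
	 * this function.
	 */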
5074 	cs->max_dw = 64;
5075 	ctx_cs->max_dw = 256;
5076 	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
5077 	ctx_cs->buf = cs->buf + cs->max_dw;
5078 
5079 	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
5080 	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
5081 	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
5082 	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
5083 	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
5084 	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline);
5085 
5086 	if (radv_pipeline_has_tess(pipeline)) {
5087 		radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline);
5088 		radv_pipeline_generate_tess_state(ctx_cs, pipeline, pCreateInfo);
5089 	}
5090 
5091 	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline);
5092 	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
5093 	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
5094 	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
5095 	radv_pipeline_generate_vgt_shader_config(ctx_cs, pipeline);
5096 	radv_pipeline_generate_cliprect_rule(ctx_cs, pCreateInfo);
5097 	radv_pipeline_generate_vgt_gs_out(ctx_cs, pipeline, pCreateInfo, extra);
5098 
5099 	if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
5100 		gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline);
5101 
5102 	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);
5103 
5104 	assert(ctx_cs->cdw <= ctx_cs->max_dw);
5105 	assert(cs->cdw <= cs->max_dw);
5106 }
5107 
5108 static void
5109 radv_pipeline_init_vertex_input_state(struct radv_pipeline *pipeline,
5110 				      const VkGraphicsPipelineCreateInfo *pCreateInfo)
5111 {
5112 	const VkPipelineVertexInputStateCreateInfo *vi_info =
5113 		pCreateInfo->pVertexInputState;
5114 
5115 	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
5116 		const VkVertexInputBindingDescription *desc =
5117 			&vi_info->pVertexBindingDescriptions[i];
5118 
5119 		pipeline->binding_stride[desc->binding] = desc->stride;
5120 		pipeline->num_vertex_bindings =
5121 			MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
5122 	}
5123 }
5124 
5125 static struct radv_shader_variant *
5126 radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
5127 {
5128 	int i;
5129 
5130 	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
5131 		struct radv_shader_variant *shader =
5132 			radv_get_shader(pipeline, i);
5133 
5134 		if (shader && shader->info.so.num_outputs > 0)
5135 			return shader;
5136 	}
5137 
5138 	return NULL;
5139 }
5140 
5141 static void
5142 radv_pipeline_init_shader_stages_state(struct radv_pipeline *pipeline)
5143 {
5144 	struct radv_device *device = pipeline->device;
5145 
5146 	for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
5147 		pipeline->user_data_0[i] =
5148 			radv_pipeline_stage_to_user_data_0(pipeline, i,
5149 							   device->physical_device->rad_info.chip_class);
5150 
5151 		if (pipeline->shaders[i]) {
5152 			pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
5153 		}
5154 	}
5155 
5156 	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_VERTEX,
5157 							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
5158 	if (loc->sgpr_idx != -1) {
5159 		pipeline->graphics.vtx_base_sgpr = pipeline->user_data_0[MESA_SHADER_VERTEX];
5160 		pipeline->graphics.vtx_base_sgpr += loc->sgpr_idx * 4;
5161 		if (radv_get_shader(pipeline, MESA_SHADER_VERTEX)->info.vs.needs_draw_id)
5162 			pipeline->graphics.vtx_emit_num = 3;
5163 		else
5164 			pipeline->graphics.vtx_emit_num = 2;
5165 	}
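	/* vtx_emit_num is the number of user SGPRs written at draw time:
	 * base vertex and start instance, plus the draw id when the vertex
	 * shader needs it.
	 */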
5166 }
5167 
5168 static VkResult
5169 radv_pipeline_init(struct radv_pipeline *pipeline,
5170 		   struct radv_device *device,
5171 		   struct radv_pipeline_cache *cache,
5172 		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
5173 		   const struct radv_graphics_pipeline_create_info *extra)
5174 {
5175 	VkResult result;
5176 
5177 	pipeline->device = device;
5178 	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
5179 	assert(pipeline->layout);
5180 
5181 	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);
5182 
5183 	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
5184 		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
5185 	radv_init_feedback(creation_feedback);
5186 
5187 	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
5188 
5189 	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
5190 	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
5191 	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
5192 		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
5193 		pStages[stage] = &pCreateInfo->pStages[i];
5194 		if (creation_feedback)
5195 			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
5196 	}
5197 
5198 	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend);
5199 
5200 	result = radv_create_shaders(pipeline, device, cache, &key, pStages,
5201 		                     pCreateInfo->flags, pipeline_feedback,
5202 				     stage_feedbacks);
5203 	if (result != VK_SUCCESS)
5204 		return result;
5205 
5206 	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
5207 	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
5208 	radv_pipeline_init_input_assembly_state(pipeline, pCreateInfo, extra);
5209 	radv_pipeline_init_dynamic_state(pipeline, pCreateInfo, extra);
5210 	radv_pipeline_init_raster_state(pipeline, pCreateInfo);
5211 	radv_pipeline_init_depth_stencil_state(pipeline, pCreateInfo);
5212 
5213 	/* Ensure that some export memory is always allocated, for two reasons:
5214 	 *
5215 	 * 1) Correctness: The hardware ignores the EXEC mask if no export
5216 	 *    memory is allocated, so KILL and alpha test do not work correctly
5217 	 *    without this.
5218 	 * 2) Performance: Every shader needs at least a NULL export, even when
5219 	 *    it writes no color/depth output. The NULL export instruction
5220 	 *    stalls without this setting.
5221 	 *
5222 	 * Don't add this to CB_SHADER_MASK.
5223 	 *
5224 	 * GFX10 supports pixel shaders without exports by setting both the
5225 	 * color and Z formats to SPI_SHADER_ZERO. The hw will skip export
5226 	 * instructions if any are present.
5227 	 */
5228 	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
5229 	if ((pipeline->device->physical_device->rad_info.chip_class <= GFX9 ||
5230 	     ps->info.ps.can_discard) &&
5231 	    !blend.spi_shader_col_format) {
5232 		if (!ps->info.ps.writes_z &&
5233 		    !ps->info.ps.writes_stencil &&
5234 		    !ps->info.ps.writes_sample_mask)
5235 			blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
5236 	}
5237 
5238 	blend.cb_shader_mask = ps->info.ps.cb_shader_mask;
5239 
5240 	if (extra &&
5241 	    (extra->custom_blend_mode == V_028808_CB_ELIMINATE_FAST_CLEAR ||
5242 	     extra->custom_blend_mode == V_028808_CB_FMASK_DECOMPRESS ||
5243 	     extra->custom_blend_mode == V_028808_CB_DCC_DECOMPRESS ||
5244 	     extra->custom_blend_mode == V_028808_CB_RESOLVE)) {
5245 		/* The CB spec states that CB_SHADER_MASK should be set to
5246 		 * enable writes to all four channels of MRT0.
5247 		 */
5248 		blend.cb_shader_mask = 0xf;
5249 	}
5250 
5251 	pipeline->graphics.col_format = blend.spi_shader_col_format;
5252 	pipeline->graphics.cb_target_mask = blend.cb_target_mask;
5253 
5254 	if (radv_pipeline_has_gs(pipeline) && !radv_pipeline_has_ngg(pipeline)) {
5255 		struct radv_shader_variant *gs =
5256 			pipeline->shaders[MESA_SHADER_GEOMETRY];
5257 
5258 		radv_pipeline_init_gs_ring_state(pipeline, &gs->info.gs_ring_info);
5259 	}
5260 
5261 	if (radv_pipeline_has_tess(pipeline)) {
5262 		pipeline->graphics.tess_patch_control_points =
5263 			pCreateInfo->pTessellationState->patchControlPoints;
5264 	}
5265 
5266 	radv_pipeline_init_vertex_input_state(pipeline, pCreateInfo);
5267 	radv_pipeline_init_binning_state(pipeline, pCreateInfo, &blend);
5268 	radv_pipeline_init_shader_stages_state(pipeline);
5269 	radv_pipeline_init_scratch(device, pipeline);
5270 
5271 	/* Find the last vertex-processing stage (VS, TES or GS) that writes streamout outputs. */
5272 	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);
5273 
5274 	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend);
5275 
5276 	return result;
5277 }
5278 
5279 VkResult
5280 radv_graphics_pipeline_create(
5281 	VkDevice _device,
5282 	VkPipelineCache _cache,
5283 	const VkGraphicsPipelineCreateInfo *pCreateInfo,
5284 	const struct radv_graphics_pipeline_create_info *extra,
5285 	const VkAllocationCallbacks *pAllocator,
5286 	VkPipeline *pPipeline)
5287 {
5288 	RADV_FROM_HANDLE(radv_device, device, _device);
5289 	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
5290 	struct radv_pipeline *pipeline;
5291 	VkResult result;
5292 
5293 	pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
5294 			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
5295 	if (pipeline == NULL)
5296 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
5297 
5298 	vk_object_base_init(&device->vk, &pipeline->base,
5299 			    VK_OBJECT_TYPE_PIPELINE);
5300 
5301 	result = radv_pipeline_init(pipeline, device, cache,
5302 				    pCreateInfo, extra);
5303 	if (result != VK_SUCCESS) {
5304 		radv_pipeline_destroy(device, pipeline, pAllocator);
5305 		return result;
5306 	}
5307 
5308 	*pPipeline = radv_pipeline_to_handle(pipeline);
5309 
5310 	return VK_SUCCESS;
5311 }
5312 
radv_CreateGraphicsPipelines(VkDevice _device,VkPipelineCache pipelineCache,uint32_t count,const VkGraphicsPipelineCreateInfo * pCreateInfos,const VkAllocationCallbacks * pAllocator,VkPipeline * pPipelines)5313 VkResult radv_CreateGraphicsPipelines(
5314 	VkDevice                                    _device,
5315 	VkPipelineCache                             pipelineCache,
5316 	uint32_t                                    count,
5317 	const VkGraphicsPipelineCreateInfo*         pCreateInfos,
5318 	const VkAllocationCallbacks*                pAllocator,
5319 	VkPipeline*                                 pPipelines)
5320 {
5321 	VkResult result = VK_SUCCESS;
5322 	unsigned i = 0;
5323 
5324 	for (; i < count; i++) {
5325 		VkResult r;
5326 		r = radv_graphics_pipeline_create(_device,
5327 						  pipelineCache,
5328 						  &pCreateInfos[i],
5329 						  NULL, pAllocator, &pPipelines[i]);
5330 		if (r != VK_SUCCESS) {
5331 			result = r;
5332 			pPipelines[i] = VK_NULL_HANDLE;
5333 
5334 			if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)
5335 				break;
5336 		}
5337 	}
5338 
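	/* Zero out any entries we never attempted, so the caller never sees
	 * uninitialized handles if we stopped early on failure. */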
	for (; i < count; ++i)
		pPipelines[i] = VK_NULL_HANDLE;

	return result;
}

static void
radv_pipeline_generate_hw_cs(struct radeon_cmdbuf *cs,
			     const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	struct radv_device *device = pipeline->device;

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, va >> 8);
	radeon_emit(cs, S_00B834_DATA(va >> 40));

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, shader->config.rsrc1);
	radeon_emit(cs, shader->config.rsrc2);
	if (device->physical_device->rad_info.chip_class >= GFX10) {
		radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, shader->config.rsrc3);
	}
}

static void
radv_pipeline_generate_compute_state(struct radeon_cmdbuf *cs,
				     const struct radv_pipeline *pipeline)
{
	struct radv_shader_variant *shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	struct radv_device *device = pipeline->device;
	unsigned threads_per_threadgroup;
	unsigned threadgroups_per_cu = 1;
	unsigned waves_per_threadgroup;
	unsigned max_waves_per_sh = 0;

	/* Calculate best compute resource limits. */
	threads_per_threadgroup = shader->info.cs.block_size[0] *
				  shader->info.cs.block_size[1] *
				  shader->info.cs.block_size[2];
	waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup,
					     shader->info.wave_size);

	if (device->physical_device->rad_info.chip_class >= GFX10 &&
	    waves_per_threadgroup == 1)
		threadgroups_per_cu = 2;

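	/* Worked example: an 8x8x1 block is 64 threads, i.e. one wave in
	 * wave64 mode but two waves in wave32 mode; DIV_ROUND_UP covers
	 * block sizes that are not multiples of the wave size. */
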
	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  ac_get_compute_resource_limits(&device->physical_device->rad_info,
							 waves_per_threadgroup,
							 max_waves_per_sh,
							 threadgroups_per_cu));

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[0]));
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[1]));
	radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[2]));
}

static void
radv_compute_generate_pm4(struct radv_pipeline *pipeline)
{
	struct radv_device *device = pipeline->device;
	struct radeon_cmdbuf *cs = &pipeline->cs;

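	/* Rough dword budget for the stream emitted below, assuming each
	 * SET_SH_REG packet costs two header dwords plus its values:
	 * 4 (PGM_LO/HI) + 4 (RSRC1/RSRC2) + 3 (RESOURCE_LIMITS) +
	 * 5 (NUM_THREAD_X/Y/Z) = 16 dwords, plus 3 for RSRC3 on GFX10. */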
	cs->max_dw = device->physical_device->rad_info.chip_class >= GFX10 ? 19 : 16;
	cs->buf = malloc(cs->max_dw * 4);

	radv_pipeline_generate_hw_cs(cs, pipeline);
	radv_pipeline_generate_compute_state(cs, pipeline);

	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}

static struct radv_pipeline_key
radv_generate_compute_pipeline_key(struct radv_pipeline *pipeline,
				   const VkComputePipelineCreateInfo *pCreateInfo)
{
	const VkPipelineShaderStageCreateInfo *stage = &pCreateInfo->stage;
	struct radv_pipeline_key key;
	memset(&key, 0, sizeof(key));

	if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
		key.optimisations_disabled = 1;

	const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT *subgroup_size =
		vk_find_struct_const(stage->pNext,
				     PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);

	if (subgroup_size) {
		assert(subgroup_size->requiredSubgroupSize == 32 ||
		       subgroup_size->requiredSubgroupSize == 64);
		key.compute_subgroup_size = subgroup_size->requiredSubgroupSize;
	}

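	/* Illustrative application-side usage (a sketch, not driver code):
	 * a fixed subgroup size is requested by chaining
	 *
	 *    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT req = {
	 *        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
	 *        .requiredSubgroupSize = 64,   (32 or 64 on RADV)
	 *    };
	 *
	 * into VkPipelineShaderStageCreateInfo::pNext. */
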
	return key;
}

static VkResult radv_compute_pipeline_create(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkComputePipelineCreateInfo*          pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipeline)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
	struct radv_pipeline *pipeline;
	VkResult result;

	pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pipeline == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	vk_object_base_init(&device->vk, &pipeline->base,
			    VK_OBJECT_TYPE_PIPELINE);

	pipeline->device = device;
	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
	assert(pipeline->layout);

	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
	radv_init_feedback(creation_feedback);

	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
	if (creation_feedback)
		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];

	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;

	struct radv_pipeline_key key =
		radv_generate_compute_pipeline_key(pipeline, pCreateInfo);

	result = radv_create_shaders(pipeline, device, cache, &key, pStages,
				     pCreateInfo->flags, pipeline_feedback,
				     stage_feedbacks);
	if (result != VK_SUCCESS) {
		radv_pipeline_destroy(device, pipeline, pAllocator);
		return result;
	}

	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
	radv_pipeline_init_scratch(device, pipeline);

	radv_compute_generate_pm4(pipeline);

	*pPipeline = radv_pipeline_to_handle(pipeline);

	return VK_SUCCESS;
}

VkResult radv_CreateComputePipelines(
	VkDevice                                    _device,
	VkPipelineCache                             pipelineCache,
	uint32_t                                    count,
	const VkComputePipelineCreateInfo*          pCreateInfos,
	const VkAllocationCallbacks*                pAllocator,
	VkPipeline*                                 pPipelines)
{
	VkResult result = VK_SUCCESS;

	unsigned i = 0;
	for (; i < count; i++) {
		VkResult r;
		r = radv_compute_pipeline_create(_device, pipelineCache,
						 &pCreateInfos[i],
						 pAllocator, &pPipelines[i]);
		if (r != VK_SUCCESS) {
			result = r;
			pPipelines[i] = VK_NULL_HANDLE;

			if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)
				break;
		}
	}

	for (; i < count; ++i)
		pPipelines[i] = VK_NULL_HANDLE;

	return result;
}


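/* Each active shader stage is exposed as one pipeline executable; a legacy
 * (non-NGG) geometry shader additionally exposes its GS copy shader. */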
static uint32_t radv_get_executable_count(const struct radv_pipeline *pipeline)
{
	uint32_t ret = 0;
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;

		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			ret += 2u;
		} else {
			ret += 1u;
		}
	}
	return ret;
}

static struct radv_shader_variant *
radv_get_shader_from_executable_index(const struct radv_pipeline *pipeline, int index, gl_shader_stage *stage)
{
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!pipeline->shaders[i])
			continue;
		if (!index) {
			*stage = i;
			return pipeline->shaders[i];
		}

		--index;

		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			if (!index) {
				*stage = i;
				return pipeline->gs_copy_shader;
			}
			--index;
		}
	}

	*stage = -1;
	return NULL;
}
/* Basically strlcpy (which glibc does not provide), specialized for
 * VK_MAX_DESCRIPTION_SIZE buffers: copy the string, then zero-fill the
 * rest of the destination. */
static void desc_copy(char *desc, const char *src) {
	size_t len = strlen(src);
	assert(len < VK_MAX_DESCRIPTION_SIZE);
	memcpy(desc, src, len);
	memset(desc + len, 0, VK_MAX_DESCRIPTION_SIZE - len);
}

VkResult radv_GetPipelineExecutablePropertiesKHR(
    VkDevice                                    _device,
    const VkPipelineInfoKHR*                    pPipelineInfo,
    uint32_t*                                   pExecutableCount,
    VkPipelineExecutablePropertiesKHR*          pProperties)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pPipelineInfo->pipeline);
	const uint32_t total_count = radv_get_executable_count(pipeline);

	if (!pProperties) {
		*pExecutableCount = total_count;
		return VK_SUCCESS;
	}

	const uint32_t count = MIN2(total_count, *pExecutableCount);
	for (unsigned i = 0, executable_idx = 0;
	     i < MESA_SHADER_STAGES && executable_idx < count; ++i) {
		if (!pipeline->shaders[i])
			continue;
		pProperties[executable_idx].stages = mesa_to_vk_shader_stage(i);
		const char *name = NULL;
		const char *description = NULL;
		switch(i) {
		case MESA_SHADER_VERTEX:
			name = "Vertex Shader";
			description = "Vulkan Vertex Shader";
			break;
		case MESA_SHADER_TESS_CTRL:
			if (!pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Tessellation Control Shaders";
				description = "Combined Vulkan Vertex and Tessellation Control Shaders";
			} else {
				name = "Tessellation Control Shader";
				description = "Vulkan Tessellation Control Shader";
			}
			break;
		case MESA_SHADER_TESS_EVAL:
			name = "Tessellation Evaluation Shader";
			description = "Vulkan Tessellation Evaluation Shader";
			break;
		case MESA_SHADER_GEOMETRY:
			if (radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_TESS_EVAL]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
				name = "Tessellation Evaluation + Geometry Shaders";
				description = "Combined Vulkan Tessellation Evaluation and Geometry Shaders";
			} else if (!radv_pipeline_has_tess(pipeline) && !pipeline->shaders[MESA_SHADER_VERTEX]) {
				pProperties[executable_idx].stages |= VK_SHADER_STAGE_VERTEX_BIT;
				name = "Vertex + Geometry Shader";
				description = "Combined Vulkan Vertex and Geometry Shaders";
			} else {
				name = "Geometry Shader";
				description = "Vulkan Geometry Shader";
			}
			break;
		case MESA_SHADER_FRAGMENT:
			name = "Fragment Shader";
			description = "Vulkan Fragment Shader";
			break;
		case MESA_SHADER_COMPUTE:
			name = "Compute Shader";
			description = "Vulkan Compute Shader";
			break;
		}

		pProperties[executable_idx].subgroupSize = pipeline->shaders[i]->info.wave_size;
		desc_copy(pProperties[executable_idx].name, name);
		desc_copy(pProperties[executable_idx].description, description);

		++executable_idx;
		if (i == MESA_SHADER_GEOMETRY &&
		    !radv_pipeline_has_ngg(pipeline)) {
			assert(pipeline->gs_copy_shader);
			if (executable_idx >= count)
				break;

			pProperties[executable_idx].stages = VK_SHADER_STAGE_GEOMETRY_BIT;
			pProperties[executable_idx].subgroupSize = 64;
			desc_copy(pProperties[executable_idx].name, "GS Copy Shader");
			desc_copy(pProperties[executable_idx].description,
				  "Extra shader stage that loads the GS output ringbuffer into the rasterizer");

			++executable_idx;
		}
	}

	VkResult result = *pExecutableCount < total_count ? VK_INCOMPLETE : VK_SUCCESS;
	*pExecutableCount = count;
	return result;
}

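/* These entry points follow the standard Vulkan two-call idiom.
 * Illustrative application-side usage (a sketch; real code would also
 * initialize sType on each element and check the results):
 *
 *    uint32_t count = 0;
 *    vkGetPipelineExecutablePropertiesKHR(device, &info, &count, NULL);
 *    VkPipelineExecutablePropertiesKHR *props = calloc(count, sizeof(*props));
 *    vkGetPipelineExecutablePropertiesKHR(device, &info, &count, props);
 *
 * When the caller's count is smaller than the total, only that many entries
 * are written and VK_INCOMPLETE is returned. */
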
VkResult radv_GetPipelineExecutableStatisticsKHR(
    VkDevice                                    _device,
    const VkPipelineExecutableInfoKHR*          pExecutableInfo,
    uint32_t*                                   pStatisticCount,
    VkPipelineExecutableStatisticKHR*           pStatistics)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
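	/* LDS is allocated in granules: 512 bytes on GFX7+ and 256 bytes on
	 * older chips; config.lds_size counts granules, so the byte size
	 * reported below is lds_size * lds_increment. */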
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	unsigned max_waves = radv_get_max_waves(device, shader, stage);

	VkPipelineExecutableStatisticKHR *s = pStatistics;
	VkPipelineExecutableStatisticKHR *end = s + (pStatistics ? *pStatisticCount : 0);
	VkResult result = VK_SUCCESS;

	if (s < end) {
		desc_copy(s->name, "SGPRs");
		desc_copy(s->description, "Number of SGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "VGPRs");
		desc_copy(s->description, "Number of VGPR registers allocated per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.num_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled SGPRs");
		desc_copy(s->description, "Number of SGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_sgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Spilled VGPRs");
		desc_copy(s->description, "Number of VGPR registers spilled per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.spilled_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "PrivMem VGPRs");
		desc_copy(s->description, "Number of VGPRs stored in private memory per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->info.private_mem_vgprs;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Code size");
		desc_copy(s->description, "Code size in bytes");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->exec_size;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "LDS size");
		desc_copy(s->description, "LDS size in bytes per workgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.lds_size * lds_increment;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Scratch size");
		desc_copy(s->description, "Private memory in bytes per subgroup");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = shader->config.scratch_bytes_per_wave;
	}
	++s;

	if (s < end) {
		desc_copy(s->name, "Subgroups per SIMD");
		desc_copy(s->description, "The maximum number of subgroups in flight on a SIMD unit");
		s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
		s->value.u64 = max_waves;
	}
	++s;

	if (shader->statistics) {
		for (unsigned i = 0; i < shader->statistics->count; i++) {
			struct aco_compiler_statistic_info *info = &shader->statistics->infos[i];
			uint32_t value = shader->statistics->values[i];
			if (s < end) {
				desc_copy(s->name, info->name);
				desc_copy(s->description, info->desc);
				s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
				s->value.u64 = value;
			}
			++s;
		}
	}

	if (!pStatistics)
		*pStatisticCount = s - pStatistics;
	else if (s > end) {
		*pStatisticCount = end - pStatistics;
		result = VK_INCOMPLETE;
	} else {
		*pStatisticCount = s - pStatistics;
	}

	return result;
}

static VkResult radv_copy_representation(void *data, size_t *data_size, const char *src)
{
	size_t total_size = strlen(src) + 1;

	if (!data) {
		*data_size = total_size;
		return VK_SUCCESS;
	}

	size_t size = MIN2(total_size, *data_size);

	memcpy(data, src, size);
	if (size)
		*((char*)data + size - 1) = 0;
	return size < total_size ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult radv_GetPipelineExecutableInternalRepresentationsKHR(
    VkDevice                                    device,
    const VkPipelineExecutableInfoKHR*          pExecutableInfo,
    uint32_t*                                   pInternalRepresentationCount,
    VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
	RADV_FROM_HANDLE(radv_pipeline, pipeline, pExecutableInfo->pipeline);
	gl_shader_stage stage;
	struct radv_shader_variant *shader = radv_get_shader_from_executable_index(pipeline, pExecutableInfo->executableIndex, &stage);

	VkPipelineExecutableInternalRepresentationKHR *p = pInternalRepresentations;
	VkPipelineExecutableInternalRepresentationKHR *end = p + (pInternalRepresentations ? *pInternalRepresentationCount : 0);
	VkResult result = VK_SUCCESS;

	/* optimized NIR */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "NIR Shader(s)");
		desc_copy(p->description, "The optimized NIR shader(s)");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->nir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* backend IR */
	if (p < end) {
		p->isText = true;
		if (radv_use_llvm_for_stage(pipeline->device, stage)) {
			desc_copy(p->name, "LLVM IR");
			desc_copy(p->description, "The LLVM IR after some optimizations");
		} else {
			desc_copy(p->name, "ACO IR");
			desc_copy(p->description, "The ACO IR after some optimizations");
		}
		if (radv_copy_representation(p->pData, &p->dataSize, shader->ir_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	/* disassembly */
	if (p < end) {
		p->isText = true;
		desc_copy(p->name, "Assembly");
		desc_copy(p->description, "Final Assembly");
		if (radv_copy_representation(p->pData, &p->dataSize, shader->disasm_string) != VK_SUCCESS)
			result = VK_INCOMPLETE;
	}
	++p;

	if (!pInternalRepresentations)
		*pInternalRepresentationCount = p - pInternalRepresentations;
	else if (p > end) {
		result = VK_INCOMPLETE;
		*pInternalRepresentationCount = end - pInternalRepresentations;
	} else {
		*pInternalRepresentationCount = p - pInternalRepresentations;
	}

	return result;
}