1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file crocus_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See crocus_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/u_debug.h"
41 #include "util/u_prim.h"
42 #include "compiler/nir/nir.h"
43 #include "compiler/nir/nir_builder.h"
44 #include "compiler/nir/nir_serialize.h"
45 #include "intel/compiler/elk/elk_compiler.h"
46 #include "intel/compiler/elk/elk_nir.h"
47 #include "intel/compiler/elk/elk_prim.h"
48 #include "intel/compiler/elk/elk_reg.h"
49 #include "intel/compiler/intel_nir.h"
50 #include "crocus_context.h"
51 #include "nir/tgsi_to_nir.h"
52 #include "program/prog_instruction.h"
53
54 #define KEY_INIT_NO_ID() \
55 .base.tex.swizzles[0 ... ELK_MAX_SAMPLERS - 1] = 0x688
56 #define KEY_INIT() \
57 .base.program_string_id = ish->program_id, \
58 .base.limit_trig_input_range = screen->driconf.limit_trig_input_range, \
59 KEY_INIT_NO_ID()
60
61 static void
crocus_sanitize_tex_key(struct elk_sampler_prog_key_data * key)62 crocus_sanitize_tex_key(struct elk_sampler_prog_key_data *key)
63 {
64 key->gather_channel_quirk_mask = 0;
65 for (unsigned s = 0; s < ELK_MAX_SAMPLERS; s++) {
66 key->swizzles[s] = SWIZZLE_NOOP;
67 key->gfx6_gather_wa[s] = 0;
68 }
69 }
70
71 static uint32_t
crocus_get_texture_swizzle(const struct crocus_context * ice,const struct crocus_sampler_view * t)72 crocus_get_texture_swizzle(const struct crocus_context *ice,
73 const struct crocus_sampler_view *t)
74 {
75 uint32_t swiz = 0;
76
77 for (int i = 0; i < 4; i++) {
78 swiz |= t->swizzle[i] << (i * 3);
79 }
80 return swiz;
81 }
82
can_push_ubo(const struct intel_device_info * devinfo)83 static inline bool can_push_ubo(const struct intel_device_info *devinfo)
84 {
85 /* push works for everyone except SNB at the moment */
86 return devinfo->ver != 6;
87 }
88
89 static uint8_t
gfx6_gather_workaround(enum pipe_format pformat)90 gfx6_gather_workaround(enum pipe_format pformat)
91 {
92 switch (pformat) {
93 case PIPE_FORMAT_R8_SINT: return ELK_WA_SIGN | ELK_WA_8BIT;
94 case PIPE_FORMAT_R8_UINT: return ELK_WA_8BIT;
95 case PIPE_FORMAT_R16_SINT: return ELK_WA_SIGN | ELK_WA_16BIT;
96 case PIPE_FORMAT_R16_UINT: return ELK_WA_16BIT;
97 default:
98 /* Note that even though PIPE_FORMAT_R32_SINT and
99 * PIPE_FORMAT_R32_UINThave format overrides in
100 * the surface state, there is no shader w/a required.
101 */
102 return 0;
103 }
104 }
105
106 static const unsigned crocus_gfx6_swizzle_for_offset[4] = {
107 ELK_SWIZZLE4(0, 1, 2, 3),
108 ELK_SWIZZLE4(1, 2, 3, 3),
109 ELK_SWIZZLE4(2, 3, 3, 3),
110 ELK_SWIZZLE4(3, 3, 3, 3)
111 };
112
113 static void
gfx6_gs_xfb_setup(const struct pipe_stream_output_info * so_info,struct elk_gs_prog_data * gs_prog_data)114 gfx6_gs_xfb_setup(const struct pipe_stream_output_info *so_info,
115 struct elk_gs_prog_data *gs_prog_data)
116 {
117 /* Make sure that the VUE slots won't overflow the unsigned chars in
118 * prog_data->transform_feedback_bindings[].
119 */
120 STATIC_ASSERT(ELK_VARYING_SLOT_COUNT <= 256);
121
122 /* Make sure that we don't need more binding table entries than we've
123 * set aside for use in transform feedback. (We shouldn't, since we
124 * set aside enough binding table entries to have one per component).
125 */
126 assert(so_info->num_outputs <= ELK_MAX_SOL_BINDINGS);
127
128 gs_prog_data->num_transform_feedback_bindings = so_info->num_outputs;
129 for (unsigned i = 0; i < so_info->num_outputs; i++) {
130 gs_prog_data->transform_feedback_bindings[i] =
131 so_info->output[i].register_index;
132 gs_prog_data->transform_feedback_swizzles[i] =
133 crocus_gfx6_swizzle_for_offset[so_info->output[i].start_component];
134 }
135 }
136
137 static void
gfx6_ff_gs_xfb_setup(const struct pipe_stream_output_info * so_info,struct elk_ff_gs_prog_key * key)138 gfx6_ff_gs_xfb_setup(const struct pipe_stream_output_info *so_info,
139 struct elk_ff_gs_prog_key *key)
140 {
141 key->num_transform_feedback_bindings = so_info->num_outputs;
142 for (unsigned i = 0; i < so_info->num_outputs; i++) {
143 key->transform_feedback_bindings[i] =
144 so_info->output[i].register_index;
145 key->transform_feedback_swizzles[i] =
146 crocus_gfx6_swizzle_for_offset[so_info->output[i].start_component];
147 }
148 }
149
150 static void
crocus_populate_sampler_prog_key_data(struct crocus_context * ice,const struct intel_device_info * devinfo,gl_shader_stage stage,struct crocus_uncompiled_shader * ish,bool uses_texture_gather,struct elk_sampler_prog_key_data * key)151 crocus_populate_sampler_prog_key_data(struct crocus_context *ice,
152 const struct intel_device_info *devinfo,
153 gl_shader_stage stage,
154 struct crocus_uncompiled_shader *ish,
155 bool uses_texture_gather,
156 struct elk_sampler_prog_key_data *key)
157 {
158 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
159 uint32_t mask = ish->nir->info.textures_used[0];
160
161 while (mask) {
162 const int s = u_bit_scan(&mask);
163
164 struct crocus_sampler_view *texture = ice->state.shaders[stage].textures[s];
165 key->swizzles[s] = SWIZZLE_NOOP;
166
167 if (!texture)
168 continue;
169 if (texture->base.target == PIPE_BUFFER)
170 continue;
171 if (devinfo->verx10 < 75) {
172 key->swizzles[s] = crocus_get_texture_swizzle(ice, texture);
173 }
174
175 screen->vtbl.fill_clamp_mask(ice->state.shaders[stage].samplers[s], s, key->gl_clamp_mask);
176
177 /* gather4 for RG32* is broken in multiple ways on Gen7. */
178 if (devinfo->ver == 7 && uses_texture_gather) {
179 switch (texture->base.format) {
180 case PIPE_FORMAT_R32G32_UINT:
181 case PIPE_FORMAT_R32G32_SINT: {
182 /* We have to override the format to R32G32_FLOAT_LD.
183 * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
184 * (1.0) rather than integer 1. This needs shader hacks.
185 *
186 * On Ivybridge, we whack W (alpha) to ONE in our key's
187 * swizzle. On Haswell, we look at the original texture
188 * swizzle, and use XYZW with channels overridden to ONE,
189 * leaving normal texture swizzling to SCS.
190 */
191 unsigned src_swizzle = key->swizzles[s];
192 for (int i = 0; i < 4; i++) {
193 unsigned src_comp = GET_SWZ(src_swizzle, i);
194 if (src_comp == SWIZZLE_ONE || src_comp == SWIZZLE_W) {
195 key->swizzles[i] &= ~(0x7 << (3 * i));
196 key->swizzles[i] |= SWIZZLE_ONE << (3 * i);
197 }
198 }
199 }
200 FALLTHROUGH;
201 case PIPE_FORMAT_R32G32_FLOAT:
202 /* The channel select for green doesn't work - we have to
203 * request blue. Haswell can use SCS for this, but Ivybridge
204 * needs a shader workaround.
205 */
206 if (devinfo->verx10 < 75)
207 key->gather_channel_quirk_mask |= 1 << s;
208 break;
209 default:
210 break;
211 }
212 }
213 if (devinfo->ver == 6 && uses_texture_gather) {
214 key->gfx6_gather_wa[s] = gfx6_gather_workaround(texture->base.format);
215 }
216 }
217 }
218
219 static void
crocus_lower_swizzles(struct nir_shader * nir,const struct elk_sampler_prog_key_data * key_tex)220 crocus_lower_swizzles(struct nir_shader *nir,
221 const struct elk_sampler_prog_key_data *key_tex)
222 {
223 struct nir_lower_tex_options tex_options = {
224 .lower_invalid_implicit_lod = true,
225 };
226 uint32_t mask = nir->info.textures_used[0];
227
228 while (mask) {
229 const int s = u_bit_scan(&mask);
230
231 if (key_tex->swizzles[s] == SWIZZLE_NOOP)
232 continue;
233
234 tex_options.swizzle_result |= (1 << s);
235 for (unsigned c = 0; c < 4; c++)
236 tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
237 }
238 if (tex_options.swizzle_result)
239 nir_lower_tex(nir, &tex_options);
240 }
241
242 static unsigned
get_new_program_id(struct crocus_screen * screen)243 get_new_program_id(struct crocus_screen *screen)
244 {
245 return p_atomic_inc_return(&screen->program_id);
246 }
247
248 static nir_def *
get_aoa_deref_offset(nir_builder * b,nir_deref_instr * deref,unsigned elem_size)249 get_aoa_deref_offset(nir_builder *b,
250 nir_deref_instr *deref,
251 unsigned elem_size)
252 {
253 unsigned array_size = elem_size;
254 nir_def *offset = nir_imm_int(b, 0);
255
256 while (deref->deref_type != nir_deref_type_var) {
257 assert(deref->deref_type == nir_deref_type_array);
258
259 /* This level's element size is the previous level's array size */
260 nir_def *index = deref->arr.index.ssa;
261 assert(deref->arr.index.ssa);
262 offset = nir_iadd(b, offset,
263 nir_imul_imm(b, index, array_size));
264
265 deref = nir_deref_instr_parent(deref);
266 assert(glsl_type_is_array(deref->type));
267 array_size *= glsl_get_length(deref->type);
268 }
269
270 /* Accessing an invalid surface index with the dataport can result in a
271 * hang. According to the spec "if the index used to select an individual
272 * element is negative or greater than or equal to the size of the array,
273 * the results of the operation are undefined but may not lead to
274 * termination" -- which is one of the possible outcomes of the hang.
275 * Clamp the index to prevent access outside of the array bounds.
276 */
277 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
278 }
279
280 static void
crocus_lower_storage_image_derefs(nir_shader * nir)281 crocus_lower_storage_image_derefs(nir_shader *nir)
282 {
283 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
284
285 nir_builder b = nir_builder_create(impl);
286
287 nir_foreach_block(block, impl) {
288 nir_foreach_instr_safe(instr, block) {
289 if (instr->type != nir_instr_type_intrinsic)
290 continue;
291
292 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
293 switch (intrin->intrinsic) {
294 case nir_intrinsic_image_deref_load:
295 case nir_intrinsic_image_deref_store:
296 case nir_intrinsic_image_deref_atomic:
297 case nir_intrinsic_image_deref_atomic_swap:
298 case nir_intrinsic_image_deref_size:
299 case nir_intrinsic_image_deref_samples:
300 case nir_intrinsic_image_deref_load_raw_intel:
301 case nir_intrinsic_image_deref_store_raw_intel: {
302 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
303 nir_variable *var = nir_deref_instr_get_variable(deref);
304
305 b.cursor = nir_before_instr(&intrin->instr);
306 nir_def *index =
307 nir_iadd_imm(&b, get_aoa_deref_offset(&b, deref, 1),
308 var->data.driver_location);
309 nir_rewrite_image_intrinsic(intrin, index, false);
310 break;
311 }
312
313 default:
314 break;
315 }
316 }
317 }
318 }
319
320 // XXX: need unify_interfaces() at link time...
321
322 /**
323 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
324 */
325 static bool
crocus_fix_edge_flags(nir_shader * nir)326 crocus_fix_edge_flags(nir_shader *nir)
327 {
328 if (nir->info.stage != MESA_SHADER_VERTEX) {
329 nir_shader_preserve_all_metadata(nir);
330 return false;
331 }
332
333 nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
334 VARYING_SLOT_EDGE);
335 if (!var) {
336 nir_shader_preserve_all_metadata(nir);
337 return false;
338 }
339
340 var->data.mode = nir_var_shader_temp;
341 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
342 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
343 nir_fixup_deref_modes(nir);
344
345 nir_foreach_function_impl(impl, nir) {
346 nir_metadata_preserve(impl, nir_metadata_block_index |
347 nir_metadata_dominance |
348 nir_metadata_live_defs |
349 nir_metadata_loop_analysis);
350 }
351
352 return true;
353 }
354
355 /**
356 * Fix an uncompiled shader's stream output info.
357 *
358 * Core Gallium stores output->register_index as a "slot" number, where
359 * slots are assigned consecutively to all outputs in info->outputs_written.
360 * This naive packing of outputs doesn't work for us - we too have slots,
361 * but the layout is defined by the VUE map, which we won't have until we
362 * compile a specific shader variant. So, we remap these and simply store
363 * VARYING_SLOT_* in our copy's output->register_index fields.
364 *
365 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
366 * components of our VUE header. See elk_vue_map.c for the layout.
367 */
368 static void
update_so_info(struct pipe_stream_output_info * so_info,uint64_t outputs_written)369 update_so_info(struct pipe_stream_output_info *so_info,
370 uint64_t outputs_written)
371 {
372 uint8_t reverse_map[64] = {};
373 unsigned slot = 0;
374 while (outputs_written) {
375 reverse_map[slot++] = u_bit_scan64(&outputs_written);
376 }
377
378 for (unsigned i = 0; i < so_info->num_outputs; i++) {
379 struct pipe_stream_output *output = &so_info->output[i];
380
381 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
382 output->register_index = reverse_map[output->register_index];
383
384 /* The VUE header contains three scalar fields packed together:
385 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
386 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
387 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
388 */
389 switch (output->register_index) {
390 case VARYING_SLOT_LAYER:
391 assert(output->num_components == 1);
392 output->register_index = VARYING_SLOT_PSIZ;
393 output->start_component = 1;
394 break;
395 case VARYING_SLOT_VIEWPORT:
396 assert(output->num_components == 1);
397 output->register_index = VARYING_SLOT_PSIZ;
398 output->start_component = 2;
399 break;
400 case VARYING_SLOT_PSIZ:
401 assert(output->num_components == 1);
402 output->start_component = 3;
403 break;
404 }
405
406 //info->outputs_written |= 1ull << output->register_index;
407 }
408 }
409
410 static void
setup_vec4_image_sysval(uint32_t * sysvals,uint32_t idx,unsigned offset,unsigned n)411 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
412 unsigned offset, unsigned n)
413 {
414 assert(offset % sizeof(uint32_t) == 0);
415
416 for (unsigned i = 0; i < n; ++i)
417 sysvals[i] = ELK_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
418
419 for (unsigned i = n; i < 4; ++i)
420 sysvals[i] = ELK_PARAM_BUILTIN_ZERO;
421 }
422
423 /**
424 * Associate NIR uniform variables with the prog_data->param[] mechanism
425 * used by the backend. Also, decide which UBOs we'd like to push in an
426 * ideal situation (though the backend can reduce this).
427 */
428 static void
crocus_setup_uniforms(ASSERTED const struct intel_device_info * devinfo,void * mem_ctx,nir_shader * nir,struct elk_stage_prog_data * prog_data,enum elk_param_builtin ** out_system_values,unsigned * out_num_system_values,unsigned * out_num_cbufs)429 crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
430 void *mem_ctx,
431 nir_shader *nir,
432 struct elk_stage_prog_data *prog_data,
433 enum elk_param_builtin **out_system_values,
434 unsigned *out_num_system_values,
435 unsigned *out_num_cbufs)
436 {
437 const unsigned CROCUS_MAX_SYSTEM_VALUES =
438 PIPE_MAX_SHADER_IMAGES * ISL_IMAGE_PARAM_SIZE;
439 enum elk_param_builtin *system_values =
440 rzalloc_array(mem_ctx, enum elk_param_builtin, CROCUS_MAX_SYSTEM_VALUES);
441 unsigned num_system_values = 0;
442
443 unsigned patch_vert_idx = -1;
444 unsigned tess_outer_default_idx = -1;
445 unsigned tess_inner_default_idx = -1;
446 unsigned ucp_idx[CROCUS_MAX_CLIP_PLANES];
447 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
448 unsigned variable_group_size_idx = -1;
449 memset(ucp_idx, -1, sizeof(ucp_idx));
450 memset(img_idx, -1, sizeof(img_idx));
451
452 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
453
454 nir_builder b = nir_builder_at(nir_before_impl(impl));
455
456 nir_def *temp_ubo_name = nir_undef(&b, 1, 32);
457 nir_def *temp_const_ubo_name = NULL;
458
459 /* Turn system value intrinsics into uniforms */
460 nir_foreach_block(block, impl) {
461 nir_foreach_instr_safe(instr, block) {
462 if (instr->type != nir_instr_type_intrinsic)
463 continue;
464
465 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
466 nir_def *offset;
467
468 switch (intrin->intrinsic) {
469 case nir_intrinsic_load_base_workgroup_id: {
470 /* GL doesn't have a concept of base workgroup */
471 b.cursor = nir_instr_remove(&intrin->instr);
472 nir_def_rewrite_uses(&intrin->def,
473 nir_imm_zero(&b, 3, 32));
474 continue;
475 }
476 case nir_intrinsic_load_constant: {
477 /* This one is special because it reads from the shader constant
478 * data and not cbuf0 which gallium uploads for us.
479 */
480 b.cursor = nir_before_instr(instr);
481 nir_def *offset =
482 nir_iadd_imm(&b, intrin->src[0].ssa,
483 nir_intrinsic_base(intrin));
484
485 if (temp_const_ubo_name == NULL)
486 temp_const_ubo_name = nir_imm_int(&b, 0);
487
488 nir_intrinsic_instr *load_ubo =
489 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
490 load_ubo->num_components = intrin->num_components;
491 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
492 load_ubo->src[1] = nir_src_for_ssa(offset);
493 nir_intrinsic_set_align(load_ubo, 4, 0);
494 nir_intrinsic_set_range_base(load_ubo, 0);
495 nir_intrinsic_set_range(load_ubo, ~0);
496 nir_def_init(&load_ubo->instr, &load_ubo->def,
497 intrin->def.num_components,
498 intrin->def.bit_size);
499 nir_builder_instr_insert(&b, &load_ubo->instr);
500
501 nir_def_rewrite_uses(&intrin->def,
502 &load_ubo->def);
503 nir_instr_remove(&intrin->instr);
504 continue;
505 }
506 case nir_intrinsic_load_user_clip_plane: {
507 unsigned ucp = nir_intrinsic_ucp_id(intrin);
508
509 if (ucp_idx[ucp] == -1) {
510 ucp_idx[ucp] = num_system_values;
511 num_system_values += 4;
512 }
513
514 for (int i = 0; i < 4; i++) {
515 system_values[ucp_idx[ucp] + i] =
516 ELK_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
517 }
518
519 b.cursor = nir_before_instr(instr);
520 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
521 break;
522 }
523 case nir_intrinsic_load_patch_vertices_in:
524 if (patch_vert_idx == -1)
525 patch_vert_idx = num_system_values++;
526
527 system_values[patch_vert_idx] =
528 ELK_PARAM_BUILTIN_PATCH_VERTICES_IN;
529
530 b.cursor = nir_before_instr(instr);
531 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
532 break;
533 case nir_intrinsic_load_tess_level_outer_default:
534 if (tess_outer_default_idx == -1) {
535 tess_outer_default_idx = num_system_values;
536 num_system_values += 4;
537 }
538
539 for (int i = 0; i < 4; i++) {
540 system_values[tess_outer_default_idx + i] =
541 ELK_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
542 }
543
544 b.cursor = nir_before_instr(instr);
545 offset =
546 nir_imm_int(&b, tess_outer_default_idx * sizeof(uint32_t));
547 break;
548 case nir_intrinsic_load_tess_level_inner_default:
549 if (tess_inner_default_idx == -1) {
550 tess_inner_default_idx = num_system_values;
551 num_system_values += 2;
552 }
553
554 for (int i = 0; i < 2; i++) {
555 system_values[tess_inner_default_idx + i] =
556 ELK_PARAM_BUILTIN_TESS_LEVEL_INNER_X + i;
557 }
558
559 b.cursor = nir_before_instr(instr);
560 offset =
561 nir_imm_int(&b, tess_inner_default_idx * sizeof(uint32_t));
562 break;
563 case nir_intrinsic_image_deref_load_param_intel: {
564 assert(devinfo->ver < 9);
565 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
566 nir_variable *var = nir_deref_instr_get_variable(deref);
567
568 if (img_idx[var->data.binding] == -1) {
569 /* GL only allows arrays of arrays of images. */
570 assert(glsl_type_is_image(glsl_without_array(var->type)));
571 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
572
573 for (int i = 0; i < num_images; i++) {
574 const unsigned img = var->data.binding + i;
575
576 img_idx[img] = num_system_values;
577 num_system_values += ISL_IMAGE_PARAM_SIZE;
578
579 uint32_t *img_sv = &system_values[img_idx[img]];
580
581 setup_vec4_image_sysval(
582 img_sv + ISL_IMAGE_PARAM_OFFSET_OFFSET, img,
583 offsetof(struct isl_image_param, offset), 2);
584 setup_vec4_image_sysval(
585 img_sv + ISL_IMAGE_PARAM_SIZE_OFFSET, img,
586 offsetof(struct isl_image_param, size), 3);
587 setup_vec4_image_sysval(
588 img_sv + ISL_IMAGE_PARAM_STRIDE_OFFSET, img,
589 offsetof(struct isl_image_param, stride), 4);
590 setup_vec4_image_sysval(
591 img_sv + ISL_IMAGE_PARAM_TILING_OFFSET, img,
592 offsetof(struct isl_image_param, tiling), 3);
593 setup_vec4_image_sysval(
594 img_sv + ISL_IMAGE_PARAM_SWIZZLING_OFFSET, img,
595 offsetof(struct isl_image_param, swizzling), 2);
596 }
597 }
598
599 b.cursor = nir_before_instr(instr);
600 offset = nir_iadd_imm(&b,
601 get_aoa_deref_offset(&b, deref, ISL_IMAGE_PARAM_SIZE * 4),
602 img_idx[var->data.binding] * 4 +
603 nir_intrinsic_base(intrin) * 16);
604 break;
605 }
606 case nir_intrinsic_load_workgroup_size: {
607 assert(nir->info.workgroup_size_variable);
608 if (variable_group_size_idx == -1) {
609 variable_group_size_idx = num_system_values;
610 num_system_values += 3;
611 for (int i = 0; i < 3; i++) {
612 system_values[variable_group_size_idx + i] =
613 ELK_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
614 }
615 }
616
617 b.cursor = nir_before_instr(instr);
618 offset = nir_imm_int(&b,
619 variable_group_size_idx * sizeof(uint32_t));
620 break;
621 }
622 default:
623 continue;
624 }
625
626 unsigned comps = nir_intrinsic_dest_components(intrin);
627
628 nir_intrinsic_instr *load =
629 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
630 load->num_components = comps;
631 load->src[0] = nir_src_for_ssa(temp_ubo_name);
632 load->src[1] = nir_src_for_ssa(offset);
633 nir_intrinsic_set_align(load, 4, 0);
634 nir_intrinsic_set_range_base(load, 0);
635 nir_intrinsic_set_range(load, ~0);
636 nir_def_init(&load->instr, &load->def, comps, 32);
637 nir_builder_instr_insert(&b, &load->instr);
638 nir_def_rewrite_uses(&intrin->def,
639 &load->def);
640 nir_instr_remove(instr);
641 }
642 }
643
644 nir_validate_shader(nir, "before remapping");
645
646 /* Uniforms are stored in constant buffer 0, the
647 * user-facing UBOs are indexed by one. So if any constant buffer is
648 * needed, the constant buffer 0 will be needed, so account for it.
649 */
650 unsigned num_cbufs = nir->info.num_ubos;
651 if (num_cbufs || nir->num_uniforms)
652 num_cbufs++;
653
654 /* Place the new params in a new cbuf. */
655 if (num_system_values > 0) {
656 unsigned sysval_cbuf_index = num_cbufs;
657 num_cbufs++;
658
659 system_values = reralloc(mem_ctx, system_values, enum elk_param_builtin,
660 num_system_values);
661
662 nir_foreach_block(block, impl) {
663 nir_foreach_instr_safe(instr, block) {
664 if (instr->type != nir_instr_type_intrinsic)
665 continue;
666
667 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
668
669 if (load->intrinsic != nir_intrinsic_load_ubo)
670 continue;
671
672 b.cursor = nir_before_instr(instr);
673
674 if (load->src[0].ssa == temp_ubo_name) {
675 nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
676 nir_src_rewrite(&load->src[0], imm);
677 }
678 }
679 }
680
681 /* We need to fold the new iadds for elk_nir_analyze_ubo_ranges */
682 nir_opt_constant_folding(nir);
683 } else {
684 ralloc_free(system_values);
685 system_values = NULL;
686 }
687
688 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
689 nir_validate_shader(nir, "after remap");
690
691 /* We don't use params[] but gallium leaves num_uniforms set. We use this
692 * to detect when cbuf0 exists but we don't need it anymore when we get
693 * here. Instead, zero it out so that the back-end doesn't get confused
694 * when nr_params * 4 != num_uniforms != nr_params * 4.
695 */
696 nir->num_uniforms = 0;
697
698 /* Constant loads (if any) need to go at the end of the constant buffers so
699 * we need to know num_cbufs before we can lower to them.
700 */
701 if (temp_const_ubo_name != NULL) {
702 nir_load_const_instr *const_ubo_index =
703 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
704 assert(const_ubo_index->def.bit_size == 32);
705 const_ubo_index->value[0].u32 = num_cbufs;
706 }
707
708 *out_system_values = system_values;
709 *out_num_system_values = num_system_values;
710 *out_num_cbufs = num_cbufs;
711 }
712
713 static const char *surface_group_names[] = {
714 [CROCUS_SURFACE_GROUP_RENDER_TARGET] = "render target",
715 [CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
716 [CROCUS_SURFACE_GROUP_SOL] = "streamout",
717 [CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
718 [CROCUS_SURFACE_GROUP_TEXTURE] = "texture",
719 [CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = "texture gather",
720 [CROCUS_SURFACE_GROUP_UBO] = "ubo",
721 [CROCUS_SURFACE_GROUP_SSBO] = "ssbo",
722 [CROCUS_SURFACE_GROUP_IMAGE] = "image",
723 };
724
725 static void
crocus_print_binding_table(FILE * fp,const char * name,const struct crocus_binding_table * bt)726 crocus_print_binding_table(FILE *fp, const char *name,
727 const struct crocus_binding_table *bt)
728 {
729 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == CROCUS_SURFACE_GROUP_COUNT);
730
731 uint32_t total = 0;
732 uint32_t compacted = 0;
733
734 for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
735 uint32_t size = bt->sizes[i];
736 total += size;
737 if (size)
738 compacted += util_bitcount64(bt->used_mask[i]);
739 }
740
741 if (total == 0) {
742 fprintf(fp, "Binding table for %s is empty\n\n", name);
743 return;
744 }
745
746 if (total != compacted) {
747 fprintf(fp, "Binding table for %s "
748 "(compacted to %u entries from %u entries)\n",
749 name, compacted, total);
750 } else {
751 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
752 }
753
754 uint32_t entry = 0;
755 for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
756 uint64_t mask = bt->used_mask[i];
757 while (mask) {
758 int index = u_bit_scan64(&mask);
759 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
760 }
761 }
762 fprintf(fp, "\n");
763 }
764
765 enum {
766 /* Max elements in a surface group. */
767 SURFACE_GROUP_MAX_ELEMENTS = 64,
768 };
769
770 static void
rewrite_src_with_bti(nir_builder * b,struct crocus_binding_table * bt,nir_instr * instr,nir_src * src,enum crocus_surface_group group)771 rewrite_src_with_bti(nir_builder *b, struct crocus_binding_table *bt,
772 nir_instr *instr, nir_src *src,
773 enum crocus_surface_group group)
774 {
775 assert(bt->sizes[group] > 0);
776
777 b->cursor = nir_before_instr(instr);
778 nir_def *bti;
779 if (nir_src_is_const(*src)) {
780 uint32_t index = nir_src_as_uint(*src);
781 bti = nir_imm_intN_t(b, crocus_group_index_to_bti(bt, group, index),
782 src->ssa->bit_size);
783 } else {
784 /* Indirect usage makes all the surfaces of the group to be available,
785 * so we can just add the base.
786 */
787 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
788 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
789 }
790 nir_src_rewrite(src, bti);
791 }
792
793 static void
mark_used_with_src(struct crocus_binding_table * bt,nir_src * src,enum crocus_surface_group group)794 mark_used_with_src(struct crocus_binding_table *bt, nir_src *src,
795 enum crocus_surface_group group)
796 {
797 assert(bt->sizes[group] > 0);
798
799 if (nir_src_is_const(*src)) {
800 uint64_t index = nir_src_as_uint(*src);
801 assert(index < bt->sizes[group]);
802 bt->used_mask[group] |= 1ull << index;
803 } else {
804 /* There's an indirect usage, we need all the surfaces. */
805 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
806 }
807 }
808
809 static bool
skip_compacting_binding_tables(void)810 skip_compacting_binding_tables(void)
811 {
812 static int skip = -1;
813 if (skip < 0)
814 skip = debug_get_bool_option("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
815 return skip;
816 }
817
818 /**
819 * Set up the binding table indices and apply to the shader.
820 */
821 static void
crocus_setup_binding_table(const struct intel_device_info * devinfo,struct nir_shader * nir,struct crocus_binding_table * bt,unsigned num_render_targets,unsigned num_system_values,unsigned num_cbufs,const struct elk_sampler_prog_key_data * key)822 crocus_setup_binding_table(const struct intel_device_info *devinfo,
823 struct nir_shader *nir,
824 struct crocus_binding_table *bt,
825 unsigned num_render_targets,
826 unsigned num_system_values,
827 unsigned num_cbufs,
828 const struct elk_sampler_prog_key_data *key)
829 {
830 const struct shader_info *info = &nir->info;
831
832 memset(bt, 0, sizeof(*bt));
833
834 /* Set the sizes for each surface group. For some groups, we already know
835 * upfront how many will be used, so mark them.
836 */
837 if (info->stage == MESA_SHADER_FRAGMENT) {
838 bt->sizes[CROCUS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
839 /* All render targets used. */
840 bt->used_mask[CROCUS_SURFACE_GROUP_RENDER_TARGET] =
841 BITFIELD64_MASK(num_render_targets);
842
843 /* Setup render target read surface group in order to support non-coherent
844 * framebuffer fetch on Gfx7
845 */
846 if (devinfo->ver >= 6 && info->outputs_read) {
847 bt->sizes[CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
848 bt->used_mask[CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] =
849 BITFIELD64_MASK(num_render_targets);
850 }
851 } else if (info->stage == MESA_SHADER_COMPUTE) {
852 bt->sizes[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
853 } else if (info->stage == MESA_SHADER_GEOMETRY) {
854 /* In gfx6 we reserve the first ELK_MAX_SOL_BINDINGS entries for transform
855 * feedback surfaces.
856 */
857 if (devinfo->ver == 6) {
858 bt->sizes[CROCUS_SURFACE_GROUP_SOL] = ELK_MAX_SOL_BINDINGS;
859 bt->used_mask[CROCUS_SURFACE_GROUP_SOL] = (uint64_t)-1;
860 }
861 }
862
863 bt->sizes[CROCUS_SURFACE_GROUP_TEXTURE] = BITSET_LAST_BIT(info->textures_used);
864 bt->used_mask[CROCUS_SURFACE_GROUP_TEXTURE] = info->textures_used[0];
865
866 if (info->uses_texture_gather && devinfo->ver < 8) {
867 bt->sizes[CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = BITSET_LAST_BIT(info->textures_used);
868 bt->used_mask[CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = info->textures_used[0];
869 }
870
871 bt->sizes[CROCUS_SURFACE_GROUP_IMAGE] = info->num_images;
872
873 /* Allocate an extra slot in the UBO section for NIR constants.
874 * Binding table compaction will remove it if unnecessary.
875 *
876 * We don't include them in crocus_compiled_shader::num_cbufs because
877 * they are uploaded separately from shs->constbufs[], but from a shader
878 * point of view, they're another UBO (at the end of the section).
879 */
880 bt->sizes[CROCUS_SURFACE_GROUP_UBO] = num_cbufs + 1;
881
882 bt->sizes[CROCUS_SURFACE_GROUP_SSBO] = info->num_ssbos;
883
884 for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++)
885 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
886
887 /* Mark surfaces used for the cases we don't have the information available
888 * upfront.
889 */
890 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
891 nir_foreach_block (block, impl) {
892 nir_foreach_instr (instr, block) {
893 if (instr->type != nir_instr_type_intrinsic)
894 continue;
895
896 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
897 switch (intrin->intrinsic) {
898 case nir_intrinsic_load_num_workgroups:
899 bt->used_mask[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
900 break;
901
902 case nir_intrinsic_load_output:
903 if (devinfo->ver >= 6) {
904 mark_used_with_src(bt, &intrin->src[0],
905 CROCUS_SURFACE_GROUP_RENDER_TARGET_READ);
906 }
907 break;
908
909 case nir_intrinsic_image_size:
910 case nir_intrinsic_image_load:
911 case nir_intrinsic_image_store:
912 case nir_intrinsic_image_atomic:
913 case nir_intrinsic_image_atomic_swap:
914 case nir_intrinsic_image_load_raw_intel:
915 case nir_intrinsic_image_store_raw_intel:
916 mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_IMAGE);
917 break;
918
919 case nir_intrinsic_load_ubo:
920 mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_UBO);
921 break;
922
923 case nir_intrinsic_store_ssbo:
924 mark_used_with_src(bt, &intrin->src[1], CROCUS_SURFACE_GROUP_SSBO);
925 break;
926
927 case nir_intrinsic_get_ssbo_size:
928 case nir_intrinsic_ssbo_atomic:
929 case nir_intrinsic_ssbo_atomic_swap:
930 case nir_intrinsic_load_ssbo:
931 mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_SSBO);
932 break;
933
934 default:
935 break;
936 }
937 }
938 }
939
940 /* When disable we just mark everything as used. */
941 if (unlikely(skip_compacting_binding_tables())) {
942 for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++)
943 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
944 }
945
946 /* Calculate the offsets and the binding table size based on the used
947 * surfaces. After this point, the functions to go between "group indices"
948 * and binding table indices can be used.
949 */
950 uint32_t next = 0;
951 for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
952 if (bt->used_mask[i] != 0) {
953 bt->offsets[i] = next;
954 next += util_bitcount64(bt->used_mask[i]);
955 }
956 }
957 bt->size_bytes = next * 4;
958
959 if (INTEL_DEBUG(DEBUG_BT)) {
960 crocus_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
961 }
962
963 /* Apply the binding table indices. The backend compiler is not expected
964 * to change those, as we haven't set any of the *_start entries in elk
965 * binding_table.
966 */
967 nir_builder b = nir_builder_create(impl);
968
969 nir_foreach_block (block, impl) {
970 nir_foreach_instr (instr, block) {
971 if (instr->type == nir_instr_type_tex) {
972 nir_tex_instr *tex = nir_instr_as_tex(instr);
973 bool is_gather = devinfo->ver < 8 && tex->op == nir_texop_tg4;
974
975 /* rewrite the tg4 component from green to blue before replacing the
976 texture index */
977 if (devinfo->verx10 == 70) {
978 if (tex->component == 1)
979 if (key->gather_channel_quirk_mask & (1 << tex->texture_index))
980 tex->component = 2;
981 }
982
983 if (is_gather && devinfo->ver == 6 && key->gfx6_gather_wa[tex->texture_index]) {
984 b.cursor = nir_after_instr(instr);
985 enum elk_gfx6_gather_sampler_wa wa = key->gfx6_gather_wa[tex->texture_index];
986 int width = (wa & ELK_WA_8BIT) ? 8 : 16;
987
988 nir_def *val = nir_fmul_imm(&b, &tex->def, (1 << width) - 1);
989 val = nir_f2u32(&b, val);
990 if (wa & ELK_WA_SIGN) {
991 val = nir_ishl_imm(&b, val, 32 - width);
992 val = nir_ishr_imm(&b, val, 32 - width);
993 }
994 nir_def_rewrite_uses_after(&tex->def, val, val->parent_instr);
995 }
996
997 tex->texture_index =
998 crocus_group_index_to_bti(bt, is_gather ? CROCUS_SURFACE_GROUP_TEXTURE_GATHER : CROCUS_SURFACE_GROUP_TEXTURE,
999 tex->texture_index);
1000 continue;
1001 }
1002
1003 if (instr->type != nir_instr_type_intrinsic)
1004 continue;
1005
1006 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
1007 switch (intrin->intrinsic) {
1008 case nir_intrinsic_image_size:
1009 case nir_intrinsic_image_load:
1010 case nir_intrinsic_image_store:
1011 case nir_intrinsic_image_atomic:
1012 case nir_intrinsic_image_atomic_swap:
1013 case nir_intrinsic_image_load_raw_intel:
1014 case nir_intrinsic_image_store_raw_intel:
1015 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1016 CROCUS_SURFACE_GROUP_IMAGE);
1017 break;
1018
1019 case nir_intrinsic_load_ubo:
1020 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1021 CROCUS_SURFACE_GROUP_UBO);
1022 break;
1023
1024 case nir_intrinsic_store_ssbo:
1025 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
1026 CROCUS_SURFACE_GROUP_SSBO);
1027 break;
1028
1029 case nir_intrinsic_load_output:
1030 if (devinfo->ver >= 6) {
1031 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1032 CROCUS_SURFACE_GROUP_RENDER_TARGET_READ);
1033 }
1034 break;
1035
1036 case nir_intrinsic_get_ssbo_size:
1037 case nir_intrinsic_ssbo_atomic:
1038 case nir_intrinsic_ssbo_atomic_swap:
1039 case nir_intrinsic_load_ssbo:
1040 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1041 CROCUS_SURFACE_GROUP_SSBO);
1042 break;
1043
1044 default:
1045 break;
1046 }
1047 }
1048 }
1049 }
1050
1051 static void
crocus_debug_recompile(struct crocus_context * ice,struct shader_info * info,const struct elk_base_prog_key * key)1052 crocus_debug_recompile(struct crocus_context *ice,
1053 struct shader_info *info,
1054 const struct elk_base_prog_key *key)
1055 {
1056 struct crocus_screen *screen = (struct crocus_screen *) ice->ctx.screen;
1057 const struct elk_compiler *c = screen->compiler;
1058
1059 if (!info)
1060 return;
1061
1062 elk_shader_perf_log(c, &ice->dbg, "Recompiling %s shader for program %s: %s\n",
1063 _mesa_shader_stage_to_string(info->stage),
1064 info->name ? info->name : "(no identifier)",
1065 info->label ? info->label : "");
1066
1067 const void *old_key =
1068 crocus_find_previous_compile(ice, info->stage, key->program_string_id);
1069
1070 elk_debug_key_recompile(c, &ice->dbg, info->stage, old_key, key);
1071 }
1072
1073 /**
1074 * Get the shader for the last enabled geometry stage.
1075 *
1076 * This stage is the one which will feed stream output and the rasterizer.
1077 */
1078 static gl_shader_stage
last_vue_stage(struct crocus_context * ice)1079 last_vue_stage(struct crocus_context *ice)
1080 {
1081 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1082 return MESA_SHADER_GEOMETRY;
1083
1084 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1085 return MESA_SHADER_TESS_EVAL;
1086
1087 return MESA_SHADER_VERTEX;
1088 }
1089
1090 static GLbitfield64
crocus_vs_outputs_written(struct crocus_context * ice,const struct elk_vs_prog_key * key,GLbitfield64 user_varyings)1091 crocus_vs_outputs_written(struct crocus_context *ice,
1092 const struct elk_vs_prog_key *key,
1093 GLbitfield64 user_varyings)
1094 {
1095 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1096 const struct intel_device_info *devinfo = &screen->devinfo;
1097 GLbitfield64 outputs_written = user_varyings;
1098
1099 if (devinfo->ver < 6) {
1100
1101 if (key->copy_edgeflag)
1102 outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
1103
1104 /* Put dummy slots into the VUE for the SF to put the replaced
1105 * point sprite coords in. We shouldn't need these dummy slots,
1106 * which take up precious URB space, but it would mean that the SF
1107 * doesn't get nice aligned pairs of input coords into output
1108 * coords, which would be a pain to handle.
1109 */
1110 for (unsigned i = 0; i < 8; i++) {
1111 if (key->point_coord_replace & (1 << i))
1112 outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
1113 }
1114
1115 /* if back colors are written, allocate slots for front colors too */
1116 if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
1117 outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
1118 if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
1119 outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
1120 }
1121
1122 /* In order for legacy clipping to work, we need to populate the clip
1123 * distance varying slots whenever clipping is enabled, even if the vertex
1124 * shader doesn't write to gl_ClipDistance.
1125 */
1126 if (key->nr_userclip_plane_consts > 0) {
1127 outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
1128 outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
1129 }
1130
1131 return outputs_written;
1132 }
1133
1134 /*
1135 * If no edgeflags come from the user, gen4/5
1136 * require giving the clip shader a default edgeflag.
1137 *
1138 * This will always be 1.0.
1139 */
1140 static void
crocus_lower_default_edgeflags(struct nir_shader * nir)1141 crocus_lower_default_edgeflags(struct nir_shader *nir)
1142 {
1143 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1144
1145 nir_builder b = nir_builder_at(nir_after_impl(impl));
1146
1147 nir_variable *var = nir_variable_create(nir, nir_var_shader_out,
1148 glsl_float_type(),
1149 "edgeflag");
1150 var->data.location = VARYING_SLOT_EDGE;
1151 nir_store_var(&b, var, nir_imm_float(&b, 1.0), 0x1);
1152 }
1153
1154 /**
1155 * Compile a vertex shader, and upload the assembly.
1156 */
1157 static struct crocus_compiled_shader *
crocus_compile_vs(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_vs_prog_key * key)1158 crocus_compile_vs(struct crocus_context *ice,
1159 struct crocus_uncompiled_shader *ish,
1160 const struct elk_vs_prog_key *key)
1161 {
1162 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1163 const struct elk_compiler *compiler = screen->compiler;
1164 const struct intel_device_info *devinfo = &screen->devinfo;
1165 void *mem_ctx = ralloc_context(NULL);
1166 struct elk_vs_prog_data *vs_prog_data =
1167 rzalloc(mem_ctx, struct elk_vs_prog_data);
1168 struct elk_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1169 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1170 enum elk_param_builtin *system_values;
1171 unsigned num_system_values;
1172 unsigned num_cbufs;
1173
1174 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1175
1176 if (key->nr_userclip_plane_consts) {
1177 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1178 /* Check if variables were found. */
1179 if (nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1,
1180 true, false, NULL)) {
1181 nir_lower_io_to_temporaries(nir, impl, true, false);
1182 nir_lower_global_vars_to_local(nir);
1183 nir_lower_vars_to_ssa(nir);
1184 nir_shader_gather_info(nir, impl);
1185 }
1186 }
1187
1188 if (key->clamp_pointsize)
1189 nir_lower_point_size(nir, 1.0, 255.0);
1190
1191 prog_data->use_alt_mode = nir->info.use_legacy_math_rules;
1192
1193 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1194 &num_system_values, &num_cbufs);
1195
1196 crocus_lower_swizzles(nir, &key->base.tex);
1197
1198 if (devinfo->ver <= 5 &&
1199 !(nir->info.inputs_read & BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG)))
1200 crocus_lower_default_edgeflags(nir);
1201
1202 struct crocus_binding_table bt;
1203 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1204 num_system_values, num_cbufs, &key->base.tex);
1205
1206 if (can_push_ubo(devinfo))
1207 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1208
1209 uint64_t outputs_written =
1210 crocus_vs_outputs_written(ice, key, nir->info.outputs_written);
1211 elk_compute_vue_map(devinfo,
1212 &vue_prog_data->vue_map, outputs_written,
1213 nir->info.separate_shader, /* pos slots */ 1);
1214
1215 /* Don't tell the backend about our clip plane constants, we've already
1216 * lowered them in NIR and we don't want it doing it again.
1217 */
1218 struct elk_vs_prog_key key_no_ucp = *key;
1219 key_no_ucp.nr_userclip_plane_consts = 0;
1220 key_no_ucp.copy_edgeflag = false;
1221 crocus_sanitize_tex_key(&key_no_ucp.base.tex);
1222
1223 struct elk_compile_vs_params params = {
1224 .base = {
1225 .mem_ctx = mem_ctx,
1226 .nir = nir,
1227 .log_data = &ice->dbg,
1228 },
1229 .key = &key_no_ucp,
1230 .prog_data = vs_prog_data,
1231 .edgeflag_is_last = devinfo->ver < 6,
1232 };
1233 const unsigned *program =
1234 elk_compile_vs(compiler, ¶ms);
1235 if (program == NULL) {
1236 dbg_printf("Failed to compile vertex shader: %s\n", params.base.error_str);
1237 ralloc_free(mem_ctx);
1238 return false;
1239 }
1240
1241 if (ish->compiled_once) {
1242 crocus_debug_recompile(ice, &nir->info, &key->base);
1243 } else {
1244 ish->compiled_once = true;
1245 }
1246
1247 uint32_t *so_decls = NULL;
1248 if (devinfo->ver > 6)
1249 so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
1250 &vue_prog_data->vue_map);
1251
1252 struct crocus_compiled_shader *shader =
1253 crocus_upload_shader(ice, CROCUS_CACHE_VS, sizeof(*key), key, program,
1254 prog_data->program_size,
1255 prog_data, sizeof(*vs_prog_data), so_decls,
1256 system_values, num_system_values,
1257 num_cbufs, &bt);
1258
1259 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1260 ice->shaders.cache_bo_map,
1261 key, sizeof(*key));
1262
1263 ralloc_free(mem_ctx);
1264 return shader;
1265 }
1266
1267 /**
1268 * Update the current vertex shader variant.
1269 *
1270 * Fill out the key, look in the cache, compile and bind if needed.
1271 */
1272 static void
crocus_update_compiled_vs(struct crocus_context * ice)1273 crocus_update_compiled_vs(struct crocus_context *ice)
1274 {
1275 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1276 struct crocus_uncompiled_shader *ish =
1277 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1278 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1279 const struct intel_device_info *devinfo = &screen->devinfo;
1280 struct elk_vs_prog_key key = { KEY_INIT() };
1281
1282 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1283 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_VERTEX, ish,
1284 ish->nir->info.uses_texture_gather, &key.base.tex);
1285 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1286
1287 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_VS];
1288 struct crocus_compiled_shader *shader =
1289 crocus_find_cached_shader(ice, CROCUS_CACHE_VS, sizeof(key), &key);
1290
1291 if (!shader)
1292 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1293
1294 if (!shader)
1295 shader = crocus_compile_vs(ice, ish, &key);
1296
1297 if (old != shader) {
1298 ice->shaders.prog[CROCUS_CACHE_VS] = shader;
1299 if (devinfo->ver == 8)
1300 ice->state.dirty |= CROCUS_DIRTY_GEN8_VF_SGVS;
1301 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_VS |
1302 CROCUS_STAGE_DIRTY_BINDINGS_VS |
1303 CROCUS_STAGE_DIRTY_CONSTANTS_VS;
1304 shs->sysvals_need_upload = true;
1305
1306 const struct elk_vs_prog_data *vs_prog_data =
1307 (void *) shader->prog_data;
1308 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1309 vs_prog_data->uses_baseinstance;
1310 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1311 vs_prog_data->uses_is_indexed_draw;
1312 const bool needs_sgvs_element = uses_draw_params ||
1313 vs_prog_data->uses_instanceid ||
1314 vs_prog_data->uses_vertexid;
1315
1316 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1317 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1318 ice->state.vs_needs_edge_flag != ish->needs_edge_flag ||
1319 ice->state.vs_uses_vertexid != vs_prog_data->uses_vertexid ||
1320 ice->state.vs_uses_instanceid != vs_prog_data->uses_instanceid) {
1321 ice->state.dirty |= CROCUS_DIRTY_VERTEX_BUFFERS |
1322 CROCUS_DIRTY_VERTEX_ELEMENTS;
1323 }
1324 ice->state.vs_uses_draw_params = uses_draw_params;
1325 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1326 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1327 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1328 ice->state.vs_uses_vertexid = vs_prog_data->uses_vertexid;
1329 ice->state.vs_uses_instanceid = vs_prog_data->uses_instanceid;
1330 }
1331 }
1332
1333 /**
1334 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1335 */
1336 const struct shader_info *
crocus_get_shader_info(const struct crocus_context * ice,gl_shader_stage stage)1337 crocus_get_shader_info(const struct crocus_context *ice, gl_shader_stage stage)
1338 {
1339 const struct crocus_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1340
1341 if (!ish)
1342 return NULL;
1343
1344 const nir_shader *nir = ish->nir;
1345 return &nir->info;
1346 }
1347
1348 /**
1349 * Get the union of TCS output and TES input slots.
1350 *
1351 * TCS and TES need to agree on a common URB entry layout. In particular,
1352 * the data for all patch vertices is stored in a single URB entry (unlike
1353 * GS which has one entry per input vertex). This means that per-vertex
1354 * array indexing needs a stride.
1355 *
1356 * SSO requires locations to match, but doesn't require the number of
1357 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1358 * So, we need to take the extra step of unifying these on the fly.
1359 */
1360 static void
get_unified_tess_slots(const struct crocus_context * ice,uint64_t * per_vertex_slots,uint32_t * per_patch_slots)1361 get_unified_tess_slots(const struct crocus_context *ice,
1362 uint64_t *per_vertex_slots,
1363 uint32_t *per_patch_slots)
1364 {
1365 const struct shader_info *tcs =
1366 crocus_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1367 const struct shader_info *tes =
1368 crocus_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1369
1370 *per_vertex_slots = tes->inputs_read;
1371 *per_patch_slots = tes->patch_inputs_read;
1372
1373 if (tcs) {
1374 *per_vertex_slots |= tcs->outputs_written;
1375 *per_patch_slots |= tcs->patch_outputs_written;
1376 }
1377 }
1378
1379 /**
1380 * Compile a tessellation control shader, and upload the assembly.
1381 */
1382 static struct crocus_compiled_shader *
crocus_compile_tcs(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_tcs_prog_key * key)1383 crocus_compile_tcs(struct crocus_context *ice,
1384 struct crocus_uncompiled_shader *ish,
1385 const struct elk_tcs_prog_key *key)
1386 {
1387 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1388 const struct elk_compiler *compiler = screen->compiler;
1389 void *mem_ctx = ralloc_context(NULL);
1390 struct elk_tcs_prog_data *tcs_prog_data =
1391 rzalloc(mem_ctx, struct elk_tcs_prog_data);
1392 struct elk_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1393 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1394 const struct intel_device_info *devinfo = &screen->devinfo;
1395 enum elk_param_builtin *system_values = NULL;
1396 unsigned num_system_values = 0;
1397 unsigned num_cbufs = 0;
1398
1399 nir_shader *nir;
1400
1401 struct crocus_binding_table bt;
1402
1403 if (ish) {
1404 nir = nir_shader_clone(mem_ctx, ish->nir);
1405 } else {
1406 nir = elk_nir_create_passthrough_tcs(mem_ctx, compiler, key);
1407 }
1408
1409 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1410 &num_system_values, &num_cbufs);
1411
1412 crocus_lower_swizzles(nir, &key->base.tex);
1413 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1414 num_system_values, num_cbufs, &key->base.tex);
1415 if (can_push_ubo(devinfo))
1416 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1417
1418 struct elk_tcs_prog_key key_clean = *key;
1419 crocus_sanitize_tex_key(&key_clean.base.tex);
1420
1421 struct elk_compile_tcs_params params = {
1422 .base = {
1423 .mem_ctx = mem_ctx,
1424 .nir = nir,
1425 .log_data = &ice->dbg,
1426 },
1427 .key = &key_clean,
1428 .prog_data = tcs_prog_data,
1429 };
1430
1431 const unsigned *program = elk_compile_tcs(compiler, ¶ms);
1432 if (program == NULL) {
1433 dbg_printf("Failed to compile control shader: %s\n", params.base.error_str);
1434 ralloc_free(mem_ctx);
1435 return false;
1436 }
1437
1438 if (ish) {
1439 if (ish->compiled_once) {
1440 crocus_debug_recompile(ice, &nir->info, &key->base);
1441 } else {
1442 ish->compiled_once = true;
1443 }
1444 }
1445
1446 struct crocus_compiled_shader *shader =
1447 crocus_upload_shader(ice, CROCUS_CACHE_TCS, sizeof(*key), key, program,
1448 prog_data->program_size,
1449 prog_data, sizeof(*tcs_prog_data), NULL,
1450 system_values, num_system_values,
1451 num_cbufs, &bt);
1452
1453 if (ish)
1454 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1455 ice->shaders.cache_bo_map,
1456 key, sizeof(*key));
1457
1458 ralloc_free(mem_ctx);
1459 return shader;
1460 }
1461
1462 /**
1463 * Update the current tessellation control shader variant.
1464 *
1465 * Fill out the key, look in the cache, compile and bind if needed.
1466 */
1467 static void
crocus_update_compiled_tcs(struct crocus_context * ice)1468 crocus_update_compiled_tcs(struct crocus_context *ice)
1469 {
1470 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1471 struct crocus_uncompiled_shader *tcs =
1472 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1473 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1474 const struct intel_device_info *devinfo = &screen->devinfo;
1475
1476 const struct shader_info *tes_info =
1477 crocus_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1478 struct elk_tcs_prog_key key = {
1479 KEY_INIT_NO_ID(),
1480 .base.program_string_id = tcs ? tcs->program_id : 0,
1481 ._tes_primitive_mode = tes_info->tess._primitive_mode,
1482 .input_vertices = ice->state.vertices_per_patch,
1483 .quads_workaround = tes_info->tess._primitive_mode == TESS_PRIMITIVE_QUADS &&
1484 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1485 };
1486
1487 if (tcs && tcs->nos & (1ull << CROCUS_NOS_TEXTURES))
1488 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_TESS_CTRL, tcs,
1489 tcs->nir->info.uses_texture_gather, &key.base.tex);
1490 get_unified_tess_slots(ice, &key.outputs_written,
1491 &key.patch_outputs_written);
1492 screen->vtbl.populate_tcs_key(ice, &key);
1493
1494 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_TCS];
1495 struct crocus_compiled_shader *shader =
1496 crocus_find_cached_shader(ice, CROCUS_CACHE_TCS, sizeof(key), &key);
1497
1498 if (tcs && !shader)
1499 shader = crocus_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1500
1501 if (!shader)
1502 shader = crocus_compile_tcs(ice, tcs, &key);
1503
1504 if (old != shader) {
1505 ice->shaders.prog[CROCUS_CACHE_TCS] = shader;
1506 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_TCS |
1507 CROCUS_STAGE_DIRTY_BINDINGS_TCS |
1508 CROCUS_STAGE_DIRTY_CONSTANTS_TCS;
1509 shs->sysvals_need_upload = true;
1510 }
1511 }
1512
1513 /**
1514 * Compile a tessellation evaluation shader, and upload the assembly.
1515 */
1516 static struct crocus_compiled_shader *
crocus_compile_tes(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_tes_prog_key * key)1517 crocus_compile_tes(struct crocus_context *ice,
1518 struct crocus_uncompiled_shader *ish,
1519 const struct elk_tes_prog_key *key)
1520 {
1521 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1522 const struct elk_compiler *compiler = screen->compiler;
1523 void *mem_ctx = ralloc_context(NULL);
1524 struct elk_tes_prog_data *tes_prog_data =
1525 rzalloc(mem_ctx, struct elk_tes_prog_data);
1526 struct elk_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1527 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1528 enum elk_param_builtin *system_values;
1529 const struct intel_device_info *devinfo = &screen->devinfo;
1530 unsigned num_system_values;
1531 unsigned num_cbufs;
1532
1533 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1534
1535 if (key->nr_userclip_plane_consts) {
1536 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1537 nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true,
1538 false, NULL);
1539 nir_lower_io_to_temporaries(nir, impl, true, false);
1540 nir_lower_global_vars_to_local(nir);
1541 nir_lower_vars_to_ssa(nir);
1542 nir_shader_gather_info(nir, impl);
1543 }
1544
1545 if (key->clamp_pointsize)
1546 nir_lower_point_size(nir, 1.0, 255.0);
1547
1548 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1549 &num_system_values, &num_cbufs);
1550 crocus_lower_swizzles(nir, &key->base.tex);
1551 struct crocus_binding_table bt;
1552 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1553 num_system_values, num_cbufs, &key->base.tex);
1554
1555 if (can_push_ubo(devinfo))
1556 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1557
1558 struct intel_vue_map input_vue_map;
1559 elk_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1560 key->patch_inputs_read);
1561
1562 struct elk_tes_prog_key key_clean = *key;
1563 crocus_sanitize_tex_key(&key_clean.base.tex);
1564
1565 struct elk_compile_tes_params params = {
1566 .base = {
1567 .mem_ctx = mem_ctx,
1568 .nir = nir,
1569 .log_data = &ice->dbg,
1570 },
1571 .key = &key_clean,
1572 .prog_data = tes_prog_data,
1573 .input_vue_map = &input_vue_map,
1574 };
1575
1576 const unsigned *program = elk_compile_tes(compiler, ¶ms);
1577 if (program == NULL) {
1578 dbg_printf("Failed to compile evaluation shader: %s\n", params.base.error_str);
1579 ralloc_free(mem_ctx);
1580 return false;
1581 }
1582
1583 if (ish->compiled_once) {
1584 crocus_debug_recompile(ice, &nir->info, &key->base);
1585 } else {
1586 ish->compiled_once = true;
1587 }
1588
1589 uint32_t *so_decls = NULL;
1590 if (devinfo->ver > 6)
1591 so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
1592 &vue_prog_data->vue_map);
1593
1594 struct crocus_compiled_shader *shader =
1595 crocus_upload_shader(ice, CROCUS_CACHE_TES, sizeof(*key), key, program,
1596 prog_data->program_size,
1597 prog_data, sizeof(*tes_prog_data), so_decls,
1598 system_values, num_system_values,
1599 num_cbufs, &bt);
1600
1601 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1602 ice->shaders.cache_bo_map,
1603 key, sizeof(*key));
1604
1605 ralloc_free(mem_ctx);
1606 return shader;
1607 }
1608
1609 /**
1610 * Update the current tessellation evaluation shader variant.
1611 *
1612 * Fill out the key, look in the cache, compile and bind if needed.
1613 */
1614 static void
crocus_update_compiled_tes(struct crocus_context * ice)1615 crocus_update_compiled_tes(struct crocus_context *ice)
1616 {
1617 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1618 struct crocus_uncompiled_shader *ish =
1619 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1620 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1621 struct elk_tes_prog_key key = { KEY_INIT() };
1622 const struct intel_device_info *devinfo = &screen->devinfo;
1623
1624 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1625 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_TESS_EVAL, ish,
1626 ish->nir->info.uses_texture_gather, &key.base.tex);
1627 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1628 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1629
1630 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_TES];
1631 struct crocus_compiled_shader *shader =
1632 crocus_find_cached_shader(ice, CROCUS_CACHE_TES, sizeof(key), &key);
1633
1634 if (!shader)
1635 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1636
1637 if (!shader)
1638 shader = crocus_compile_tes(ice, ish, &key);
1639
1640 if (old != shader) {
1641 ice->shaders.prog[CROCUS_CACHE_TES] = shader;
1642 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_TES |
1643 CROCUS_STAGE_DIRTY_BINDINGS_TES |
1644 CROCUS_STAGE_DIRTY_CONSTANTS_TES;
1645 shs->sysvals_need_upload = true;
1646 }
1647
1648 /* TODO: Could compare and avoid flagging this. */
1649 const struct shader_info *tes_info = &ish->nir->info;
1650 if (BITSET_TEST(tes_info->system_values_read, SYSTEM_VALUE_VERTICES_IN)) {
1651 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CONSTANTS_TES;
1652 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1653 }
1654 }
1655
1656 /**
1657 * Compile a geometry shader, and upload the assembly.
1658 */
1659 static struct crocus_compiled_shader *
crocus_compile_gs(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_gs_prog_key * key)1660 crocus_compile_gs(struct crocus_context *ice,
1661 struct crocus_uncompiled_shader *ish,
1662 const struct elk_gs_prog_key *key)
1663 {
1664 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1665 const struct elk_compiler *compiler = screen->compiler;
1666 const struct intel_device_info *devinfo = &screen->devinfo;
1667 void *mem_ctx = ralloc_context(NULL);
1668 struct elk_gs_prog_data *gs_prog_data =
1669 rzalloc(mem_ctx, struct elk_gs_prog_data);
1670 struct elk_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1671 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1672 enum elk_param_builtin *system_values;
1673 unsigned num_system_values;
1674 unsigned num_cbufs;
1675
1676 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1677
1678 if (key->nr_userclip_plane_consts) {
1679 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1680 nir_lower_clip_gs(nir, (1 << key->nr_userclip_plane_consts) - 1, false,
1681 NULL);
1682 nir_lower_io_to_temporaries(nir, impl, true, false);
1683 nir_lower_global_vars_to_local(nir);
1684 nir_lower_vars_to_ssa(nir);
1685 nir_shader_gather_info(nir, impl);
1686 }
1687
1688 if (key->clamp_pointsize)
1689 nir_lower_point_size(nir, 1.0, 255.0);
1690
1691 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1692 &num_system_values, &num_cbufs);
1693 crocus_lower_swizzles(nir, &key->base.tex);
1694 struct crocus_binding_table bt;
1695 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1696 num_system_values, num_cbufs, &key->base.tex);
1697
1698 if (can_push_ubo(devinfo))
1699 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1700
1701 elk_compute_vue_map(devinfo,
1702 &vue_prog_data->vue_map, nir->info.outputs_written,
1703 nir->info.separate_shader, /* pos slots */ 1);
1704
1705 if (devinfo->ver == 6)
1706 gfx6_gs_xfb_setup(&ish->stream_output, gs_prog_data);
1707 struct elk_gs_prog_key key_clean = *key;
1708 crocus_sanitize_tex_key(&key_clean.base.tex);
1709
1710 struct elk_compile_gs_params params = {
1711 .base = {
1712 .mem_ctx = mem_ctx,
1713 .nir = nir,
1714 .log_data = &ice->dbg,
1715 },
1716 .key = &key_clean,
1717 .prog_data = gs_prog_data,
1718 };
1719
1720 const unsigned *program = elk_compile_gs(compiler, ¶ms);
1721 if (program == NULL) {
1722 dbg_printf("Failed to compile geometry shader: %s\n", params.base.error_str);
1723 ralloc_free(mem_ctx);
1724 return false;
1725 }
1726
1727 if (ish->compiled_once) {
1728 crocus_debug_recompile(ice, &nir->info, &key->base);
1729 } else {
1730 ish->compiled_once = true;
1731 }
1732
1733 uint32_t *so_decls = NULL;
1734 if (devinfo->ver > 6)
1735 so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
1736 &vue_prog_data->vue_map);
1737
1738 struct crocus_compiled_shader *shader =
1739 crocus_upload_shader(ice, CROCUS_CACHE_GS, sizeof(*key), key, program,
1740 prog_data->program_size,
1741 prog_data, sizeof(*gs_prog_data), so_decls,
1742 system_values, num_system_values,
1743 num_cbufs, &bt);
1744
1745 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1746 ice->shaders.cache_bo_map,
1747 key, sizeof(*key));
1748
1749 ralloc_free(mem_ctx);
1750 return shader;
1751 }
1752
1753 /**
1754 * Update the current geometry shader variant.
1755 *
1756 * Fill out the key, look in the cache, compile and bind if needed.
1757 */
1758 static void
crocus_update_compiled_gs(struct crocus_context * ice)1759 crocus_update_compiled_gs(struct crocus_context *ice)
1760 {
1761 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1762 struct crocus_uncompiled_shader *ish =
1763 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1764 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_GS];
1765 struct crocus_compiled_shader *shader = NULL;
1766
1767 if (ish) {
1768 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1769 const struct intel_device_info *devinfo = &screen->devinfo;
1770 struct elk_gs_prog_key key = { KEY_INIT() };
1771
1772 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1773 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_GEOMETRY, ish,
1774 ish->nir->info.uses_texture_gather, &key.base.tex);
1775 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1776
1777 shader =
1778 crocus_find_cached_shader(ice, CROCUS_CACHE_GS, sizeof(key), &key);
1779
1780 if (!shader)
1781 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1782
1783 if (!shader)
1784 shader = crocus_compile_gs(ice, ish, &key);
1785 }
1786
1787 if (old != shader) {
1788 ice->shaders.prog[CROCUS_CACHE_GS] = shader;
1789 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS |
1790 CROCUS_STAGE_DIRTY_BINDINGS_GS |
1791 CROCUS_STAGE_DIRTY_CONSTANTS_GS;
1792 shs->sysvals_need_upload = true;
1793 }
1794 }
1795
1796 /**
1797 * Compile a fragment (pixel) shader, and upload the assembly.
1798 */
1799 static struct crocus_compiled_shader *
crocus_compile_fs(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_wm_prog_key * key,struct intel_vue_map * vue_map)1800 crocus_compile_fs(struct crocus_context *ice,
1801 struct crocus_uncompiled_shader *ish,
1802 const struct elk_wm_prog_key *key,
1803 struct intel_vue_map *vue_map)
1804 {
1805 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1806 const struct elk_compiler *compiler = screen->compiler;
1807 void *mem_ctx = ralloc_context(NULL);
1808 struct elk_wm_prog_data *fs_prog_data =
1809 rzalloc(mem_ctx, struct elk_wm_prog_data);
1810 struct elk_stage_prog_data *prog_data = &fs_prog_data->base;
1811 enum elk_param_builtin *system_values;
1812 const struct intel_device_info *devinfo = &screen->devinfo;
1813 unsigned num_system_values;
1814 unsigned num_cbufs;
1815
1816 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1817
1818 prog_data->use_alt_mode = nir->info.use_legacy_math_rules;
1819
1820 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1821 &num_system_values, &num_cbufs);
1822
1823 /* Lower output variables to load_output intrinsics before setting up
1824 * binding tables, so crocus_setup_binding_table can map any load_output
1825 * intrinsics to CROCUS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1826 * non-coherent framebuffer fetches.
1827 */
1828 elk_nir_lower_fs_outputs(nir);
1829
1830 /* lower swizzles before binding table */
1831 crocus_lower_swizzles(nir, &key->base.tex);
1832 int null_rts = 1;
1833
1834 struct crocus_binding_table bt;
1835 crocus_setup_binding_table(devinfo, nir, &bt,
1836 MAX2(key->nr_color_regions, null_rts),
1837 num_system_values, num_cbufs,
1838 &key->base.tex);
1839
1840 if (can_push_ubo(devinfo))
1841 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1842
1843 struct elk_wm_prog_key key_clean = *key;
1844 crocus_sanitize_tex_key(&key_clean.base.tex);
1845
1846 struct elk_compile_fs_params params = {
1847 .base = {
1848 .mem_ctx = mem_ctx,
1849 .nir = nir,
1850 .log_data = &ice->dbg,
1851 },
1852 .key = &key_clean,
1853 .prog_data = fs_prog_data,
1854
1855 .allow_spilling = true,
1856 .max_polygons = 1,
1857 .vue_map = vue_map,
1858 };
1859 const unsigned *program =
1860 elk_compile_fs(compiler, ¶ms);
1861 if (program == NULL) {
1862 dbg_printf("Failed to compile fragment shader: %s\n", params.base.error_str);
1863 ralloc_free(mem_ctx);
1864 return false;
1865 }
1866
1867 if (ish->compiled_once) {
1868 crocus_debug_recompile(ice, &nir->info, &key->base);
1869 } else {
1870 ish->compiled_once = true;
1871 }
1872
1873 struct crocus_compiled_shader *shader =
1874 crocus_upload_shader(ice, CROCUS_CACHE_FS, sizeof(*key), key, program,
1875 prog_data->program_size,
1876 prog_data, sizeof(*fs_prog_data), NULL,
1877 system_values, num_system_values,
1878 num_cbufs, &bt);
1879
1880 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1881 ice->shaders.cache_bo_map,
1882 key, sizeof(*key));
1883
1884 ralloc_free(mem_ctx);
1885 return shader;
1886 }
1887
1888 /**
1889 * Update the current fragment shader variant.
1890 *
1891 * Fill out the key, look in the cache, compile and bind if needed.
1892 */
1893 static void
crocus_update_compiled_fs(struct crocus_context * ice)1894 crocus_update_compiled_fs(struct crocus_context *ice)
1895 {
1896 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1897 const struct intel_device_info *devinfo = &screen->devinfo;
1898 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1899 struct crocus_uncompiled_shader *ish =
1900 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1901 struct elk_wm_prog_key key = { KEY_INIT() };
1902
1903 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1904 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_FRAGMENT, ish,
1905 ish->nir->info.uses_texture_gather, &key.base.tex);
1906 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1907
1908 if (ish->nos & (1ull << CROCUS_NOS_LAST_VUE_MAP))
1909 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1910
1911 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_FS];
1912 struct crocus_compiled_shader *shader =
1913 crocus_find_cached_shader(ice, CROCUS_CACHE_FS, sizeof(key), &key);
1914
1915 if (!shader)
1916 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1917
1918 if (!shader)
1919 shader = crocus_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1920
1921 if (old != shader) {
1922 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1923 // toggles. might be able to avoid flagging SBE too.
1924 ice->shaders.prog[CROCUS_CACHE_FS] = shader;
1925 ice->state.dirty |= CROCUS_DIRTY_WM;
1926 /* gen4 clip/sf rely on fs prog_data */
1927 if (devinfo->ver < 6)
1928 ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;
1929 else
1930 ice->state.dirty |= CROCUS_DIRTY_CLIP | CROCUS_DIRTY_GEN6_BLEND_STATE;
1931 if (devinfo->ver == 6)
1932 ice->state.dirty |= CROCUS_DIRTY_RASTER;
1933 if (devinfo->ver >= 7)
1934 ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;
1935 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_FS |
1936 CROCUS_STAGE_DIRTY_BINDINGS_FS |
1937 CROCUS_STAGE_DIRTY_CONSTANTS_FS;
1938 shs->sysvals_need_upload = true;
1939 }
1940 }
1941
1942 /**
1943 * Update the last enabled stage's VUE map.
1944 *
1945 * When the shader feeding the rasterizer's output interface changes, we
1946 * need to re-emit various packets.
1947 */
1948 static void
update_last_vue_map(struct crocus_context * ice,struct elk_stage_prog_data * prog_data)1949 update_last_vue_map(struct crocus_context *ice,
1950 struct elk_stage_prog_data *prog_data)
1951 {
1952 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1953 const struct intel_device_info *devinfo = &screen->devinfo;
1954 struct elk_vue_prog_data *vue_prog_data = (void *) prog_data;
1955 struct intel_vue_map *vue_map = &vue_prog_data->vue_map;
1956 struct intel_vue_map *old_map = ice->shaders.last_vue_map;
1957 const uint64_t changed_slots =
1958 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1959
1960 if (changed_slots & VARYING_BIT_VIEWPORT) {
1961 ice->state.num_viewports =
1962 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? CROCUS_MAX_VIEWPORTS : 1;
1963 ice->state.dirty |= CROCUS_DIRTY_SF_CL_VIEWPORT |
1964 CROCUS_DIRTY_CC_VIEWPORT;
1965 if (devinfo->ver < 6)
1966 ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;
1967
1968 if (devinfo->ver <= 6)
1969 ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
1970
1971 if (devinfo->ver >= 6)
1972 ice->state.dirty |= CROCUS_DIRTY_CLIP |
1973 CROCUS_DIRTY_GEN6_SCISSOR_RECT;;
1974 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS |
1975 ice->state.stage_dirty_for_nos[CROCUS_NOS_LAST_VUE_MAP];
1976 }
1977
1978 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1979 ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;
1980 if (devinfo->ver < 6)
1981 ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
1982 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS;
1983 }
1984
1985 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1986 }
1987
1988 static void
crocus_update_pull_constant_descriptors(struct crocus_context * ice,gl_shader_stage stage)1989 crocus_update_pull_constant_descriptors(struct crocus_context *ice,
1990 gl_shader_stage stage)
1991 {
1992 struct crocus_compiled_shader *shader = ice->shaders.prog[stage];
1993
1994 if (!shader || !shader->prog_data->has_ubo_pull)
1995 return;
1996
1997 struct crocus_shader_state *shs = &ice->state.shaders[stage];
1998 bool any_new_descriptors =
1999 shader->num_system_values > 0 && shs->sysvals_need_upload;
2000
2001 unsigned bound_cbufs = shs->bound_cbufs;
2002
2003 while (bound_cbufs) {
2004 const int i = u_bit_scan(&bound_cbufs);
2005 struct pipe_constant_buffer *cbuf = &shs->constbufs[i];
2006 if (cbuf->buffer) {
2007 any_new_descriptors = true;
2008 }
2009 }
2010
2011 if (any_new_descriptors)
2012 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_BINDINGS_VS << stage;
2013 }
2014
2015 /**
2016 * Get the prog_data for a given stage, or NULL if the stage is disabled.
2017 */
2018 static struct elk_vue_prog_data *
get_vue_prog_data(struct crocus_context * ice,gl_shader_stage stage)2019 get_vue_prog_data(struct crocus_context *ice, gl_shader_stage stage)
2020 {
2021 if (!ice->shaders.prog[stage])
2022 return NULL;
2023
2024 return (void *) ice->shaders.prog[stage]->prog_data;
2025 }
2026
2027 static struct crocus_compiled_shader *
crocus_compile_clip(struct crocus_context * ice,struct elk_clip_prog_key * key)2028 crocus_compile_clip(struct crocus_context *ice, struct elk_clip_prog_key *key)
2029 {
2030 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2031 const struct elk_compiler *compiler = screen->compiler;
2032 void *mem_ctx;
2033 unsigned program_size;
2034 mem_ctx = ralloc_context(NULL);
2035
2036 struct elk_clip_prog_data *clip_prog_data =
2037 rzalloc(mem_ctx, struct elk_clip_prog_data);
2038
2039 const unsigned *program = elk_compile_clip(compiler, mem_ctx, key, clip_prog_data,
2040 ice->shaders.last_vue_map, &program_size);
2041
2042 if (program == NULL) {
2043 dbg_printf("failed to compile clip shader\n");
2044 ralloc_free(mem_ctx);
2045 return false;
2046 }
2047 struct crocus_binding_table bt;
2048 memset(&bt, 0, sizeof(bt));
2049
2050 struct crocus_compiled_shader *shader =
2051 crocus_upload_shader(ice, CROCUS_CACHE_CLIP, sizeof(*key), key, program,
2052 program_size,
2053 (struct elk_stage_prog_data *)clip_prog_data, sizeof(*clip_prog_data),
2054 NULL, NULL, 0, 0, &bt);
2055 ralloc_free(mem_ctx);
2056 return shader;
2057 }
2058 static void
crocus_update_compiled_clip(struct crocus_context * ice)2059 crocus_update_compiled_clip(struct crocus_context *ice)
2060 {
2061 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2062 struct elk_clip_prog_key key;
2063 struct crocus_compiled_shader *old = ice->shaders.clip_prog;
2064 memset(&key, 0, sizeof(key));
2065
2066 const struct elk_wm_prog_data *wm_prog_data = elk_wm_prog_data(ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data);
2067 if (wm_prog_data) {
2068 key.contains_flat_varying = wm_prog_data->contains_flat_varying;
2069 key.contains_noperspective_varying =
2070 wm_prog_data->contains_noperspective_varying;
2071 memcpy(key.interp_mode, wm_prog_data->interp_mode, sizeof(key.interp_mode));
2072 }
2073
2074 key.primitive = ice->state.reduced_prim_mode;
2075 key.attrs = ice->shaders.last_vue_map->slots_valid;
2076
2077 struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
2078 key.pv_first = rs_state->flatshade_first;
2079
2080 if (rs_state->clip_plane_enable)
2081 key.nr_userclip = util_logbase2(rs_state->clip_plane_enable) + 1;
2082
2083 if (screen->devinfo.ver == 5)
2084 key.clip_mode = ELK_CLIP_MODE_KERNEL_CLIP;
2085 else
2086 key.clip_mode = ELK_CLIP_MODE_NORMAL;
2087
2088 if (key.primitive == MESA_PRIM_TRIANGLES) {
2089 if (rs_state->cull_face == PIPE_FACE_FRONT_AND_BACK)
2090 key.clip_mode = ELK_CLIP_MODE_REJECT_ALL;
2091 else {
2092 uint32_t fill_front = ELK_CLIP_FILL_MODE_CULL;
2093 uint32_t fill_back = ELK_CLIP_FILL_MODE_CULL;
2094 uint32_t offset_front = 0;
2095 uint32_t offset_back = 0;
2096
2097 if (!(rs_state->cull_face & PIPE_FACE_FRONT)) {
2098 switch (rs_state->fill_front) {
2099 case PIPE_POLYGON_MODE_FILL:
2100 fill_front = ELK_CLIP_FILL_MODE_FILL;
2101 offset_front = 0;
2102 break;
2103 case PIPE_POLYGON_MODE_LINE:
2104 fill_front = ELK_CLIP_FILL_MODE_LINE;
2105 offset_front = rs_state->offset_line;
2106 break;
2107 case PIPE_POLYGON_MODE_POINT:
2108 fill_front = ELK_CLIP_FILL_MODE_POINT;
2109 offset_front = rs_state->offset_point;
2110 break;
2111 }
2112 }
2113
2114 if (!(rs_state->cull_face & PIPE_FACE_BACK)) {
2115 switch (rs_state->fill_back) {
2116 case PIPE_POLYGON_MODE_FILL:
2117 fill_back = ELK_CLIP_FILL_MODE_FILL;
2118 offset_back = 0;
2119 break;
2120 case PIPE_POLYGON_MODE_LINE:
2121 fill_back = ELK_CLIP_FILL_MODE_LINE;
2122 offset_back = rs_state->offset_line;
2123 break;
2124 case PIPE_POLYGON_MODE_POINT:
2125 fill_back = ELK_CLIP_FILL_MODE_POINT;
2126 offset_back = rs_state->offset_point;
2127 break;
2128 }
2129 }
2130
2131 if (rs_state->fill_back != PIPE_POLYGON_MODE_FILL ||
2132 rs_state->fill_front != PIPE_POLYGON_MODE_FILL) {
2133 key.do_unfilled = 1;
2134
2135 /* Most cases the fixed function units will handle. Cases where
2136 * one or more polygon faces are unfilled will require help:
2137 */
2138 key.clip_mode = ELK_CLIP_MODE_CLIP_NON_REJECTED;
2139
2140 if (offset_back || offset_front) {
2141 double mrd = 0.0;
2142 if (ice->state.framebuffer.zsbuf)
2143 mrd = util_get_depth_format_mrd(util_format_description(ice->state.framebuffer.zsbuf->format));
2144 key.offset_units = rs_state->offset_units * mrd * 2;
2145 key.offset_factor = rs_state->offset_scale * mrd;
2146 key.offset_clamp = rs_state->offset_clamp * mrd;
2147 }
2148
2149 if (!(rs_state->front_ccw ^ rs_state->bottom_edge_rule)) {
2150 key.fill_ccw = fill_front;
2151 key.fill_cw = fill_back;
2152 key.offset_ccw = offset_front;
2153 key.offset_cw = offset_back;
2154 if (rs_state->light_twoside &&
2155 key.fill_cw != ELK_CLIP_FILL_MODE_CULL)
2156 key.copy_bfc_cw = 1;
2157 } else {
2158 key.fill_cw = fill_front;
2159 key.fill_ccw = fill_back;
2160 key.offset_cw = offset_front;
2161 key.offset_ccw = offset_back;
2162 if (rs_state->light_twoside &&
2163 key.fill_ccw != ELK_CLIP_FILL_MODE_CULL)
2164 key.copy_bfc_ccw = 1;
2165 }
2166 }
2167 }
2168 }
2169 struct crocus_compiled_shader *shader =
2170 crocus_find_cached_shader(ice, CROCUS_CACHE_CLIP, sizeof(key), &key);
2171
2172 if (!shader)
2173 shader = crocus_compile_clip(ice, &key);
2174
2175 if (old != shader) {
2176 ice->state.dirty |= CROCUS_DIRTY_CLIP;
2177 ice->shaders.clip_prog = shader;
2178 }
2179 }
2180
2181 static struct crocus_compiled_shader *
crocus_compile_sf(struct crocus_context * ice,struct elk_sf_prog_key * key)2182 crocus_compile_sf(struct crocus_context *ice, struct elk_sf_prog_key *key)
2183 {
2184 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2185 const struct elk_compiler *compiler = screen->compiler;
2186 void *mem_ctx;
2187 unsigned program_size;
2188 mem_ctx = ralloc_context(NULL);
2189
2190 struct elk_sf_prog_data *sf_prog_data =
2191 rzalloc(mem_ctx, struct elk_sf_prog_data);
2192
2193 const unsigned *program = elk_compile_sf(compiler, mem_ctx, key, sf_prog_data,
2194 ice->shaders.last_vue_map, &program_size);
2195
2196 if (program == NULL) {
2197 dbg_printf("failed to compile sf shader\n");
2198 ralloc_free(mem_ctx);
2199 return false;
2200 }
2201
2202 struct crocus_binding_table bt;
2203 memset(&bt, 0, sizeof(bt));
2204 struct crocus_compiled_shader *shader =
2205 crocus_upload_shader(ice, CROCUS_CACHE_SF, sizeof(*key), key, program,
2206 program_size,
2207 (struct elk_stage_prog_data *)sf_prog_data, sizeof(*sf_prog_data),
2208 NULL, NULL, 0, 0, &bt);
2209 ralloc_free(mem_ctx);
2210 return shader;
2211 }
2212
2213 static void
crocus_update_compiled_sf(struct crocus_context * ice)2214 crocus_update_compiled_sf(struct crocus_context *ice)
2215 {
2216 struct elk_sf_prog_key key;
2217 struct crocus_compiled_shader *old = ice->shaders.sf_prog;
2218 memset(&key, 0, sizeof(key));
2219
2220 key.attrs = ice->shaders.last_vue_map->slots_valid;
2221
2222 switch (ice->state.reduced_prim_mode) {
2223 case MESA_PRIM_TRIANGLES:
2224 default:
2225 if (key.attrs & BITFIELD64_BIT(VARYING_SLOT_EDGE))
2226 key.primitive = ELK_SF_PRIM_UNFILLED_TRIS;
2227 else
2228 key.primitive = ELK_SF_PRIM_TRIANGLES;
2229 break;
2230 case MESA_PRIM_LINES:
2231 key.primitive = ELK_SF_PRIM_LINES;
2232 break;
2233 case MESA_PRIM_POINTS:
2234 key.primitive = ELK_SF_PRIM_POINTS;
2235 break;
2236 }
2237
2238 struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
2239 key.userclip_active = rs_state->clip_plane_enable != 0;
2240 const struct elk_wm_prog_data *wm_prog_data = elk_wm_prog_data(ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data);
2241 if (wm_prog_data) {
2242 key.contains_flat_varying = wm_prog_data->contains_flat_varying;
2243 memcpy(key.interp_mode, wm_prog_data->interp_mode, sizeof(key.interp_mode));
2244 }
2245
2246 key.do_twoside_color = rs_state->light_twoside;
2247
2248 key.do_point_sprite = rs_state->point_quad_rasterization;
2249 if (key.do_point_sprite) {
2250 key.point_sprite_coord_replace = rs_state->sprite_coord_enable & 0xff;
2251 if (rs_state->sprite_coord_enable & (1 << 8))
2252 key.do_point_coord = 1;
2253 if (wm_prog_data && wm_prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
2254 key.do_point_coord = 1;
2255 }
2256
2257 key.sprite_origin_lower_left = rs_state->sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT;
2258
2259 if (key.do_twoside_color) {
2260 key.frontface_ccw = rs_state->front_ccw;
2261 }
2262 struct crocus_compiled_shader *shader =
2263 crocus_find_cached_shader(ice, CROCUS_CACHE_SF, sizeof(key), &key);
2264
2265 if (!shader)
2266 shader = crocus_compile_sf(ice, &key);
2267
2268 if (old != shader) {
2269 ice->state.dirty |= CROCUS_DIRTY_RASTER;
2270 ice->shaders.sf_prog = shader;
2271 }
2272 }
2273
2274 static struct crocus_compiled_shader *
crocus_compile_ff_gs(struct crocus_context * ice,struct elk_ff_gs_prog_key * key)2275 crocus_compile_ff_gs(struct crocus_context *ice, struct elk_ff_gs_prog_key *key)
2276 {
2277 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2278 struct elk_compiler *compiler = screen->compiler;
2279 void *mem_ctx;
2280 unsigned program_size;
2281 mem_ctx = ralloc_context(NULL);
2282
2283 struct elk_ff_gs_prog_data *ff_gs_prog_data =
2284 rzalloc(mem_ctx, struct elk_ff_gs_prog_data);
2285
2286 const unsigned *program = elk_compile_ff_gs_prog(compiler, mem_ctx, key, ff_gs_prog_data,
2287 ice->shaders.last_vue_map, &program_size);
2288
2289 if (program == NULL) {
2290 dbg_printf("failed to compile sf shader\n");
2291 ralloc_free(mem_ctx);
2292 return false;
2293 }
2294
2295 struct crocus_binding_table bt;
2296 memset(&bt, 0, sizeof(bt));
2297
2298 if (screen->devinfo.ver == 6) {
2299 bt.sizes[CROCUS_SURFACE_GROUP_SOL] = ELK_MAX_SOL_BINDINGS;
2300 bt.used_mask[CROCUS_SURFACE_GROUP_SOL] = (uint64_t)-1;
2301
2302 bt.size_bytes = ELK_MAX_SOL_BINDINGS * 4;
2303 }
2304
2305 struct crocus_compiled_shader *shader =
2306 crocus_upload_shader(ice, CROCUS_CACHE_FF_GS, sizeof(*key), key, program,
2307 program_size,
2308 (struct elk_stage_prog_data *)ff_gs_prog_data, sizeof(*ff_gs_prog_data),
2309 NULL, NULL, 0, 0, &bt);
2310 ralloc_free(mem_ctx);
2311 return shader;
2312 }
2313
2314 static void
crocus_update_compiled_ff_gs(struct crocus_context * ice)2315 crocus_update_compiled_ff_gs(struct crocus_context *ice)
2316 {
2317 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2318 const struct intel_device_info *devinfo = &screen->devinfo;
2319 struct elk_ff_gs_prog_key key;
2320 struct crocus_compiled_shader *old = ice->shaders.ff_gs_prog;
2321 memset(&key, 0, sizeof(key));
2322
2323 assert(devinfo->ver < 7);
2324
2325 key.attrs = ice->shaders.last_vue_map->slots_valid;
2326
2327 key.primitive = screen->vtbl.translate_prim_type(ice->state.prim_mode, 0);
2328
2329 struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
2330 key.pv_first = rs_state->flatshade_first;
2331
2332 if (key.primitive == _3DPRIM_QUADLIST && !rs_state->flatshade) {
2333 /* Provide consistenbbbbbt primitive order with elk_set_prim's
2334 * optimization of single quads to trifans.
2335 */
2336 key.pv_first = true;
2337 }
2338
2339 if (devinfo->ver >= 6) {
2340 key.need_gs_prog = ice->state.streamout_active;
2341 if (key.need_gs_prog) {
2342 struct crocus_uncompiled_shader *vs =
2343 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
2344 gfx6_ff_gs_xfb_setup(&vs->stream_output,
2345 &key);
2346 }
2347 } else {
2348 key.need_gs_prog = (key.primitive == _3DPRIM_QUADLIST ||
2349 key.primitive == _3DPRIM_QUADSTRIP ||
2350 key.primitive == _3DPRIM_LINELOOP);
2351 }
2352
2353 struct crocus_compiled_shader *shader = NULL;
2354 if (key.need_gs_prog) {
2355 shader = crocus_find_cached_shader(ice, CROCUS_CACHE_FF_GS,
2356 sizeof(key), &key);
2357 if (!shader)
2358 shader = crocus_compile_ff_gs(ice, &key);
2359 }
2360 if (old != shader) {
2361 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS;
2362 if (!!old != !!shader)
2363 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
2364 ice->shaders.ff_gs_prog = shader;
2365 if (shader) {
2366 const struct elk_ff_gs_prog_data *gs_prog_data = (struct elk_ff_gs_prog_data *)ice->shaders.ff_gs_prog->prog_data;
2367 ice->state.last_xfb_verts_per_prim = gs_prog_data->svbi_postincrement_value;
2368 }
2369 }
2370 }
2371
2372 // XXX: crocus_compiled_shaders are space-leaking :(
2373 // XXX: do remember to unbind them if deleting them.
2374
2375 /**
2376 * Update the current shader variants for the given state.
2377 *
2378 * This should be called on every draw call to ensure that the correct
2379 * shaders are bound. It will also flag any dirty state triggered by
2380 * swapping out those shaders.
2381 */
2382 bool
crocus_update_compiled_shaders(struct crocus_context * ice)2383 crocus_update_compiled_shaders(struct crocus_context *ice)
2384 {
2385 struct crocus_screen *screen = (void *) ice->ctx.screen;
2386 const uint64_t stage_dirty = ice->state.stage_dirty;
2387
2388 struct elk_vue_prog_data *old_prog_datas[4];
2389 if (!(ice->state.dirty & CROCUS_DIRTY_GEN6_URB)) {
2390 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
2391 old_prog_datas[i] = get_vue_prog_data(ice, i);
2392 }
2393
2394 if (stage_dirty & (CROCUS_STAGE_DIRTY_UNCOMPILED_TCS |
2395 CROCUS_STAGE_DIRTY_UNCOMPILED_TES)) {
2396 struct crocus_uncompiled_shader *tes =
2397 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
2398 if (tes) {
2399 crocus_update_compiled_tcs(ice);
2400 crocus_update_compiled_tes(ice);
2401 } else {
2402 ice->shaders.prog[CROCUS_CACHE_TCS] = NULL;
2403 ice->shaders.prog[CROCUS_CACHE_TES] = NULL;
2404 ice->state.stage_dirty |=
2405 CROCUS_STAGE_DIRTY_TCS | CROCUS_STAGE_DIRTY_TES |
2406 CROCUS_STAGE_DIRTY_BINDINGS_TCS | CROCUS_STAGE_DIRTY_BINDINGS_TES |
2407 CROCUS_STAGE_DIRTY_CONSTANTS_TCS | CROCUS_STAGE_DIRTY_CONSTANTS_TES;
2408 }
2409 }
2410
2411 if (stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_VS)
2412 crocus_update_compiled_vs(ice);
2413 if (stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_GS)
2414 crocus_update_compiled_gs(ice);
2415
2416 if (stage_dirty & (CROCUS_STAGE_DIRTY_UNCOMPILED_GS |
2417 CROCUS_STAGE_DIRTY_UNCOMPILED_TES)) {
2418 const struct crocus_compiled_shader *gs =
2419 ice->shaders.prog[MESA_SHADER_GEOMETRY];
2420 const struct crocus_compiled_shader *tes =
2421 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
2422
2423 bool points_or_lines = false;
2424
2425 if (gs) {
2426 const struct elk_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
2427 points_or_lines =
2428 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
2429 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
2430 } else if (tes) {
2431 const struct elk_tes_prog_data *tes_data = (void *) tes->prog_data;
2432 points_or_lines =
2433 tes_data->output_topology == INTEL_TESS_OUTPUT_TOPOLOGY_LINE ||
2434 tes_data->output_topology == INTEL_TESS_OUTPUT_TOPOLOGY_POINT;
2435 }
2436
2437 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
2438 /* Outbound to XY Clip enables */
2439 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
2440 ice->state.dirty |= CROCUS_DIRTY_CLIP;
2441 }
2442 }
2443
2444 if (!ice->shaders.prog[MESA_SHADER_VERTEX])
2445 return false;
2446
2447 gl_shader_stage last_stage = last_vue_stage(ice);
2448 struct crocus_compiled_shader *shader = ice->shaders.prog[last_stage];
2449 struct crocus_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
2450 update_last_vue_map(ice, shader->prog_data);
2451 if (ice->state.streamout != shader->streamout) {
2452 ice->state.streamout = shader->streamout;
2453 ice->state.dirty |= CROCUS_DIRTY_SO_DECL_LIST | CROCUS_DIRTY_STREAMOUT;
2454 }
2455
2456 if (ice->state.streamout_active) {
2457 screen->vtbl.update_so_strides(ice, ish->stream_output.stride);
2458 }
2459
2460 /* use ice->state version as last_vue_map can dirty this bit */
2461 if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_FS)
2462 crocus_update_compiled_fs(ice);
2463
2464 if (screen->devinfo.ver <= 6) {
2465 if (ice->state.dirty & CROCUS_DIRTY_GEN4_FF_GS_PROG &&
2466 !ice->shaders.prog[MESA_SHADER_GEOMETRY])
2467 crocus_update_compiled_ff_gs(ice);
2468 }
2469
2470 if (screen->devinfo.ver < 6) {
2471 if (ice->state.dirty & CROCUS_DIRTY_GEN4_CLIP_PROG)
2472 crocus_update_compiled_clip(ice);
2473 if (ice->state.dirty & CROCUS_DIRTY_GEN4_SF_PROG)
2474 crocus_update_compiled_sf(ice);
2475 }
2476
2477
2478 /* Changing shader interfaces may require a URB configuration. */
2479 if (!(ice->state.dirty & CROCUS_DIRTY_GEN6_URB)) {
2480 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
2481 struct elk_vue_prog_data *old = old_prog_datas[i];
2482 struct elk_vue_prog_data *new = get_vue_prog_data(ice, i);
2483 if (!!old != !!new ||
2484 (new && new->urb_entry_size != old->urb_entry_size)) {
2485 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
2486 break;
2487 }
2488 }
2489 }
2490
2491 if (ice->state.stage_dirty & CROCUS_RENDER_STAGE_DIRTY_CONSTANTS) {
2492 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
2493 if (ice->state.stage_dirty & (CROCUS_STAGE_DIRTY_CONSTANTS_VS << i))
2494 crocus_update_pull_constant_descriptors(ice, i);
2495 }
2496 }
2497 return true;
2498 }
2499
2500 static struct crocus_compiled_shader *
crocus_compile_cs(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_cs_prog_key * key)2501 crocus_compile_cs(struct crocus_context *ice,
2502 struct crocus_uncompiled_shader *ish,
2503 const struct elk_cs_prog_key *key)
2504 {
2505 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2506 const struct elk_compiler *compiler = screen->compiler;
2507 void *mem_ctx = ralloc_context(NULL);
2508 struct elk_cs_prog_data *cs_prog_data =
2509 rzalloc(mem_ctx, struct elk_cs_prog_data);
2510 struct elk_stage_prog_data *prog_data = &cs_prog_data->base;
2511 enum elk_param_builtin *system_values;
2512 const struct intel_device_info *devinfo = &screen->devinfo;
2513 unsigned num_system_values;
2514 unsigned num_cbufs;
2515
2516 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
2517
2518 NIR_PASS_V(nir, elk_nir_lower_cs_intrinsics, devinfo, cs_prog_data);
2519
2520 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
2521 &num_system_values, &num_cbufs);
2522 crocus_lower_swizzles(nir, &key->base.tex);
2523 struct crocus_binding_table bt;
2524 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
2525 num_system_values, num_cbufs, &key->base.tex);
2526
2527 struct elk_compile_cs_params params = {
2528 .base = {
2529 .mem_ctx = mem_ctx,
2530 .nir = nir,
2531 .log_data = &ice->dbg,
2532 },
2533 .key = key,
2534 .prog_data = cs_prog_data,
2535 };
2536
2537 const unsigned *program =
2538 elk_compile_cs(compiler, ¶ms);
2539 if (program == NULL) {
2540 dbg_printf("Failed to compile compute shader: %s\n", params.base.error_str);
2541 ralloc_free(mem_ctx);
2542 return false;
2543 }
2544
2545 if (ish->compiled_once) {
2546 crocus_debug_recompile(ice, &nir->info, &key->base);
2547 } else {
2548 ish->compiled_once = true;
2549 }
2550
2551 struct crocus_compiled_shader *shader =
2552 crocus_upload_shader(ice, CROCUS_CACHE_CS, sizeof(*key), key, program,
2553 prog_data->program_size,
2554 prog_data, sizeof(*cs_prog_data), NULL,
2555 system_values, num_system_values,
2556 num_cbufs, &bt);
2557
2558 crocus_disk_cache_store(screen->disk_cache, ish, shader,
2559 ice->shaders.cache_bo_map,
2560 key, sizeof(*key));
2561
2562 ralloc_free(mem_ctx);
2563 return shader;
2564 }
2565
2566 static void
crocus_update_compiled_cs(struct crocus_context * ice)2567 crocus_update_compiled_cs(struct crocus_context *ice)
2568 {
2569 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2570 struct crocus_uncompiled_shader *ish =
2571 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2572 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2573 const struct intel_device_info *devinfo = &screen->devinfo;
2574 struct elk_cs_prog_key key = { KEY_INIT() };
2575
2576 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
2577 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_COMPUTE, ish,
2578 ish->nir->info.uses_texture_gather, &key.base.tex);
2579 screen->vtbl.populate_cs_key(ice, &key);
2580
2581 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_CS];
2582 struct crocus_compiled_shader *shader =
2583 crocus_find_cached_shader(ice, CROCUS_CACHE_CS, sizeof(key), &key);
2584
2585 if (!shader)
2586 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2587
2588 if (!shader)
2589 shader = crocus_compile_cs(ice, ish, &key);
2590
2591 if (old != shader) {
2592 ice->shaders.prog[CROCUS_CACHE_CS] = shader;
2593 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CS |
2594 CROCUS_STAGE_DIRTY_BINDINGS_CS |
2595 CROCUS_STAGE_DIRTY_CONSTANTS_CS;
2596 shs->sysvals_need_upload = true;
2597 }
2598 }
2599
2600 void
crocus_update_compiled_compute_shader(struct crocus_context * ice)2601 crocus_update_compiled_compute_shader(struct crocus_context *ice)
2602 {
2603 if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_CS)
2604 crocus_update_compiled_cs(ice);
2605
2606 if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_CONSTANTS_CS)
2607 crocus_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2608 }
2609
2610 void
crocus_fill_cs_push_const_buffer(struct elk_cs_prog_data * cs_prog_data,unsigned threads,uint32_t * dst)2611 crocus_fill_cs_push_const_buffer(struct elk_cs_prog_data *cs_prog_data,
2612 unsigned threads,
2613 uint32_t *dst)
2614 {
2615 assert(elk_cs_push_const_total_size(cs_prog_data, threads) > 0);
2616 assert(cs_prog_data->push.cross_thread.size == 0);
2617 assert(cs_prog_data->push.per_thread.dwords == 1);
2618 assert(cs_prog_data->base.param[0] == ELK_PARAM_BUILTIN_SUBGROUP_ID);
2619 for (unsigned t = 0; t < threads; t++)
2620 dst[8 * t] = t;
2621 }
2622
2623 /**
2624 * Allocate scratch BOs as needed for the given per-thread size and stage.
2625 */
2626 struct crocus_bo *
crocus_get_scratch_space(struct crocus_context * ice,unsigned per_thread_scratch,gl_shader_stage stage)2627 crocus_get_scratch_space(struct crocus_context *ice,
2628 unsigned per_thread_scratch,
2629 gl_shader_stage stage)
2630 {
2631 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2632 struct crocus_bufmgr *bufmgr = screen->bufmgr;
2633 const struct intel_device_info *devinfo = &screen->devinfo;
2634
2635 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2636 assert(encoded_size < (1 << 16));
2637
2638 struct crocus_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2639
2640 if (!*bop) {
2641 assert(stage < ARRAY_SIZE(devinfo->max_scratch_ids));
2642 uint32_t size = per_thread_scratch * devinfo->max_scratch_ids[stage];
2643 *bop = crocus_bo_alloc(bufmgr, "scratch", size);
2644 }
2645
2646 return *bop;
2647 }
2648
2649 /* ------------------------------------------------------------------- */
2650
2651 /**
2652 * The pipe->create_[stage]_state() driver hooks.
2653 *
2654 * Performs basic NIR preprocessing, records any state dependencies, and
2655 * returns an crocus_uncompiled_shader as the Gallium CSO.
2656 *
2657 * Actual shader compilation to assembly happens later, at first use.
2658 */
2659 static void *
crocus_create_uncompiled_shader(struct pipe_context * ctx,nir_shader * nir,const struct pipe_stream_output_info * so_info)2660 crocus_create_uncompiled_shader(struct pipe_context *ctx,
2661 nir_shader *nir,
2662 const struct pipe_stream_output_info *so_info)
2663 {
2664 struct crocus_screen *screen = (struct crocus_screen *)ctx->screen;
2665 const struct intel_device_info *devinfo = &screen->devinfo;
2666 struct crocus_uncompiled_shader *ish =
2667 calloc(1, sizeof(struct crocus_uncompiled_shader));
2668 if (!ish)
2669 return NULL;
2670
2671 if (devinfo->ver >= 6)
2672 NIR_PASS(ish->needs_edge_flag, nir, crocus_fix_edge_flags);
2673 else
2674 ish->needs_edge_flag = false;
2675
2676 struct elk_nir_compiler_opts opts = {};
2677 elk_preprocess_nir(screen->compiler, nir, &opts);
2678
2679 NIR_PASS_V(nir, elk_nir_lower_storage_image,
2680 &(struct elk_nir_lower_storage_image_opts) {
2681 .devinfo = devinfo,
2682 .lower_loads = true,
2683 .lower_stores = true,
2684 .lower_atomics = true,
2685 .lower_get_size = true,
2686 });
2687 NIR_PASS_V(nir, crocus_lower_storage_image_derefs);
2688
2689 nir_sweep(nir);
2690
2691 ish->program_id = get_new_program_id(screen);
2692 ish->nir = nir;
2693 if (so_info) {
2694 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2695 update_so_info(&ish->stream_output, nir->info.outputs_written);
2696 }
2697
2698 if (screen->disk_cache) {
2699 /* Serialize the NIR to a binary blob that we can hash for the disk
2700 * cache. Drop unnecessary information (like variable names)
2701 * so the serialized NIR is smaller, and also to let us detect more
2702 * isomorphic shaders when hashing, increasing cache hits.
2703 */
2704 struct blob blob;
2705 blob_init(&blob);
2706 nir_serialize(&blob, nir, true);
2707 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2708 blob_finish(&blob);
2709 }
2710
2711 return ish;
2712 }
2713
2714 static struct crocus_uncompiled_shader *
crocus_create_shader_state(struct pipe_context * ctx,const struct pipe_shader_state * state)2715 crocus_create_shader_state(struct pipe_context *ctx,
2716 const struct pipe_shader_state *state)
2717 {
2718 struct nir_shader *nir;
2719
2720 if (state->type == PIPE_SHADER_IR_TGSI)
2721 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2722 else
2723 nir = state->ir.nir;
2724
2725 return crocus_create_uncompiled_shader(ctx, nir, &state->stream_output);
2726 }
2727
2728 static void *
crocus_create_vs_state(struct pipe_context * ctx,const struct pipe_shader_state * state)2729 crocus_create_vs_state(struct pipe_context *ctx,
2730 const struct pipe_shader_state *state)
2731 {
2732 struct crocus_context *ice = (void *) ctx;
2733 struct crocus_screen *screen = (void *) ctx->screen;
2734 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2735
2736 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2737 /* User clip planes or gen5 sprite coord enable */
2738 if (ish->nir->info.clip_distance_array_size == 0 ||
2739 screen->devinfo.ver <= 5)
2740 ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);
2741
2742 if (screen->devinfo.verx10 < 75)
2743 ish->nos |= (1ull << CROCUS_NOS_VERTEX_ELEMENTS);
2744
2745 if (screen->precompile) {
2746 struct elk_vs_prog_key key = { KEY_INIT() };
2747
2748 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2749 crocus_compile_vs(ice, ish, &key);
2750 }
2751
2752 return ish;
2753 }
2754
2755 static void *
crocus_create_tcs_state(struct pipe_context * ctx,const struct pipe_shader_state * state)2756 crocus_create_tcs_state(struct pipe_context *ctx,
2757 const struct pipe_shader_state *state)
2758 {
2759 struct crocus_context *ice = (void *) ctx;
2760 struct crocus_screen *screen = (void *) ctx->screen;
2761 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2762 struct shader_info *info = &ish->nir->info;
2763
2764 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2765 if (screen->precompile) {
2766 struct elk_tcs_prog_key key = {
2767 KEY_INIT(),
2768 // XXX: make sure the linker fills this out from the TES...
2769 ._tes_primitive_mode =
2770 info->tess._primitive_mode ? info->tess._primitive_mode
2771 : TESS_PRIMITIVE_TRIANGLES,
2772 .outputs_written = info->outputs_written,
2773 .patch_outputs_written = info->patch_outputs_written,
2774 };
2775
2776 key.input_vertices = info->tess.tcs_vertices_out;
2777
2778 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2779 crocus_compile_tcs(ice, ish, &key);
2780 }
2781
2782 return ish;
2783 }
2784
2785 static void *
crocus_create_tes_state(struct pipe_context * ctx,const struct pipe_shader_state * state)2786 crocus_create_tes_state(struct pipe_context *ctx,
2787 const struct pipe_shader_state *state)
2788 {
2789 struct crocus_context *ice = (void *) ctx;
2790 struct crocus_screen *screen = (void *) ctx->screen;
2791 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2792 struct shader_info *info = &ish->nir->info;
2793
2794 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2795 /* User clip planes */
2796 if (ish->nir->info.clip_distance_array_size == 0)
2797 ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);
2798
2799 if (screen->precompile) {
2800 struct elk_tes_prog_key key = {
2801 KEY_INIT(),
2802 // XXX: not ideal, need TCS output/TES input unification
2803 .inputs_read = info->inputs_read,
2804 .patch_inputs_read = info->patch_inputs_read,
2805 };
2806
2807 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2808 crocus_compile_tes(ice, ish, &key);
2809 }
2810
2811 return ish;
2812 }
2813
2814 static void *
crocus_create_gs_state(struct pipe_context * ctx,const struct pipe_shader_state * state)2815 crocus_create_gs_state(struct pipe_context *ctx,
2816 const struct pipe_shader_state *state)
2817 {
2818 struct crocus_context *ice = (void *) ctx;
2819 struct crocus_screen *screen = (void *) ctx->screen;
2820 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2821
2822 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2823 /* User clip planes */
2824 if (ish->nir->info.clip_distance_array_size == 0)
2825 ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);
2826
2827 if (screen->precompile) {
2828 struct elk_gs_prog_key key = { KEY_INIT() };
2829
2830 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2831 crocus_compile_gs(ice, ish, &key);
2832 }
2833
2834 return ish;
2835 }
2836
2837 static void *
crocus_create_fs_state(struct pipe_context * ctx,const struct pipe_shader_state * state)2838 crocus_create_fs_state(struct pipe_context *ctx,
2839 const struct pipe_shader_state *state)
2840 {
2841 struct crocus_context *ice = (void *) ctx;
2842 struct crocus_screen *screen = (void *) ctx->screen;
2843 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2844 struct shader_info *info = &ish->nir->info;
2845
2846 ish->nos |= (1ull << CROCUS_NOS_FRAMEBUFFER) |
2847 (1ull << CROCUS_NOS_DEPTH_STENCIL_ALPHA) |
2848 (1ull << CROCUS_NOS_RASTERIZER) |
2849 (1ull << CROCUS_NOS_TEXTURES) |
2850 (1ull << CROCUS_NOS_BLEND);
2851
2852 /* The program key needs the VUE map if there are > 16 inputs or gen4/5 */
2853 if (screen->devinfo.ver < 6 || util_bitcount64(ish->nir->info.inputs_read &
2854 ELK_FS_VARYING_INPUT_MASK) > 16) {
2855 ish->nos |= (1ull << CROCUS_NOS_LAST_VUE_MAP);
2856 }
2857
2858 if (screen->precompile) {
2859 const uint64_t color_outputs = info->outputs_written &
2860 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2861 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2862 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2863
2864 bool can_rearrange_varyings =
2865 screen->devinfo.ver > 6 && util_bitcount64(info->inputs_read & ELK_FS_VARYING_INPUT_MASK) <= 16;
2866
2867 const struct intel_device_info *devinfo = &screen->devinfo;
2868 struct elk_wm_prog_key key = {
2869 KEY_INIT(),
2870 .nr_color_regions = util_bitcount(color_outputs),
2871 .coherent_fb_fetch = false,
2872 .ignore_sample_mask_out = screen->devinfo.ver < 6 ? 1 : 0,
2873 .input_slots_valid =
2874 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2875 };
2876
2877 struct intel_vue_map vue_map;
2878 if (devinfo->ver < 6) {
2879 elk_compute_vue_map(devinfo, &vue_map,
2880 info->inputs_read | VARYING_BIT_POS,
2881 false, /* pos slots */ 1);
2882 }
2883 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2884 crocus_compile_fs(ice, ish, &key, &vue_map);
2885 }
2886
2887 return ish;
2888 }
2889
2890 static void *
crocus_create_compute_state(struct pipe_context * ctx,const struct pipe_compute_state * state)2891 crocus_create_compute_state(struct pipe_context *ctx,
2892 const struct pipe_compute_state *state)
2893 {
2894 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2895
2896 struct crocus_context *ice = (void *) ctx;
2897 struct crocus_screen *screen = (void *) ctx->screen;
2898 struct crocus_uncompiled_shader *ish =
2899 crocus_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2900
2901 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2902 // XXX: disallow more than 64KB of shared variables
2903
2904 if (screen->precompile) {
2905 struct elk_cs_prog_key key = { KEY_INIT() };
2906
2907 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2908 crocus_compile_cs(ice, ish, &key);
2909 }
2910
2911 return ish;
2912 }
2913
2914 /**
2915 * The pipe->delete_[stage]_state() driver hooks.
2916 *
2917 * Frees the crocus_uncompiled_shader.
2918 */
2919 static void
crocus_delete_shader_state(struct pipe_context * ctx,void * state,gl_shader_stage stage)2920 crocus_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2921 {
2922 struct crocus_uncompiled_shader *ish = state;
2923 struct crocus_context *ice = (void *) ctx;
2924
2925 if (ice->shaders.uncompiled[stage] == ish) {
2926 ice->shaders.uncompiled[stage] = NULL;
2927 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2928 }
2929
2930 if (ish->const_data) {
2931 pipe_resource_reference(&ish->const_data, NULL);
2932 pipe_resource_reference(&ish->const_data_state.res, NULL);
2933 }
2934
2935 ralloc_free(ish->nir);
2936 free(ish);
2937 }
2938
2939 static void
crocus_delete_vs_state(struct pipe_context * ctx,void * state)2940 crocus_delete_vs_state(struct pipe_context *ctx, void *state)
2941 {
2942 crocus_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2943 }
2944
2945 static void
crocus_delete_tcs_state(struct pipe_context * ctx,void * state)2946 crocus_delete_tcs_state(struct pipe_context *ctx, void *state)
2947 {
2948 crocus_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2949 }
2950
2951 static void
crocus_delete_tes_state(struct pipe_context * ctx,void * state)2952 crocus_delete_tes_state(struct pipe_context *ctx, void *state)
2953 {
2954 crocus_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2955 }
2956
2957 static void
crocus_delete_gs_state(struct pipe_context * ctx,void * state)2958 crocus_delete_gs_state(struct pipe_context *ctx, void *state)
2959 {
2960 crocus_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2961 }
2962
2963 static void
crocus_delete_fs_state(struct pipe_context * ctx,void * state)2964 crocus_delete_fs_state(struct pipe_context *ctx, void *state)
2965 {
2966 crocus_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2967 }
2968
2969 static void
crocus_delete_cs_state(struct pipe_context * ctx,void * state)2970 crocus_delete_cs_state(struct pipe_context *ctx, void *state)
2971 {
2972 crocus_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2973 }
2974
2975 /**
2976 * The pipe->bind_[stage]_state() driver hook.
2977 *
2978 * Binds an uncompiled shader as the current one for a particular stage.
2979 * Updates dirty tracking to account for the shader's NOS.
2980 */
2981 static void
bind_shader_state(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,gl_shader_stage stage)2982 bind_shader_state(struct crocus_context *ice,
2983 struct crocus_uncompiled_shader *ish,
2984 gl_shader_stage stage)
2985 {
2986 uint64_t dirty_bit = CROCUS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2987 const uint64_t nos = ish ? ish->nos : 0;
2988
2989 const struct shader_info *old_info = crocus_get_shader_info(ice, stage);
2990 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2991
2992 if ((old_info ? BITSET_LAST_BIT(old_info->textures_used) : 0) !=
2993 (new_info ? BITSET_LAST_BIT(new_info->textures_used) : 0)) {
2994 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2995 }
2996
2997 ice->shaders.uncompiled[stage] = ish;
2998 ice->state.stage_dirty |= dirty_bit;
2999
3000 /* Record that CSOs need to mark CROCUS_DIRTY_UNCOMPILED_XS when they change
3001 * (or that they no longer need to do so).
3002 */
3003 for (int i = 0; i < CROCUS_NOS_COUNT; i++) {
3004 if (nos & (1 << i))
3005 ice->state.stage_dirty_for_nos[i] |= dirty_bit;
3006 else
3007 ice->state.stage_dirty_for_nos[i] &= ~dirty_bit;
3008 }
3009 }
3010
3011 static void
crocus_bind_vs_state(struct pipe_context * ctx,void * state)3012 crocus_bind_vs_state(struct pipe_context *ctx, void *state)
3013 {
3014 struct crocus_context *ice = (struct crocus_context *)ctx;
3015 struct crocus_uncompiled_shader *new_ish = state;
3016 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
3017 const struct intel_device_info *devinfo = &screen->devinfo;
3018
3019 if (new_ish &&
3020 ice->state.window_space_position !=
3021 new_ish->nir->info.vs.window_space_position) {
3022 ice->state.window_space_position =
3023 new_ish->nir->info.vs.window_space_position;
3024
3025 ice->state.dirty |= CROCUS_DIRTY_CLIP |
3026 CROCUS_DIRTY_RASTER |
3027 CROCUS_DIRTY_CC_VIEWPORT;
3028 }
3029
3030 if (devinfo->ver == 6) {
3031 ice->state.stage_dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
3032 }
3033
3034 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
3035 }
3036
3037 static void
crocus_bind_tcs_state(struct pipe_context * ctx,void * state)3038 crocus_bind_tcs_state(struct pipe_context *ctx, void *state)
3039 {
3040 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
3041 }
3042
3043 static void
crocus_bind_tes_state(struct pipe_context * ctx,void * state)3044 crocus_bind_tes_state(struct pipe_context *ctx, void *state)
3045 {
3046 struct crocus_context *ice = (struct crocus_context *)ctx;
3047
3048 /* Enabling/disabling optional stages requires a URB reconfiguration. */
3049 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
3050 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
3051
3052 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
3053 }
3054
3055 static void
crocus_bind_gs_state(struct pipe_context * ctx,void * state)3056 crocus_bind_gs_state(struct pipe_context *ctx, void *state)
3057 {
3058 struct crocus_context *ice = (struct crocus_context *)ctx;
3059
3060 /* Enabling/disabling optional stages requires a URB reconfiguration. */
3061 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
3062 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
3063
3064 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
3065 }
3066
3067 static void
crocus_bind_fs_state(struct pipe_context * ctx,void * state)3068 crocus_bind_fs_state(struct pipe_context *ctx, void *state)
3069 {
3070 struct crocus_context *ice = (struct crocus_context *) ctx;
3071 struct crocus_screen *screen = (struct crocus_screen *) ctx->screen;
3072 const struct intel_device_info *devinfo = &screen->devinfo;
3073 struct crocus_uncompiled_shader *old_ish =
3074 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
3075 struct crocus_uncompiled_shader *new_ish = state;
3076
3077 const unsigned color_bits =
3078 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
3079 BITFIELD64_RANGE(FRAG_RESULT_DATA0, ELK_MAX_DRAW_BUFFERS);
3080
3081 /* Fragment shader outputs influence HasWriteableRT */
3082 if (!old_ish || !new_ish ||
3083 (old_ish->nir->info.outputs_written & color_bits) !=
3084 (new_ish->nir->info.outputs_written & color_bits)) {
3085 if (devinfo->ver == 8)
3086 ice->state.dirty |= CROCUS_DIRTY_GEN8_PS_BLEND;
3087 else
3088 ice->state.dirty |= CROCUS_DIRTY_WM;
3089 }
3090
3091 if (devinfo->ver == 8)
3092 ice->state.dirty |= CROCUS_DIRTY_GEN8_PMA_FIX;
3093 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
3094 }
3095
3096 static void
crocus_bind_cs_state(struct pipe_context * ctx,void * state)3097 crocus_bind_cs_state(struct pipe_context *ctx, void *state)
3098 {
3099 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
3100 }
3101
3102 void
crocus_init_program_functions(struct pipe_context * ctx)3103 crocus_init_program_functions(struct pipe_context *ctx)
3104 {
3105 ctx->create_vs_state = crocus_create_vs_state;
3106 ctx->create_tcs_state = crocus_create_tcs_state;
3107 ctx->create_tes_state = crocus_create_tes_state;
3108 ctx->create_gs_state = crocus_create_gs_state;
3109 ctx->create_fs_state = crocus_create_fs_state;
3110 ctx->create_compute_state = crocus_create_compute_state;
3111
3112 ctx->delete_vs_state = crocus_delete_vs_state;
3113 ctx->delete_tcs_state = crocus_delete_tcs_state;
3114 ctx->delete_tes_state = crocus_delete_tes_state;
3115 ctx->delete_gs_state = crocus_delete_gs_state;
3116 ctx->delete_fs_state = crocus_delete_fs_state;
3117 ctx->delete_compute_state = crocus_delete_cs_state;
3118
3119 ctx->bind_vs_state = crocus_bind_vs_state;
3120 ctx->bind_tcs_state = crocus_bind_tcs_state;
3121 ctx->bind_tes_state = crocus_bind_tes_state;
3122 ctx->bind_gs_state = crocus_bind_gs_state;
3123 ctx->bind_fs_state = crocus_bind_fs_state;
3124 ctx->bind_compute_state = crocus_bind_cs_state;
3125 }
3126