/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file crocus_program.c
 *
 * This file contains the driver interface for compiling shaders.
 *
 * See crocus_program_cache.c for the in-memory program cache where the
 * compiled shaders are stored.
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
#include "util/u_upload_mgr.h"
#include "util/u_debug.h"
#include "util/u_prim.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_serialize.h"
#include "intel/compiler/elk/elk_compiler.h"
#include "intel/compiler/elk/elk_nir.h"
#include "intel/compiler/elk/elk_prim.h"
#include "intel/compiler/elk/elk_reg.h"
#include "intel/compiler/intel_nir.h"
#include "crocus_context.h"
#include "nir/tgsi_to_nir.h"
#include "program/prog_instruction.h"

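/* 0x688 below is the packed identity swizzle (XYZW): four 3-bit GL SWIZZLE_*
 * selects, SWIZZLE_X (0) in bits 2:0 through SWIZZLE_W (3) in bits 11:9.
 */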
#define KEY_INIT_NO_ID()                                                \
   .base.tex.swizzles[0 ... ELK_MAX_SAMPLERS - 1] = 0x688
#define KEY_INIT()                                                      \
   .base.program_string_id = ish->program_id,                           \
   .base.limit_trig_input_range = screen->driconf.limit_trig_input_range, \
   KEY_INIT_NO_ID()

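/* Clear the texture workaround fields that crocus applies itself via NIR
 * lowering, so the backend compiler doesn't try to apply them again.
 */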
static void
crocus_sanitize_tex_key(struct elk_sampler_prog_key_data *key)
{
   key->gather_channel_quirk_mask = 0;
   for (unsigned s = 0; s < ELK_MAX_SAMPLERS; s++) {
      key->swizzles[s] = SWIZZLE_NOOP;
      key->gfx6_gather_wa[s] = 0;
   }
}

static uint32_t
crocus_get_texture_swizzle(const struct crocus_context *ice,
                           const struct crocus_sampler_view *t)
{
   uint32_t swiz = 0;

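   /* Pack the four 3-bit component selects into SWIZZLE_NOOP's layout. */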
   for (int i = 0; i < 4; i++) {
      swiz |= t->swizzle[i] << (i * 3);
   }
   return swiz;
}

static inline bool can_push_ubo(const struct intel_device_info *devinfo)
{
   /* push works for everyone except SNB at the moment */
   return devinfo->ver != 6;
}

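/* Shader workaround flags for gather4 on Sandybridge: the affected formats
 * get a surface format override, so the shader must rescale (and, for SINT
 * formats, sign-extend) the returned values.  See the lowering in
 * crocus_setup_binding_table().
 */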
static uint8_t
gfx6_gather_workaround(enum pipe_format pformat)
{
   switch (pformat) {
   case PIPE_FORMAT_R8_SINT: return ELK_WA_SIGN | ELK_WA_8BIT;
   case PIPE_FORMAT_R8_UINT: return ELK_WA_8BIT;
   case PIPE_FORMAT_R16_SINT: return ELK_WA_SIGN | ELK_WA_16BIT;
   case PIPE_FORMAT_R16_UINT: return ELK_WA_16BIT;
   default:
      /* Note that even though PIPE_FORMAT_R32_SINT and
       * PIPE_FORMAT_R32_UINT have format overrides in
       * the surface state, there is no shader w/a required.
       */
      return 0;
   }
}

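/* Swizzles that make a transform feedback output start at a given component;
 * trailing channels repeat the last component as padding.
 */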
static const unsigned crocus_gfx6_swizzle_for_offset[4] = {
   ELK_SWIZZLE4(0, 1, 2, 3),
   ELK_SWIZZLE4(1, 2, 3, 3),
   ELK_SWIZZLE4(2, 3, 3, 3),
   ELK_SWIZZLE4(3, 3, 3, 3)
};

static void
gfx6_gs_xfb_setup(const struct pipe_stream_output_info *so_info,
                  struct elk_gs_prog_data *gs_prog_data)
{
   /* Make sure that the VUE slots won't overflow the unsigned chars in
    * prog_data->transform_feedback_bindings[].
    */
   STATIC_ASSERT(ELK_VARYING_SLOT_COUNT <= 256);

   /* Make sure that we don't need more binding table entries than we've
    * set aside for use in transform feedback.  (We shouldn't, since we
    * set aside enough binding table entries to have one per component.)
    */
   assert(so_info->num_outputs <= ELK_MAX_SOL_BINDINGS);

   gs_prog_data->num_transform_feedback_bindings = so_info->num_outputs;
   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      gs_prog_data->transform_feedback_bindings[i] =
         so_info->output[i].register_index;
      gs_prog_data->transform_feedback_swizzles[i] =
         crocus_gfx6_swizzle_for_offset[so_info->output[i].start_component];
   }
}

static void
gfx6_ff_gs_xfb_setup(const struct pipe_stream_output_info *so_info,
                     struct elk_ff_gs_prog_key *key)
{
   key->num_transform_feedback_bindings = so_info->num_outputs;
   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      key->transform_feedback_bindings[i] =
         so_info->output[i].register_index;
      key->transform_feedback_swizzles[i] =
         crocus_gfx6_swizzle_for_offset[so_info->output[i].start_component];
   }
}

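/**
 * Populate the texture portion of a program key from the bound sampler
 * views: channel swizzles for hardware without shader channel select,
 * sampler clamp masks, and gather4 workaround flags.
 */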
static void
crocus_populate_sampler_prog_key_data(struct crocus_context *ice,
                                      const struct intel_device_info *devinfo,
                                      gl_shader_stage stage,
                                      struct crocus_uncompiled_shader *ish,
                                      bool uses_texture_gather,
                                      struct elk_sampler_prog_key_data *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   uint32_t mask = ish->nir->info.textures_used[0];

   while (mask) {
      const int s = u_bit_scan(&mask);

      struct crocus_sampler_view *texture = ice->state.shaders[stage].textures[s];
      key->swizzles[s] = SWIZZLE_NOOP;

      if (!texture)
         continue;
      if (texture->base.target == PIPE_BUFFER)
         continue;
      if (devinfo->verx10 < 75) {
         key->swizzles[s] = crocus_get_texture_swizzle(ice, texture);
      }

      screen->vtbl.fill_clamp_mask(ice->state.shaders[stage].samplers[s], s, key->gl_clamp_mask);

      /* gather4 for RG32* is broken in multiple ways on Gen7. */
      if (devinfo->ver == 7 && uses_texture_gather) {
         switch (texture->base.format) {
         case PIPE_FORMAT_R32G32_UINT:
         case PIPE_FORMAT_R32G32_SINT: {
            /* We have to override the format to R32G32_FLOAT_LD.
             * This means that SCS_ALPHA and SCS_ONE will return 0x3f8
             * (1.0) rather than integer 1.  This needs shader hacks.
             *
             * On Ivybridge, we whack W (alpha) to ONE in our key's
             * swizzle.  On Haswell, we look at the original texture
             * swizzle, and use XYZW with channels overridden to ONE,
             * leaving normal texture swizzling to SCS.
             */
            unsigned src_swizzle = key->swizzles[s];
            for (int i = 0; i < 4; i++) {
               unsigned src_comp = GET_SWZ(src_swizzle, i);
               if (src_comp == SWIZZLE_ONE || src_comp == SWIZZLE_W) {
                  key->swizzles[s] &= ~(0x7 << (3 * i));
                  key->swizzles[s] |= SWIZZLE_ONE << (3 * i);
               }
            }
         }
         FALLTHROUGH;
         case PIPE_FORMAT_R32G32_FLOAT:
            /* The channel select for green doesn't work - we have to
             * request blue.  Haswell can use SCS for this, but Ivybridge
             * needs a shader workaround.
             */
            if (devinfo->verx10 < 75)
               key->gather_channel_quirk_mask |= 1 << s;
            break;
         default:
            break;
         }
      }
      if (devinfo->ver == 6 && uses_texture_gather) {
         key->gfx6_gather_wa[s] = gfx6_gather_workaround(texture->base.format);
      }
   }
}

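/**
 * Bake the key's texture swizzles into the NIR with nir_lower_tex, for
 * hardware that can't apply channel selects in the surface state.
 */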
static void
crocus_lower_swizzles(struct nir_shader *nir,
                      const struct elk_sampler_prog_key_data *key_tex)
{
   struct nir_lower_tex_options tex_options = {
      .lower_invalid_implicit_lod = true,
   };
   uint32_t mask = nir->info.textures_used[0];

   while (mask) {
      const int s = u_bit_scan(&mask);

      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }
   if (tex_options.swizzle_result)
      nir_lower_tex(nir, &tex_options);
}

static unsigned
get_new_program_id(struct crocus_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

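/**
 * Flatten an array-of-arrays deref chain into a single flat element offset.
 * For example, img[i][j] with an inner array length of B becomes
 * (i * B + j) * elem_size, clamped to the last valid element.
 */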
static nir_def *
get_aoa_deref_offset(nir_builder *b,
                     nir_deref_instr *deref,
                     unsigned elem_size)
{
   unsigned array_size = elem_size;
   nir_def *offset = nir_imm_int(b, 0);

   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      /* This level's element size is the previous level's array size */
      nir_def *index = deref->arr.index.ssa;
      assert(deref->arr.index.ssa);
      offset = nir_iadd(b, offset,
                        nir_imul_imm(b, index, array_size));

      deref = nir_deref_instr_parent(deref);
      assert(glsl_type_is_array(deref->type));
      array_size *= glsl_get_length(deref->type);
   }

   /* Accessing an invalid surface index with the dataport can result in a
    * hang.  According to the spec "if the index used to select an individual
    * element is negative or greater than or equal to the size of the array,
    * the results of the operation are undefined but may not lead to
    * termination" -- which is one of the possible outcomes of the hang.
    * Clamp the index to prevent access outside of the array bounds.
    */
   return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
}

static void
crocus_lower_storage_image_derefs(nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b = nir_builder_create(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic:
         case nir_intrinsic_image_deref_atomic_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            b.cursor = nir_before_instr(&intrin->instr);
            nir_def *index =
               nir_iadd_imm(&b, get_aoa_deref_offset(&b, deref, 1),
                            var->data.driver_location);
            nir_rewrite_image_intrinsic(intrin, index, false);
            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }
}

// XXX: need unify_interfaces() at link time...

/**
 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
 */
static bool
crocus_fix_edge_flags(nir_shader *nir)
{
   if (nir->info.stage != MESA_SHADER_VERTEX) {
      nir_shader_preserve_all_metadata(nir);
      return false;
   }

   nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
                                                       VARYING_SLOT_EDGE);
   if (!var) {
      nir_shader_preserve_all_metadata(nir);
      return false;
   }

   var->data.mode = nir_var_shader_temp;
   nir->info.outputs_written &= ~VARYING_BIT_EDGE;
   nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
   nir_fixup_deref_modes(nir);

   nir_foreach_function_impl(impl, nir) {
      nir_metadata_preserve(impl, nir_metadata_control_flow |
                                  nir_metadata_live_defs |
                                  nir_metadata_loop_analysis);
   }

   return true;
}

/**
 * Fix an uncompiled shader's stream output info.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant.  So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
 * components of our VUE header.  See elk_vue_map.c for the layout.
 */
static void
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
   uint8_t reverse_map[64] = {};
   unsigned slot = 0;
   while (outputs_written) {
      reverse_map[slot++] = u_bit_scan64(&outputs_written);
   }

   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      struct pipe_stream_output *output = &so_info->output[i];

      /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
      output->register_index = reverse_map[output->register_index];

      /* The VUE header contains three scalar fields packed together:
       * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
       * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
       * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
       */
      switch (output->register_index) {
      case VARYING_SLOT_LAYER:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 1;
         break;
      case VARYING_SLOT_VIEWPORT:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 2;
         break;
      case VARYING_SLOT_PSIZ:
         assert(output->num_components == 1);
         output->start_component = 3;
         break;
      }

      //info->outputs_written |= 1ull << output->register_index;
   }
}

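/**
 * Fill a vec4's worth of image param system values, starting at the given
 * byte offset within struct isl_image_param, zero-padding components past n.
 */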
static void
setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
                        unsigned offset, unsigned n)
{
   assert(offset % sizeof(uint32_t) == 0);

   for (unsigned i = 0; i < n; ++i)
      sysvals[i] = ELK_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);

   for (unsigned i = n; i < 4; ++i)
      sysvals[i] = ELK_PARAM_BUILTIN_ZERO;
}

/**
 * Associate NIR uniform variables with the prog_data->param[] mechanism
 * used by the backend.  Also, decide which UBOs we'd like to push in an
 * ideal situation (though the backend can reduce this).
 */
static void
crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
                      void *mem_ctx,
                      nir_shader *nir,
                      struct elk_stage_prog_data *prog_data,
                      enum elk_param_builtin **out_system_values,
                      unsigned *out_num_system_values,
                      unsigned *out_num_cbufs)
{
   const unsigned CROCUS_MAX_SYSTEM_VALUES =
      PIPE_MAX_SHADER_IMAGES * ISL_IMAGE_PARAM_SIZE;
   enum elk_param_builtin *system_values =
      rzalloc_array(mem_ctx, enum elk_param_builtin, CROCUS_MAX_SYSTEM_VALUES);
   unsigned num_system_values = 0;

   unsigned patch_vert_idx = -1;
   unsigned tess_outer_default_idx = -1;
   unsigned tess_inner_default_idx = -1;
   unsigned ucp_idx[CROCUS_MAX_CLIP_PLANES];
   unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
   unsigned variable_group_size_idx = -1;
   memset(ucp_idx, -1, sizeof(ucp_idx));
   memset(img_idx, -1, sizeof(img_idx));

   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b = nir_builder_at(nir_before_impl(impl));

   nir_def *temp_ubo_name = nir_undef(&b, 1, 32);
   nir_def *temp_const_ubo_name = NULL;

   /* Turn system value intrinsics into uniforms */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         nir_def *offset;

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_base_workgroup_id: {
            /* GL doesn't have a concept of base workgroup */
            b.cursor = nir_instr_remove(&intrin->instr);
            nir_def_rewrite_uses(&intrin->def,
                                 nir_imm_zero(&b, 3, 32));
            continue;
         }
         case nir_intrinsic_load_constant: {
            /* This one is special because it reads from the shader constant
             * data and not cbuf0 which gallium uploads for us.
             */
            b.cursor = nir_before_instr(instr);
            nir_def *offset =
               nir_iadd_imm(&b, intrin->src[0].ssa,
                            nir_intrinsic_base(intrin));

            if (temp_const_ubo_name == NULL)
               temp_const_ubo_name = nir_imm_int(&b, 0);

            nir_intrinsic_instr *load_ubo =
               nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
            load_ubo->num_components = intrin->num_components;
            load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
            load_ubo->src[1] = nir_src_for_ssa(offset);
            nir_intrinsic_set_align(load_ubo, 4, 0);
            nir_intrinsic_set_range_base(load_ubo, 0);
            nir_intrinsic_set_range(load_ubo, ~0);
            nir_def_init(&load_ubo->instr, &load_ubo->def,
                         intrin->def.num_components,
                         intrin->def.bit_size);
            nir_builder_instr_insert(&b, &load_ubo->instr);

            nir_def_replace(&intrin->def, &load_ubo->def);
            continue;
         }
         case nir_intrinsic_load_user_clip_plane: {
            unsigned ucp = nir_intrinsic_ucp_id(intrin);

            if (ucp_idx[ucp] == -1) {
               ucp_idx[ucp] = num_system_values;
               num_system_values += 4;
            }

            for (int i = 0; i < 4; i++) {
               system_values[ucp_idx[ucp] + i] =
                  ELK_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
            break;
         }
         case nir_intrinsic_load_patch_vertices_in:
            if (patch_vert_idx == -1)
               patch_vert_idx = num_system_values++;

            system_values[patch_vert_idx] =
               ELK_PARAM_BUILTIN_PATCH_VERTICES_IN;

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_load_tess_level_outer_default:
            if (tess_outer_default_idx == -1) {
               tess_outer_default_idx = num_system_values;
               num_system_values += 4;
            }

            for (int i = 0; i < 4; i++) {
               system_values[tess_outer_default_idx + i] =
                  ELK_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
            }

            b.cursor = nir_before_instr(instr);
            offset =
               nir_imm_int(&b, tess_outer_default_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_load_tess_level_inner_default:
            if (tess_inner_default_idx == -1) {
               tess_inner_default_idx = num_system_values;
               num_system_values += 2;
            }

            for (int i = 0; i < 2; i++) {
               system_values[tess_inner_default_idx + i] =
                  ELK_PARAM_BUILTIN_TESS_LEVEL_INNER_X + i;
            }

            b.cursor = nir_before_instr(instr);
            offset =
               nir_imm_int(&b, tess_inner_default_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_image_deref_load_param_intel: {
            assert(devinfo->ver < 9);
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            if (img_idx[var->data.binding] == -1) {
               /* GL only allows arrays of arrays of images. */
               assert(glsl_type_is_image(glsl_without_array(var->type)));
               unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));

               for (int i = 0; i < num_images; i++) {
                  const unsigned img = var->data.binding + i;

                  img_idx[img] = num_system_values;
                  num_system_values += ISL_IMAGE_PARAM_SIZE;

                  uint32_t *img_sv = &system_values[img_idx[img]];

                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_OFFSET_OFFSET, img,
                     offsetof(struct isl_image_param, offset), 2);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_SIZE_OFFSET, img,
                     offsetof(struct isl_image_param, size), 3);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_STRIDE_OFFSET, img,
                     offsetof(struct isl_image_param, stride), 4);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_TILING_OFFSET, img,
                     offsetof(struct isl_image_param, tiling), 3);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_SWIZZLING_OFFSET, img,
                     offsetof(struct isl_image_param, swizzling), 2);
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_iadd_imm(&b,
               get_aoa_deref_offset(&b, deref, ISL_IMAGE_PARAM_SIZE * 4),
               img_idx[var->data.binding] * 4 +
               nir_intrinsic_base(intrin) * 16);
            break;
         }
         case nir_intrinsic_load_workgroup_size: {
            assert(nir->info.workgroup_size_variable);
            if (variable_group_size_idx == -1) {
               variable_group_size_idx = num_system_values;
               num_system_values += 3;
               for (int i = 0; i < 3; i++) {
                  system_values[variable_group_size_idx + i] =
                     ELK_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b,
               variable_group_size_idx * sizeof(uint32_t));
            break;
         }
         default:
            continue;
         }

         unsigned comps = nir_intrinsic_dest_components(intrin);

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
         load->num_components = comps;
         load->src[0] = nir_src_for_ssa(temp_ubo_name);
         load->src[1] = nir_src_for_ssa(offset);
         nir_intrinsic_set_align(load, 4, 0);
         nir_intrinsic_set_range_base(load, 0);
         nir_intrinsic_set_range(load, ~0);
         nir_def_init(&load->instr, &load->def, comps, 32);
         nir_builder_instr_insert(&b, &load->instr);
         nir_def_rewrite_uses(&intrin->def,
                              &load->def);
         nir_instr_remove(instr);
      }
   }

   nir_validate_shader(nir, "before remapping");

   /* Uniforms are stored in constant buffer 0, so the user-facing UBOs are
    * all shifted up by one index.  If any constant buffer is needed at all,
    * constant buffer 0 will be needed, so account for it.
    */
   unsigned num_cbufs = nir->info.num_ubos;
   if (num_cbufs || nir->num_uniforms)
      num_cbufs++;

   /* Place the new params in a new cbuf. */
   if (num_system_values > 0) {
      unsigned sysval_cbuf_index = num_cbufs;
      num_cbufs++;

      system_values = reralloc(mem_ctx, system_values, enum elk_param_builtin,
                               num_system_values);

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

            if (load->intrinsic != nir_intrinsic_load_ubo)
               continue;

            b.cursor = nir_before_instr(instr);

            if (load->src[0].ssa == temp_ubo_name) {
               nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
               nir_src_rewrite(&load->src[0], imm);
            }
         }
      }

      /* We need to fold the new iadds for elk_nir_analyze_ubo_ranges */
      nir_opt_constant_folding(nir);
   } else {
      ralloc_free(system_values);
      system_values = NULL;
   }

   assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
   nir_validate_shader(nir, "after remap");

   /* We don't use params[] but gallium leaves num_uniforms set.  We use this
    * to detect when cbuf0 exists but we don't need it anymore when we get
    * here.  Instead, zero it out so that the back-end doesn't get confused
    * when nr_params * 4 != num_uniforms.
    */
   nir->num_uniforms = 0;

   /* Constant loads (if any) need to go at the end of the constant buffers so
    * we need to know num_cbufs before we can lower to them.
    */
   if (temp_const_ubo_name != NULL) {
      nir_load_const_instr *const_ubo_index =
         nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
      assert(const_ubo_index->def.bit_size == 32);
      const_ubo_index->value[0].u32 = num_cbufs;
   }

   *out_system_values = system_values;
   *out_num_system_values = num_system_values;
   *out_num_cbufs = num_cbufs;
}

static const char *surface_group_names[] = {
   [CROCUS_SURFACE_GROUP_RENDER_TARGET] = "render target",
   [CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
   [CROCUS_SURFACE_GROUP_SOL] = "streamout",
   [CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
   [CROCUS_SURFACE_GROUP_TEXTURE] = "texture",
   [CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = "texture gather",
   [CROCUS_SURFACE_GROUP_UBO] = "ubo",
   [CROCUS_SURFACE_GROUP_SSBO] = "ssbo",
   [CROCUS_SURFACE_GROUP_IMAGE] = "image",
};

static void
crocus_print_binding_table(FILE *fp, const char *name,
                           const struct crocus_binding_table *bt)
{
   STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == CROCUS_SURFACE_GROUP_COUNT);

   uint32_t total = 0;
   uint32_t compacted = 0;

   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
      uint32_t size = bt->sizes[i];
      total += size;
      if (size)
         compacted += util_bitcount64(bt->used_mask[i]);
   }

   if (total == 0) {
      fprintf(fp, "Binding table for %s is empty\n\n", name);
      return;
   }

   if (total != compacted) {
      fprintf(fp, "Binding table for %s "
              "(compacted to %u entries from %u entries)\n",
              name, compacted, total);
   } else {
      fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
   }

   uint32_t entry = 0;
   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
      uint64_t mask = bt->used_mask[i];
      while (mask) {
         int index = u_bit_scan64(&mask);
         fprintf(fp, "  [%u] %s #%d\n", entry++, surface_group_names[i], index);
      }
   }
   fprintf(fp, "\n");
}

enum {
   /* Max elements in a surface group. */
   SURFACE_GROUP_MAX_ELEMENTS = 64,
};

static void
rewrite_src_with_bti(nir_builder *b, struct crocus_binding_table *bt,
                     nir_instr *instr, nir_src *src,
                     enum crocus_surface_group group)
{
   assert(bt->sizes[group] > 0);

   b->cursor = nir_before_instr(instr);
   nir_def *bti;
   if (nir_src_is_const(*src)) {
      uint32_t index = nir_src_as_uint(*src);
      bti = nir_imm_intN_t(b, crocus_group_index_to_bti(bt, group, index),
                           src->ssa->bit_size);
   } else {
      /* Indirect usage makes all the surfaces of the group available,
       * so we can just add the base.
       */
      assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
      bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
   }
   nir_src_rewrite(src, bti);
}

static void
mark_used_with_src(struct crocus_binding_table *bt, nir_src *src,
                   enum crocus_surface_group group)
{
   assert(bt->sizes[group] > 0);

   if (nir_src_is_const(*src)) {
      uint64_t index = nir_src_as_uint(*src);
      assert(index < bt->sizes[group]);
      bt->used_mask[group] |= 1ull << index;
   } else {
      /* There's an indirect usage, we need all the surfaces. */
      bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
   }
}

static bool
skip_compacting_binding_tables(void)
{
   static int skip = -1;
   if (skip < 0)
      skip = debug_get_bool_option("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
   return skip;
}

/**
 * Set up the binding table indices and apply to the shader.
 */
static void
crocus_setup_binding_table(const struct intel_device_info *devinfo,
                           struct nir_shader *nir,
                           struct crocus_binding_table *bt,
                           unsigned num_render_targets,
                           unsigned num_system_values,
                           unsigned num_cbufs,
                           const struct elk_sampler_prog_key_data *key)
{
   const struct shader_info *info = &nir->info;

   memset(bt, 0, sizeof(*bt));

   /* Set the sizes for each surface group.  For some groups, we already know
    * upfront how many will be used, so mark them.
    */
   if (info->stage == MESA_SHADER_FRAGMENT) {
      bt->sizes[CROCUS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
      /* All render targets used. */
      bt->used_mask[CROCUS_SURFACE_GROUP_RENDER_TARGET] =
         BITFIELD64_MASK(num_render_targets);

      /* Set up the render target read surface group in order to support
       * non-coherent framebuffer fetch on Gfx7.
       */
      if (devinfo->ver >= 6 && info->outputs_read) {
         bt->sizes[CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
         bt->used_mask[CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] =
            BITFIELD64_MASK(num_render_targets);
      }
   } else if (info->stage == MESA_SHADER_COMPUTE) {
      bt->sizes[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
   } else if (info->stage == MESA_SHADER_GEOMETRY) {
      /* In gfx6 we reserve the first ELK_MAX_SOL_BINDINGS entries for
       * transform feedback surfaces.
       */
      if (devinfo->ver == 6) {
         bt->sizes[CROCUS_SURFACE_GROUP_SOL] = ELK_MAX_SOL_BINDINGS;
         bt->used_mask[CROCUS_SURFACE_GROUP_SOL] = (uint64_t)-1;
      }
   }

   bt->sizes[CROCUS_SURFACE_GROUP_TEXTURE] = BITSET_LAST_BIT(info->textures_used);
   bt->used_mask[CROCUS_SURFACE_GROUP_TEXTURE] = info->textures_used[0];

   if (info->uses_texture_gather && devinfo->ver < 8) {
      bt->sizes[CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = BITSET_LAST_BIT(info->textures_used);
      bt->used_mask[CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = info->textures_used[0];
   }

   bt->sizes[CROCUS_SURFACE_GROUP_IMAGE] = info->num_images;

   /* Allocate an extra slot in the UBO section for NIR constants.
    * Binding table compaction will remove it if unnecessary.
    *
    * We don't include them in crocus_compiled_shader::num_cbufs because
    * they are uploaded separately from shs->constbufs[], but from a shader
    * point of view, they're another UBO (at the end of the section).
    */
   bt->sizes[CROCUS_SURFACE_GROUP_UBO] = num_cbufs + 1;

   bt->sizes[CROCUS_SURFACE_GROUP_SSBO] = info->num_ssbos;

   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++)
      assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);

   /* Mark surfaces used for the cases we don't have the information available
    * upfront.
    */
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_num_workgroups:
            bt->used_mask[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
            break;

         case nir_intrinsic_load_output:
            if (devinfo->ver >= 6) {
               mark_used_with_src(bt, &intrin->src[0],
                                  CROCUS_SURFACE_GROUP_RENDER_TARGET_READ);
            }
            break;

         case nir_intrinsic_image_size:
         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic:
         case nir_intrinsic_image_atomic_swap:
         case nir_intrinsic_image_load_raw_intel:
         case nir_intrinsic_image_store_raw_intel:
            mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_IMAGE);
            break;

         case nir_intrinsic_load_ubo:
            mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_UBO);
            break;

         case nir_intrinsic_store_ssbo:
            mark_used_with_src(bt, &intrin->src[1], CROCUS_SURFACE_GROUP_SSBO);
            break;

         case nir_intrinsic_get_ssbo_size:
         case nir_intrinsic_ssbo_atomic:
         case nir_intrinsic_ssbo_atomic_swap:
         case nir_intrinsic_load_ssbo:
            mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_SSBO);
            break;

         default:
            break;
         }
      }
   }

   /* When disabled, we just mark everything as used. */
   if (unlikely(skip_compacting_binding_tables())) {
      for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++)
         bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
   }

   /* Calculate the offsets and the binding table size based on the used
    * surfaces.  After this point, the functions to go between "group indices"
    * and binding table indices can be used.
    */
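   /* For example, with two render targets and textures 0 and 2 in use, the
    * compacted table is [0]=RT0, [1]=RT1, [2]=texture 0, [3]=texture 2.
    */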
   uint32_t next = 0;
   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
      if (bt->used_mask[i] != 0) {
         bt->offsets[i] = next;
         next += util_bitcount64(bt->used_mask[i]);
      }
   }
   bt->size_bytes = next * 4;

   if (INTEL_DEBUG(DEBUG_BT)) {
      crocus_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
   }

   /* Apply the binding table indices.  The backend compiler is not expected
    * to change those, as we haven't set any of the *_start entries in elk
    * binding_table.
    */
   nir_builder b = nir_builder_create(impl);

   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type == nir_instr_type_tex) {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            bool is_gather = devinfo->ver < 8 && tex->op == nir_texop_tg4;

            /* Rewrite the tg4 component from green to blue before replacing
             * the texture index.
             */
            if (devinfo->verx10 == 70) {
               if (tex->component == 1)
                  if (key->gather_channel_quirk_mask & (1 << tex->texture_index))
                     tex->component = 2;
            }

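            /* Gfx6 gather workaround: the surface format override makes
             * gather4 return unorm-style floats, so rescale to the integer
             * range and sign-extend when the format is signed.
             */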
            if (is_gather && devinfo->ver == 6 && key->gfx6_gather_wa[tex->texture_index]) {
               b.cursor = nir_after_instr(instr);
               enum elk_gfx6_gather_sampler_wa wa = key->gfx6_gather_wa[tex->texture_index];
               int width = (wa & ELK_WA_8BIT) ? 8 : 16;

               nir_def *val = nir_fmul_imm(&b, &tex->def, (1 << width) - 1);
               val = nir_f2u32(&b, val);
               if (wa & ELK_WA_SIGN) {
                  val = nir_ishl_imm(&b, val, 32 - width);
                  val = nir_ishr_imm(&b, val, 32 - width);
               }
               nir_def_rewrite_uses_after(&tex->def, val, val->parent_instr);
            }

            tex->texture_index =
               crocus_group_index_to_bti(bt, is_gather ? CROCUS_SURFACE_GROUP_TEXTURE_GATHER : CROCUS_SURFACE_GROUP_TEXTURE,
                                         tex->texture_index);
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_size:
         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic:
         case nir_intrinsic_image_atomic_swap:
         case nir_intrinsic_image_load_raw_intel:
         case nir_intrinsic_image_store_raw_intel:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 CROCUS_SURFACE_GROUP_IMAGE);
            break;

         case nir_intrinsic_load_ubo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 CROCUS_SURFACE_GROUP_UBO);
            break;

         case nir_intrinsic_store_ssbo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
                                 CROCUS_SURFACE_GROUP_SSBO);
            break;

         case nir_intrinsic_load_output:
            if (devinfo->ver >= 6) {
               rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                    CROCUS_SURFACE_GROUP_RENDER_TARGET_READ);
            }
            break;

         case nir_intrinsic_get_ssbo_size:
         case nir_intrinsic_ssbo_atomic:
         case nir_intrinsic_ssbo_atomic_swap:
         case nir_intrinsic_load_ssbo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 CROCUS_SURFACE_GROUP_SSBO);
            break;

         default:
            break;
         }
      }
   }
}

static void
crocus_debug_recompile(struct crocus_context *ice,
                       struct shader_info *info,
                       const struct elk_base_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *) ice->ctx.screen;
   const struct elk_compiler *c = screen->compiler;

   if (!info)
      return;

   elk_shader_perf_log(c, &ice->dbg, "Recompiling %s shader for program %s: %s\n",
                       _mesa_shader_stage_to_string(info->stage),
                       info->name ? info->name : "(no identifier)",
                       info->label ? info->label : "");

   const void *old_key =
      crocus_find_previous_compile(ice, info->stage, key->program_string_id);

   elk_debug_key_recompile(c, &ice->dbg, info->stage, old_key, key);
}

/**
 * Get the shader for the last enabled geometry stage.
 *
 * This stage is the one which will feed stream output and the rasterizer.
 */
static gl_shader_stage
last_vue_stage(struct crocus_context *ice)
{
   if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      return MESA_SHADER_GEOMETRY;

   if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      return MESA_SHADER_TESS_EVAL;

   return MESA_SHADER_VERTEX;
}

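/**
 * Compute the set of VUE slots a VS variant must write: the shader's own
 * outputs plus any slots fixed-function stages need (edge flags, point
 * sprite texcoords, front colors, and clip distances for legacy clipping).
 */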
static GLbitfield64
crocus_vs_outputs_written(struct crocus_context *ice,
                          const struct elk_vs_prog_key *key,
                          GLbitfield64 user_varyings)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   GLbitfield64 outputs_written = user_varyings;

   if (devinfo->ver < 6) {

      if (key->copy_edgeflag)
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);

      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (unsigned i = 0; i < 8; i++) {
         if (key->point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (key->nr_userclip_plane_consts > 0) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   return outputs_written;
}

/*
 * If no edgeflags come from the user, gen4/5 require giving the clip shader
 * a default edgeflag.
 *
 * This will always be 1.0.
 */
static void
crocus_lower_default_edgeflags(struct nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b = nir_builder_at(nir_after_impl(impl));

   nir_variable *var = nir_variable_create(nir, nir_var_shader_out,
                                           glsl_float_type(),
                                           "edgeflag");
   var->data.location = VARYING_SLOT_EDGE;
   nir_store_var(&b, var, nir_imm_float(&b, 1.0), 0x1);
}

/**
 * Compile a vertex shader, and upload the assembly.
 */
static struct crocus_compiled_shader *
crocus_compile_vs(struct crocus_context *ice,
                  struct crocus_uncompiled_shader *ish,
                  const struct elk_vs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   const struct intel_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_vs_prog_data *vs_prog_data =
      rzalloc(mem_ctx, struct elk_vs_prog_data);
   struct elk_vue_prog_data *vue_prog_data = &vs_prog_data->base;
   struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
   enum elk_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      /* If the pass actually lowered anything, re-run the cleanup passes
       * and re-gather the shader info.
       */
      if (nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1,
                            true, false, NULL)) {
         nir_lower_io_to_temporaries(nir, impl, true, false);
         nir_lower_global_vars_to_local(nir);
         nir_lower_vars_to_ssa(nir);
         nir_shader_gather_info(nir, impl);
      }
   }

   if (key->clamp_pointsize)
      nir_lower_point_size(nir, 1.0, 255.0);

   prog_data->use_alt_mode = nir->info.use_legacy_math_rules;

   crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
                         &num_system_values, &num_cbufs);

   crocus_lower_swizzles(nir, &key->base.tex);

   if (devinfo->ver <= 5 &&
       !(nir->info.inputs_read & BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG)))
      crocus_lower_default_edgeflags(nir);

   struct crocus_binding_table bt;
   crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                              num_system_values, num_cbufs, &key->base.tex);

   if (can_push_ubo(devinfo))
      elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

   uint64_t outputs_written =
      crocus_vs_outputs_written(ice, key, nir->info.outputs_written);
   elk_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, outputs_written,
                       nir->info.separate_shader, /* pos slots */ 1);

   /* Don't tell the backend about our clip plane constants, we've already
    * lowered them in NIR and we don't want it doing it again.
    */
   struct elk_vs_prog_key key_no_ucp = *key;
   key_no_ucp.nr_userclip_plane_consts = 0;
   key_no_ucp.copy_edgeflag = false;
   crocus_sanitize_tex_key(&key_no_ucp.base.tex);

   struct elk_compile_vs_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = &key_no_ucp,
      .prog_data = vs_prog_data,
      .edgeflag_is_last = devinfo->ver < 6,
   };
   const unsigned *program =
      elk_compile_vs(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile vertex shader: %s\n", params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      crocus_debug_recompile(ice, &nir->info, &key->base);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls = NULL;
   if (devinfo->ver > 6)
      so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
                                                  &vue_prog_data->vue_map);

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_VS, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*vs_prog_data), so_decls,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   crocus_disk_cache_store(screen->disk_cache, ish, shader,
                           ice->shaders.cache_bo_map,
                           key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current vertex shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
crocus_update_compiled_vs(struct crocus_context *ice)
{
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
   struct crocus_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_VERTEX];
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct elk_vs_prog_key key = { KEY_INIT() };

   if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
      crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_VERTEX, ish,
                                            ish->nir->info.uses_texture_gather, &key.base.tex);
   screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_VS];
   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_VS, sizeof(key), &key);

   if (!shader)
      shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = crocus_compile_vs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[CROCUS_CACHE_VS] = shader;
      if (devinfo->ver == 8)
         ice->state.dirty |= CROCUS_DIRTY_GEN8_VF_SGVS;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_VS |
                                CROCUS_STAGE_DIRTY_BINDINGS_VS |
                                CROCUS_STAGE_DIRTY_CONSTANTS_VS;
      shs->sysvals_need_upload = true;

      const struct elk_vs_prog_data *vs_prog_data =
         (void *) shader->prog_data;
      const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
                                    vs_prog_data->uses_baseinstance;
      const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
                                            vs_prog_data->uses_is_indexed_draw;
      const bool needs_sgvs_element = uses_draw_params ||
                                      vs_prog_data->uses_instanceid ||
                                      vs_prog_data->uses_vertexid;

      if (ice->state.vs_uses_draw_params != uses_draw_params ||
          ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
          ice->state.vs_needs_edge_flag != ish->needs_edge_flag ||
          ice->state.vs_uses_vertexid != vs_prog_data->uses_vertexid ||
          ice->state.vs_uses_instanceid != vs_prog_data->uses_instanceid) {
         ice->state.dirty |= CROCUS_DIRTY_VERTEX_BUFFERS |
                             CROCUS_DIRTY_VERTEX_ELEMENTS;
      }
      ice->state.vs_uses_draw_params = uses_draw_params;
      ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
      ice->state.vs_needs_sgvs_element = needs_sgvs_element;
      ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
      ice->state.vs_uses_vertexid = vs_prog_data->uses_vertexid;
      ice->state.vs_uses_instanceid = vs_prog_data->uses_instanceid;
   }
}

/**
 * Get the shader_info for a given stage, or NULL if the stage is disabled.
 */
const struct shader_info *
crocus_get_shader_info(const struct crocus_context *ice, gl_shader_stage stage)
{
   const struct crocus_uncompiled_shader *ish = ice->shaders.uncompiled[stage];

   if (!ish)
      return NULL;

   const nir_shader *nir = ish->nir;
   return &nir->info;
}

/**
 * Get the union of TCS output and TES input slots.
 *
 * TCS and TES need to agree on a common URB entry layout.  In particular,
 * the data for all patch vertices is stored in a single URB entry (unlike
 * GS which has one entry per input vertex).  This means that per-vertex
 * array indexing needs a stride.
 *
 * SSO requires locations to match, but doesn't require the number of
 * outputs/inputs to match (in fact, the TCS often has extra outputs).
 * So, we need to take the extra step of unifying these on the fly.
 */
static void
get_unified_tess_slots(const struct crocus_context *ice,
                       uint64_t *per_vertex_slots,
                       uint32_t *per_patch_slots)
{
   const struct shader_info *tcs =
      crocus_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
   const struct shader_info *tes =
      crocus_get_shader_info(ice, MESA_SHADER_TESS_EVAL);

   *per_vertex_slots = tes->inputs_read;
   *per_patch_slots = tes->patch_inputs_read;

   if (tcs) {
      *per_vertex_slots |= tcs->outputs_written;
      *per_patch_slots |= tcs->patch_outputs_written;
   }
}

/**
 * Compile a tessellation control shader, and upload the assembly.
 */
static struct crocus_compiled_shader *
crocus_compile_tcs(struct crocus_context *ice,
                   struct crocus_uncompiled_shader *ish,
                   const struct elk_tcs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_tcs_prog_data *tcs_prog_data =
      rzalloc(mem_ctx, struct elk_tcs_prog_data);
   struct elk_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
   struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
   const struct intel_device_info *devinfo = &screen->devinfo;
   enum elk_param_builtin *system_values = NULL;
   unsigned num_system_values = 0;
   unsigned num_cbufs = 0;

   nir_shader *nir;

   struct crocus_binding_table bt;

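   /* If the app didn't supply a TCS, synthesize a passthrough shader that
    * simply copies the VS outputs through to the TES.
    */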
1408 if (ish) {
1409 nir = nir_shader_clone(mem_ctx, ish->nir);
1410 } else {
1411 nir = elk_nir_create_passthrough_tcs(mem_ctx, compiler, key);
1412 }
1413
1414 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1415 &num_system_values, &num_cbufs);
1416
1417 crocus_lower_swizzles(nir, &key->base.tex);
1418 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1419 num_system_values, num_cbufs, &key->base.tex);
1420 if (can_push_ubo(devinfo))
1421 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1422
1423 struct elk_tcs_prog_key key_clean = *key;
1424 crocus_sanitize_tex_key(&key_clean.base.tex);
1425
1426 struct elk_compile_tcs_params params = {
1427 .base = {
1428 .mem_ctx = mem_ctx,
1429 .nir = nir,
1430 .log_data = &ice->dbg,
1431 },
1432 .key = &key_clean,
1433 .prog_data = tcs_prog_data,
1434 };
1435
1436 const unsigned *program = elk_compile_tcs(compiler, ¶ms);
1437 if (program == NULL) {
1438 dbg_printf("Failed to compile control shader: %s\n", params.base.error_str);
1439 ralloc_free(mem_ctx);
1440 return false;
1441 }
1442
1443 if (ish) {
1444 if (ish->compiled_once) {
1445 crocus_debug_recompile(ice, &nir->info, &key->base);
1446 } else {
1447 ish->compiled_once = true;
1448 }
1449 }
1450
1451 struct crocus_compiled_shader *shader =
1452 crocus_upload_shader(ice, CROCUS_CACHE_TCS, sizeof(*key), key, program,
1453 prog_data->program_size,
1454 prog_data, sizeof(*tcs_prog_data), NULL,
1455 system_values, num_system_values,
1456 num_cbufs, &bt);
1457
1458 if (ish)
1459 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1460 ice->shaders.cache_bo_map,
1461 key, sizeof(*key));
1462
1463 ralloc_free(mem_ctx);
1464 return shader;
1465 }
1466
1467 /**
1468 * Update the current tessellation control shader variant.
1469 *
1470 * Fill out the key, look in the cache, compile and bind if needed.
1471 */
1472 static void
crocus_update_compiled_tcs(struct crocus_context * ice)1473 crocus_update_compiled_tcs(struct crocus_context *ice)
1474 {
1475 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1476 struct crocus_uncompiled_shader *tcs =
1477 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1478 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1479 const struct intel_device_info *devinfo = &screen->devinfo;
1480
1481 const struct shader_info *tes_info =
1482 crocus_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1483 struct elk_tcs_prog_key key = {
1484 KEY_INIT_NO_ID(),
1485 .base.program_string_id = tcs ? tcs->program_id : 0,
1486 ._tes_primitive_mode = tes_info->tess._primitive_mode,
1487 .input_vertices = ice->state.vertices_per_patch,
1488 .quads_workaround = tes_info->tess._primitive_mode == TESS_PRIMITIVE_QUADS &&
1489 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1490 };
1491
1492 if (tcs && tcs->nos & (1ull << CROCUS_NOS_TEXTURES))
1493 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_TESS_CTRL, tcs,
1494 tcs->nir->info.uses_texture_gather, &key.base.tex);
1495 get_unified_tess_slots(ice, &key.outputs_written,
1496 &key.patch_outputs_written);
1497 screen->vtbl.populate_tcs_key(ice, &key);
1498
1499 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_TCS];
1500 struct crocus_compiled_shader *shader =
1501 crocus_find_cached_shader(ice, CROCUS_CACHE_TCS, sizeof(key), &key);
1502
1503 if (tcs && !shader)
1504 shader = crocus_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1505
1506 if (!shader)
1507 shader = crocus_compile_tcs(ice, tcs, &key);
1508
1509 if (old != shader) {
1510 ice->shaders.prog[CROCUS_CACHE_TCS] = shader;
1511 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_TCS |
1512 CROCUS_STAGE_DIRTY_BINDINGS_TCS |
1513 CROCUS_STAGE_DIRTY_CONSTANTS_TCS;
1514 shs->sysvals_need_upload = true;
1515 }
1516 }
1517
1518 /**
1519 * Compile a tessellation evaluation shader, and upload the assembly.
1520 */
1521 static struct crocus_compiled_shader *
crocus_compile_tes(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_tes_prog_key * key)1522 crocus_compile_tes(struct crocus_context *ice,
1523 struct crocus_uncompiled_shader *ish,
1524 const struct elk_tes_prog_key *key)
1525 {
1526 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1527 const struct elk_compiler *compiler = screen->compiler;
1528 void *mem_ctx = ralloc_context(NULL);
1529 struct elk_tes_prog_data *tes_prog_data =
1530 rzalloc(mem_ctx, struct elk_tes_prog_data);
1531 struct elk_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1532 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1533 enum elk_param_builtin *system_values;
1534 const struct intel_device_info *devinfo = &screen->devinfo;
1535 unsigned num_system_values;
1536 unsigned num_cbufs;
1537
1538 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1539
1540 if (key->nr_userclip_plane_consts) {
1541 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1542 nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true,
1543 false, NULL);
1544 nir_lower_io_to_temporaries(nir, impl, true, false);
1545 nir_lower_global_vars_to_local(nir);
1546 nir_lower_vars_to_ssa(nir);
1547 nir_shader_gather_info(nir, impl);
1548 }
1549
1550 if (key->clamp_pointsize)
1551 nir_lower_point_size(nir, 1.0, 255.0);
1552
1553 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1554 &num_system_values, &num_cbufs);
1555 crocus_lower_swizzles(nir, &key->base.tex);
1556 struct crocus_binding_table bt;
1557 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1558 num_system_values, num_cbufs, &key->base.tex);
1559
1560 if (can_push_ubo(devinfo))
1561 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1562
1563 struct intel_vue_map input_vue_map;
1564 elk_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1565 key->patch_inputs_read);
1566
1567 struct elk_tes_prog_key key_clean = *key;
1568 crocus_sanitize_tex_key(&key_clean.base.tex);
1569
1570 struct elk_compile_tes_params params = {
1571 .base = {
1572 .mem_ctx = mem_ctx,
1573 .nir = nir,
1574 .log_data = &ice->dbg,
1575 },
1576 .key = &key_clean,
1577 .prog_data = tes_prog_data,
1578 .input_vue_map = &input_vue_map,
1579 };
1580
1581 const unsigned *program = elk_compile_tes(compiler, &params);
1582 if (program == NULL) {
1583 dbg_printf("Failed to compile tessellation evaluation shader: %s\n", params.base.error_str);
1584 ralloc_free(mem_ctx);
1585 return NULL;
1586 }
1587
1588 if (ish->compiled_once) {
1589 crocus_debug_recompile(ice, &nir->info, &key->base);
1590 } else {
1591 ish->compiled_once = true;
1592 }
1593
1594 uint32_t *so_decls = NULL;
1595 if (devinfo->ver > 6)
1596 so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
1597 &vue_prog_data->vue_map);
1598
1599 struct crocus_compiled_shader *shader =
1600 crocus_upload_shader(ice, CROCUS_CACHE_TES, sizeof(*key), key, program,
1601 prog_data->program_size,
1602 prog_data, sizeof(*tes_prog_data), so_decls,
1603 system_values, num_system_values,
1604 num_cbufs, &bt);
1605
1606 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1607 ice->shaders.cache_bo_map,
1608 key, sizeof(*key));
1609
1610 ralloc_free(mem_ctx);
1611 return shader;
1612 }
1613
1614 /**
1615 * Update the current tessellation evaluation shader variant.
1616 *
1617 * Fill out the key, look in the cache, compile and bind if needed.
1618 */
1619 static void
1620 crocus_update_compiled_tes(struct crocus_context *ice)
1621 {
1622 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1623 struct crocus_uncompiled_shader *ish =
1624 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1625 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1626 struct elk_tes_prog_key key = { KEY_INIT() };
1627 const struct intel_device_info *devinfo = &screen->devinfo;
1628
1629 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1630 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_TESS_EVAL, ish,
1631 ish->nir->info.uses_texture_gather, &key.base.tex);
1632 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1633 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1634
1635 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_TES];
1636 struct crocus_compiled_shader *shader =
1637 crocus_find_cached_shader(ice, CROCUS_CACHE_TES, sizeof(key), &key);
1638
1639 if (!shader)
1640 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1641
1642 if (!shader)
1643 shader = crocus_compile_tes(ice, ish, &key);
1644
1645 if (old != shader) {
1646 ice->shaders.prog[CROCUS_CACHE_TES] = shader;
1647 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_TES |
1648 CROCUS_STAGE_DIRTY_BINDINGS_TES |
1649 CROCUS_STAGE_DIRTY_CONSTANTS_TES;
1650 shs->sysvals_need_upload = true;
1651 }
1652
1653 /* TODO: Could compare and avoid flagging this. */
1654 const struct shader_info *tes_info = &ish->nir->info;
1655 if (BITSET_TEST(tes_info->system_values_read, SYSTEM_VALUE_VERTICES_IN)) {
1656 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CONSTANTS_TES;
1657 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1658 }
1659 }
1660
1661 /**
1662 * Compile a geometry shader, and upload the assembly.
1663 */
1664 static struct crocus_compiled_shader *
1665 crocus_compile_gs(struct crocus_context *ice,
1666 struct crocus_uncompiled_shader *ish,
1667 const struct elk_gs_prog_key *key)
1668 {
1669 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1670 const struct elk_compiler *compiler = screen->compiler;
1671 const struct intel_device_info *devinfo = &screen->devinfo;
1672 void *mem_ctx = ralloc_context(NULL);
1673 struct elk_gs_prog_data *gs_prog_data =
1674 rzalloc(mem_ctx, struct elk_gs_prog_data);
1675 struct elk_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1676 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1677 enum elk_param_builtin *system_values;
1678 unsigned num_system_values;
1679 unsigned num_cbufs;
1680
1681 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1682
1683 if (key->nr_userclip_plane_consts) {
1684 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1685 nir_lower_clip_gs(nir, (1 << key->nr_userclip_plane_consts) - 1, false,
1686 NULL);
1687 nir_lower_io_to_temporaries(nir, impl, true, false);
1688 nir_lower_global_vars_to_local(nir);
1689 nir_lower_vars_to_ssa(nir);
1690 nir_shader_gather_info(nir, impl);
1691 }
1692
1693 if (key->clamp_pointsize)
1694 nir_lower_point_size(nir, 1.0, 255.0);
1695
1696 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1697 &num_system_values, &num_cbufs);
1698 crocus_lower_swizzles(nir, &key->base.tex);
1699 struct crocus_binding_table bt;
1700 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1701 num_system_values, num_cbufs, &key->base.tex);
1702
1703 if (can_push_ubo(devinfo))
1704 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1705
1706 elk_compute_vue_map(devinfo,
1707 &vue_prog_data->vue_map, nir->info.outputs_written,
1708 nir->info.separate_shader, /* pos slots */ 1);
1709
1710 if (devinfo->ver == 6)
1711 gfx6_gs_xfb_setup(&ish->stream_output, gs_prog_data);
1712 struct elk_gs_prog_key key_clean = *key;
1713 crocus_sanitize_tex_key(&key_clean.base.tex);
1714
1715 struct elk_compile_gs_params params = {
1716 .base = {
1717 .mem_ctx = mem_ctx,
1718 .nir = nir,
1719 .log_data = &ice->dbg,
1720 },
1721 .key = &key_clean,
1722 .prog_data = gs_prog_data,
1723 };
1724
1725 const unsigned *program = elk_compile_gs(compiler, &params);
1726 if (program == NULL) {
1727 dbg_printf("Failed to compile geometry shader: %s\n", params.base.error_str);
1728 ralloc_free(mem_ctx);
1729 return NULL;
1730 }
1731
1732 if (ish->compiled_once) {
1733 crocus_debug_recompile(ice, &nir->info, &key->base);
1734 } else {
1735 ish->compiled_once = true;
1736 }
1737
1738 uint32_t *so_decls = NULL;
1739 if (devinfo->ver > 6)
1740 so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
1741 &vue_prog_data->vue_map);
1742
1743 struct crocus_compiled_shader *shader =
1744 crocus_upload_shader(ice, CROCUS_CACHE_GS, sizeof(*key), key, program,
1745 prog_data->program_size,
1746 prog_data, sizeof(*gs_prog_data), so_decls,
1747 system_values, num_system_values,
1748 num_cbufs, &bt);
1749
1750 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1751 ice->shaders.cache_bo_map,
1752 key, sizeof(*key));
1753
1754 ralloc_free(mem_ctx);
1755 return shader;
1756 }
1757
1758 /**
1759 * Update the current geometry shader variant.
1760 *
1761 * Fill out the key, look in the cache, compile and bind if needed.
1762 */
1763 static void
1764 crocus_update_compiled_gs(struct crocus_context *ice)
1765 {
1766 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1767 struct crocus_uncompiled_shader *ish =
1768 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1769 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_GS];
1770 struct crocus_compiled_shader *shader = NULL;
1771
1772 if (ish) {
1773 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1774 const struct intel_device_info *devinfo = &screen->devinfo;
1775 struct elk_gs_prog_key key = { KEY_INIT() };
1776
1777 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1778 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_GEOMETRY, ish,
1779 ish->nir->info.uses_texture_gather, &key.base.tex);
1780 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1781
1782 shader =
1783 crocus_find_cached_shader(ice, CROCUS_CACHE_GS, sizeof(key), &key);
1784
1785 if (!shader)
1786 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1787
1788 if (!shader)
1789 shader = crocus_compile_gs(ice, ish, &key);
1790 }
1791
1792 if (old != shader) {
1793 ice->shaders.prog[CROCUS_CACHE_GS] = shader;
1794 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS |
1795 CROCUS_STAGE_DIRTY_BINDINGS_GS |
1796 CROCUS_STAGE_DIRTY_CONSTANTS_GS;
1797 shs->sysvals_need_upload = true;
1798 }
1799 }
1800
1801 /**
1802 * Compile a fragment (pixel) shader, and upload the assembly.
1803 */
1804 static struct crocus_compiled_shader *
1805 crocus_compile_fs(struct crocus_context *ice,
1806 struct crocus_uncompiled_shader *ish,
1807 const struct elk_wm_prog_key *key,
1808 struct intel_vue_map *vue_map)
1809 {
1810 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1811 const struct elk_compiler *compiler = screen->compiler;
1812 void *mem_ctx = ralloc_context(NULL);
1813 struct elk_wm_prog_data *fs_prog_data =
1814 rzalloc(mem_ctx, struct elk_wm_prog_data);
1815 struct elk_stage_prog_data *prog_data = &fs_prog_data->base;
1816 enum elk_param_builtin *system_values;
1817 const struct intel_device_info *devinfo = &screen->devinfo;
1818 unsigned num_system_values;
1819 unsigned num_cbufs;
1820
1821 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1822
1823 prog_data->use_alt_mode = nir->info.use_legacy_math_rules;
1824
1825 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1826 &num_system_values, &num_cbufs);
1827
1828 /* Lower output variables to load_output intrinsics before setting up
1829 * binding tables, so crocus_setup_binding_table can map any load_output
1830 * intrinsics to CROCUS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1831 * non-coherent framebuffer fetches.
1832 */
1833 elk_nir_lower_fs_outputs(nir);
1834
1835 /* lower swizzles before binding table */
1836 crocus_lower_swizzles(nir, &key->base.tex);
1837 int null_rts = 1;
1838
1839 struct crocus_binding_table bt;
1840 crocus_setup_binding_table(devinfo, nir, &bt,
1841 MAX2(key->nr_color_regions, null_rts),
1842 num_system_values, num_cbufs,
1843 &key->base.tex);
1844
1845 if (can_push_ubo(devinfo))
1846 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1847
1848 struct elk_wm_prog_key key_clean = *key;
1849 crocus_sanitize_tex_key(&key_clean.base.tex);
1850
1851 struct elk_compile_fs_params params = {
1852 .base = {
1853 .mem_ctx = mem_ctx,
1854 .nir = nir,
1855 .log_data = &ice->dbg,
1856 },
1857 .key = &key_clean,
1858 .prog_data = fs_prog_data,
1859
1860 .allow_spilling = true,
1861 .max_polygons = 1,
1862 .vue_map = vue_map,
1863 };
1864 const unsigned *program =
1865 elk_compile_fs(compiler, &params);
1866 if (program == NULL) {
1867 dbg_printf("Failed to compile fragment shader: %s\n", params.base.error_str);
1868 ralloc_free(mem_ctx);
1869 return NULL;
1870 }
1871
1872 if (ish->compiled_once) {
1873 crocus_debug_recompile(ice, &nir->info, &key->base);
1874 } else {
1875 ish->compiled_once = true;
1876 }
1877
1878 struct crocus_compiled_shader *shader =
1879 crocus_upload_shader(ice, CROCUS_CACHE_FS, sizeof(*key), key, program,
1880 prog_data->program_size,
1881 prog_data, sizeof(*fs_prog_data), NULL,
1882 system_values, num_system_values,
1883 num_cbufs, &bt);
1884
1885 crocus_disk_cache_store(screen->disk_cache, ish, shader,
1886 ice->shaders.cache_bo_map,
1887 key, sizeof(*key));
1888
1889 ralloc_free(mem_ctx);
1890 return shader;
1891 }
1892
1893 /**
1894 * Update the current fragment shader variant.
1895 *
1896 * Fill out the key, look in the cache, compile and bind if needed.
1897 */
1898 static void
1899 crocus_update_compiled_fs(struct crocus_context *ice)
1900 {
1901 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1902 const struct intel_device_info *devinfo = &screen->devinfo;
1903 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1904 struct crocus_uncompiled_shader *ish =
1905 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1906 struct elk_wm_prog_key key = { KEY_INIT() };
1907
1908 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
1909 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_FRAGMENT, ish,
1910 ish->nir->info.uses_texture_gather, &key.base.tex);
1911 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1912
1913 if (ish->nos & (1ull << CROCUS_NOS_LAST_VUE_MAP))
1914 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1915
1916 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_FS];
1917 struct crocus_compiled_shader *shader =
1918 crocus_find_cached_shader(ice, CROCUS_CACHE_FS, sizeof(key), &key);
1919
1920 if (!shader)
1921 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1922
1923 if (!shader)
1924 shader = crocus_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1925
1926 if (old != shader) {
1927 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1928 // toggles. might be able to avoid flagging SBE too.
1929 ice->shaders.prog[CROCUS_CACHE_FS] = shader;
1930 ice->state.dirty |= CROCUS_DIRTY_WM;
1931 /* gen4 clip/sf rely on fs prog_data */
1932 if (devinfo->ver < 6)
1933 ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;
1934 else
1935 ice->state.dirty |= CROCUS_DIRTY_CLIP | CROCUS_DIRTY_GEN6_BLEND_STATE;
1936 if (devinfo->ver == 6)
1937 ice->state.dirty |= CROCUS_DIRTY_RASTER;
1938 if (devinfo->ver >= 7)
1939 ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;
1940 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_FS |
1941 CROCUS_STAGE_DIRTY_BINDINGS_FS |
1942 CROCUS_STAGE_DIRTY_CONSTANTS_FS;
1943 shs->sysvals_need_upload = true;
1944 }
1945 }
1946
1947 /**
1948 * Update the last enabled stage's VUE map.
1949 *
1950 * When the shader feeding the rasterizer's output interface changes, we
1951 * need to re-emit various packets.
1952 */
1953 static void
1954 update_last_vue_map(struct crocus_context *ice,
1955 struct elk_stage_prog_data *prog_data)
1956 {
1957 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1958 const struct intel_device_info *devinfo = &screen->devinfo;
1959 struct elk_vue_prog_data *vue_prog_data = (void *) prog_data;
1960 struct intel_vue_map *vue_map = &vue_prog_data->vue_map;
1961 struct intel_vue_map *old_map = ice->shaders.last_vue_map;
1962 const uint64_t changed_slots =
1963 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1964
1965 if (changed_slots & VARYING_BIT_VIEWPORT) {
1966 ice->state.num_viewports =
1967 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? CROCUS_MAX_VIEWPORTS : 1;
1968 ice->state.dirty |= CROCUS_DIRTY_SF_CL_VIEWPORT |
1969 CROCUS_DIRTY_CC_VIEWPORT;
1970 if (devinfo->ver < 6)
1971 ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;
1972
1973 if (devinfo->ver <= 6)
1974 ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
1975
1976 if (devinfo->ver >= 6)
1977 ice->state.dirty |= CROCUS_DIRTY_CLIP |
1978 CROCUS_DIRTY_GEN6_SCISSOR_RECT;
1979 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS |
1980 ice->state.stage_dirty_for_nos[CROCUS_NOS_LAST_VUE_MAP];
1981 }
1982
1983 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1984 ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;
1985 if (devinfo->ver < 6)
1986 ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
1987 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS;
1988 }
1989
1990 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1991 }
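
/* A worked example of the slots_valid XOR test above; this is an
 * illustrative aside, not driver code. A slot toggled between the old and
 * new VUE maps exactly when its bit survives the XOR:
 *
 *    old = VARYING_BIT_POS | VARYING_BIT_VIEWPORT;
 *    new = VARYING_BIT_POS | VARYING_BIT_PSIZ;
 *    old ^ new == VARYING_BIT_VIEWPORT | VARYING_BIT_PSIZ
 *
 * Slots present in both maps (POS here) drop out, so only varyings that
 * appeared or disappeared can trigger the re-emission paths above.
 */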
1992
1993 static void
1994 crocus_update_pull_constant_descriptors(struct crocus_context *ice,
1995 gl_shader_stage stage)
1996 {
1997 struct crocus_compiled_shader *shader = ice->shaders.prog[stage];
1998
1999 if (!shader || !shader->prog_data->has_ubo_pull)
2000 return;
2001
2002 struct crocus_shader_state *shs = &ice->state.shaders[stage];
2003 bool any_new_descriptors =
2004 shader->num_system_values > 0 && shs->sysvals_need_upload;
2005
2006 unsigned bound_cbufs = shs->bound_cbufs;
2007
2008 while (bound_cbufs) {
2009 const int i = u_bit_scan(&bound_cbufs);
2010 struct pipe_constant_buffer *cbuf = &shs->constbufs[i];
2011 if (cbuf->buffer) {
2012 any_new_descriptors = true;
2013 }
2014 }
2015
2016 if (any_new_descriptors)
2017 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_BINDINGS_VS << stage;
2018 }
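
/* Aside: the while/u_bit_scan() loop above is the usual Mesa idiom for
 * visiting each set bit of a slot mask exactly once. A minimal sketch of
 * the pattern follows (example_count_set_slots is hypothetical, not driver
 * code; u_bit_scan() comes from the util headers already in scope here):
 */
static inline unsigned
example_count_set_slots(unsigned mask)
{
   unsigned n = 0;
   while (mask) {
      /* Returns the index of the lowest set bit and clears it in mask. */
      const int slot = u_bit_scan(&mask);
      (void)slot; /* a real consumer would index per-slot state here */
      n++;
   }
   return n;
}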
2019
2020 /**
2021 * Get the prog_data for a given stage, or NULL if the stage is disabled.
2022 */
2023 static struct elk_vue_prog_data *
2024 get_vue_prog_data(struct crocus_context *ice, gl_shader_stage stage)
2025 {
2026 if (!ice->shaders.prog[stage])
2027 return NULL;
2028
2029 return (void *) ice->shaders.prog[stage]->prog_data;
2030 }
2031
2032 static struct crocus_compiled_shader *
2033 crocus_compile_clip(struct crocus_context *ice, struct elk_clip_prog_key *key)
2034 {
2035 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2036 const struct elk_compiler *compiler = screen->compiler;
2037 void *mem_ctx;
2038 unsigned program_size;
2039 mem_ctx = ralloc_context(NULL);
2040
2041 struct elk_clip_prog_data *clip_prog_data =
2042 rzalloc(mem_ctx, struct elk_clip_prog_data);
2043
2044 const unsigned *program = elk_compile_clip(compiler, mem_ctx, key, clip_prog_data,
2045 ice->shaders.last_vue_map, &program_size);
2046
2047 if (program == NULL) {
2048 dbg_printf("failed to compile clip shader\n");
2049 ralloc_free(mem_ctx);
2050 return NULL;
2051 }
2052 struct crocus_binding_table bt;
2053 memset(&bt, 0, sizeof(bt));
2054
2055 struct crocus_compiled_shader *shader =
2056 crocus_upload_shader(ice, CROCUS_CACHE_CLIP, sizeof(*key), key, program,
2057 program_size,
2058 (struct elk_stage_prog_data *)clip_prog_data, sizeof(*clip_prog_data),
2059 NULL, NULL, 0, 0, &bt);
2060 ralloc_free(mem_ctx);
2061 return shader;
2062 }
2063 static void
2064 crocus_update_compiled_clip(struct crocus_context *ice)
2065 {
2066 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2067 struct elk_clip_prog_key key;
2068 struct crocus_compiled_shader *old = ice->shaders.clip_prog;
2069 memset(&key, 0, sizeof(key));
2070
2071 const struct elk_wm_prog_data *wm_prog_data = elk_wm_prog_data(ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data);
2072 if (wm_prog_data) {
2073 key.contains_flat_varying = wm_prog_data->contains_flat_varying;
2074 key.contains_noperspective_varying =
2075 wm_prog_data->contains_noperspective_varying;
2076 memcpy(key.interp_mode, wm_prog_data->interp_mode, sizeof(key.interp_mode));
2077 }
2078
2079 key.primitive = ice->state.reduced_prim_mode;
2080 key.attrs = ice->shaders.last_vue_map->slots_valid;
2081
2082 struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
2083 key.pv_first = rs_state->flatshade_first;
2084
2085 if (rs_state->clip_plane_enable)
2086 key.nr_userclip = util_logbase2(rs_state->clip_plane_enable) + 1;
2087
2088 if (screen->devinfo.ver == 5)
2089 key.clip_mode = ELK_CLIP_MODE_KERNEL_CLIP;
2090 else
2091 key.clip_mode = ELK_CLIP_MODE_NORMAL;
2092
2093 if (key.primitive == MESA_PRIM_TRIANGLES) {
2094 if (rs_state->cull_face == PIPE_FACE_FRONT_AND_BACK)
2095 key.clip_mode = ELK_CLIP_MODE_REJECT_ALL;
2096 else {
2097 uint32_t fill_front = ELK_CLIP_FILL_MODE_CULL;
2098 uint32_t fill_back = ELK_CLIP_FILL_MODE_CULL;
2099 uint32_t offset_front = 0;
2100 uint32_t offset_back = 0;
2101
2102 if (!(rs_state->cull_face & PIPE_FACE_FRONT)) {
2103 switch (rs_state->fill_front) {
2104 case PIPE_POLYGON_MODE_FILL:
2105 fill_front = ELK_CLIP_FILL_MODE_FILL;
2106 offset_front = 0;
2107 break;
2108 case PIPE_POLYGON_MODE_LINE:
2109 fill_front = ELK_CLIP_FILL_MODE_LINE;
2110 offset_front = rs_state->offset_line;
2111 break;
2112 case PIPE_POLYGON_MODE_POINT:
2113 fill_front = ELK_CLIP_FILL_MODE_POINT;
2114 offset_front = rs_state->offset_point;
2115 break;
2116 }
2117 }
2118
2119 if (!(rs_state->cull_face & PIPE_FACE_BACK)) {
2120 switch (rs_state->fill_back) {
2121 case PIPE_POLYGON_MODE_FILL:
2122 fill_back = ELK_CLIP_FILL_MODE_FILL;
2123 offset_back = 0;
2124 break;
2125 case PIPE_POLYGON_MODE_LINE:
2126 fill_back = ELK_CLIP_FILL_MODE_LINE;
2127 offset_back = rs_state->offset_line;
2128 break;
2129 case PIPE_POLYGON_MODE_POINT:
2130 fill_back = ELK_CLIP_FILL_MODE_POINT;
2131 offset_back = rs_state->offset_point;
2132 break;
2133 }
2134 }
2135
2136 if (rs_state->fill_back != PIPE_POLYGON_MODE_FILL ||
2137 rs_state->fill_front != PIPE_POLYGON_MODE_FILL) {
2138 key.do_unfilled = 1;
2139
2140 /* Most cases the fixed function units will handle. Cases where
2141 * one or more polygon faces are unfilled will require help:
2142 */
2143 key.clip_mode = ELK_CLIP_MODE_CLIP_NON_REJECTED;
2144
2145 if (offset_back || offset_front) {
2146 double mrd = 0.0;
2147 if (ice->state.framebuffer.zsbuf)
2148 mrd = util_get_depth_format_mrd(util_format_description(ice->state.framebuffer.zsbuf->format));
2149 key.offset_units = rs_state->offset_units * mrd * 2;
2150 key.offset_factor = rs_state->offset_scale * mrd;
2151 key.offset_clamp = rs_state->offset_clamp * mrd;
2152 }
2153
2154 if (!(rs_state->front_ccw ^ rs_state->bottom_edge_rule)) {
2155 key.fill_ccw = fill_front;
2156 key.fill_cw = fill_back;
2157 key.offset_ccw = offset_front;
2158 key.offset_cw = offset_back;
2159 if (rs_state->light_twoside &&
2160 key.fill_cw != ELK_CLIP_FILL_MODE_CULL)
2161 key.copy_bfc_cw = 1;
2162 } else {
2163 key.fill_cw = fill_front;
2164 key.fill_ccw = fill_back;
2165 key.offset_cw = offset_front;
2166 key.offset_ccw = offset_back;
2167 if (rs_state->light_twoside &&
2168 key.fill_ccw != ELK_CLIP_FILL_MODE_CULL)
2169 key.copy_bfc_ccw = 1;
2170 }
2171 }
2172 }
2173 }
2174 struct crocus_compiled_shader *shader =
2175 crocus_find_cached_shader(ice, CROCUS_CACHE_CLIP, sizeof(key), &key);
2176
2177 if (!shader)
2178 shader = crocus_compile_clip(ice, &key);
2179
2180 if (old != shader) {
2181 ice->state.dirty |= CROCUS_DIRTY_CLIP;
2182 ice->shaders.clip_prog = shader;
2183 }
2184 }
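
/* Note on the front_ccw ^ bottom_edge_rule test above (a sketch of the
 * reasoning, not an authoritative statement): flipping the window origin
 * inverts screen-space winding, so the orientation the clip kernel sees is
 * the API front-face winding XOR'd with that flip. That is why the two
 * branches swap fill_front/offset_front between the fill_ccw/offset_ccw
 * and fill_cw/offset_cw key fields.
 */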
2185
2186 static struct crocus_compiled_shader *
2187 crocus_compile_sf(struct crocus_context *ice, struct elk_sf_prog_key *key)
2188 {
2189 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2190 const struct elk_compiler *compiler = screen->compiler;
2191 void *mem_ctx;
2192 unsigned program_size;
2193 mem_ctx = ralloc_context(NULL);
2194
2195 struct elk_sf_prog_data *sf_prog_data =
2196 rzalloc(mem_ctx, struct elk_sf_prog_data);
2197
2198 const unsigned *program = elk_compile_sf(compiler, mem_ctx, key, sf_prog_data,
2199 ice->shaders.last_vue_map, &program_size);
2200
2201 if (program == NULL) {
2202 dbg_printf("failed to compile sf shader\n");
2203 ralloc_free(mem_ctx);
2204 return NULL;
2205 }
2206
2207 struct crocus_binding_table bt;
2208 memset(&bt, 0, sizeof(bt));
2209 struct crocus_compiled_shader *shader =
2210 crocus_upload_shader(ice, CROCUS_CACHE_SF, sizeof(*key), key, program,
2211 program_size,
2212 (struct elk_stage_prog_data *)sf_prog_data, sizeof(*sf_prog_data),
2213 NULL, NULL, 0, 0, &bt);
2214 ralloc_free(mem_ctx);
2215 return shader;
2216 }
2217
2218 static void
2219 crocus_update_compiled_sf(struct crocus_context *ice)
2220 {
2221 struct elk_sf_prog_key key;
2222 struct crocus_compiled_shader *old = ice->shaders.sf_prog;
2223 memset(&key, 0, sizeof(key));
2224
2225 key.attrs = ice->shaders.last_vue_map->slots_valid;
2226
2227 switch (ice->state.reduced_prim_mode) {
2228 case MESA_PRIM_TRIANGLES:
2229 default:
2230 if (key.attrs & BITFIELD64_BIT(VARYING_SLOT_EDGE))
2231 key.primitive = ELK_SF_PRIM_UNFILLED_TRIS;
2232 else
2233 key.primitive = ELK_SF_PRIM_TRIANGLES;
2234 break;
2235 case MESA_PRIM_LINES:
2236 key.primitive = ELK_SF_PRIM_LINES;
2237 break;
2238 case MESA_PRIM_POINTS:
2239 key.primitive = ELK_SF_PRIM_POINTS;
2240 break;
2241 }
2242
2243 struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
2244 key.userclip_active = rs_state->clip_plane_enable != 0;
2245 const struct elk_wm_prog_data *wm_prog_data = elk_wm_prog_data(ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data);
2246 if (wm_prog_data) {
2247 key.contains_flat_varying = wm_prog_data->contains_flat_varying;
2248 memcpy(key.interp_mode, wm_prog_data->interp_mode, sizeof(key.interp_mode));
2249 }
2250
2251 key.do_twoside_color = rs_state->light_twoside;
2252
2253 key.do_point_sprite = rs_state->point_quad_rasterization;
2254 if (key.do_point_sprite) {
2255 key.point_sprite_coord_replace = rs_state->sprite_coord_enable & 0xff;
2256 if (rs_state->sprite_coord_enable & (1 << 8))
2257 key.do_point_coord = 1;
2258 if (wm_prog_data && wm_prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
2259 key.do_point_coord = 1;
2260 }
2261
2262 key.sprite_origin_lower_left = rs_state->sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT;
2263
2264 if (key.do_twoside_color) {
2265 key.frontface_ccw = rs_state->front_ccw;
2266 }
2267 struct crocus_compiled_shader *shader =
2268 crocus_find_cached_shader(ice, CROCUS_CACHE_SF, sizeof(key), &key);
2269
2270 if (!shader)
2271 shader = crocus_compile_sf(ice, &key);
2272
2273 if (old != shader) {
2274 ice->state.dirty |= CROCUS_DIRTY_RASTER;
2275 ice->shaders.sf_prog = shader;
2276 }
2277 }
2278
2279 static struct crocus_compiled_shader *
2280 crocus_compile_ff_gs(struct crocus_context *ice, struct elk_ff_gs_prog_key *key)
2281 {
2282 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2283 struct elk_compiler *compiler = screen->compiler;
2284 void *mem_ctx;
2285 unsigned program_size;
2286 mem_ctx = ralloc_context(NULL);
2287
2288 struct elk_ff_gs_prog_data *ff_gs_prog_data =
2289 rzalloc(mem_ctx, struct elk_ff_gs_prog_data);
2290
2291 const unsigned *program = elk_compile_ff_gs_prog(compiler, mem_ctx, key, ff_gs_prog_data,
2292 ice->shaders.last_vue_map, &program_size);
2293
2294 if (program == NULL) {
2295 dbg_printf("failed to compile sf shader\n");
2296 ralloc_free(mem_ctx);
2297 return false;
2298 }
2299
2300 struct crocus_binding_table bt;
2301 memset(&bt, 0, sizeof(bt));
2302
2303 if (screen->devinfo.ver == 6) {
2304 bt.sizes[CROCUS_SURFACE_GROUP_SOL] = ELK_MAX_SOL_BINDINGS;
2305 bt.used_mask[CROCUS_SURFACE_GROUP_SOL] = (uint64_t)-1;
2306
2307 bt.size_bytes = ELK_MAX_SOL_BINDINGS * 4;
2308 }
2309
2310 struct crocus_compiled_shader *shader =
2311 crocus_upload_shader(ice, CROCUS_CACHE_FF_GS, sizeof(*key), key, program,
2312 program_size,
2313 (struct elk_stage_prog_data *)ff_gs_prog_data, sizeof(*ff_gs_prog_data),
2314 NULL, NULL, 0, 0, &bt);
2315 ralloc_free(mem_ctx);
2316 return shader;
2317 }
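
/* Note on the gen6 SOL group sizing above: each binding-table slot is a
 * 32-bit offset to a SURFACE_STATE, so reserving ELK_MAX_SOL_BINDINGS
 * entries costs ELK_MAX_SOL_BINDINGS * 4 bytes, and used_mask is set to
 * all ones because the transform-feedback kernel may write through any of
 * those buffer bindings.
 */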
2318
2319 static void
2320 crocus_update_compiled_ff_gs(struct crocus_context *ice)
2321 {
2322 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2323 const struct intel_device_info *devinfo = &screen->devinfo;
2324 struct elk_ff_gs_prog_key key;
2325 struct crocus_compiled_shader *old = ice->shaders.ff_gs_prog;
2326 memset(&key, 0, sizeof(key));
2327
2328 assert(devinfo->ver < 7);
2329
2330 key.attrs = ice->shaders.last_vue_map->slots_valid;
2331
2332 key.primitive = screen->vtbl.translate_prim_type(ice->state.prim_mode, 0);
2333
2334 struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
2335 key.pv_first = rs_state->flatshade_first;
2336
2337 if (key.primitive == _3DPRIM_QUADLIST && !rs_state->flatshade) {
2338 /* Provide consistent primitive order with elk_set_prim's
2339 * optimization of single quads to trifans.
2340 */
2341 key.pv_first = true;
2342 }
2343
2344 if (devinfo->ver >= 6) {
2345 key.need_gs_prog = ice->state.streamout_active;
2346 if (key.need_gs_prog) {
2347 struct crocus_uncompiled_shader *vs =
2348 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
2349 gfx6_ff_gs_xfb_setup(&vs->stream_output,
2350 &key);
2351 }
2352 } else {
2353 key.need_gs_prog = (key.primitive == _3DPRIM_QUADLIST ||
2354 key.primitive == _3DPRIM_QUADSTRIP ||
2355 key.primitive == _3DPRIM_LINELOOP);
2356 }
2357
2358 struct crocus_compiled_shader *shader = NULL;
2359 if (key.need_gs_prog) {
2360 shader = crocus_find_cached_shader(ice, CROCUS_CACHE_FF_GS,
2361 sizeof(key), &key);
2362 if (!shader)
2363 shader = crocus_compile_ff_gs(ice, &key);
2364 }
2365 if (old != shader) {
2366 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS;
2367 if (!!old != !!shader)
2368 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
2369 ice->shaders.ff_gs_prog = shader;
2370 if (shader) {
2371 const struct elk_ff_gs_prog_data *gs_prog_data = (struct elk_ff_gs_prog_data *)ice->shaders.ff_gs_prog->prog_data;
2372 ice->state.last_xfb_verts_per_prim = gs_prog_data->svbi_postincrement_value;
2373 }
2374 }
2375 }
2376
2377 // XXX: crocus_compiled_shaders are space-leaking :(
2378 // XXX: do remember to unbind them if deleting them.
2379
2380 /**
2381 * Update the current shader variants for the given state.
2382 *
2383 * This should be called on every draw call to ensure that the correct
2384 * shaders are bound. It will also flag any dirty state triggered by
2385 * swapping out those shaders.
2386 */
2387 bool
2388 crocus_update_compiled_shaders(struct crocus_context *ice)
2389 {
2390 struct crocus_screen *screen = (void *) ice->ctx.screen;
2391 const uint64_t stage_dirty = ice->state.stage_dirty;
2392
2393 struct elk_vue_prog_data *old_prog_datas[4];
2394 if (!(ice->state.dirty & CROCUS_DIRTY_GEN6_URB)) {
2395 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
2396 old_prog_datas[i] = get_vue_prog_data(ice, i);
2397 }
2398
2399 if (stage_dirty & (CROCUS_STAGE_DIRTY_UNCOMPILED_TCS |
2400 CROCUS_STAGE_DIRTY_UNCOMPILED_TES)) {
2401 struct crocus_uncompiled_shader *tes =
2402 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
2403 if (tes) {
2404 crocus_update_compiled_tcs(ice);
2405 crocus_update_compiled_tes(ice);
2406 } else {
2407 ice->shaders.prog[CROCUS_CACHE_TCS] = NULL;
2408 ice->shaders.prog[CROCUS_CACHE_TES] = NULL;
2409 ice->state.stage_dirty |=
2410 CROCUS_STAGE_DIRTY_TCS | CROCUS_STAGE_DIRTY_TES |
2411 CROCUS_STAGE_DIRTY_BINDINGS_TCS | CROCUS_STAGE_DIRTY_BINDINGS_TES |
2412 CROCUS_STAGE_DIRTY_CONSTANTS_TCS | CROCUS_STAGE_DIRTY_CONSTANTS_TES;
2413 }
2414 }
2415
2416 if (stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_VS)
2417 crocus_update_compiled_vs(ice);
2418 if (stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_GS)
2419 crocus_update_compiled_gs(ice);
2420
2421 if (stage_dirty & (CROCUS_STAGE_DIRTY_UNCOMPILED_GS |
2422 CROCUS_STAGE_DIRTY_UNCOMPILED_TES)) {
2423 const struct crocus_compiled_shader *gs =
2424 ice->shaders.prog[MESA_SHADER_GEOMETRY];
2425 const struct crocus_compiled_shader *tes =
2426 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
2427
2428 bool points_or_lines = false;
2429
2430 if (gs) {
2431 const struct elk_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
2432 points_or_lines =
2433 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
2434 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
2435 } else if (tes) {
2436 const struct elk_tes_prog_data *tes_data = (void *) tes->prog_data;
2437 points_or_lines =
2438 tes_data->output_topology == INTEL_TESS_OUTPUT_TOPOLOGY_LINE ||
2439 tes_data->output_topology == INTEL_TESS_OUTPUT_TOPOLOGY_POINT;
2440 }
2441
2442 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
2443 /* Outbound to XY Clip enables */
2444 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
2445 ice->state.dirty |= CROCUS_DIRTY_CLIP;
2446 }
2447 }
2448
2449 if (!ice->shaders.prog[MESA_SHADER_VERTEX])
2450 return false;
2451
2452 gl_shader_stage last_stage = last_vue_stage(ice);
2453 struct crocus_compiled_shader *shader = ice->shaders.prog[last_stage];
2454 struct crocus_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
2455 update_last_vue_map(ice, shader->prog_data);
2456 if (ice->state.streamout != shader->streamout) {
2457 ice->state.streamout = shader->streamout;
2458 ice->state.dirty |= CROCUS_DIRTY_SO_DECL_LIST | CROCUS_DIRTY_STREAMOUT;
2459 }
2460
2461 if (ice->state.streamout_active) {
2462 screen->vtbl.update_so_strides(ice, ish->stream_output.stride);
2463 }
2464
2465 /* use ice->state version as last_vue_map can dirty this bit */
2466 if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_FS)
2467 crocus_update_compiled_fs(ice);
2468
2469 if (screen->devinfo.ver <= 6) {
2470 if (ice->state.dirty & CROCUS_DIRTY_GEN4_FF_GS_PROG &&
2471 !ice->shaders.prog[MESA_SHADER_GEOMETRY])
2472 crocus_update_compiled_ff_gs(ice);
2473 }
2474
2475 if (screen->devinfo.ver < 6) {
2476 if (ice->state.dirty & CROCUS_DIRTY_GEN4_CLIP_PROG)
2477 crocus_update_compiled_clip(ice);
2478 if (ice->state.dirty & CROCUS_DIRTY_GEN4_SF_PROG)
2479 crocus_update_compiled_sf(ice);
2480 }
2481
2482
2483 /* Changing shader interfaces may require a URB reconfiguration. */
2484 if (!(ice->state.dirty & CROCUS_DIRTY_GEN6_URB)) {
2485 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
2486 struct elk_vue_prog_data *old = old_prog_datas[i];
2487 struct elk_vue_prog_data *new = get_vue_prog_data(ice, i);
2488 if (!!old != !!new ||
2489 (new && new->urb_entry_size != old->urb_entry_size)) {
2490 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
2491 break;
2492 }
2493 }
2494 }
2495
2496 if (ice->state.stage_dirty & CROCUS_RENDER_STAGE_DIRTY_CONSTANTS) {
2497 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
2498 if (ice->state.stage_dirty & (CROCUS_STAGE_DIRTY_CONSTANTS_VS << i))
2499 crocus_update_pull_constant_descriptors(ice, i);
2500 }
2501 }
2502 return true;
2503 }
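
/* Aside on the URB check above: `!!old != !!new` collapses each pointer to
 * 0 or 1, so the test fires when a VUE stage was enabled or disabled
 * outright. Otherwise only a changed urb_entry_size forces
 * CROCUS_DIRTY_GEN6_URB, since matching entry sizes mean the existing URB
 * partitioning still fits the new shaders.
 */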
2504
2505 static struct crocus_compiled_shader *
2506 crocus_compile_cs(struct crocus_context *ice,
2507 struct crocus_uncompiled_shader *ish,
2508 const struct elk_cs_prog_key *key)
2509 {
2510 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2511 const struct elk_compiler *compiler = screen->compiler;
2512 void *mem_ctx = ralloc_context(NULL);
2513 struct elk_cs_prog_data *cs_prog_data =
2514 rzalloc(mem_ctx, struct elk_cs_prog_data);
2515 struct elk_stage_prog_data *prog_data = &cs_prog_data->base;
2516 enum elk_param_builtin *system_values;
2517 const struct intel_device_info *devinfo = &screen->devinfo;
2518 unsigned num_system_values;
2519 unsigned num_cbufs;
2520
2521 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
2522
2523 NIR_PASS_V(nir, elk_nir_lower_cs_intrinsics, devinfo, cs_prog_data);
2524
2525 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
2526 &num_system_values, &num_cbufs);
2527 crocus_lower_swizzles(nir, &key->base.tex);
2528 struct crocus_binding_table bt;
2529 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
2530 num_system_values, num_cbufs, &key->base.tex);
2531
2532 struct elk_compile_cs_params params = {
2533 .base = {
2534 .mem_ctx = mem_ctx,
2535 .nir = nir,
2536 .log_data = &ice->dbg,
2537 },
2538 .key = key,
2539 .prog_data = cs_prog_data,
2540 };
2541
2542 const unsigned *program =
2543 elk_compile_cs(compiler, &params);
2544 if (program == NULL) {
2545 dbg_printf("Failed to compile compute shader: %s\n", params.base.error_str);
2546 ralloc_free(mem_ctx);
2547 return NULL;
2548 }
2549
2550 if (ish->compiled_once) {
2551 crocus_debug_recompile(ice, &nir->info, &key->base);
2552 } else {
2553 ish->compiled_once = true;
2554 }
2555
2556 struct crocus_compiled_shader *shader =
2557 crocus_upload_shader(ice, CROCUS_CACHE_CS, sizeof(*key), key, program,
2558 prog_data->program_size,
2559 prog_data, sizeof(*cs_prog_data), NULL,
2560 system_values, num_system_values,
2561 num_cbufs, &bt);
2562
2563 crocus_disk_cache_store(screen->disk_cache, ish, shader,
2564 ice->shaders.cache_bo_map,
2565 key, sizeof(*key));
2566
2567 ralloc_free(mem_ctx);
2568 return shader;
2569 }
2570
2571 static void
2572 crocus_update_compiled_cs(struct crocus_context *ice)
2573 {
2574 struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2575 struct crocus_uncompiled_shader *ish =
2576 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2577 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2578 const struct intel_device_info *devinfo = &screen->devinfo;
2579 struct elk_cs_prog_key key = { KEY_INIT() };
2580
2581 if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
2582 crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_COMPUTE, ish,
2583 ish->nir->info.uses_texture_gather, &key.base.tex);
2584 screen->vtbl.populate_cs_key(ice, &key);
2585
2586 struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_CS];
2587 struct crocus_compiled_shader *shader =
2588 crocus_find_cached_shader(ice, CROCUS_CACHE_CS, sizeof(key), &key);
2589
2590 if (!shader)
2591 shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2592
2593 if (!shader)
2594 shader = crocus_compile_cs(ice, ish, &key);
2595
2596 if (old != shader) {
2597 ice->shaders.prog[CROCUS_CACHE_CS] = shader;
2598 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CS |
2599 CROCUS_STAGE_DIRTY_BINDINGS_CS |
2600 CROCUS_STAGE_DIRTY_CONSTANTS_CS;
2601 shs->sysvals_need_upload = true;
2602 }
2603 }
2604
2605 void
2606 crocus_update_compiled_compute_shader(struct crocus_context *ice)
2607 {
2608 if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_CS)
2609 crocus_update_compiled_cs(ice);
2610
2611 if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_CONSTANTS_CS)
2612 crocus_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2613 }
2614
2615 void
2616 crocus_fill_cs_push_const_buffer(struct elk_cs_prog_data *cs_prog_data,
2617 unsigned threads,
2618 uint32_t *dst)
2619 {
2620 assert(elk_cs_push_const_total_size(cs_prog_data, threads) > 0);
2621 assert(cs_prog_data->push.cross_thread.size == 0);
2622 assert(cs_prog_data->push.per_thread.dwords == 1);
2623 assert(cs_prog_data->base.param[0] == ELK_PARAM_BUILTIN_SUBGROUP_ID);
2624 for (unsigned t = 0; t < threads; t++)
2625 dst[8 * t] = t;
2626 }
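
/* Worked example for the loop above (illustrative only): per-thread push
 * data is one dword padded out to an 8-dword (32-byte) register, so with
 * threads == 4 the buffer ends up as
 *
 *    dst[0]  = 0   subgroup ID for thread 0
 *    dst[8]  = 1   subgroup ID for thread 1
 *    dst[16] = 2
 *    dst[24] = 3
 *
 * and each thread reads its ELK_PARAM_BUILTIN_SUBGROUP_ID from the first
 * dword of its own per-thread push-constant register.
 */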
2627
2628 /**
2629 * Allocate scratch BOs as needed for the given per-thread size and stage.
2630 */
2631 struct crocus_bo *
2632 crocus_get_scratch_space(struct crocus_context *ice,
2633 unsigned per_thread_scratch,
2634 gl_shader_stage stage)
2635 {
2636 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
2637 struct crocus_bufmgr *bufmgr = screen->bufmgr;
2638 const struct intel_device_info *devinfo = &screen->devinfo;
2639
2640 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2641 assert(encoded_size < (1 << 16));
2642
2643 struct crocus_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2644
2645 if (!*bop) {
2646 assert(stage < ARRAY_SIZE(devinfo->max_scratch_ids));
2647 uint32_t size = per_thread_scratch * devinfo->max_scratch_ids[stage];
2648 *bop = crocus_bo_alloc(bufmgr, "scratch", size);
2649 }
2650
2651 return *bop;
2652 }
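
/* A worked sketch of the scratch-slot encoding above (illustrative, not
 * driver code): per-thread scratch sizes are powers of two starting at
 * 1KB, so ffs(per_thread_scratch) - 11 maps 1KB -> 0, 2KB -> 1, 4KB -> 2,
 * and so on, giving a dense index into scratch_bos[]. The hypothetical
 * helper below shows the inverse mapping:
 */
static inline unsigned
example_decode_per_thread_scratch(unsigned encoded_size)
{
   return 1024u << encoded_size; /* 0 -> 1KB, 1 -> 2KB, 2 -> 4KB, ... */
}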
2653
2654 /* ------------------------------------------------------------------- */
2655
2656 /**
2657 * The pipe->create_[stage]_state() driver hooks.
2658 *
2659 * Performs basic NIR preprocessing, records any state dependencies, and
2660 * returns an crocus_uncompiled_shader as the Gallium CSO.
2661 *
2662 * Actual shader compilation to assembly happens later, at first use.
2663 */
2664 static void *
2665 crocus_create_uncompiled_shader(struct pipe_context *ctx,
2666 nir_shader *nir,
2667 const struct pipe_stream_output_info *so_info)
2668 {
2669 struct crocus_screen *screen = (struct crocus_screen *)ctx->screen;
2670 const struct intel_device_info *devinfo = &screen->devinfo;
2671 struct crocus_uncompiled_shader *ish =
2672 calloc(1, sizeof(struct crocus_uncompiled_shader));
2673 if (!ish)
2674 return NULL;
2675
2676 if (devinfo->ver >= 6)
2677 NIR_PASS(ish->needs_edge_flag, nir, crocus_fix_edge_flags);
2678 else
2679 ish->needs_edge_flag = false;
2680
2681 struct elk_nir_compiler_opts opts = {};
2682 elk_preprocess_nir(screen->compiler, nir, &opts);
2683
2684 NIR_PASS_V(nir, elk_nir_lower_storage_image,
2685 &(struct elk_nir_lower_storage_image_opts) {
2686 .devinfo = devinfo,
2687 .lower_loads = true,
2688 .lower_stores = true,
2689 .lower_atomics = true,
2690 .lower_get_size = true,
2691 });
2692 NIR_PASS_V(nir, crocus_lower_storage_image_derefs);
2693
2694 nir_sweep(nir);
2695
2696 ish->program_id = get_new_program_id(screen);
2697 ish->nir = nir;
2698 if (so_info) {
2699 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2700 update_so_info(&ish->stream_output, nir->info.outputs_written);
2701 }
2702
2703 if (screen->disk_cache) {
2704 /* Serialize the NIR to a binary blob that we can hash for the disk
2705 * cache. Drop unnecessary information (like variable names)
2706 * so the serialized NIR is smaller, and also to let us detect more
2707 * isomorphic shaders when hashing, increasing cache hits.
2708 */
2709 struct blob blob;
2710 blob_init(&blob);
2711 nir_serialize(&blob, nir, true);
2712 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2713 blob_finish(&blob);
2714 }
2715
2716 return ish;
2717 }
2718
2719 static struct crocus_uncompiled_shader *
2720 crocus_create_shader_state(struct pipe_context *ctx,
2721 const struct pipe_shader_state *state)
2722 {
2723 struct nir_shader *nir;
2724
2725 if (state->type == PIPE_SHADER_IR_TGSI)
2726 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2727 else
2728 nir = state->ir.nir;
2729
2730 return crocus_create_uncompiled_shader(ctx, nir, &state->stream_output);
2731 }
2732
2733 static void *
2734 crocus_create_vs_state(struct pipe_context *ctx,
2735 const struct pipe_shader_state *state)
2736 {
2737 struct crocus_context *ice = (void *) ctx;
2738 struct crocus_screen *screen = (void *) ctx->screen;
2739 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2740
2741 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2742 /* User clip planes or gen5 sprite coord enable */
2743 if (ish->nir->info.clip_distance_array_size == 0 ||
2744 screen->devinfo.ver <= 5)
2745 ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);
2746
2747 if (screen->devinfo.verx10 < 75)
2748 ish->nos |= (1ull << CROCUS_NOS_VERTEX_ELEMENTS);
2749
2750 if (screen->precompile) {
2751 struct elk_vs_prog_key key = { KEY_INIT() };
2752
2753 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2754 crocus_compile_vs(ice, ish, &key);
2755 }
2756
2757 return ish;
2758 }
2759
2760 static void *
2761 crocus_create_tcs_state(struct pipe_context *ctx,
2762 const struct pipe_shader_state *state)
2763 {
2764 struct crocus_context *ice = (void *) ctx;
2765 struct crocus_screen *screen = (void *) ctx->screen;
2766 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2767 struct shader_info *info = &ish->nir->info;
2768
2769 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2770 if (screen->precompile) {
2771 struct elk_tcs_prog_key key = {
2772 KEY_INIT(),
2773 // XXX: make sure the linker fills this out from the TES...
2774 ._tes_primitive_mode =
2775 info->tess._primitive_mode ? info->tess._primitive_mode
2776 : TESS_PRIMITIVE_TRIANGLES,
2777 .outputs_written = info->outputs_written,
2778 .patch_outputs_written = info->patch_outputs_written,
2779 };
2780
2781 key.input_vertices = info->tess.tcs_vertices_out;
2782
2783 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2784 crocus_compile_tcs(ice, ish, &key);
2785 }
2786
2787 return ish;
2788 }
2789
2790 static void *
2791 crocus_create_tes_state(struct pipe_context *ctx,
2792 const struct pipe_shader_state *state)
2793 {
2794 struct crocus_context *ice = (void *) ctx;
2795 struct crocus_screen *screen = (void *) ctx->screen;
2796 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2797 struct shader_info *info = &ish->nir->info;
2798
2799 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2800 /* User clip planes */
2801 if (ish->nir->info.clip_distance_array_size == 0)
2802 ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);
2803
2804 if (screen->precompile) {
2805 struct elk_tes_prog_key key = {
2806 KEY_INIT(),
2807 // XXX: not ideal, need TCS output/TES input unification
2808 .inputs_read = info->inputs_read,
2809 .patch_inputs_read = info->patch_inputs_read,
2810 };
2811
2812 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2813 crocus_compile_tes(ice, ish, &key);
2814 }
2815
2816 return ish;
2817 }
2818
2819 static void *
2820 crocus_create_gs_state(struct pipe_context *ctx,
2821 const struct pipe_shader_state *state)
2822 {
2823 struct crocus_context *ice = (void *) ctx;
2824 struct crocus_screen *screen = (void *) ctx->screen;
2825 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2826
2827 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2828 /* User clip planes */
2829 if (ish->nir->info.clip_distance_array_size == 0)
2830 ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);
2831
2832 if (screen->precompile) {
2833 struct elk_gs_prog_key key = { KEY_INIT() };
2834
2835 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2836 crocus_compile_gs(ice, ish, &key);
2837 }
2838
2839 return ish;
2840 }
2841
2842 static void *
2843 crocus_create_fs_state(struct pipe_context *ctx,
2844 const struct pipe_shader_state *state)
2845 {
2846 struct crocus_context *ice = (void *) ctx;
2847 struct crocus_screen *screen = (void *) ctx->screen;
2848 struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
2849 struct shader_info *info = &ish->nir->info;
2850
2851 ish->nos |= (1ull << CROCUS_NOS_FRAMEBUFFER) |
2852 (1ull << CROCUS_NOS_DEPTH_STENCIL_ALPHA) |
2853 (1ull << CROCUS_NOS_RASTERIZER) |
2854 (1ull << CROCUS_NOS_TEXTURES) |
2855 (1ull << CROCUS_NOS_BLEND);
2856
2857 /* The program key needs the VUE map if there are > 16 inputs or gen4/5 */
2858 if (screen->devinfo.ver < 6 || util_bitcount64(ish->nir->info.inputs_read &
2859 ELK_FS_VARYING_INPUT_MASK) > 16) {
2860 ish->nos |= (1ull << CROCUS_NOS_LAST_VUE_MAP);
2861 }
2862
2863 if (screen->precompile) {
2864 const uint64_t color_outputs = info->outputs_written &
2865 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2866 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2867 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2868
2869 bool can_rearrange_varyings =
2870 screen->devinfo.ver > 6 && util_bitcount64(info->inputs_read & ELK_FS_VARYING_INPUT_MASK) <= 16;
2871
2872 const struct intel_device_info *devinfo = &screen->devinfo;
2873 struct elk_wm_prog_key key = {
2874 KEY_INIT(),
2875 .nr_color_regions = util_bitcount(color_outputs),
2876 .coherent_fb_fetch = false,
2877 .ignore_sample_mask_out = screen->devinfo.ver < 6 ? 1 : 0,
2878 .input_slots_valid =
2879 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2880 };
2881
2882 struct intel_vue_map vue_map;
2883 if (devinfo->ver < 6) {
2884 elk_compute_vue_map(devinfo, &vue_map,
2885 info->inputs_read | VARYING_BIT_POS,
2886 false, /* pos slots */ 1);
2887 }
2888 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2889 crocus_compile_fs(ice, ish, &key, &vue_map);
2890 }
2891
2892 return ish;
2893 }
2894
2895 static void *
2896 crocus_create_compute_state(struct pipe_context *ctx,
2897 const struct pipe_compute_state *state)
2898 {
2899 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2900
2901 struct crocus_context *ice = (void *) ctx;
2902 struct crocus_screen *screen = (void *) ctx->screen;
2903 struct crocus_uncompiled_shader *ish =
2904 crocus_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2905
2906 ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
2907 // XXX: disallow more than 64KB of shared variables
2908
2909 if (screen->precompile) {
2910 struct elk_cs_prog_key key = { KEY_INIT() };
2911
2912 if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2913 crocus_compile_cs(ice, ish, &key);
2914 }
2915
2916 return ish;
2917 }
2918
2919 /**
2920 * The pipe->delete_[stage]_state() driver hooks.
2921 *
2922 * Frees the crocus_uncompiled_shader.
2923 */
2924 static void
2925 crocus_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2926 {
2927 struct crocus_uncompiled_shader *ish = state;
2928 struct crocus_context *ice = (void *) ctx;
2929
2930 if (ice->shaders.uncompiled[stage] == ish) {
2931 ice->shaders.uncompiled[stage] = NULL;
2932 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2933 }
2934
2935 if (ish->const_data) {
2936 pipe_resource_reference(&ish->const_data, NULL);
2937 pipe_resource_reference(&ish->const_data_state.res, NULL);
2938 }
2939
2940 ralloc_free(ish->nir);
2941 free(ish);
2942 }
2943
2944 static void
2945 crocus_delete_vs_state(struct pipe_context *ctx, void *state)
2946 {
2947 crocus_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2948 }
2949
2950 static void
2951 crocus_delete_tcs_state(struct pipe_context *ctx, void *state)
2952 {
2953 crocus_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2954 }
2955
2956 static void
2957 crocus_delete_tes_state(struct pipe_context *ctx, void *state)
2958 {
2959 crocus_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2960 }
2961
2962 static void
2963 crocus_delete_gs_state(struct pipe_context *ctx, void *state)
2964 {
2965 crocus_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2966 }
2967
2968 static void
2969 crocus_delete_fs_state(struct pipe_context *ctx, void *state)
2970 {
2971 crocus_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2972 }
2973
2974 static void
2975 crocus_delete_cs_state(struct pipe_context *ctx, void *state)
2976 {
2977 crocus_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2978 }
2979
2980 /**
2981 * The pipe->bind_[stage]_state() driver hook.
2982 *
2983 * Binds an uncompiled shader as the current one for a particular stage.
2984 * Updates dirty tracking to account for the shader's NOS.
2985 */
2986 static void
2987 bind_shader_state(struct crocus_context *ice,
2988 struct crocus_uncompiled_shader *ish,
2989 gl_shader_stage stage)
2990 {
2991 uint64_t dirty_bit = CROCUS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2992 const uint64_t nos = ish ? ish->nos : 0;
2993
2994 const struct shader_info *old_info = crocus_get_shader_info(ice, stage);
2995 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2996
2997 if ((old_info ? BITSET_LAST_BIT(old_info->textures_used) : 0) !=
2998 (new_info ? BITSET_LAST_BIT(new_info->textures_used) : 0)) {
2999 ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
3000 }
3001
3002 ice->shaders.uncompiled[stage] = ish;
3003 ice->state.stage_dirty |= dirty_bit;
3004
3005 /* Record that CSOs need to mark CROCUS_DIRTY_UNCOMPILED_XS when they change
3006 * (or that they no longer need to do so).
3007 */
3008 for (int i = 0; i < CROCUS_NOS_COUNT; i++) {
3009 if (nos & (1 << i))
3010 ice->state.stage_dirty_for_nos[i] |= dirty_bit;
3011 else
3012 ice->state.stage_dirty_for_nos[i] &= ~dirty_bit;
3013 }
3014 }
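
/* For reference, a sketch of the consumer side of stage_dirty_for_nos[]
 * (the real consumer lives in the state module, so this is a hedged
 * illustration rather than a quote): when a CSO such as the rasterizer
 * changes, the state code is expected to do
 *
 *    ice->state.stage_dirty |=
 *       ice->state.stage_dirty_for_nos[CROCUS_NOS_RASTERIZER];
 *
 * so only stages whose currently-bound shaders declared that NOS
 * dependency get their keys re-evaluated on the next draw.
 */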
3015
3016 static void
3017 crocus_bind_vs_state(struct pipe_context *ctx, void *state)
3018 {
3019 struct crocus_context *ice = (struct crocus_context *)ctx;
3020 struct crocus_uncompiled_shader *new_ish = state;
3021 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
3022 const struct intel_device_info *devinfo = &screen->devinfo;
3023
3024 if (new_ish &&
3025 ice->state.window_space_position !=
3026 new_ish->nir->info.vs.window_space_position) {
3027 ice->state.window_space_position =
3028 new_ish->nir->info.vs.window_space_position;
3029
3030 ice->state.dirty |= CROCUS_DIRTY_CLIP |
3031 CROCUS_DIRTY_RASTER |
3032 CROCUS_DIRTY_CC_VIEWPORT;
3033 }
3034
3035 if (devinfo->ver == 6) {
3036 ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
3037 }
3038
3039 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
3040 }
3041
3042 static void
3043 crocus_bind_tcs_state(struct pipe_context *ctx, void *state)
3044 {
3045 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
3046 }
3047
3048 static void
3049 crocus_bind_tes_state(struct pipe_context *ctx, void *state)
3050 {
3051 struct crocus_context *ice = (struct crocus_context *)ctx;
3052
3053 /* Enabling/disabling optional stages requires a URB reconfiguration. */
3054 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
3055 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
3056
3057 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
3058 }
3059
3060 static void
3061 crocus_bind_gs_state(struct pipe_context *ctx, void *state)
3062 {
3063 struct crocus_context *ice = (struct crocus_context *)ctx;
3064
3065 /* Enabling/disabling optional stages requires a URB reconfiguration. */
3066 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
3067 ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
3068
3069 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
3070 }
3071
3072 static void
3073 crocus_bind_fs_state(struct pipe_context *ctx, void *state)
3074 {
3075 struct crocus_context *ice = (struct crocus_context *) ctx;
3076 struct crocus_screen *screen = (struct crocus_screen *) ctx->screen;
3077 const struct intel_device_info *devinfo = &screen->devinfo;
3078 struct crocus_uncompiled_shader *old_ish =
3079 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
3080 struct crocus_uncompiled_shader *new_ish = state;
3081
3082 const unsigned color_bits =
3083 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
3084 BITFIELD64_RANGE(FRAG_RESULT_DATA0, ELK_MAX_DRAW_BUFFERS);
3085
3086 /* Fragment shader outputs influence HasWriteableRT */
3087 if (!old_ish || !new_ish ||
3088 (old_ish->nir->info.outputs_written & color_bits) !=
3089 (new_ish->nir->info.outputs_written & color_bits)) {
3090 if (devinfo->ver == 8)
3091 ice->state.dirty |= CROCUS_DIRTY_GEN8_PS_BLEND;
3092 else
3093 ice->state.dirty |= CROCUS_DIRTY_WM;
3094 }
3095
3096 if (devinfo->ver == 8)
3097 ice->state.dirty |= CROCUS_DIRTY_GEN8_PMA_FIX;
3098 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
3099 }
3100
3101 static void
3102 crocus_bind_cs_state(struct pipe_context *ctx, void *state)
3103 {
3104 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
3105 }
3106
3107 void
3108 crocus_init_program_functions(struct pipe_context *ctx)
3109 {
3110 ctx->create_vs_state = crocus_create_vs_state;
3111 ctx->create_tcs_state = crocus_create_tcs_state;
3112 ctx->create_tes_state = crocus_create_tes_state;
3113 ctx->create_gs_state = crocus_create_gs_state;
3114 ctx->create_fs_state = crocus_create_fs_state;
3115 ctx->create_compute_state = crocus_create_compute_state;
3116
3117 ctx->delete_vs_state = crocus_delete_vs_state;
3118 ctx->delete_tcs_state = crocus_delete_tcs_state;
3119 ctx->delete_tes_state = crocus_delete_tes_state;
3120 ctx->delete_gs_state = crocus_delete_gs_state;
3121 ctx->delete_fs_state = crocus_delete_fs_state;
3122 ctx->delete_compute_state = crocus_delete_cs_state;
3123
3124 ctx->bind_vs_state = crocus_bind_vs_state;
3125 ctx->bind_tcs_state = crocus_bind_tcs_state;
3126 ctx->bind_tes_state = crocus_bind_tes_state;
3127 ctx->bind_gs_state = crocus_bind_gs_state;
3128 ctx->bind_fs_state = crocus_bind_fs_state;
3129 ctx->bind_compute_state = crocus_bind_cs_state;
3130 }
3131