Lines Matching full:nir in Mesa's brw_nir.c (Intel compiler back-end)
29 #include "compiler/nir/nir_builder.h"
163 brw_nir_lower_vs_inputs(nir_shader *nir, in brw_nir_lower_vs_inputs() argument
168 nir_foreach_shader_in_variable(var, nir) in brw_nir_lower_vs_inputs()
175 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, in brw_nir_lower_vs_inputs()
179 nir_opt_constant_folding(nir); in brw_nir_lower_vs_inputs()
181 nir_io_add_const_offset_to_base(nir, nir_var_shader_in); in brw_nir_lower_vs_inputs()
183 brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags); in brw_nir_lower_vs_inputs()
191 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) || in brw_nir_lower_vs_inputs()
192 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) || in brw_nir_lower_vs_inputs()
193 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) || in brw_nir_lower_vs_inputs()
194 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID); in brw_nir_lower_vs_inputs()
196 const unsigned num_inputs = util_bitcount64(nir->info.inputs_read); in brw_nir_lower_vs_inputs()
198 nir_foreach_function(function, nir) { in brw_nir_lower_vs_inputs()
226 nir_intrinsic_instr_create(nir, nir_intrinsic_load_input); in brw_nir_lower_vs_inputs()
275 uint64_t inputs_read = nir->info.inputs_read; in brw_nir_lower_vs_inputs()
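The hits above for brw_nir_lower_vs_inputs already show the pattern that the input-lowering helpers in this file repeat: give each input variable a driver_location, run nir_lower_io with a vec4 slot-counting callback, constant-fold, and let nir_io_add_const_offset_to_base fold the now-constant offsets into each intrinsic's base. A minimal sketch of that shared sequence; the helper name, the include, the nir_lower_io_lower_64bit_to_32 flag, and the body of type_size_vec4 are illustrative assumptions (only the callback's name comes from the hits).

    #include "compiler/nir/nir.h"

    /* Count one slot per vec4, the unit nir_lower_io is asked to use here. */
    static int
    type_size_vec4(const struct glsl_type *type, bool bindless)
    {
       return glsl_count_attribute_slots(type, false);
    }

    static void
    lower_vec4_inputs_sketch(nir_shader *nir)
    {
       /* Reuse the varying slot as the driver location for each input. */
       nir_foreach_shader_in_variable(var, nir)
          var->data.driver_location = var->data.location;

       /* Rewrite input dereferences into load_input intrinsics. */
       nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                    nir_lower_io_lower_64bit_to_32);

       /* Fold constants so offsets that just became constant can be
        * absorbed into each intrinsic's base index. */
       nir_opt_constant_folding(nir);
       nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
    }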
298 brw_nir_lower_vue_inputs(nir_shader *nir, in brw_nir_lower_vue_inputs() argument
301 nir_foreach_shader_in_variable(var, nir) in brw_nir_lower_vue_inputs()
305 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, in brw_nir_lower_vue_inputs()
309 nir_opt_constant_folding(nir); in brw_nir_lower_vue_inputs()
311 nir_io_add_const_offset_to_base(nir, nir_var_shader_in); in brw_nir_lower_vue_inputs()
313 nir_foreach_function(function, nir) { in brw_nir_lower_vue_inputs()
351 brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map) in brw_nir_lower_tes_inputs() argument
353 nir_foreach_shader_in_variable(var, nir) in brw_nir_lower_tes_inputs()
356 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, in brw_nir_lower_tes_inputs()
360 nir_opt_constant_folding(nir); in brw_nir_lower_tes_inputs()
362 nir_io_add_const_offset_to_base(nir, nir_var_shader_in); in brw_nir_lower_tes_inputs()
364 nir_foreach_function(function, nir) { in brw_nir_lower_tes_inputs()
370 nir->info.tess._primitive_mode); in brw_nir_lower_tes_inputs()
416 brw_nir_lower_fs_inputs(nir_shader *nir, in brw_nir_lower_fs_inputs() argument
420 nir_foreach_shader_in_variable(var, nir) { in brw_nir_lower_fs_inputs()
451 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options); in brw_nir_lower_fs_inputs()
453 nir_lower_interpolation(nir, ~0); in brw_nir_lower_fs_inputs()
456 nir_lower_single_sampled(nir); in brw_nir_lower_fs_inputs()
458 nir_shader_instructions_pass(nir, lower_barycentric_at_offset, in brw_nir_lower_fs_inputs()
464 nir_opt_constant_folding(nir); in brw_nir_lower_fs_inputs()
466 nir_io_add_const_offset_to_base(nir, nir_var_shader_in); in brw_nir_lower_fs_inputs()
470 brw_nir_lower_vue_outputs(nir_shader *nir) in brw_nir_lower_vue_outputs() argument
472 nir_foreach_shader_out_variable(var, nir) { in brw_nir_lower_vue_outputs()
476 nir_lower_io(nir, nir_var_shader_out, type_size_vec4, in brw_nir_lower_vue_outputs()
481 brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map, in brw_nir_lower_tcs_outputs() argument
484 nir_foreach_shader_out_variable(var, nir) { in brw_nir_lower_tcs_outputs()
488 nir_lower_io(nir, nir_var_shader_out, type_size_vec4, in brw_nir_lower_tcs_outputs()
492 nir_opt_constant_folding(nir); in brw_nir_lower_tcs_outputs()
494 nir_io_add_const_offset_to_base(nir, nir_var_shader_out); in brw_nir_lower_tcs_outputs()
496 nir_foreach_function(function, nir) { in brw_nir_lower_tcs_outputs()
508 brw_nir_lower_fs_outputs(nir_shader *nir) in brw_nir_lower_fs_outputs() argument
510 nir_foreach_shader_out_variable(var, nir) { in brw_nir_lower_fs_outputs()
516 nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0); in brw_nir_lower_fs_outputs()
521 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
528 brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler, in brw_nir_optimize() argument
533 (nir->options->lower_flrp16 ? 16 : 0) | in brw_nir_optimize()
534 (nir->options->lower_flrp32 ? 32 : 0) | in brw_nir_optimize()
535 (nir->options->lower_flrp64 ? 64 : 0); in brw_nir_optimize()
594 (nir->info.stage == MESA_SHADER_TESS_CTRL || in brw_nir_optimize()
595 nir->info.stage == MESA_SHADER_TESS_EVAL); in brw_nir_optimize()
630 if (nir->options->max_unroll_iterations != 0) { in brw_nir_optimize()
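The NIR_PASS hit at file line 521 is the body of the OPT() convenience macro that brw_nir_optimize (file line 528) drives, and the optimizer keeps cycling its pass list until a full round reports no progress. A reconstruction of that shape: the macro is rebuilt around the single line shown above, and the pass list inside the loop is illustrative rather than the file's actual list.

    /* Run one pass, record whether it changed anything, and accumulate
     * that into the loop-level `progress` flag. */
    #define OPT(pass, ...) ({                               \
       bool this_progress = false;                          \
       NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);   \
       if (this_progress)                                   \
          progress = true;                                  \
       this_progress;                                       \
    })

    static void
    optimize_loop_sketch(nir_shader *nir)
    {
       bool progress;
       do {
          progress = false;
          OPT(nir_copy_prop);
          OPT(nir_opt_cse);
          OPT(nir_opt_algebraic);
          OPT(nir_opt_constant_folding);
          OPT(nir_opt_dce);
       } while (progress);   /* stop once a whole round makes no change */
    }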
812 brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir, in brw_preprocess_nir() argument
818 const bool is_scalar = compiler->scalar_stage[nir->info.stage]; in brw_preprocess_nir()
820 nir_validate_ssa_dominance(nir, "before brw_preprocess_nir"); in brw_preprocess_nir()
826 if (nir->info.stage == MESA_SHADER_GEOMETRY) in brw_preprocess_nir()
862 brw_nir_optimize(nir, compiler, is_scalar, true); in brw_preprocess_nir()
864 OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options); in brw_preprocess_nir()
898 brw_nir_no_indirect_mask(compiler, nir->info.stage); in brw_preprocess_nir()
929 brw_nir_optimize(nir, compiler, is_scalar, false); in brw_preprocess_nir()
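Taken together, the brw_preprocess_nir hits outline its flow: validate SSA dominance, handle stage-specific lowering (geometry shaders at file line 826), run the optimizer, lower doubles through the optional softfp64 library, compute the stage's no-indirect mask, and optimize again. A condensed, hedged sketch of that sequence; the nir_lower_indirect_derefs call is an assumption about how the mask from brw_nir_no_indirect_mask is consumed.

    static void
    preprocess_flow_sketch(const struct brw_compiler *compiler,
                           nir_shader *nir, const nir_shader *softfp64)
    {
       const bool is_scalar = compiler->scalar_stage[nir->info.stage];
       bool progress = false;

       nir_validate_ssa_dominance(nir, "before brw_preprocess_nir");

       /* First optimization round; the boolean arguments mirror the calls
        * visible in the hits (file lines 862 and 929). */
       brw_nir_optimize(nir, compiler, is_scalar, true);

       /* 64-bit float ops are lowered, optionally via the softfp64 library,
        * according to the backend's lower_doubles_options. */
       NIR_PASS(progress, nir, nir_lower_doubles, softfp64,
                nir->options->lower_doubles_options);

       /* Assumption: the no-indirect mask feeds nir_lower_indirect_derefs,
        * turning unsupported indirect addressing into if-ladders. */
       nir_variable_mode indirect_mask =
          brw_nir_no_indirect_mask(compiler, nir->info.stage);
       NIR_PASS(progress, nir, nir_lower_indirect_derefs, indirect_mask,
                UINT32_MAX);

       /* Second optimization round after the lowering above. */
       brw_nir_optimize(nir, compiler, is_scalar, false);
       (void)progress;   /* progress tracking elided in this sketch */
    }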
1037 * those back into 32-bit ones anyway and UBO loads aren't split in NIR so in brw_nir_should_vectorize_mem()
1083 brw_vectorize_lower_mem_access(nir_shader *nir, in brw_vectorize_lower_mem_access() argument
1123 nir_shader_has_local_variables(const nir_shader *nir) in nir_shader_has_local_variables() argument
1125 nir_foreach_function(func, nir) { in nir_shader_has_local_variables()
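The two hits for nir_shader_has_local_variables show its signature and its function loop; the rest is a short reconstruction (the exec_list_is_empty() check on impl->locals is an assumption, but it is the natural way to ask whether any function declares function_temp storage):

    static bool
    nir_shader_has_local_variables(const nir_shader *nir)
    {
       nir_foreach_function(func, nir) {
          /* Declarations without a body have nothing to scratch-allocate. */
          if (func->impl == NULL)
             continue;
          if (!exec_list_is_empty(&func->impl->locals))
             return true;
       }
       return false;
    }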
1141 brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler, in brw_postprocess_nir() argument
1167 if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage)) in brw_postprocess_nir()
1168 brw_nir_lower_shading_rate_output(nir); in brw_postprocess_nir()
1170 brw_nir_optimize(nir, compiler, is_scalar, false); in brw_postprocess_nir()
1172 if (is_scalar && nir_shader_has_local_variables(nir)) { in brw_postprocess_nir()
1177 brw_nir_optimize(nir, compiler, is_scalar, false); in brw_postprocess_nir()
1180 brw_vectorize_lower_mem_access(nir, compiler, is_scalar, in brw_postprocess_nir()
1184 brw_nir_optimize(nir, compiler, is_scalar, false); in brw_postprocess_nir()
1204 (nir->info.stage == MESA_SHADER_TESS_CTRL || in brw_postprocess_nir()
1205 nir->info.stage == MESA_SHADER_TESS_EVAL); in brw_postprocess_nir()
1244 NIR_PASS_V(nir, nir_convert_to_lcssa, true, true); in brw_postprocess_nir()
1245 NIR_PASS_V(nir, nir_divergence_analysis); in brw_postprocess_nir()
1255 nir->info.stage != MESA_SHADER_KERNEL && in brw_postprocess_nir()
1256 nir->info.stage != MESA_SHADER_RAYGEN && in brw_postprocess_nir()
1257 !gl_shader_stage_is_callable(nir->info.stage); in brw_postprocess_nir()
1268 brw_nir_optimize(nir, compiler, is_scalar, false); in brw_postprocess_nir()
1282 nir_foreach_function(function, nir) { in brw_postprocess_nir()
1287 fprintf(stderr, "NIR (SSA form) for %s shader:\n", in brw_postprocess_nir()
1288 _mesa_shader_stage_to_string(nir->info.stage)); in brw_postprocess_nir()
1289 nir_print_shader(nir, stderr); in brw_postprocess_nir()
1292 nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa"); in brw_postprocess_nir()
1309 * want that to be squashed by other NIR passes. in brw_postprocess_nir()
1312 brw_nir_analyze_boolean_resolves(nir); in brw_postprocess_nir()
1314 nir_sweep(nir); in brw_postprocess_nir()
1317 fprintf(stderr, "NIR (final form) for %s shader:\n", in brw_postprocess_nir()
1318 _mesa_shader_stage_to_string(nir->info.stage)); in brw_postprocess_nir()
1319 nir_print_shader(nir, stderr); in brw_postprocess_nir()
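Read in order, the brw_postprocess_nir hits around file lines 1287-1319 describe its tail: when shader dumping is enabled, print the NIR while still in SSA form, validate dominance before going out of SSA, run the boolean-resolve analysis late so later passes cannot squash its results, sweep dead memory, and print the final form. A condensed sketch, with debug_enabled standing in for however the real code gates its output:

    if (debug_enabled) {
       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
               _mesa_shader_stage_to_string(nir->info.stage));
       nir_print_shader(nir, stderr);
    }

    nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");
    /* ... out-of-SSA conversion and related lowering elided ... */

    brw_nir_analyze_boolean_resolves(nir);
    nir_sweep(nir);

    if (debug_enabled) {
       fprintf(stderr, "NIR (final form) for %s shader:\n",
               _mesa_shader_stage_to_string(nir->info.stage));
       nir_print_shader(nir, stderr);
    }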
1324 brw_nir_apply_sampler_key(nir_shader *nir, in brw_nir_apply_sampler_key() argument
1362 return nir_lower_tex(nir, &tex_options); in brw_nir_apply_sampler_key()
1416 brw_nir_apply_key(nir_shader *nir, in brw_nir_apply_key() argument
1427 .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size), in brw_nir_apply_key()
1438 brw_nir_optimize(nir, compiler, is_scalar, false); in brw_nir_apply_key()
1486 unreachable("Unsupported NIR comparison op"); in brw_cmod_for_nir_comparison()
1553 unreachable("Unsupported NIR atomic intrinsic"); in brw_aop_for_nir_intrinsic()
1604 nir_shader *nir = b.shader; in brw_nir_create_passthrough_tcs() local
1610 nir->info.inputs_read = key->outputs_written & in brw_nir_create_passthrough_tcs()
1612 nir->info.outputs_written = key->outputs_written; in brw_nir_create_passthrough_tcs()
1613 nir->info.tess.tcs_vertices_out = key->input_vertices; in brw_nir_create_passthrough_tcs()
1614 nir->num_uniforms = 8 * sizeof(uint32_t); in brw_nir_create_passthrough_tcs()
1616 var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0"); in brw_nir_create_passthrough_tcs()
1618 var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1"); in brw_nir_create_passthrough_tcs()
1631 uint64_t varyings = nir->info.inputs_read; in brw_nir_create_passthrough_tcs()
1645 nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs"); in brw_nir_create_passthrough_tcs()
1647 brw_preprocess_nir(compiler, nir, NULL); in brw_nir_create_passthrough_tcs()
1649 return nir; in brw_nir_create_passthrough_tcs()
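brw_nir_create_passthrough_tcs builds a small TCS from scratch with nir_builder rather than lowering an existing shader: it mirrors the key's written-outputs mask into inputs_read, records the patch vertex count, reserves eight 32-bit uniforms, declares two vec4 uniform header variables, validates, and runs the result through brw_preprocess_nir before returning it. A sketch of that skeleton; `options`, the tess-level masking on the truncated inputs_read line, and the omitted per-varying copy loop are assumptions.

    nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_TESS_CTRL,
                                                   options, "passthrough TCS");
    nir_shader *nir = b.shader;

    /* Inputs mirror what the key says the previous stage writes; the tess
     * levels are assumed to be masked out since the TCS emits them itself. */
    nir->info.inputs_read = key->outputs_written &
       ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
    nir->info.outputs_written = key->outputs_written;
    nir->info.tess.tcs_vertices_out = key->input_vertices;

    /* Room for eight 32-bit uniform values, matching the two vec4 headers. */
    nir->num_uniforms = 8 * sizeof(uint32_t);

    nir_variable *var;
    var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
    var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
    /* (uniform location setup and the copy loop over
     *  nir->info.inputs_read are omitted in this sketch) */

    nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
    brw_preprocess_nir(compiler, nir, NULL);
    return nir;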