1 /*
2 * Copyright © 2011 Intel Corporation
3 * SPDX-License-Identifier: MIT
4 */
5
6 #include "brw_fs.h"
7 #include "brw_generator.h"
8 #include "brw_eu.h"
9 #include "brw_nir.h"
10 #include "brw_private.h"
11 #include "dev/intel_debug.h"
12
13 using namespace brw;
14
15 static void
brw_assign_vs_urb_setup(fs_visitor & s)16 brw_assign_vs_urb_setup(fs_visitor &s)
17 {
18 struct brw_vs_prog_data *vs_prog_data = brw_vs_prog_data(s.prog_data);
19
20 assert(s.stage == MESA_SHADER_VERTEX);
21
22 /* Each attribute is 4 regs. */
23 s.first_non_payload_grf += 4 * vs_prog_data->nr_attribute_slots;
24
25 assert(vs_prog_data->base.urb_read_length <= 15);
26
27 /* Rewrite all ATTR file references to the hw grf that they land in. */
28 foreach_block_and_inst(block, fs_inst, inst, s.cfg) {
29 s.convert_attr_sources_to_hw_regs(inst);
30 }
31 }
32
/* Run the full FS-backend compilation pipeline for a vertex shader.
 *
 * Translates the visitor's NIR into backend IR, emits the URB writes
 * that deliver the VUE outputs, optimizes, assigns payload/URB
 * registers, applies hardware workarounds, and register-allocates.
 * The steps are order-dependent: URB writes must exist before CFG
 * construction, and workarounds must run around register allocation
 * as placed below.
 *
 * Returns true on success; false if any stage flagged failure
 * (s.fail_msg then holds the reason).
 */
static bool
run_vs(fs_visitor &s)
{
   assert(s.stage == MESA_SHADER_VERTEX);

   /* Describe the fixed VS thread payload layout before translation. */
   s.payload_ = new vs_thread_payload(s);

   nir_to_brw(&s);

   if (s.failed)
      return false;

   /* Emit the URB writes that store the shader's VUE outputs. */
   s.emit_urb_writes();

   brw_calculate_cfg(s);

   brw_optimize(s);

   /* Map push constants and then ATTR references onto hardware GRFs. */
   s.assign_curb_setup();
   brw_assign_vs_urb_setup(s);

   /* Hardware workarounds that must run before register allocation. */
   brw_lower_3src_null_dest(s);
   brw_workaround_memory_fence_before_eot(s);
   brw_workaround_emit_dummy_mov_instruction(s);

   brw_allocate_registers(s, true /* allow_spilling */);

   brw_workaround_source_arf_before_eot(s);

   return !s.failed;
}
64
65 extern "C" const unsigned *
brw_compile_vs(const struct brw_compiler * compiler,struct brw_compile_vs_params * params)66 brw_compile_vs(const struct brw_compiler *compiler,
67 struct brw_compile_vs_params *params)
68 {
69 struct nir_shader *nir = params->base.nir;
70 const struct brw_vs_prog_key *key = params->key;
71 struct brw_vs_prog_data *prog_data = params->prog_data;
72 const bool debug_enabled =
73 brw_should_print_shader(nir, params->base.debug_flag ?
74 params->base.debug_flag : DEBUG_VS);
75
76 prog_data->base.base.stage = MESA_SHADER_VERTEX;
77 prog_data->base.base.ray_queries = nir->info.ray_queries;
78 prog_data->base.base.total_scratch = 0;
79
80 brw_nir_apply_key(nir, compiler, &key->base,
81 brw_geometry_stage_dispatch_width(compiler->devinfo));
82
83 prog_data->inputs_read = nir->info.inputs_read;
84 prog_data->double_inputs_read = nir->info.vs.double_inputs;
85
86 brw_nir_lower_vs_inputs(nir);
87 brw_nir_lower_vue_outputs(nir);
88 brw_postprocess_nir(nir, compiler, debug_enabled,
89 key->base.robust_flags);
90
91 prog_data->base.clip_distance_mask =
92 ((1 << nir->info.clip_distance_array_size) - 1);
93 prog_data->base.cull_distance_mask =
94 ((1 << nir->info.cull_distance_array_size) - 1) <<
95 nir->info.clip_distance_array_size;
96
97 unsigned nr_attribute_slots = util_bitcount64(prog_data->inputs_read);
98
99 /* gl_VertexID and gl_InstanceID are system values, but arrive via an
100 * incoming vertex attribute. So, add an extra slot.
101 */
102 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
103 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
104 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
105 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID)) {
106 nr_attribute_slots++;
107 }
108
109 /* gl_DrawID and IsIndexedDraw share its very own vec4 */
110 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID) ||
111 BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW)) {
112 nr_attribute_slots++;
113 }
114
115 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_IS_INDEXED_DRAW))
116 prog_data->uses_is_indexed_draw = true;
117
118 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX))
119 prog_data->uses_firstvertex = true;
120
121 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE))
122 prog_data->uses_baseinstance = true;
123
124 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE))
125 prog_data->uses_vertexid = true;
126
127 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID))
128 prog_data->uses_instanceid = true;
129
130 if (BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID))
131 prog_data->uses_drawid = true;
132
133 prog_data->base.urb_read_length = DIV_ROUND_UP(nr_attribute_slots, 2);
134 prog_data->nr_attribute_slots = nr_attribute_slots;
135
136 /* Since vertex shaders reuse the same VUE entry for inputs and outputs
137 * (overwriting the original contents), we need to make sure the size is
138 * the larger of the two.
139 */
140 const unsigned vue_entries =
141 MAX2(nr_attribute_slots, (unsigned)prog_data->base.vue_map.num_slots);
142
143 prog_data->base.urb_entry_size = DIV_ROUND_UP(vue_entries, 4);
144
145 if (unlikely(debug_enabled)) {
146 fprintf(stderr, "VS Output ");
147 brw_print_vue_map(stderr, &prog_data->base.vue_map, MESA_SHADER_VERTEX);
148 }
149
150 const unsigned dispatch_width = compiler->devinfo->ver >= 20 ? 16 : 8;
151 prog_data->base.dispatch_mode = INTEL_DISPATCH_MODE_SIMD8;
152
153 fs_visitor v(compiler, ¶ms->base, &key->base,
154 &prog_data->base.base, nir, dispatch_width,
155 params->base.stats != NULL, debug_enabled);
156 if (!run_vs(v)) {
157 params->base.error_str =
158 ralloc_strdup(params->base.mem_ctx, v.fail_msg);
159 return NULL;
160 }
161
162 assert(v.payload().num_regs % reg_unit(compiler->devinfo) == 0);
163 prog_data->base.base.dispatch_grf_start_reg =
164 v.payload().num_regs / reg_unit(compiler->devinfo);
165
166 brw_generator g(compiler, ¶ms->base,
167 &prog_data->base.base,
168 MESA_SHADER_VERTEX);
169 if (unlikely(debug_enabled)) {
170 const char *debug_name =
171 ralloc_asprintf(params->base.mem_ctx, "%s vertex shader %s",
172 nir->info.label ? nir->info.label :
173 "unnamed",
174 nir->info.name);
175
176 g.enable_debug(debug_name);
177 }
178 g.generate_code(v.cfg, dispatch_width, v.shader_stats,
179 v.performance_analysis.require(), params->base.stats);
180 g.add_const_data(nir->constant_data, nir->constant_data_size);
181
182 return g.get_assembly();
183 }
184