/* -*- mesa-c++ -*-
 *
 * Copyright (c) 2018 Collabora LTD
 *
 * Author: Gert Wollny <gert.wollny@collabora.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */


#include "pipe/p_defines.h"
#include "tgsi/tgsi_from_mesa.h"
#include "sfn_shader_vertex.h"
#include "sfn_instruction_lds.h"

#include <queue>


namespace r600 {

using std::priority_queue;

VertexShaderFromNir::VertexShaderFromNir(r600_pipe_shader *sh,
                                         r600_pipe_shader_selector& sel,
                                         const r600_shader_key& key,
                                         struct r600_shader* gs_shader,
                                         enum chip_class chip_class):
   VertexStage(PIPE_SHADER_VERTEX, sel, sh->shader,
               sh->scratch_space_needed, chip_class, key.vs.first_atomic_counter),
   m_num_clip_dist(0),
   m_last_param_export(nullptr),
   m_last_pos_export(nullptr),
   m_pipe_shader(sh),
   m_enabled_stream_buffers_mask(0),
   m_so_info(&sel.so),
   m_vertex_id(),
   m_key(key),
   m_max_attrib(0)
{
   // reg 0 is used in the fetch shader
   increment_reserved_registers();

   sh_info().atomic_base = key.vs.first_atomic_counter;
   sh_info().vs_as_gs_a = m_key.vs.as_gs_a;
   /* Select the export handler according to the stage that consumes the
    * vertex shader output. */
   if (key.vs.as_es) {
      sh->shader.vs_as_es = true;
      m_export_processor.reset(new VertexStageExportForGS(*this, gs_shader));
   } else if (key.vs.as_ls) {
      sh->shader.vs_as_ls = true;
      sfn_log << SfnLog::trans << "Start VS for LS\n";
      m_export_processor.reset(new VertexStageExportForES(*this));
   } else {
      m_export_processor.reset(new VertexStageExportForFS(*this, &sel.so, sh, key));
   }
}

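/* Account for a vertex attribute input and reserve a register for it; input
 * locations outside the vertex attribute range are not handled yet. */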
bool VertexShaderFromNir::do_process_inputs(nir_variable *input)
{
   ++sh_info().ninput;

   if (input->data.location < VERT_ATTRIB_MAX) {
      increment_reserved_registers();
      if (m_max_attrib < input->data.driver_location)
         m_max_attrib = input->data.driver_location;

      return true;
   }
   fprintf(stderr, "r600-NIR-VS: Unimplemented process_inputs for %d\n", input->data.location);
   return false;
}

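/* Reserve the registers that hold the pre-loaded values: the vertex ID in
 * R0.x, the relative patch ID in R0.y, the primitive ID in R0.z, the instance
 * ID in R0.w, and one register per vertex attribute starting at R1. Marking
 * them as inputs keeps the register merge pass from reassigning them. */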
bool VertexShaderFromNir::do_allocate_reserved_registers()
{
   /* Since the vertex ID is nearly always used, we add it here as an input so
    * that the registers used for vertex attributes don't get clobbered by the
    * register merge step */
   auto R0x = new GPRValue(0,0);
   R0x->set_as_input();
   m_vertex_id.reset(R0x);
   inject_register(0, 0, m_vertex_id, false);

   if (m_key.vs.as_gs_a || m_sv_values.test(es_primitive_id)) {
      auto R0z = new GPRValue(0,2);
      R0z->set_as_input();
      m_primitive_id.reset(R0z);
      inject_register(0, 2, m_primitive_id, false);
   }

   if (m_sv_values.test(es_instanceid)) {
      auto R0w = new GPRValue(0,3);
      R0w->set_as_input();
      m_instance_id.reset(R0w);
      inject_register(0, 3, m_instance_id, false);
   }

   if (m_sv_values.test(es_rel_patch_id)) {
      auto R0y = new GPRValue(0,1);
      R0y->set_as_input();
      m_rel_vertex_id.reset(R0y);
      inject_register(0, 1, m_rel_vertex_id, false);
   }

   m_attribs.resize(4 * m_max_attrib + 4);
   for (unsigned i = 0; i < m_max_attrib + 1; ++i) {
      for (unsigned k = 0; k < 4; ++k) {
         auto gpr = std::make_shared<GPRValue>(i + 1, k);
         gpr->set_as_input();
         m_attribs[4 * i + k] = gpr;
         inject_register(i + 1, k, gpr, false);
      }
   }

   return true;
}

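/* The vertex stage needs no extra setup instructions before the translated
 * shader code, hence the empty implementation. */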
void VertexShaderFromNir::emit_shader_start()
{
}

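/* Pre-scan pass: record which system values (vertex ID, instance ID, relative
 * patch ID) the shader reads, so that do_allocate_reserved_registers can
 * reserve the corresponding R0 components. */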
bool VertexShaderFromNir::scan_sysvalue_access(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *ii = nir_instr_as_intrinsic(instr);
      switch (ii->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         m_sv_values.set(es_vertexid);
         break;
      case nir_intrinsic_load_instance_id:
         m_sv_values.set(es_instanceid);
         break;
      case nir_intrinsic_load_tcs_rel_patch_id_r600:
         m_sv_values.set(es_rel_patch_id);
         break;
      default:
         ;
      }
      break;
   }
   default:
      ;
   }
   return true;
}

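/* Handle the intrinsics that the vertex stage resolves itself: loads of the
 * pre-loaded system value registers and LDS stores. Everything else returns
 * false so the common implementation takes over. */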
bool VertexShaderFromNir::emit_intrinsic_instruction_override(nir_intrinsic_instr* instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      return load_preloaded_value(instr->dest, 0, m_vertex_id);
   case nir_intrinsic_load_tcs_rel_patch_id_r600:
      return load_preloaded_value(instr->dest, 0, m_rel_vertex_id);
   case nir_intrinsic_load_instance_id:
      return load_preloaded_value(instr->dest, 0, m_instance_id);
   case nir_intrinsic_store_local_shared_r600:
      return emit_store_local_shared(instr);
   default:
      return false;
   }
}

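/* Emit an LDS write for a store_local_shared_r600 intrinsic: src[1] holds the
 * address, src[0] the value. The write mask selects either the first or the
 * second pair of components, and the folded mask decides whether one or two
 * components are written, e.g. write_mask = 0xc gives swizzle_base = 2 and a
 * two-component write of components z and w. */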
bool VertexShaderFromNir::emit_store_local_shared(nir_intrinsic_instr* instr)
{
   unsigned write_mask = nir_intrinsic_write_mask(instr);

   auto address = from_nir(instr->src[1], 0);
   int swizzle_base = (write_mask & 0x3) ? 0 : 2;
   write_mask |= write_mask >> 2;

   auto value = from_nir(instr->src[0], swizzle_base);
   if (!(write_mask & 2)) {
      emit_instruction(new LDSWriteInstruction(address, 1, value));
   } else {
      auto value1 = from_nir(instr->src[0], swizzle_base + 1);
      emit_instruction(new LDSWriteInstruction(address, 1, value, value1));
   }

   return true;
}

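/* Output handling depends on the downstream stage, so it is delegated to the
 * export processor selected in the constructor. */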
bool VertexShaderFromNir::do_process_outputs(nir_variable *output)
{
   return m_export_processor->do_process_outputs(output);
}

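/* Load a vertex attribute by forwarding the pre-loaded attribute register
 * components to the NIR destination. Only locations below VERT_ATTRIB_MAX are
 * supported. */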
bool VertexShaderFromNir::do_emit_load_deref(const nir_variable *in_var, nir_intrinsic_instr* instr)
{
   if (in_var->data.location < VERT_ATTRIB_MAX) {
      for (unsigned i = 0; i < nir_dest_num_components(instr->dest); ++i) {
         auto src = m_attribs[4 * in_var->data.driver_location + i];

         if (i == 0)
            set_input(in_var->data.driver_location, src);

         load_preloaded_value(instr->dest, i, src, i == (unsigned)(instr->num_components - 1));
      }
      return true;
   }
   fprintf(stderr, "r600-NIR: Unimplemented load_deref for %d\n", in_var->data.location);
   return false;
}

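/* Finalization only has to emit the collected exports. */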
void VertexShaderFromNir::do_finalize()
{
   m_export_processor->finalize_exports();
}

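/* Stores to output variables are likewise handled by the stage specific
 * export processor. */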
bool VertexShaderFromNir::do_emit_store_deref(const nir_variable *out_var, nir_intrinsic_instr* instr)
{
   return m_export_processor->store_deref(out_var, instr);
}

} // namespace r600