/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"

#include <array>
#include <map>

#include "util/memstream.h"

namespace aco {
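/* Formats a compiler diagnostic ("<prefix> In file <file>:<line>\n <message>"),
 * forwards it to the debug callback registered on the Program (if any) and
 * mirrors it to stderr. */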
static void aco_log(Program *program, enum radv_compiler_debug_level level,
                    const char *prefix, const char *file, unsigned line,
                    const char *fmt, va_list args)
{
   char *msg;

   msg = ralloc_strdup(NULL, prefix);

   ralloc_asprintf_append(&msg, " In file %s:%u\n", file, line);
   ralloc_asprintf_append(&msg, " ");
   ralloc_vasprintf_append(&msg, fmt, args);

   if (program->debug.func)
      program->debug.func(program->debug.private_data, level, msg);

   fprintf(stderr, "%s\n", msg);

   ralloc_free(msg);
}
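/* Variadic helpers behind the aco_perfwarn()/aco_err() reporting macros; they
 * differ only in the debug level and message prefix passed to aco_log(). */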
void _aco_perfwarn(Program *program, const char *file, unsigned line,
                   const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   aco_log(program, RADV_COMPILER_DEBUG_LEVEL_PERFWARN,
           "ACO PERFWARN:\n", file, line, fmt, args);
   va_end(args);
}

void _aco_err(Program *program, const char *file, unsigned line,
              const char *fmt, ...)
{
   va_list args;

   va_start(args, fmt);
   aco_log(program, RADV_COMPILER_DEBUG_LEVEL_ERROR,
           "ACO ERROR:\n", file, line, fmt, args);
   va_end(args);
}
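/* Validates the structural invariants of the IR: instruction encodings (base
 * format, VOP3/SDWA/opsel modifiers), operand and definition register types,
 * literal and constant-bus limits, format-specific operand layouts and CFG
 * properties. Every violation is reported through aco_err(); returns false if
 * any check failed. */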
bool validate_ir(Program* program)
{
   bool is_valid = true;
   auto check = [&program, &is_valid](bool check, const char * msg, aco::Instruction * instr) -> void {
      if (!check) {
         char *out;
         size_t outsize;
         struct u_memstream mem;
         u_memstream_open(&mem, &out, &outsize);
         FILE *const memf = u_memstream_get(&mem);

         fprintf(memf, "%s: ", msg);
         aco_print_instr(instr, memf);
         u_memstream_close(&mem);

         aco_err(program, "%s", out);
         free(out);

         is_valid = false;
      }
   };

   auto check_block = [&program, &is_valid](bool check, const char * msg, aco::Block * block) -> void {
      if (!check) {
         aco_err(program, "%s: BB%u", msg, block->index);
         is_valid = false;
      }
   };
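   /* Walk every instruction of every block and check the per-instruction rules. */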
   for (Block& block : program->blocks) {
      for (aco_ptr<Instruction>& instr : block.instructions) {

         /* check base format */
         Format base_format = instr->format;
         base_format = (Format)((uint32_t)base_format & ~(uint32_t)Format::SDWA);
         base_format = (Format)((uint32_t)base_format & ~(uint32_t)Format::DPP);
         if ((uint32_t)base_format & (uint32_t)Format::VOP1)
            base_format = Format::VOP1;
         else if ((uint32_t)base_format & (uint32_t)Format::VOP2)
            base_format = Format::VOP2;
         else if ((uint32_t)base_format & (uint32_t)Format::VOPC)
            base_format = Format::VOPC;
         else if ((uint32_t)base_format & (uint32_t)Format::VINTRP) {
            if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
                instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
                instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
                instr->opcode == aco_opcode::v_interp_p2_f16) {
               /* v_interp_*_fp16 are considered VINTRP by the compiler but
                * they are emitted as VOP3.
                */
               base_format = Format::VOP3;
            } else {
               base_format = Format::VINTRP;
            }
         }
         check(base_format == instr_info.format[(int)instr->opcode], "Wrong base format for instruction", instr.get());

         /* check VOP3 modifiers */
         if (((uint32_t)instr->format & (uint32_t)Format::VOP3) && instr->format != Format::VOP3) {
            check(base_format == Format::VOP2 ||
                  base_format == Format::VOP1 ||
                  base_format == Format::VOPC ||
                  base_format == Format::VINTRP,
                  "Format cannot have VOP3A/VOP3B applied", instr.get());
         }

         /* check SDWA */
         if (instr->isSDWA()) {
            check(base_format == Format::VOP2 ||
                  base_format == Format::VOP1 ||
                  base_format == Format::VOPC,
                  "Format cannot have SDWA applied", instr.get());

            check(program->chip_class >= GFX8, "SDWA is GFX8+ only", instr.get());

            SDWA_instruction *sdwa = static_cast<SDWA_instruction*>(instr.get());
            check(sdwa->omod == 0 || program->chip_class >= GFX9, "SDWA omod only supported on GFX9+", instr.get());
            if (base_format == Format::VOPC) {
               check(sdwa->clamp == false || program->chip_class == GFX8, "SDWA VOPC clamp only supported on GFX8", instr.get());
               check((instr->definitions[0].isFixed() && instr->definitions[0].physReg() == vcc) ||
                     program->chip_class >= GFX9,
                     "SDWA+VOPC definition must be fixed to vcc on GFX8", instr.get());
            }

            if (instr->operands.size() >= 3) {
               check(instr->operands[2].isFixed() && instr->operands[2].physReg() == vcc,
                     "3rd operand must be fixed to vcc with SDWA", instr.get());
            }
            if (instr->definitions.size() >= 2) {
               check(instr->definitions[1].isFixed() && instr->definitions[1].physReg() == vcc,
                     "2nd definition must be fixed to vcc with SDWA", instr.get());
            }

            check(instr->opcode != aco_opcode::v_madmk_f32 &&
                  instr->opcode != aco_opcode::v_madak_f32 &&
                  instr->opcode != aco_opcode::v_madmk_f16 &&
                  instr->opcode != aco_opcode::v_madak_f16 &&
                  instr->opcode != aco_opcode::v_readfirstlane_b32 &&
                  instr->opcode != aco_opcode::v_clrexcp &&
                  instr->opcode != aco_opcode::v_swap_b32,
                  "SDWA can't be used with this opcode", instr.get());
            if (program->chip_class != GFX8) {
               check(instr->opcode != aco_opcode::v_mac_f32 &&
                     instr->opcode != aco_opcode::v_mac_f16 &&
                     instr->opcode != aco_opcode::v_fmac_f32 &&
                     instr->opcode != aco_opcode::v_fmac_f16,
                     "SDWA can't be used with this opcode", instr.get());
            }

            for (unsigned i = 0; i < MIN2(instr->operands.size(), 2); i++) {
               if (instr->operands[i].hasRegClass() && instr->operands[i].regClass().is_subdword())
                  check((sdwa->sel[i] & sdwa_asuint) == (sdwa_isra | instr->operands[i].bytes()), "Unexpected SDWA sel for sub-dword operand", instr.get());
            }
            if (instr->definitions[0].regClass().is_subdword())
               check((sdwa->dst_sel & sdwa_asuint) == (sdwa_isra | instr->definitions[0].bytes()), "Unexpected SDWA sel for sub-dword definition", instr.get());
         }

         /* check opsel */
         if (instr->isVOP3()) {
            VOP3A_instruction *vop3 = static_cast<VOP3A_instruction*>(instr.get());
            check(vop3->opsel == 0 || program->chip_class >= GFX9, "Opsel is only supported on GFX9+", instr.get());

            for (unsigned i = 0; i < 3; i++) {
               if (i >= instr->operands.size() ||
                   (instr->operands[i].hasRegClass() && instr->operands[i].regClass().is_subdword() && !instr->operands[i].isFixed()))
                  check((vop3->opsel & (1 << i)) == 0, "Unexpected opsel for operand", instr.get());
            }
            if (instr->definitions[0].regClass().is_subdword() && !instr->definitions[0].isFixed())
               check((vop3->opsel & (1 << 3)) == 0, "Unexpected opsel for sub-dword definition", instr.get());
         }

         /* check for undefs */
         for (unsigned i = 0; i < instr->operands.size(); i++) {
            if (instr->operands[i].isUndefined()) {
               bool flat = instr->format == Format::FLAT || instr->format == Format::SCRATCH || instr->format == Format::GLOBAL;
               bool can_be_undef = is_phi(instr) || instr->format == Format::EXP ||
                                   instr->format == Format::PSEUDO_REDUCTION ||
                                   instr->opcode == aco_opcode::p_create_vector ||
                                   (flat && i == 1) || (instr->format == Format::MIMG && i == 1) ||
                                   ((instr->format == Format::MUBUF || instr->format == Format::MTBUF) && i == 1);
               check(can_be_undef, "Undefs can only be used in certain operands", instr.get());
            } else {
               check(instr->operands[i].isFixed() || instr->operands[i].isTemp() || instr->operands[i].isConstant(), "Uninitialized Operand", instr.get());
            }
         }

         /* check subdword definitions */
         for (unsigned i = 0; i < instr->definitions.size(); i++) {
            if (instr->definitions[i].regClass().is_subdword())
               check(instr->format == Format::PSEUDO || instr->definitions[i].bytes() <= 4, "Only Pseudo instructions can write subdword registers larger than 4 bytes", instr.get());
         }
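         /* SALU/VALU rules: at most one unique literal, literals only on formats
          * that can encode them, and the constant-bus limit on SGPRs/constants. */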
         if (instr->isSALU() || instr->isVALU()) {
            /* check literals */
            Operand literal(s1);
            for (unsigned i = 0; i < instr->operands.size(); i++)
            {
               Operand op = instr->operands[i];
               if (!op.isLiteral())
                  continue;

               check(instr->format == Format::SOP1 ||
                     instr->format == Format::SOP2 ||
                     instr->format == Format::SOPC ||
                     instr->format == Format::VOP1 ||
                     instr->format == Format::VOP2 ||
                     instr->format == Format::VOPC ||
                     (instr->isVOP3() && program->chip_class >= GFX10),
                     "Literal applied on wrong instruction format", instr.get());

               check(literal.isUndefined() || (literal.size() == op.size() && literal.constantValue() == op.constantValue()), "Only 1 Literal allowed", instr.get());
               literal = op;
               check(!instr->isVALU() || instr->isVOP3() || i == 0 || i == 2, "Wrong source position for Literal argument", instr.get());
            }

            /* check num sgprs for VALU */
            if (instr->isVALU()) {
               bool is_shift64 = instr->opcode == aco_opcode::v_lshlrev_b64 ||
                                 instr->opcode == aco_opcode::v_lshrrev_b64 ||
                                 instr->opcode == aco_opcode::v_ashrrev_i64;
               unsigned const_bus_limit = 1;
               if (program->chip_class >= GFX10 && !is_shift64)
                  const_bus_limit = 2;

               uint32_t scalar_mask = instr->isVOP3() ? 0x7 : 0x5;
               if (instr->isSDWA())
                  scalar_mask = program->chip_class >= GFX9 ? 0x7 : 0x4;

               if ((int) instr->format & (int) Format::VOPC ||
                   instr->opcode == aco_opcode::v_readfirstlane_b32 ||
                   instr->opcode == aco_opcode::v_readlane_b32 ||
                   instr->opcode == aco_opcode::v_readlane_b32_e64) {
                  check(instr->definitions[0].getTemp().type() == RegType::sgpr,
                        "Wrong Definition type for VALU instruction", instr.get());
               } else {
                  check(instr->definitions[0].getTemp().type() == RegType::vgpr,
                        "Wrong Definition type for VALU instruction", instr.get());
               }

               unsigned num_sgprs = 0;
               unsigned sgpr[] = {0, 0};
               for (unsigned i = 0; i < instr->operands.size(); i++)
               {
                  Operand op = instr->operands[i];
                  if (instr->opcode == aco_opcode::v_readfirstlane_b32 ||
                      instr->opcode == aco_opcode::v_readlane_b32 ||
                      instr->opcode == aco_opcode::v_readlane_b32_e64) {
                     check(i != 1 ||
                           (op.isTemp() && op.regClass().type() == RegType::sgpr) ||
                           op.isConstant(),
                           "Must be a SGPR or a constant", instr.get());
                     check(i == 1 ||
                           (op.isTemp() && op.regClass().type() == RegType::vgpr && op.bytes() <= 4),
                           "Wrong Operand type for VALU instruction", instr.get());
                     continue;
                  }

                  if (instr->opcode == aco_opcode::v_writelane_b32 ||
                      instr->opcode == aco_opcode::v_writelane_b32_e64) {
                     check(i != 2 ||
                           (op.isTemp() && op.regClass().type() == RegType::vgpr && op.bytes() <= 4),
                           "Wrong Operand type for VALU instruction", instr.get());
                     check(i == 2 ||
                           (op.isTemp() && op.regClass().type() == RegType::sgpr) ||
                           op.isConstant(),
                           "Must be a SGPR or a constant", instr.get());
                     continue;
                  }
                  if (op.isTemp() && instr->operands[i].regClass().type() == RegType::sgpr) {
                     check(scalar_mask & (1 << i), "Wrong source position for SGPR argument", instr.get());

                     if (op.tempId() != sgpr[0] && op.tempId() != sgpr[1]) {
                        if (num_sgprs < 2)
                           sgpr[num_sgprs++] = op.tempId();
                     }
                  }

                  if (op.isConstant() && !op.isLiteral())
                     check(scalar_mask & (1 << i), "Wrong source position for constant argument", instr.get());
               }
               check(num_sgprs + (literal.isUndefined() ? 0 : 1) <= const_bus_limit, "Too many SGPRs/literals", instr.get());
            }

            if (instr->format == Format::SOP1 || instr->format == Format::SOP2) {
               check(instr->definitions[0].getTemp().type() == RegType::sgpr, "Wrong Definition type for SALU instruction", instr.get());
               for (const Operand& op : instr->operands) {
                  check(op.isConstant() || op.regClass().type() <= RegType::sgpr,
                        "Wrong Operand type for SALU instruction", instr.get());
               }
            }
         }
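         /* format-specific operand/definition checks */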
         switch (instr->format) {
         case Format::PSEUDO: {
            if (instr->opcode == aco_opcode::p_parallelcopy) {
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  if (!instr->definitions[i].regClass().is_subdword())
                     continue;
                  Operand op = instr->operands[i];
                  check(program->chip_class >= GFX9 || !op.isLiteral(), "Sub-dword copies cannot take literals", instr.get());
                  if (op.isConstant() || (op.hasRegClass() && op.regClass().type() == RegType::sgpr))
                     check(program->chip_class >= GFX9, "Sub-dword pseudo instructions can only take constants or SGPRs on GFX9+", instr.get());
               }
            } else {
               bool is_subdword = false;
               bool has_const_sgpr = false;
               bool has_literal = false;
               for (Definition def : instr->definitions)
                  is_subdword |= def.regClass().is_subdword();
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  if (instr->opcode == aco_opcode::p_extract_vector && i == 1)
                     continue;
                  Operand op = instr->operands[i];
                  is_subdword |= op.hasRegClass() && op.regClass().is_subdword();
                  has_const_sgpr |= op.isConstant() || (op.hasRegClass() && op.regClass().type() == RegType::sgpr);
                  has_literal |= op.isLiteral();
               }

               check(!is_subdword || !has_const_sgpr || program->chip_class >= GFX9,
                     "Sub-dword pseudo instructions can only take constants or SGPRs on GFX9+", instr.get());
            }

            if (instr->opcode == aco_opcode::p_create_vector) {
               unsigned size = 0;
               for (const Operand& op : instr->operands) {
                  size += op.bytes();
               }
               check(size == instr->definitions[0].bytes(), "Definition size does not match operand sizes", instr.get());
               if (instr->definitions[0].getTemp().type() == RegType::sgpr) {
                  for (const Operand& op : instr->operands) {
                     check(op.isConstant() || op.regClass().type() == RegType::sgpr,
                           "Wrong Operand type for scalar vector", instr.get());
                  }
               }
            } else if (instr->opcode == aco_opcode::p_extract_vector) {
               check((instr->operands[0].isTemp()) && instr->operands[1].isConstant(), "Wrong Operand types", instr.get());
               check((instr->operands[1].constantValue() + 1) * instr->definitions[0].bytes() <= instr->operands[0].bytes(), "Index out of range", instr.get());
               check(instr->definitions[0].getTemp().type() == RegType::vgpr || instr->operands[0].regClass().type() == RegType::sgpr,
                     "Cannot extract SGPR value from VGPR vector", instr.get());
            } else if (instr->opcode == aco_opcode::p_split_vector) {
               check(instr->operands[0].isTemp(), "Operand must be a temporary", instr.get());
               unsigned size = 0;
               for (const Definition& def : instr->definitions) {
                  size += def.bytes();
               }
               check(size == instr->operands[0].bytes(), "Operand size does not match definition sizes", instr.get());
               if (instr->operands[0].getTemp().type() == RegType::vgpr) {
                  for (const Definition& def : instr->definitions)
                     check(def.regClass().type() == RegType::vgpr, "Wrong Definition type for VGPR split_vector", instr.get());
               }
            } else if (instr->opcode == aco_opcode::p_parallelcopy) {
               check(instr->definitions.size() == instr->operands.size(), "Number of Operands does not match number of Definitions", instr.get());
               for (unsigned i = 0; i < instr->operands.size(); i++) {
                  if (instr->operands[i].isTemp())
                     check((instr->definitions[i].getTemp().type() == instr->operands[i].regClass().type()) ||
                           (instr->definitions[i].getTemp().type() == RegType::vgpr && instr->operands[i].regClass().type() == RegType::sgpr),
                           "Operand and Definition types do not match", instr.get());
               }
            } else if (instr->opcode == aco_opcode::p_phi) {
               check(instr->operands.size() == block.logical_preds.size(), "Number of Operands does not match number of predecessors", instr.get());
               check(instr->definitions[0].getTemp().type() == RegType::vgpr, "Logical Phi Definition must be vgpr", instr.get());
            } else if (instr->opcode == aco_opcode::p_linear_phi) {
               for (const Operand& op : instr->operands)
                  check(!op.isTemp() || op.getTemp().is_linear(), "Wrong Operand type", instr.get());
               check(instr->operands.size() == block.linear_preds.size(), "Number of Operands does not match number of predecessors", instr.get());
            }
            break;
         }
         case Format::PSEUDO_REDUCTION: {
            for (const Operand &op : instr->operands)
               check(op.regClass().type() == RegType::vgpr, "All operands of PSEUDO_REDUCTION instructions must be in VGPRs.", instr.get());

            unsigned cluster_size = static_cast<Pseudo_reduction_instruction *>(instr.get())->cluster_size;

            if (instr->opcode == aco_opcode::p_reduce && cluster_size == program->wave_size)
               check(instr->definitions[0].regClass().type() == RegType::sgpr, "The result of unclustered reductions must go into an SGPR.", instr.get());
            else
               check(instr->definitions[0].regClass().type() == RegType::vgpr, "The result of scans and clustered reductions must go into a VGPR.", instr.get());

            break;
         }
         case Format::SMEM: {
            if (instr->operands.size() >= 1)
               check((instr->operands[0].isFixed() && !instr->operands[0].isConstant()) ||
                     (instr->operands[0].isTemp() && instr->operands[0].regClass().type() == RegType::sgpr), "SMEM operands must be sgpr", instr.get());
            if (instr->operands.size() >= 2)
               check(instr->operands[1].isConstant() || (instr->operands[1].isTemp() && instr->operands[1].regClass().type() == RegType::sgpr),
                     "SMEM offset must be constant or sgpr", instr.get());
            if (!instr->definitions.empty())
               check(instr->definitions[0].getTemp().type() == RegType::sgpr, "SMEM result must be sgpr", instr.get());
            break;
         }
         case Format::MTBUF:
         case Format::MUBUF: {
            check(instr->operands.size() > 1, "VMEM instructions must have at least one operand", instr.get());
            check(instr->operands[1].hasRegClass() && instr->operands[1].regClass().type() == RegType::vgpr,
                  "VADDR must be in vgpr for VMEM instructions", instr.get());
            check(instr->operands[0].isTemp() && instr->operands[0].regClass().type() == RegType::sgpr, "VMEM resource constant must be sgpr", instr.get());
            check(instr->operands.size() < 4 || (instr->operands[3].isTemp() && instr->operands[3].regClass().type() == RegType::vgpr), "VMEM write data must be vgpr", instr.get());
            break;
         }
         case Format::MIMG: {
            check(instr->operands.size() == 3, "MIMG instructions must have exactly 3 operands", instr.get());
            check(instr->operands[0].hasRegClass() && (instr->operands[0].regClass() == s4 || instr->operands[0].regClass() == s8),
                  "MIMG operands[0] (resource constant) must be in 4 or 8 SGPRs", instr.get());
            if (instr->operands[1].hasRegClass() && instr->operands[1].regClass().type() == RegType::sgpr)
               check(instr->operands[1].regClass() == s4, "MIMG operands[1] (sampler constant) must be 4 SGPRs", instr.get());
            else if (instr->operands[1].hasRegClass() && instr->operands[1].regClass().type() == RegType::vgpr)
               check((instr->definitions.empty() || instr->definitions[0].regClass() == instr->operands[1].regClass() ||
                      instr->opcode == aco_opcode::image_atomic_cmpswap || instr->opcode == aco_opcode::image_atomic_fcmpswap),
                     "MIMG operands[1] (VDATA) must be the same as definitions[0] for atomics", instr.get());
            check(instr->operands[2].hasRegClass() && instr->operands[2].regClass().type() == RegType::vgpr,
                  "MIMG operands[2] (VADDR) must be VGPR", instr.get());
            check(instr->definitions.empty() || (instr->definitions[0].isTemp() && instr->definitions[0].regClass().type() == RegType::vgpr),
                  "MIMG definitions[0] (VDATA) must be VGPR", instr.get());
            break;
         }
         case Format::DS: {
            for (const Operand& op : instr->operands) {
               check((op.isTemp() && op.regClass().type() == RegType::vgpr) || op.physReg() == m0,
                     "Only VGPRs are valid DS instruction operands", instr.get());
            }
            if (!instr->definitions.empty())
               check(instr->definitions[0].getTemp().type() == RegType::vgpr, "DS instruction must return VGPR", instr.get());
            break;
         }
         case Format::EXP: {
            for (unsigned i = 0; i < 4; i++)
               check(instr->operands[i].hasRegClass() && instr->operands[i].regClass().type() == RegType::vgpr,
                     "Only VGPRs are valid Export arguments", instr.get());
            break;
         }
         case Format::FLAT:
            check(instr->operands[1].isUndefined(), "Flat instructions don't support SADDR", instr.get());
            /* fallthrough */
         case Format::GLOBAL:
         case Format::SCRATCH: {
            check(instr->operands[0].isTemp() && instr->operands[0].regClass().type() == RegType::vgpr, "FLAT/GLOBAL/SCRATCH address must be vgpr", instr.get());
            check(instr->operands[1].hasRegClass() && instr->operands[1].regClass().type() == RegType::sgpr,
                  "FLAT/GLOBAL/SCRATCH sgpr address must be undefined or sgpr", instr.get());
            if (!instr->definitions.empty())
               check(instr->definitions[0].getTemp().type() == RegType::vgpr, "FLAT/GLOBAL/SCRATCH result must be vgpr", instr.get());
            else
               check(instr->operands[2].regClass().type() == RegType::vgpr, "FLAT/GLOBAL/SCRATCH data must be vgpr", instr.get());
            break;
         }
         default:
            break;
         }
      }
   }

   /* validate CFG */
   for (unsigned i = 0; i < program->blocks.size(); i++) {
      Block& block = program->blocks[i];
      check_block(block.index == i, "block.index must match actual index", &block);

      /* predecessors/successors should be sorted */
      for (unsigned j = 0; j + 1 < block.linear_preds.size(); j++)
         check_block(block.linear_preds[j] < block.linear_preds[j + 1], "linear predecessors must be sorted", &block);
      for (unsigned j = 0; j + 1 < block.logical_preds.size(); j++)
         check_block(block.logical_preds[j] < block.logical_preds[j + 1], "logical predecessors must be sorted", &block);
      for (unsigned j = 0; j + 1 < block.linear_succs.size(); j++)
         check_block(block.linear_succs[j] < block.linear_succs[j + 1], "linear successors must be sorted", &block);
      for (unsigned j = 0; j + 1 < block.logical_succs.size(); j++)
         check_block(block.logical_succs[j] < block.logical_succs[j + 1], "logical successors must be sorted", &block);

      /* critical edges are not allowed */
      if (block.linear_preds.size() > 1) {
         for (unsigned pred : block.linear_preds)
            check_block(program->blocks[pred].linear_succs.size() == 1, "linear critical edges are not allowed", &program->blocks[pred]);
         for (unsigned pred : block.logical_preds)
            check_block(program->blocks[pred].logical_succs.size() == 1, "logical critical edges are not allowed", &program->blocks[pred]);
      }
   }

   return is_valid;
}

/* RA validation */
namespace {
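/* Bookkeeping for RA validation: for every temporary we remember where it was
 * defined, where it was first encountered and which physical register it was
 * assigned (struct Assignment below). A Location with instr == NULL refers to
 * a block's live-in set. */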
struct Location {
   Location() : block(NULL), instr(NULL) {}

   Block *block;
   Instruction *instr; //NULL if it's the block's live-in
};

struct Assignment {
   Location defloc;
   Location firstloc;
   PhysReg reg;
};
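/* Prints an RA validation error (with the offending instruction and, if given,
 * a second related location) through aco_err() and always returns true so the
 * caller can accumulate it into its error flag. */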
bool ra_fail(Program *program, Location loc, Location loc2, const char *fmt, ...) {
   va_list args;
   va_start(args, fmt);
   char msg[1024];
   vsnprintf(msg, sizeof(msg), fmt, args);
   va_end(args);

   char *out;
   size_t outsize;
   struct u_memstream mem;
   u_memstream_open(&mem, &out, &outsize);
   FILE *const memf = u_memstream_get(&mem);

   fprintf(memf, "RA error found at instruction in BB%d:\n", loc.block->index);
   if (loc.instr) {
      aco_print_instr(loc.instr, memf);
      fprintf(memf, "\n%s", msg);
   } else {
      fprintf(memf, "%s", msg);
   }
   if (loc2.block) {
      fprintf(memf, " in BB%d:\n", loc2.block->index);
      aco_print_instr(loc2.instr, memf);
   }
   fprintf(memf, "\n\n");
   u_memstream_close(&mem);

   aco_err(program, "%s", out);
   free(out);

   return true;
}
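/* Returns true if the hardware can read this sub-dword operand at the byte
 * offset it was assigned to (e.g. via SDWA, opsel or a *_d16_hi opcode). */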
bool validate_subdword_operand(chip_class chip, const aco_ptr<Instruction>& instr, unsigned index)
{
   Operand op = instr->operands[index];
   unsigned byte = op.physReg().byte();

   if (instr->opcode == aco_opcode::p_as_uniform)
      return byte == 0;
   if (instr->format == Format::PSEUDO && chip >= GFX8)
      return true;
   if (instr->isSDWA() && (static_cast<SDWA_instruction *>(instr.get())->sel[index] & sdwa_asuint) == (sdwa_isra | op.bytes()))
      return true;
   if (byte == 2 && can_use_opsel(chip, instr->opcode, index, 1))
      return true;

   switch (instr->opcode) {
   case aco_opcode::v_cvt_f32_ubyte1:
      if (byte == 1)
         return true;
      break;
   case aco_opcode::v_cvt_f32_ubyte2:
      if (byte == 2)
         return true;
      break;
   case aco_opcode::v_cvt_f32_ubyte3:
      if (byte == 3)
         return true;
      break;
   case aco_opcode::ds_write_b8_d16_hi:
   case aco_opcode::ds_write_b16_d16_hi:
      if (byte == 2 && index == 1)
         return true;
      break;
   case aco_opcode::buffer_store_byte_d16_hi:
   case aco_opcode::buffer_store_short_d16_hi:
      if (byte == 2 && index == 3)
         return true;
      break;
   case aco_opcode::flat_store_byte_d16_hi:
   case aco_opcode::flat_store_short_d16_hi:
   case aco_opcode::scratch_store_byte_d16_hi:
   case aco_opcode::scratch_store_short_d16_hi:
   case aco_opcode::global_store_byte_d16_hi:
   case aco_opcode::global_store_short_d16_hi:
      if (byte == 2 && index == 2)
         return true;
   default:
      break;
   }

   return byte == 0;
}
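/* Returns true if the hardware can write this sub-dword definition at the byte
 * offset it was assigned to. */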
bool validate_subdword_definition(chip_class chip, const aco_ptr<Instruction>& instr)
{
   Definition def = instr->definitions[0];
   unsigned byte = def.physReg().byte();

   if (instr->format == Format::PSEUDO && chip >= GFX8)
      return true;
   if (instr->isSDWA() && static_cast<SDWA_instruction *>(instr.get())->dst_sel == (sdwa_isra | def.bytes()))
      return true;
   if (byte == 2 && can_use_opsel(chip, instr->opcode, -1, 1))
      return true;

   switch (instr->opcode) {
   case aco_opcode::buffer_load_ubyte_d16_hi:
   case aco_opcode::buffer_load_short_d16_hi:
   case aco_opcode::flat_load_ubyte_d16_hi:
   case aco_opcode::flat_load_short_d16_hi:
   case aco_opcode::scratch_load_ubyte_d16_hi:
   case aco_opcode::scratch_load_short_d16_hi:
   case aco_opcode::global_load_ubyte_d16_hi:
   case aco_opcode::global_load_short_d16_hi:
   case aco_opcode::ds_read_u8_d16_hi:
   case aco_opcode::ds_read_u16_d16_hi:
      return byte == 2;
   default:
      break;
   }

   return byte == 0;
}
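/* Returns how many bytes of the destination register the instruction actually
 * writes; bytes beyond def.bytes() are clobbered rather than preserved. */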
unsigned get_subdword_bytes_written(Program *program, const aco_ptr<Instruction>& instr, unsigned index)
{
   chip_class chip = program->chip_class;
   Definition def = instr->definitions[index];

   if (instr->format == Format::PSEUDO)
      return chip >= GFX8 ? def.bytes() : def.size() * 4u;
   if (instr->isSDWA() && static_cast<SDWA_instruction *>(instr.get())->dst_sel == (sdwa_isra | def.bytes()))
      return def.bytes();

   switch (instr->opcode) {
   case aco_opcode::buffer_load_ubyte_d16:
   case aco_opcode::buffer_load_short_d16:
   case aco_opcode::flat_load_ubyte_d16:
   case aco_opcode::flat_load_short_d16:
   case aco_opcode::scratch_load_ubyte_d16:
   case aco_opcode::scratch_load_short_d16:
   case aco_opcode::global_load_ubyte_d16:
   case aco_opcode::global_load_short_d16:
   case aco_opcode::ds_read_u8_d16:
   case aco_opcode::ds_read_u16_d16:
   case aco_opcode::buffer_load_ubyte_d16_hi:
   case aco_opcode::buffer_load_short_d16_hi:
   case aco_opcode::flat_load_ubyte_d16_hi:
   case aco_opcode::flat_load_short_d16_hi:
   case aco_opcode::scratch_load_ubyte_d16_hi:
   case aco_opcode::scratch_load_short_d16_hi:
   case aco_opcode::global_load_ubyte_d16_hi:
   case aco_opcode::global_load_short_d16_hi:
   case aco_opcode::ds_read_u8_d16_hi:
   case aco_opcode::ds_read_u16_d16_hi:
      return program->sram_ecc_enabled ? 4 : 2;
   case aco_opcode::v_mad_f16:
   case aco_opcode::v_mad_u16:
   case aco_opcode::v_mad_i16:
   case aco_opcode::v_fma_f16:
   case aco_opcode::v_div_fixup_f16:
   case aco_opcode::v_interp_p2_f16:
      if (chip >= GFX9)
         return 2;
   default:
      break;
   }

   return MAX2(chip >= GFX10 ? def.bytes() : 4, instr_info.definition_size[(int)instr->opcode] / 8u);
}

} /* end namespace */
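/* Validates the register assignment produced by RA: every temporary must have a
 * consistent, in-bounds physical register, and no two values that are live at
 * the same time may overlap. Only runs when DEBUG_VALIDATE_RA is set; returns
 * true if an error was found. */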
bool validate_ra(Program *program) {
   if (!(debug_flags & DEBUG_VALIDATE_RA))
      return false;

   bool err = false;
   aco::live live_vars = aco::live_var_analysis(program);
   std::vector<std::vector<Temp>> phi_sgpr_ops(program->blocks.size());
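   /* First pass: record each temporary's register assignment and definition
    * location, and check per-use consistency, bounds and sub-dword alignment. */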
   std::map<unsigned, Assignment> assignments;
   for (Block& block : program->blocks) {
      Location loc;
      loc.block = &block;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_phi) {
            for (unsigned i = 0; i < instr->operands.size(); i++) {
               if (instr->operands[i].isTemp() &&
                   instr->operands[i].getTemp().type() == RegType::sgpr &&
                   instr->operands[i].isFirstKill())
                  phi_sgpr_ops[block.logical_preds[i]].emplace_back(instr->operands[i].getTemp());
            }
         }

         loc.instr = instr.get();
         for (unsigned i = 0; i < instr->operands.size(); i++) {
            Operand& op = instr->operands[i];
            if (!op.isTemp())
               continue;
            if (!op.isFixed())
               err |= ra_fail(program, loc, Location(), "Operand %d is not assigned a register", i);
            if (assignments.count(op.tempId()) && assignments[op.tempId()].reg != op.physReg())
               err |= ra_fail(program, loc, assignments.at(op.tempId()).firstloc, "Operand %d has an inconsistent register assignment with instruction", i);
            if ((op.getTemp().type() == RegType::vgpr && op.physReg().reg_b + op.bytes() > (256 + program->config->num_vgprs) * 4) ||
                (op.getTemp().type() == RegType::sgpr && op.physReg() + op.size() > program->config->num_sgprs && op.physReg() < program->sgpr_limit))
               err |= ra_fail(program, loc, assignments.at(op.tempId()).firstloc, "Operand %d has an out-of-bounds register assignment", i);
            if (op.physReg() == vcc && !program->needs_vcc)
               err |= ra_fail(program, loc, Location(), "Operand %d fixed to vcc but needs_vcc=false", i);
            if (op.regClass().is_subdword() && !validate_subdword_operand(program->chip_class, instr, i))
               err |= ra_fail(program, loc, Location(), "Operand %d not aligned correctly", i);
            if (!assignments[op.tempId()].firstloc.block)
               assignments[op.tempId()].firstloc = loc;
            if (!assignments[op.tempId()].defloc.block)
               assignments[op.tempId()].reg = op.physReg();
         }

         for (unsigned i = 0; i < instr->definitions.size(); i++) {
            Definition& def = instr->definitions[i];
            if (!def.isTemp())
               continue;
            if (!def.isFixed())
               err |= ra_fail(program, loc, Location(), "Definition %d is not assigned a register", i);
            if (assignments[def.tempId()].defloc.block)
               err |= ra_fail(program, loc, assignments.at(def.tempId()).defloc, "Temporary %%%d also defined by instruction", def.tempId());
            if ((def.getTemp().type() == RegType::vgpr && def.physReg().reg_b + def.bytes() > (256 + program->config->num_vgprs) * 4) ||
                (def.getTemp().type() == RegType::sgpr && def.physReg() + def.size() > program->config->num_sgprs && def.physReg() < program->sgpr_limit))
               err |= ra_fail(program, loc, assignments.at(def.tempId()).firstloc, "Definition %d has an out-of-bounds register assignment", i);
            if (def.physReg() == vcc && !program->needs_vcc)
               err |= ra_fail(program, loc, Location(), "Definition %d fixed to vcc but needs_vcc=false", i);
            if (def.regClass().is_subdword() && !validate_subdword_definition(program->chip_class, instr))
               err |= ra_fail(program, loc, Location(), "Definition %d not aligned correctly", i);
            if (!assignments[def.tempId()].firstloc.block)
               assignments[def.tempId()].firstloc = loc;
            assignments[def.tempId()].defloc = loc;
            assignments[def.tempId()].reg = def.physReg();
         }
      }
   }
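   /* Second pass: model the register file at byte granularity for each block
    * and make sure no two simultaneously live temporaries overlap. */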
   for (Block& block : program->blocks) {
      Location loc;
      loc.block = &block;

      std::array<unsigned, 2048> regs; /* register file in bytes */
      regs.fill(0);

      std::set<Temp> live;
      for (unsigned id : live_vars.live_out[block.index])
         live.insert(Temp(id, program->temp_rc[id]));
      /* remove killed p_phi sgpr operands */
      for (Temp tmp : phi_sgpr_ops[block.index])
         live.erase(tmp);

      /* check live out */
      for (Temp tmp : live) {
         PhysReg reg = assignments.at(tmp.id()).reg;
         for (unsigned i = 0; i < tmp.bytes(); i++) {
            if (regs[reg.reg_b + i]) {
               err |= ra_fail(program, loc, Location(), "Assignment of element %d of %%%d already taken by %%%d in live-out", i, tmp.id(), regs[reg.reg_b + i]);
            }
            regs[reg.reg_b + i] = tmp.id();
         }
      }
      regs.fill(0);
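      /* Reconstruct the block's live-in set by walking the instructions
       * backwards from the live-out set. */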
      for (auto it = block.instructions.rbegin(); it != block.instructions.rend(); ++it) {
         aco_ptr<Instruction>& instr = *it;

         /* check killed p_phi sgpr operands */
         if (instr->opcode == aco_opcode::p_logical_end) {
            for (Temp tmp : phi_sgpr_ops[block.index]) {
               PhysReg reg = assignments.at(tmp.id()).reg;
               for (unsigned i = 0; i < tmp.bytes(); i++) {
                  if (regs[reg.reg_b + i])
                     err |= ra_fail(program, loc, Location(), "Assignment of element %d of %%%d already taken by %%%d in live-out", i, tmp.id(), regs[reg.reg_b + i]);
               }
               live.emplace(tmp);
            }
         }

         for (const Definition& def : instr->definitions) {
            if (!def.isTemp())
               continue;
            live.erase(def.getTemp());
         }

         /* don't count phi operands as live-in, since they are actually
          * killed when they are copied at the predecessor */
         if (instr->opcode != aco_opcode::p_phi && instr->opcode != aco_opcode::p_linear_phi) {
            for (const Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               live.insert(op.getTemp());
            }
         }
      }

      for (Temp tmp : live) {
         PhysReg reg = assignments.at(tmp.id()).reg;
         for (unsigned i = 0; i < tmp.bytes(); i++)
            regs[reg.reg_b + i] = tmp.id();
      }
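      /* Forward walk: free the registers of killed operands, then check that
       * each definition does not overwrite a register that still holds a live
       * value. */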
      for (aco_ptr<Instruction>& instr : block.instructions) {
         loc.instr = instr.get();

         /* remove killed p_phi operands from regs */
         if (instr->opcode == aco_opcode::p_logical_end) {
            for (Temp tmp : phi_sgpr_ops[block.index]) {
               PhysReg reg = assignments.at(tmp.id()).reg;
               for (unsigned i = 0; i < tmp.bytes(); i++)
                  regs[reg.reg_b + i] = 0;
            }
         }

         if (instr->opcode != aco_opcode::p_phi && instr->opcode != aco_opcode::p_linear_phi) {
            for (const Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.isFirstKillBeforeDef()) {
                  for (unsigned j = 0; j < op.getTemp().bytes(); j++)
                     regs[op.physReg().reg_b + j] = 0;
               }
            }
         }

         for (unsigned i = 0; i < instr->definitions.size(); i++) {
            Definition& def = instr->definitions[i];
            if (!def.isTemp())
               continue;
            Temp tmp = def.getTemp();
            PhysReg reg = assignments.at(tmp.id()).reg;
            for (unsigned j = 0; j < tmp.bytes(); j++) {
               if (regs[reg.reg_b + j])
                  err |= ra_fail(program, loc, assignments.at(regs[reg.reg_b + j]).defloc, "Assignment of element %d of %%%d already taken by %%%d from instruction", i, tmp.id(), regs[reg.reg_b + j]);
               regs[reg.reg_b + j] = tmp.id();
            }
            if (def.regClass().is_subdword() && def.bytes() < 4) {
               unsigned written = get_subdword_bytes_written(program, instr, i);
               /* If written=4, the instruction still might write the upper half. In that case, it's the lower half that isn't preserved */
               for (unsigned j = reg.byte() & ~(written - 1); j < written; j++) {
                  unsigned written_reg = reg.reg() * 4u + j;
                  if (regs[written_reg] && regs[written_reg] != def.tempId())
                     err |= ra_fail(program, loc, assignments.at(regs[written_reg]).defloc, "Assignment of element %d of %%%d overwrites the full register taken by %%%d from instruction", i, tmp.id(), regs[written_reg]);
               }
            }
         }

         for (const Definition& def : instr->definitions) {
            if (!def.isTemp())
               continue;
            if (def.isKill()) {
               for (unsigned j = 0; j < def.getTemp().bytes(); j++)
                  regs[def.physReg().reg_b + j] = 0;
            }
         }

         if (instr->opcode != aco_opcode::p_phi && instr->opcode != aco_opcode::p_linear_phi) {
            for (const Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.isLateKill() && op.isFirstKill()) {
                  for (unsigned j = 0; j < op.getTemp().bytes(); j++)
                     regs[op.physReg().reg_b + j] = 0;
               }
            }
         }
      }
   }

   return err;
}
}