/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;
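
// Number of vec4 attribute slots a variable of the given type occupies; this
// matches the signature of a nir_lower_io type-size callback. The bindless
// flag is unused here.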
int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}
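
// Size/alignment callback for function_temp (scratch) variables: only
// vectors and scalars are expected, booleans are stored as 32 bits, and
// everything is aligned to 16 bytes.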
static void
function_temp_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);

   *size = comp_size * length;
   *align = 0x10;
}

class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *, nv50_ir_prog_info_out *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);
   // The returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization that lets us skip indirects in a few
   // cases, sometimes we require immediate values or have to set some fields
   // on instructions (e.g. tex) in order for codegen to consume those.
   // If the found value has no constant part, it is returned through the
   // Value reference parameter instead.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar, vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_op, uint8_t);

   DataFile getFile(nir_intrinsic_op);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   unsigned int getNIRArgCount(TexInstruction::Target&);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirBlockMap blocks;
   unsigned int curLoopDepth;
   unsigned int curIfDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info,
                     nv50_ir_prog_info_out *info_out)
   : ConverterCommon(prog, info, info_out),
     nir(nir),
     curLoopDepth(0),
     curIfDepth(0),
     exit(NULL),
     immInsertPos(NULL),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}
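
// Looks up, or lazily creates, the BasicBlock corresponding to a nir_block,
// so that control flow can reference blocks that have not been visited yet.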
BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}
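
// Most intrinsics produce an unsigned integer of the destination bit size;
// float atomics and signed integer min/max atomics are special-cased so the
// result gets a matching type.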
DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isFloat, isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_bindless_image_atomic_fadd:
   case nir_intrinsic_global_atomic_fadd:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_shared_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fadd:
      isFloat = true;
      isSigned = false;
      break;
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isFloat = false;
      isSigned = true;
      break;
   default:
      isFloat = false;
      isSigned = false;
      break;
   }

   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, isFloat, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, isFloat, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

DataFile
Converter::getFile(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
   case nir_intrinsic_load_global_constant:
      return FILE_MEMORY_GLOBAL;
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_store_scratch:
      return FILE_MEMORY_LOCAL;
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      return FILE_MEMORY_SHARED;
   case nir_intrinsic_load_kernel_input:
      return FILE_SHADER_INPUT;
   default:
      ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
      assert(false);
   }
   return FILE_NULL;
}

operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_inot:
      return OP_NOT;
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fneu32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr:
      return NV50_IR_SUBOP_SHIFT_WRAP;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_fadd:
   case nir_intrinsic_global_atomic_fadd:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_shared_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fadd:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
      return NV50_IR_SUBOP_ATOM_INC;
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return NV50_IR_SUBOP_ATOM_DEC;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fneu32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   assert(!reg->num_array_elems);

   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}
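
// Maps a gl_vert_attrib slot onto the TGSI semantic name/index pair that the
// nv50 IR varying tables are keyed on.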
static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}
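
// Number of I/O slots a variable occupies from a single invocation's point
// of view: per-vertex arrays (GS inputs and non-patch tessellation I/O) drop
// their outermost array dimension.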
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->count_attribute_slots(false);
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->count_attribute_slots(false);
      else
         slots = type->fields.array->count_attribute_slots(false);
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}
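
// Component write mask for one slot of the given type; 64-bit types occupy
// two components per element and may spill into a second slot.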
static uint8_t
getMaskForType(const glsl_type *type, uint8_t slot) {
   uint16_t comp = type->without_array()->components();
   comp = comp ? comp : 4;

   if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
      comp *= 2;
      if (comp > 4) {
         if (slot % 2)
            comp -= 4;
         else
            comp = 4;
      }
   }

   return (1 << comp) - 1;
}
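
// Fills the nv50_ir_prog_info_out input/output/system-value tables from the
// NIR shader's variables, then hands them to the driver-provided assignSlots
// callback for final hardware slot assignment.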
bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info_out->numInputs = 0;
   info_out->numOutputs = 0;
   info_out->numSysVals = 0;

   uint8_t i;
   BITSET_FOREACH_SET(i, nir->info.system_values_read, SYSTEM_VALUE_MAX) {
      info_out->sv[info_out->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info_out->sv[info_out->numSysVals].si = 0;
      info_out->sv[info_out->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info_out->io.instanceId = info_out->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info_out->sv[info_out->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info_out->io.vertexId = info_out->numSysVals;
         break;
      default:
         break;
      }

      info_out->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;

   nir_foreach_shader_in_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t vary = var->data.driver_location;

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info_out->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         if (slot >= VERT_ATTRIB_GENERIC0)
            slot = VERT_ATTRIB_GENERIC0 + vary;
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->in[vary];

         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;
      }
      info_out->numInputs = std::max<uint8_t>(info_out->numInputs, vary);
   }

   nir_foreach_shader_out_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t vary = var->data.driver_location;

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info_out->prop.fp.numColourResults++;
            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info_out->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info_out->io.fragDepth = vary;
            info_out->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info_out->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info_out->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->out[vary];
         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;

         if (nir->info.outputs_read & 1ull << slot)
            v->oread = 1;
      }
      info_out->numOutputs = std::max<uint8_t>(info_out->numOutputs, vary);
   }

   if (info_out->io.genUserClip > 0) {
      info_out->io.clipDistances = info_out->io.genUserClip;

      const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info_out->numOutputs++;
         info_out->out[i].id = i;
         info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info_out->out[i].si = n;
         info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info_out) == 0;
}
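
// Translates a varying index plus component into the byte address codegen
// expects, accounting for 64-bit types occupying two 32-bit components.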
uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info_out->in : info_out->out;
   return vary[idx].slot[slot] * 4;
}
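
// Emits a load of a single component; 64-bit values from const/buffer space
// or behind an indirect are split into two 32-bit loads and merged.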
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}
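
// Counterpart to loadFrom for shader outputs; 64-bit values stored through
// an indirect are split into two 32-bit stores.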
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info_out->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info_out->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info_out->out[idx].patch;
   }
}

bool
Converter::parseNIR()
{
   info_out->bin.tlsSpace = nir->scratch_size;
   info_out->io.clipDistances = nir->info.clip_distance_array_size;
   info_out->io.cullDistances = nir->info.cull_distance_array_size;
   info_out->io.layer_viewport_relative = nir->info.layer_viewport_relative;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.workgroup_size[0];
      info->prop.cp.numThreads[1] = nir->info.workgroup_size[1];
      info->prop.cp.numThreads[2] = nir->info.workgroup_size[2];
      info_out->bin.smemSize = std::max(info_out->bin.smemSize, nir->info.shared_size);
      break;
   case Program::TYPE_FRAGMENT:
      info_out->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      prog->persampleInvocation =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
      info_out->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info_out->prop.fp.readsSampleLocations =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
      info_out->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
      info_out->prop.fp.usesSampleMaskIn =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info_out->prop.gp.instanceCount = nir->info.gs.invocations;
      info_out->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info_out->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info_out->prop.tp.domain = GL_LINES;
      else
         info_out->prop.tp.domain = nir->info.tess.primitive_mode;
      info_out->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info_out->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info_out->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info_out->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info_out->prop.vp.usesDrawParameters =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_VERTEX) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID);
      break;
   default:
      break;
   }

   return true;
}

bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info_out->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info_out->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}
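
// Lowers a nir_if to conditional branches; JOINAT/JOIN are only emitted for
// the outermost if whose then and else paths reconverge at the same block.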
bool
Converter::visit(nir_if *nif)
{
   curIfDepth++;

   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   BasicBlock *headBB = bb;
   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastThen), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastElse), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   /* only insert joins for the outermost if */
   if (--curIfDepth)
      insertJoins = false;

   /* we made sure that all threads would converge at the same block */
   if (insertJoins) {
      BasicBlock *conv = convert(lastThen->successors[0]);
      setPosition(headBB->getExit(), false);
      headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
      setPosition(conv, false);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

// TODO: add convergence
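// Loops are bracketed by PREBREAK/PRECONT so that break and continue inside
// the body have their targets set up before the body is emitted.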
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));

   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   if (!bb->isTerminated()) {
      mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
   }

   if (tailBB->cfg.incidentCount() == 0)
      loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for immediate loads generated on the fly
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}
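
// Maps system-value load intrinsics onto the corresponding nv50 IR system
// value semantics.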
SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_workgroup_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_workgroups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_workgroup_id:
      return SV_CTAID;
   case nir_intrinsic_load_work_dim:
      return SV_WORK_DIM;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

bool
Converter::visit(nir_intrinsic_instr *insn)
{
   nir_intrinsic_op op = insn->intrinsic;
   const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
   unsigned dest_components = nir_intrinsic_dest_components(insn);

   switch (op) {
   case nir_intrinsic_load_uniform: {
      LValues &newDefs = convert(&insn->dest);
      const DataType dType = getDType(insn);
      Value *indirect;
      uint32_t coffset = getIndirect(insn, 0, 0, indirect);
      for (uint8_t i = 0; i < dest_components; ++i) {
         loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
      }
      break;
   }
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output: {
      Value *indirect;
      DataType dType = getSType(insn->src[0], false, false);
      uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);

      for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
         if (!((1u << i) & nir_intrinsic_write_mask(insn)))
            continue;

         uint8_t offset = 0;
         Value *src = getSrc(&insn->src[0], i);
         switch (prog->getType()) {
         case Program::TYPE_FRAGMENT: {
            if (info_out->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // value in the z component, NIR in x
1660 offset += 2;
1661 src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
1662 }
1663 break;
1664 }
1665 case Program::TYPE_GEOMETRY:
1666 case Program::TYPE_TESSELLATION_EVAL:
1667 case Program::TYPE_VERTEX: {
1668 if (info_out->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
1669 mkMov(clipVtx[i], src);
1670 src = clipVtx[i];
1671 }
1672 break;
1673 }
1674 default:
1675 break;
1676 }
1677
1678 storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
1679 }
1680 break;
1681 }
1682 case nir_intrinsic_load_input:
1683 case nir_intrinsic_load_interpolated_input:
1684 case nir_intrinsic_load_output: {
1685 LValues &newDefs = convert(&insn->dest);
1686
1687 // FBFetch
1688 if (prog->getType() == Program::TYPE_FRAGMENT &&
1689 op == nir_intrinsic_load_output) {
1690 std::vector<Value*> defs, srcs;
1691 uint8_t mask = 0;
1692
1693 srcs.push_back(getSSA());
1694 srcs.push_back(getSSA());
1695 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
1696 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
1697 mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
1698 mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;
1699
1700 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
1701 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));
1702
1703 for (uint8_t i = 0u; i < dest_components; ++i) {
1704 defs.push_back(newDefs[i]);
1705 mask |= 1 << i;
1706 }
1707
1708 TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
1709 texi->tex.levelZero = true;
1710 texi->tex.mask = mask;
1711 texi->tex.useOffsets = 0;
1712 texi->tex.r = 0xffff;
1713 texi->tex.s = 0xffff;
1714
1715 info_out->prop.fp.readsFramebuffer = true;
1716 break;
1717 }
1718
1719 const DataType dType = getDType(insn);
1720 Value *indirect;
1721 bool input = op != nir_intrinsic_load_output;
1722 operation nvirOp;
1723 uint32_t mode = 0;
1724
1725 uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
1726 nv50_ir_varying& vary = input ? info_out->in[idx] : info_out->out[idx];
1727
1728 // see load_barycentric_* handling
1729 if (prog->getType() == Program::TYPE_FRAGMENT) {
1730 if (op == nir_intrinsic_load_interpolated_input) {
1731 ImmediateValue immMode;
1732 if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
1733 mode = immMode.reg.data.u32;
1734 }
1735 if (mode == NV50_IR_INTERP_DEFAULT)
1736 mode |= translateInterpMode(&vary, nvirOp);
1737 else {
1738 if (vary.linear) {
1739 nvirOp = OP_LINTERP;
1740 mode |= NV50_IR_INTERP_LINEAR;
1741 } else {
1742 nvirOp = OP_PINTERP;
1743 mode |= NV50_IR_INTERP_PERSPECTIVE;
1744 }
1745 }
1746 }
1747
1748 for (uint8_t i = 0u; i < dest_components; ++i) {
1749 uint32_t address = getSlotAddress(insn, idx, i);
1750 Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
1751 if (prog->getType() == Program::TYPE_FRAGMENT) {
1752 int s = 1;
1753 if (typeSizeof(dType) == 8) {
1754 Value *lo = getSSA();
1755 Value *hi = getSSA();
1756 Instruction *interp;
1757
1758 interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
1759 if (nvirOp == OP_PINTERP)
1760 interp->setSrc(s++, fp.position);
1761 if (mode & NV50_IR_INTERP_OFFSET)
1762 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1763 interp->setInterpolate(mode);
1764 interp->setIndirect(0, 0, indirect);
1765
1766 Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
1767 interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
1768 if (nvirOp == OP_PINTERP)
1769 interp->setSrc(s++, fp.position);
1770 if (mode & NV50_IR_INTERP_OFFSET)
1771 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1772 interp->setInterpolate(mode);
1773 interp->setIndirect(0, 0, indirect);
1774
1775 mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
1776 } else {
1777 Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
1778 if (nvirOp == OP_PINTERP)
1779 interp->setSrc(s++, fp.position);
1780 if (mode & NV50_IR_INTERP_OFFSET)
1781 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1782 interp->setInterpolate(mode);
1783 interp->setIndirect(0, 0, indirect);
1784 }
1785 } else {
1786 mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
1787 }
1788 }
1789 break;
1790 }
1791 case nir_intrinsic_load_barycentric_at_offset:
1792 case nir_intrinsic_load_barycentric_at_sample:
1793 case nir_intrinsic_load_barycentric_centroid:
1794 case nir_intrinsic_load_barycentric_pixel:
1795 case nir_intrinsic_load_barycentric_sample: {
1796 LValues &newDefs = convert(&insn->dest);
1797 uint32_t mode;
1798
1799 if (op == nir_intrinsic_load_barycentric_centroid ||
1800 op == nir_intrinsic_load_barycentric_sample) {
1801 mode = NV50_IR_INTERP_CENTROID;
1802 } else if (op == nir_intrinsic_load_barycentric_at_offset) {
1803 Value *offs[2];
1804 for (uint8_t c = 0; c < 2; c++) {
1805 offs[c] = getScratch();
1806 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
1807 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
1808 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
1809 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
1810 }
1811 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
1812
1813 mode = NV50_IR_INTERP_OFFSET;
1814 } else if (op == nir_intrinsic_load_barycentric_pixel) {
1815 mode = NV50_IR_INTERP_DEFAULT;
1816 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
1817 info_out->prop.fp.readsSampleLocations = true;
1818 Value *sample = getSSA();
1819 mkOp3(OP_SELP, TYPE_U32, sample, mkImm(0), getSrc(&insn->src[0], 0), mkImm(0))
1820 ->subOp = 2;
1821 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], sample)->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
1822 mode = NV50_IR_INTERP_OFFSET;
1823 } else {
1824 unreachable("all intrinsics already handled above");
1825 }
1826
1827 loadImm(newDefs[1], mode);
1828 break;
1829 }
1830 case nir_intrinsic_demote:
1831 case nir_intrinsic_discard:
1832 mkOp(OP_DISCARD, TYPE_NONE, NULL);
1833 break;
1834 case nir_intrinsic_demote_if:
1835 case nir_intrinsic_discard_if: {
1836 Value *pred = getSSA(1, FILE_PREDICATE);
1837 if (insn->num_components > 1) {
1838 ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
1839 assert(false);
1840 return false;
1841 }
1842 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1843 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
1844 break;
1845 }
1846 case nir_intrinsic_load_base_vertex:
1847 case nir_intrinsic_load_base_instance:
1848 case nir_intrinsic_load_draw_id:
1849 case nir_intrinsic_load_front_face:
1850 case nir_intrinsic_is_helper_invocation:
1851 case nir_intrinsic_load_helper_invocation:
1852 case nir_intrinsic_load_instance_id:
1853 case nir_intrinsic_load_invocation_id:
1854 case nir_intrinsic_load_workgroup_size:
1855 case nir_intrinsic_load_local_invocation_id:
1856 case nir_intrinsic_load_num_workgroups:
1857 case nir_intrinsic_load_patch_vertices_in:
1858 case nir_intrinsic_load_primitive_id:
1859 case nir_intrinsic_load_sample_id:
1860 case nir_intrinsic_load_sample_mask_in:
1861 case nir_intrinsic_load_sample_pos:
1862 case nir_intrinsic_load_subgroup_eq_mask:
1863 case nir_intrinsic_load_subgroup_ge_mask:
1864 case nir_intrinsic_load_subgroup_gt_mask:
1865 case nir_intrinsic_load_subgroup_le_mask:
1866 case nir_intrinsic_load_subgroup_lt_mask:
1867 case nir_intrinsic_load_subgroup_invocation:
1868 case nir_intrinsic_load_tess_coord:
1869 case nir_intrinsic_load_tess_level_inner:
1870 case nir_intrinsic_load_tess_level_outer:
1871 case nir_intrinsic_load_vertex_id:
1872 case nir_intrinsic_load_workgroup_id:
1873 case nir_intrinsic_load_work_dim: {
1874 const DataType dType = getDType(insn);
1875 SVSemantic sv = convert(op);
1876 LValues &newDefs = convert(&insn->dest);
1877
1878 for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
1879 Value *def;
1880 if (typeSizeof(dType) == 8)
1881 def = getSSA();
1882 else
1883 def = newDefs[i];
1884
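// A workgroup size of 1 in this dimension makes the local invocation ID
// trivially 0, so skip the RDSV and load an immediate instead.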
1885 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
1886 loadImm(def, 0u);
1887 } else {
1888 Symbol *sym = mkSysVal(sv, i);
1889 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
1890 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
1891 rdsv->perPatch = 1;
1892 }
1893
1894 if (typeSizeof(dType) == 8)
1895 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
1896 }
1897 break;
1898 }
1899 // constants
1900 case nir_intrinsic_load_subgroup_size: {
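// warps are 32 invocations wide on all hardware supported here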
1901 LValues &newDefs = convert(&insn->dest);
1902 loadImm(newDefs[0], 32u);
1903 break;
1904 }
1905 case nir_intrinsic_vote_all:
1906 case nir_intrinsic_vote_any:
1907 case nir_intrinsic_vote_ieq: {
1908 LValues &newDefs = convert(&insn->dest);
1909 Value *pred = getScratch(1, FILE_PREDICATE);
1910 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1911 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1912 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1913 break;
1914 }
1915 case nir_intrinsic_ballot: {
1916 LValues &newDefs = convert(&insn->dest);
1917 Value *pred = getSSA(1, FILE_PREDICATE);
1918 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1919 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
1920 break;
1921 }
1922 case nir_intrinsic_read_first_invocation:
1923 case nir_intrinsic_read_invocation: {
1924 LValues &newDefs = convert(&insn->dest);
1925 const DataType dType = getDType(insn);
1926 Value *tmp = getScratch();
1927
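// For read_first_invocation, VOTE.ANY of an immediate 1 yields the mask of
// active lanes; reversing it and using BFIND in shift-amount mode (the same
// trick as find_lsb below) gives the index of the lowest active lane, which
// then feeds SHFL.IDX.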
1928 if (op == nir_intrinsic_read_first_invocation) {
1929 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
1930 mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
1931 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
1932 } else
1933 tmp = getSrc(&insn->src[1], 0);
1934
1935 for (uint8_t i = 0; i < dest_components; ++i) {
1936 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
1937 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
1938 }
1939 break;
1940 }
1941 case nir_intrinsic_load_per_vertex_input: {
1942 const DataType dType = getDType(insn);
1943 LValues &newDefs = convert(&insn->dest);
1944 Value *indirectVertex;
1945 Value *indirectOffset;
1946 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1947 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1948
1949 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1950 mkImm(baseVertex), indirectVertex);
1951 for (uint8_t i = 0u; i < dest_components; ++i) {
1952 uint32_t address = getSlotAddress(insn, idx, i);
1953 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
1954 indirectOffset, vtxBase, info_out->in[idx].patch);
1955 }
1956 break;
1957 }
1958 case nir_intrinsic_load_per_vertex_output: {
1959 const DataType dType = getDType(insn);
1960 LValues &newDefs = convert(&insn->dest);
1961 Value *indirectVertex;
1962 Value *indirectOffset;
1963 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1964 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1965 Value *vtxBase = NULL;
1966
1967 if (indirectVertex)
1968 vtxBase = indirectVertex;
1969 else
1970 vtxBase = loadImm(NULL, baseVertex);
1971
1972 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
1973
1974 for (uint8_t i = 0u; i < dest_components; ++i) {
1975 uint32_t address = getSlotAddress(insn, idx, i);
1976 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
1977 indirectOffset, vtxBase, info_out->in[idx].patch);
1978 }
1979 break;
1980 }
1981 case nir_intrinsic_emit_vertex: {
1982 if (info_out->io.genUserClip > 0)
1983 handleUserClipPlanes();
1984 uint32_t idx = nir_intrinsic_stream_id(insn);
1985 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1986 break;
1987 }
1988 case nir_intrinsic_end_primitive: {
1989 uint32_t idx = nir_intrinsic_stream_id(insn);
1990 if (idx)
1991 break;
1992 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1993 break;
1994 }
1995 case nir_intrinsic_load_ubo: {
1996 const DataType dType = getDType(insn);
1997 LValues &newDefs = convert(&insn->dest);
1998 Value *indirectIndex;
1999 Value *indirectOffset;
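// UBO indices are shifted up by one: hw constant buffer slot 0 is reserved
// for the default uniform block, so NIR UBO n is accessed as c[n + 1][].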
2000 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
2001 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2002
2003 for (uint8_t i = 0u; i < dest_components; ++i) {
2004 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
2005 indirectOffset, indirectIndex);
2006 }
2007 break;
2008 }
2009 case nir_intrinsic_get_ssbo_size: {
2010 LValues &newDefs = convert(&insn->dest);
2011 const DataType dType = getDType(insn);
2012 Value *indirectBuffer;
2013 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2014
2015 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2016 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2017 break;
2018 }
2019 case nir_intrinsic_store_ssbo: {
2020 DataType sType = getSType(insn->src[0], false, false);
2021 Value *indirectBuffer;
2022 Value *indirectOffset;
2023 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2024 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2025
2026 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2027 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2028 continue;
2029 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2030 offset + i * typeSizeof(sType));
2031 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2032 ->setIndirect(0, 1, indirectBuffer);
2033 }
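// globalAccess is a bitmask: 0x1 records reads of globally visible memory,
// 0x2 records writes.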
2034 info_out->io.globalAccess |= 0x2;
2035 break;
2036 }
2037 case nir_intrinsic_load_ssbo: {
2038 const DataType dType = getDType(insn);
2039 LValues &newDefs = convert(&insn->dest);
2040 Value *indirectBuffer;
2041 Value *indirectOffset;
2042 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2043 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2044
2045 for (uint8_t i = 0u; i < dest_components; ++i)
2046 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2047 indirectOffset, indirectBuffer);
2048
2049 info_out->io.globalAccess |= 0x1;
2050 break;
2051 }
2052 case nir_intrinsic_shared_atomic_add:
2053 case nir_intrinsic_shared_atomic_fadd:
2054 case nir_intrinsic_shared_atomic_and:
2055 case nir_intrinsic_shared_atomic_comp_swap:
2056 case nir_intrinsic_shared_atomic_exchange:
2057 case nir_intrinsic_shared_atomic_or:
2058 case nir_intrinsic_shared_atomic_imax:
2059 case nir_intrinsic_shared_atomic_imin:
2060 case nir_intrinsic_shared_atomic_umax:
2061 case nir_intrinsic_shared_atomic_umin:
2062 case nir_intrinsic_shared_atomic_xor: {
2063 const DataType dType = getDType(insn);
2064 LValues &newDefs = convert(&insn->dest);
2065 Value *indirectOffset;
2066 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2067 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2068 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2069 if (op == nir_intrinsic_shared_atomic_comp_swap)
2070 atom->setSrc(2, getSrc(&insn->src[2], 0));
2071 atom->setIndirect(0, 0, indirectOffset);
2072 atom->subOp = getSubOp(op);
2073 break;
2074 }
2075 case nir_intrinsic_ssbo_atomic_add:
2076 case nir_intrinsic_ssbo_atomic_fadd:
2077 case nir_intrinsic_ssbo_atomic_and:
2078 case nir_intrinsic_ssbo_atomic_comp_swap:
2079 case nir_intrinsic_ssbo_atomic_exchange:
2080 case nir_intrinsic_ssbo_atomic_or:
2081 case nir_intrinsic_ssbo_atomic_imax:
2082 case nir_intrinsic_ssbo_atomic_imin:
2083 case nir_intrinsic_ssbo_atomic_umax:
2084 case nir_intrinsic_ssbo_atomic_umin:
2085 case nir_intrinsic_ssbo_atomic_xor: {
2086 const DataType dType = getDType(insn);
2087 LValues &newDefs = convert(&insn->dest);
2088 Value *indirectBuffer;
2089 Value *indirectOffset;
2090 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2091 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2092
2093 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2094 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2095 getSrc(&insn->src[2], 0));
2096 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2097 atom->setSrc(2, getSrc(&insn->src[3], 0));
2098 atom->setIndirect(0, 0, indirectOffset);
2099 atom->setIndirect(0, 1, indirectBuffer);
2100 atom->subOp = getSubOp(op);
2101
2102 info_out->io.globalAccess |= 0x2;
2103 break;
2104 }
2105 case nir_intrinsic_global_atomic_add:
2106 case nir_intrinsic_global_atomic_fadd:
2107 case nir_intrinsic_global_atomic_and:
2108 case nir_intrinsic_global_atomic_comp_swap:
2109 case nir_intrinsic_global_atomic_exchange:
2110 case nir_intrinsic_global_atomic_or:
2111 case nir_intrinsic_global_atomic_imax:
2112 case nir_intrinsic_global_atomic_imin:
2113 case nir_intrinsic_global_atomic_umax:
2114 case nir_intrinsic_global_atomic_umin:
2115 case nir_intrinsic_global_atomic_xor: {
2116 const DataType dType = getDType(insn);
2117 LValues &newDefs = convert(&insn->dest);
2118 Value *address;
2119 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2120
2121 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2122 Instruction *atom =
2123 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2124 if (op == nir_intrinsic_global_atomic_comp_swap)
2125 atom->setSrc(2, getSrc(&insn->src[2], 0));
2126 atom->setIndirect(0, 0, address);
2127 atom->subOp = getSubOp(op);
2128
2129 info_out->io.globalAccess |= 0x2;
2130 break;
2131 }
2132 case nir_intrinsic_bindless_image_atomic_add:
2133 case nir_intrinsic_bindless_image_atomic_fadd:
2134 case nir_intrinsic_bindless_image_atomic_and:
2135 case nir_intrinsic_bindless_image_atomic_comp_swap:
2136 case nir_intrinsic_bindless_image_atomic_exchange:
2137 case nir_intrinsic_bindless_image_atomic_imax:
2138 case nir_intrinsic_bindless_image_atomic_umax:
2139 case nir_intrinsic_bindless_image_atomic_imin:
2140 case nir_intrinsic_bindless_image_atomic_umin:
2141 case nir_intrinsic_bindless_image_atomic_or:
2142 case nir_intrinsic_bindless_image_atomic_xor:
2143 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2144 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2145 case nir_intrinsic_bindless_image_load:
2146 case nir_intrinsic_bindless_image_samples:
2147 case nir_intrinsic_bindless_image_size:
2148 case nir_intrinsic_bindless_image_store:
2149 case nir_intrinsic_image_atomic_add:
2150 case nir_intrinsic_image_atomic_fadd:
2151 case nir_intrinsic_image_atomic_and:
2152 case nir_intrinsic_image_atomic_comp_swap:
2153 case nir_intrinsic_image_atomic_exchange:
2154 case nir_intrinsic_image_atomic_imax:
2155 case nir_intrinsic_image_atomic_umax:
2156 case nir_intrinsic_image_atomic_imin:
2157 case nir_intrinsic_image_atomic_umin:
2158 case nir_intrinsic_image_atomic_or:
2159 case nir_intrinsic_image_atomic_xor:
2160 case nir_intrinsic_image_atomic_inc_wrap:
2161 case nir_intrinsic_image_atomic_dec_wrap:
2162 case nir_intrinsic_image_load:
2163 case nir_intrinsic_image_samples:
2164 case nir_intrinsic_image_size:
2165 case nir_intrinsic_image_store: {
2166 std::vector<Value*> srcs, defs;
2167 Value *indirect;
2168 DataType ty;
2169
2170 uint32_t mask = 0;
2171 TexInstruction::Target target =
2172 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2173 unsigned int argCount = getNIRArgCount(target);
2174 uint16_t location = 0;
2175
2176 if (opInfo.has_dest) {
2177 LValues &newDefs = convert(&insn->dest);
2178 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2179 defs.push_back(newDefs[i]);
2180 mask |= 1 << i;
2181 }
2182 }
2183
2184 int lod_src = -1;
2185 bool bindless = false;
2186 switch (op) {
2187 case nir_intrinsic_bindless_image_atomic_add:
2188 case nir_intrinsic_bindless_image_atomic_fadd:
2189 case nir_intrinsic_bindless_image_atomic_and:
2190 case nir_intrinsic_bindless_image_atomic_comp_swap:
2191 case nir_intrinsic_bindless_image_atomic_exchange:
2192 case nir_intrinsic_bindless_image_atomic_imax:
2193 case nir_intrinsic_bindless_image_atomic_umax:
2194 case nir_intrinsic_bindless_image_atomic_imin:
2195 case nir_intrinsic_bindless_image_atomic_umin:
2196 case nir_intrinsic_bindless_image_atomic_or:
2197 case nir_intrinsic_bindless_image_atomic_xor:
2198 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2199 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2200 ty = getDType(insn);
2201 bindless = true;
2202 info_out->io.globalAccess |= 0x2;
2203 mask = 0x1;
2204 break;
2205 case nir_intrinsic_image_atomic_add:
2206 case nir_intrinsic_image_atomic_fadd:
2207 case nir_intrinsic_image_atomic_and:
2208 case nir_intrinsic_image_atomic_comp_swap:
2209 case nir_intrinsic_image_atomic_exchange:
2210 case nir_intrinsic_image_atomic_imax:
2211 case nir_intrinsic_image_atomic_umax:
2212 case nir_intrinsic_image_atomic_imin:
2213 case nir_intrinsic_image_atomic_umin:
2214 case nir_intrinsic_image_atomic_or:
2215 case nir_intrinsic_image_atomic_xor:
2216 case nir_intrinsic_image_atomic_inc_wrap:
2217 case nir_intrinsic_image_atomic_dec_wrap:
2218 ty = getDType(insn);
2219 bindless = false;
2220 info_out->io.globalAccess |= 0x2;
2221 mask = 0x1;
2222 break;
2223 case nir_intrinsic_bindless_image_load:
2224 case nir_intrinsic_image_load:
2225 ty = TYPE_U32;
2226 bindless = op == nir_intrinsic_bindless_image_load;
2227 info_out->io.globalAccess |= 0x1;
2228 lod_src = 4;
2229 break;
2230 case nir_intrinsic_bindless_image_store:
2231 case nir_intrinsic_image_store:
2232 ty = TYPE_U32;
2233 bindless = op == nir_intrinsic_bindless_image_store;
2234 info_out->io.globalAccess |= 0x2;
2235 lod_src = 5;
2236 mask = 0xf;
2237 break;
2238 case nir_intrinsic_bindless_image_samples:
2239 // mask is set in the shared case below
2240 FALLTHROUGH;
2241 case nir_intrinsic_image_samples:
2242 ty = TYPE_U32;
2243 bindless = op == nir_intrinsic_bindless_image_samples;
2244 mask = 0x8;
2245 break;
2246 case nir_intrinsic_bindless_image_size:
2247 case nir_intrinsic_image_size:
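// the LOD source must be 0; querying the size of other levels is not
// supported here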
2248 assert(nir_src_as_uint(insn->src[1]) == 0);
2249 ty = TYPE_U32;
2250 bindless = op == nir_intrinsic_bindless_image_size;
2251 break;
2252 default:
2253 unreachable("unhandled image opcode");
2254 break;
2255 }
2256
2257 if (bindless)
2258 indirect = getSrc(&insn->src[0], 0);
2259 else
2260 location = getIndirect(&insn->src[0], 0, indirect);
2261
2262 // coords
2263 if (opInfo.num_srcs >= 2)
2264 for (unsigned int i = 0u; i < argCount; ++i)
2265 srcs.push_back(getSrc(&insn->src[1], i));
2266
2267 // for MS images the sample index is just another src added after the coords
2268 if (opInfo.num_srcs >= 3 && target.isMS())
2269 srcs.push_back(getSrc(&insn->src[2], 0));
2270
2271 if (opInfo.num_srcs >= 4 && lod_src != 4) {
2272 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2273 for (uint8_t i = 0u; i < components; ++i)
2274 srcs.push_back(getSrc(&insn->src[3], i));
2275 }
2276
2277 if (opInfo.num_srcs >= 5 && lod_src != 5)
2278 // 1 extra source for the atomic compare-and-swap value
2279 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2280 srcs.push_back(getSrc(&insn->src[4], i));
2281
2282 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2283 texi->tex.bindless = bindless;
2284 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2285 texi->tex.mask = mask;
2286 texi->cache = convert(nir_intrinsic_access(insn));
2287 texi->setType(ty);
2288 texi->subOp = getSubOp(op);
2289
2290 if (indirect)
2291 texi->setIndirectR(indirect);
2292
2293 break;
2294 }
2295 case nir_intrinsic_store_scratch:
2296 case nir_intrinsic_store_shared: {
2297 DataType sType = getSType(insn->src[0], false, false);
2298 Value *indirectOffset;
2299 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2300
2301 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2302 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2303 continue;
2304 Symbol *sym = mkSymbol(getFile(op), 0, sType, offset + i * typeSizeof(sType));
2305 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2306 }
2307 break;
2308 }
2309 case nir_intrinsic_load_kernel_input:
2310 case nir_intrinsic_load_scratch:
2311 case nir_intrinsic_load_shared: {
2312 const DataType dType = getDType(insn);
2313 LValues &newDefs = convert(&insn->dest);
2314 Value *indirectOffset;
2315 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2316
2317 for (uint8_t i = 0u; i < dest_components; ++i)
2318 loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);
2319
2320 break;
2321 }
2322 case nir_intrinsic_control_barrier: {
2323 // TODO: add flag to shader_info
2324 info_out->numBarriers = 1;
2325 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2326 bar->fixed = 1;
2327 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2328 break;
2329 }
2330 case nir_intrinsic_group_memory_barrier:
2331 case nir_intrinsic_memory_barrier:
2332 case nir_intrinsic_memory_barrier_buffer:
2333 case nir_intrinsic_memory_barrier_image:
2334 case nir_intrinsic_memory_barrier_shared: {
2335 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2336 bar->fixed = 1;
2337 bar->subOp = getSubOp(op);
2338 break;
2339 }
2340 case nir_intrinsic_memory_barrier_tcs_patch:
2341 break;
2342 case nir_intrinsic_shader_clock: {
2343 const DataType dType = getDType(insn);
2344 LValues &newDefs = convert(&insn->dest);
2345
2346 loadImm(newDefs[0], 0u);
2347 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2348 break;
2349 }
2350 case nir_intrinsic_load_global:
2351 case nir_intrinsic_load_global_constant: {
2352 const DataType dType = getDType(insn);
2353 LValues &newDefs = convert(&insn->dest);
2354 Value *indirectOffset;
2355 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2356
2357 for (auto i = 0u; i < dest_components; ++i)
2358 loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
2359
2360 info_out->io.globalAccess |= 0x1;
2361 break;
2362 }
2363 case nir_intrinsic_store_global: {
2364 DataType sType = getSType(insn->src[0], false, false);
2365
2366 for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2367 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2368 continue;
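// split 64-bit stores into two 32-bit stores of the low and high words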
2369 if (typeSizeof(sType) == 8) {
2370 Value *split[2];
2371 mkSplit(split, 4, getSrc(&insn->src[0], i));
2372
2373 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
2374 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
2375
2376 sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
2377 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
2378 } else {
2379 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
2380 mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
2381 }
2382 }
2383
2384 info_out->io.globalAccess |= 0x2;
2385 break;
2386 }
2387 default:
2388 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2389 return false;
2390 }
2391
2392 return true;
2393 }
2394
2395 bool
2396 Converter::visit(nir_jump_instr *insn)
2397 {
2398 switch (insn->type) {
2399 case nir_jump_return:
2400 // TODO: this only works in the main function
2401 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2402 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2403 break;
2404 case nir_jump_break:
2405 case nir_jump_continue: {
2406 bool isBreak = insn->type == nir_jump_break;
2407 nir_block *block = insn->instr.block;
2408 BasicBlock *target = convert(block->successors[0]);
2409 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2410 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2411 break;
2412 }
2413 default:
2414 ERROR("unknown nir_jump_type %u\n", insn->type);
2415 return false;
2416 }
2417
2418 return true;
2419 }
2420
2421 Value*
2422 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2423 {
2424 Value *val;
2425
2426 if (immInsertPos)
2427 setPosition(immInsertPos, true);
2428 else
2429 setPosition(bb, false);
2430
2431 switch (insn->def.bit_size) {
2432 case 64:
2433 val = loadImm(getSSA(8), insn->value[idx].u64);
2434 break;
2435 case 32:
2436 val = loadImm(getSSA(4), insn->value[idx].u32);
2437 break;
2438 case 16:
2439 val = loadImm(getSSA(2), insn->value[idx].u16);
2440 break;
2441 case 8:
2442 val = loadImm(getSSA(1), insn->value[idx].u8);
2443 break;
2444 default:
2445 unreachable("unhandled bit size!\n");
2446 }
2447 setPosition(bb, true);
2448 return val;
2449 }
2450
2451 bool
2452 Converter::visit(nir_load_const_instr *insn)
2453 {
2454 assert(insn->def.bit_size <= 64);
2455 immediates[insn->def.index] = insn;
2456 return true;
2457 }
2458
2459 #define DEFAULT_CHECKS \
2460 if (insn->dest.dest.ssa.num_components > 1) { \
2461 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2462 return false; \
2463 } \
2464 if (insn->dest.write_mask != 1) { \
2465 ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
2466 return false; \
2467 }
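// run() scalarizes ALU ops and load_consts before visiting, so these checks
// only guard against unexpected vector destinations or write masks.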
2468 bool
2469 Converter::visit(nir_alu_instr *insn)
2470 {
2471 const nir_op op = insn->op;
2472 const nir_op_info &info = nir_op_infos[op];
2473 DataType dType = getDType(insn);
2474 const std::vector<DataType> sTypes = getSTypes(insn);
2475
2476 Instruction *oldPos = this->bb->getExit();
2477
2478 switch (op) {
2479 case nir_op_fabs:
2480 case nir_op_iabs:
2481 case nir_op_fadd:
2482 case nir_op_iadd:
2483 case nir_op_iand:
2484 case nir_op_fceil:
2485 case nir_op_fcos:
2486 case nir_op_fddx:
2487 case nir_op_fddx_coarse:
2488 case nir_op_fddx_fine:
2489 case nir_op_fddy:
2490 case nir_op_fddy_coarse:
2491 case nir_op_fddy_fine:
2492 case nir_op_fdiv:
2493 case nir_op_idiv:
2494 case nir_op_udiv:
2495 case nir_op_fexp2:
2496 case nir_op_ffloor:
2497 case nir_op_ffma:
2498 case nir_op_flog2:
2499 case nir_op_fmax:
2500 case nir_op_imax:
2501 case nir_op_umax:
2502 case nir_op_fmin:
2503 case nir_op_imin:
2504 case nir_op_umin:
2505 case nir_op_fmod:
2506 case nir_op_imod:
2507 case nir_op_umod:
2508 case nir_op_fmul:
2509 case nir_op_imul:
2510 case nir_op_imul_high:
2511 case nir_op_umul_high:
2512 case nir_op_fneg:
2513 case nir_op_ineg:
2514 case nir_op_inot:
2515 case nir_op_ior:
2516 case nir_op_pack_64_2x32_split:
2517 case nir_op_fpow:
2518 case nir_op_frcp:
2519 case nir_op_frem:
2520 case nir_op_irem:
2521 case nir_op_frsq:
2522 case nir_op_fsat:
2523 case nir_op_ishr:
2524 case nir_op_ushr:
2525 case nir_op_fsin:
2526 case nir_op_fsqrt:
2527 case nir_op_ftrunc:
2528 case nir_op_ishl:
2529 case nir_op_ixor: {
2530 DEFAULT_CHECKS;
2531 LValues &newDefs = convert(&insn->dest);
2532 operation preOp = preOperationNeeded(op);
2533 if (preOp != OP_NOP) {
2534 assert(info.num_inputs < 2);
2535 Value *tmp = getSSA(typeSizeof(dType));
2536 Instruction *i0 = mkOp(preOp, dType, tmp);
2537 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2538 if (info.num_inputs) {
2539 i0->setSrc(0, getSrc(&insn->src[0]));
2540 i1->setSrc(0, tmp);
2541 }
2542 i1->subOp = getSubOp(op);
2543 } else {
2544 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2545 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2546 i->setSrc(s, getSrc(&insn->src[s]));
2547 }
2548 i->subOp = getSubOp(op);
2549 }
2550 break;
2551 }
2552 case nir_op_ifind_msb:
2553 case nir_op_ufind_msb: {
2554 DEFAULT_CHECKS;
2555 LValues &newDefs = convert(&insn->dest);
2556 dType = sTypes[0];
2557 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2558 break;
2559 }
2560 case nir_op_fround_even: {
2561 DEFAULT_CHECKS;
2562 LValues &newDefs = convert(&insn->dest);
2563 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2564 break;
2565 }
2566 // convert instructions
2567 case nir_op_f2f32:
2568 case nir_op_f2i32:
2569 case nir_op_f2u32:
2570 case nir_op_i2f32:
2571 case nir_op_i2i32:
2572 case nir_op_u2f32:
2573 case nir_op_u2u32:
2574 case nir_op_f2f64:
2575 case nir_op_f2i64:
2576 case nir_op_f2u64:
2577 case nir_op_i2f64:
2578 case nir_op_i2i64:
2579 case nir_op_u2f64:
2580 case nir_op_u2u64: {
2581 DEFAULT_CHECKS;
2582 LValues &newDefs = convert(&insn->dest);
2583 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2584 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2585 i->rnd = ROUND_Z;
2586 i->sType = sTypes[0];
2587 break;
2588 }
2589 // compare instructions
2590 case nir_op_feq32:
2591 case nir_op_ieq32:
2592 case nir_op_fge32:
2593 case nir_op_ige32:
2594 case nir_op_uge32:
2595 case nir_op_flt32:
2596 case nir_op_ilt32:
2597 case nir_op_ult32:
2598 case nir_op_fneu32:
2599 case nir_op_ine32: {
2600 DEFAULT_CHECKS;
2601 LValues &newDefs = convert(&insn->dest);
2602 Instruction *i = mkCmp(getOperation(op),
2603 getCondCode(op),
2604 dType,
2605 newDefs[0],
2606 dType,
2607 getSrc(&insn->src[0]),
2608 getSrc(&insn->src[1]));
2609 if (info.num_inputs == 3)
2610 i->setSrc(2, getSrc(&insn->src[2]));
2611 i->sType = sTypes[0];
2612 break;
2613 }
2614 case nir_op_mov:
2615 case nir_op_vec2:
2616 case nir_op_vec3:
2617 case nir_op_vec4:
2618 case nir_op_vec8:
2619 case nir_op_vec16: {
2620 LValues &newDefs = convert(&insn->dest);
2621 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2622 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2623 }
2624 break;
2625 }
2626 // (un)pack
2627 case nir_op_pack_64_2x32: {
2628 LValues &newDefs = convert(&insn->dest);
2629 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2630 merge->setSrc(0, getSrc(&insn->src[0], 0));
2631 merge->setSrc(1, getSrc(&insn->src[0], 1));
2632 break;
2633 }
2634 case nir_op_pack_half_2x16_split: {
2635 LValues &newDefs = convert(&insn->dest);
2636 Value *tmpH = getSSA();
2637 Value *tmpL = getSSA();
2638
2639 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2640 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2641 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2642 break;
2643 }
2644 case nir_op_unpack_half_2x16_split_x:
2645 case nir_op_unpack_half_2x16_split_y: {
2646 LValues &newDefs = convert(&insn->dest);
2647 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2648 if (op == nir_op_unpack_half_2x16_split_y)
2649 cvt->subOp = 1;
2650 break;
2651 }
2652 case nir_op_unpack_64_2x32: {
2653 LValues &newDefs = convert(&insn->dest);
2654 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2655 break;
2656 }
2657 case nir_op_unpack_64_2x32_split_x: {
2658 LValues &newDefs = convert(&insn->dest);
2659 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2660 break;
2661 }
2662 case nir_op_unpack_64_2x32_split_y: {
2663 LValues &newDefs = convert(&insn->dest);
2664 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2665 break;
2666 }
2667 // special instructions
2668 case nir_op_fsign:
2669 case nir_op_isign: {
2670 DEFAULT_CHECKS;
2671 DataType iType;
2672 if (::isFloatType(dType))
2673 iType = TYPE_F32;
2674 else
2675 iType = TYPE_S32;
2676
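// sign(x) is built as (x > 0) - (x < 0): SET yields 1.0f for float and ~0
// (-1) for integer comparisons, which is why the integer paths subtract in
// the opposite order.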
2677 LValues &newDefs = convert(&insn->dest);
2678 LValue *val0 = getScratch();
2679 LValue *val1 = getScratch();
2680 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2681 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2682
2683 if (dType == TYPE_F64) {
2684 mkOp2(OP_SUB, iType, val0, val0, val1);
2685 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2686 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2687 mkOp2(OP_SUB, iType, val0, val1, val0);
2688 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2689 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2690 } else if (::isFloatType(dType))
2691 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2692 else
2693 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2694 break;
2695 }
2696 case nir_op_fcsel:
2697 case nir_op_b32csel: {
2698 DEFAULT_CHECKS;
2699 LValues &newDefs = convert(&insn->dest);
2700 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2701 break;
2702 }
2703 case nir_op_ibitfield_extract:
2704 case nir_op_ubitfield_extract: {
2705 DEFAULT_CHECKS;
2706 Value *tmp = getSSA();
2707 LValues &newDefs = convert(&insn->dest);
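// EXTBF takes its bit range as (width << 8) | offset, so INSBF first packs
// the width byte above the offset byte (0x808 = insert 8 bits at bit 8).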
2708 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2709 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2710 break;
2711 }
2712 case nir_op_bfm: {
2713 DEFAULT_CHECKS;
2714 LValues &newDefs = convert(&insn->dest);
2715 mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
2716 break;
2717 }
2718 case nir_op_bitfield_insert: {
2719 DEFAULT_CHECKS;
2720 LValues &newDefs = convert(&insn->dest);
2721 LValue *temp = getSSA();
2722 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2723 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2724 break;
2725 }
2726 case nir_op_bit_count: {
2727 DEFAULT_CHECKS;
2728 LValues &newDefs = convert(&insn->dest);
2729 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2730 break;
2731 }
2732 case nir_op_bitfield_reverse: {
2733 DEFAULT_CHECKS;
2734 LValues &newDefs = convert(&insn->dest);
2735 mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
2736 break;
2737 }
2738 case nir_op_find_lsb: {
2739 DEFAULT_CHECKS;
2740 LValues &newDefs = convert(&insn->dest);
2741 Value *tmp = getSSA();
2742 mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
2743 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2744 break;
2745 }
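// The extract_* cases below build a PRMT (byte permute) selector from the
// byte/word index: each selector nibble picks a source byte, nibble value 4
// reads byte 0 of the second source (an immediate 0) for zero extension,
// and a nibble with its high bit set replicates that byte's sign bit,
// giving the sign-extending variants (e.g. extract_u8 of byte 2 uses 0x4442).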
2746 case nir_op_extract_u8: {
2747 DEFAULT_CHECKS;
2748 LValues &newDefs = convert(&insn->dest);
2749 Value *prmt = getSSA();
2750 mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
2751 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2752 break;
2753 }
2754 case nir_op_extract_i8: {
2755 DEFAULT_CHECKS;
2756 LValues &newDefs = convert(&insn->dest);
2757 Value *prmt = getSSA();
2758 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
2759 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2760 break;
2761 }
2762 case nir_op_extract_u16: {
2763 DEFAULT_CHECKS;
2764 LValues &newDefs = convert(&insn->dest);
2765 Value *prmt = getSSA();
2766 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
2767 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2768 break;
2769 }
2770 case nir_op_extract_i16: {
2771 DEFAULT_CHECKS;
2772 LValues &newDefs = convert(&insn->dest);
2773 Value *prmt = getSSA();
2774 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
2775 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2776 break;
2777 }
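// Rotates use the funnel shifter (SHF) with the same value on both inputs,
// so bits shifted out of one end re-enter at the other.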
2778 case nir_op_urol: {
2779 DEFAULT_CHECKS;
2780 LValues &newDefs = convert(&insn->dest);
2781 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2782 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2783 ->subOp = NV50_IR_SUBOP_SHF_L |
2784 NV50_IR_SUBOP_SHF_W |
2785 NV50_IR_SUBOP_SHF_HI;
2786 break;
2787 }
2788 case nir_op_uror: {
2789 DEFAULT_CHECKS;
2790 LValues &newDefs = convert(&insn->dest);
2791 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2792 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2793 ->subOp = NV50_IR_SUBOP_SHF_R |
2794 NV50_IR_SUBOP_SHF_W |
2795 NV50_IR_SUBOP_SHF_LO;
2796 break;
2797 }
2798 // boolean conversions
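// After nir_lower_bool_to_int32 (see run() below) booleans are 0 or ~0, so
// ANDing with the bit pattern of 1.0 (0x3f800000 for floats, 0x3ff00000 for
// the high word of a double) yields exactly 1.0 or 0.0.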
2799 case nir_op_b2f32: {
2800 DEFAULT_CHECKS;
2801 LValues &newDefs = convert(&insn->dest);
2802 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2803 break;
2804 }
2805 case nir_op_b2f64: {
2806 DEFAULT_CHECKS;
2807 LValues &newDefs = convert(&insn->dest);
2808 Value *tmp = getSSA(4);
2809 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2810 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2811 break;
2812 }
2813 case nir_op_f2b32:
2814 case nir_op_i2b32: {
2815 DEFAULT_CHECKS;
2816 LValues &newDefs = convert(&insn->dest);
2817 Value *src1;
2818 if (typeSizeof(sTypes[0]) == 8) {
2819 src1 = loadImm(getSSA(8), 0.0);
2820 } else {
2821 src1 = zero;
2822 }
2823 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2824 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2825 break;
2826 }
2827 case nir_op_b2i32: {
2828 DEFAULT_CHECKS;
2829 LValues &newDefs = convert(&insn->dest);
2830 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2831 break;
2832 }
2833 case nir_op_b2i64: {
2834 DEFAULT_CHECKS;
2835 LValues &newDefs = convert(&insn->dest);
2836 LValue *def = getScratch();
2837 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2838 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2839 break;
2840 }
2841 default:
2842 ERROR("unknown nir_op %s\n", info.name);
2843 assert(false);
2844 return false;
2845 }
2846
2847 if (!oldPos) {
2848 oldPos = this->bb->getEntry();
2849 // bail out before dereferencing if the block is still empty
2850 if (unlikely(!oldPos))
2851 return true;
2852 oldPos->precise = insn->exact;
2853 }
2854
2855 while (oldPos->next) {
2856 oldPos = oldPos->next;
2857 oldPos->precise = insn->exact;
2858 }
2859 oldPos->saturate = insn->dest.saturate;
2860
2861 return true;
2862 }
2863 #undef DEFAULT_CHECKS
2864
2865 bool
2866 Converter::visit(nir_ssa_undef_instr *insn)
2867 {
2868 LValues &newDefs = convert(&insn->def);
2869 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2870 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2871 }
2872 return true;
2873 }
2874
2875 #define CASE_SAMPLER(ty) \
2876 case GLSL_SAMPLER_DIM_ ## ty : \
2877 if (isArray && !isShadow) \
2878 return TEX_TARGET_ ## ty ## _ARRAY; \
2879 else if (!isArray && isShadow) \
2880 return TEX_TARGET_## ty ## _SHADOW; \
2881 else if (isArray && isShadow) \
2882 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2883 else \
2884 return TEX_TARGET_ ## ty
2885
2886 TexTarget
2887 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2888 {
2889 switch (dim) {
2890 CASE_SAMPLER(1D);
2891 CASE_SAMPLER(2D);
2892 CASE_SAMPLER(CUBE);
2893 case GLSL_SAMPLER_DIM_3D:
2894 return TEX_TARGET_3D;
2895 case GLSL_SAMPLER_DIM_MS:
2896 if (isArray)
2897 return TEX_TARGET_2D_MS_ARRAY;
2898 return TEX_TARGET_2D_MS;
2899 case GLSL_SAMPLER_DIM_RECT:
2900 if (isShadow)
2901 return TEX_TARGET_RECT_SHADOW;
2902 return TEX_TARGET_RECT;
2903 case GLSL_SAMPLER_DIM_BUF:
2904 return TEX_TARGET_BUFFER;
2905 case GLSL_SAMPLER_DIM_EXTERNAL:
2906 return TEX_TARGET_2D;
2907 default:
2908 ERROR("unknown glsl_sampler_dim %u\n", dim);
2909 assert(false);
2910 return TEX_TARGET_COUNT;
2911 }
2912 }
2913 #undef CASE_SAMPLER
2914
2915 unsigned int
2916 Converter::getNIRArgCount(TexInstruction::Target& target)
2917 {
2918 unsigned int result = target.getArgCount();
2919 if (target.isCube() && target.isArray())
2920 result--;
2921 if (target.isMS())
2922 result--;
2923 return result;
2924 }
2925
2926 CacheMode
2927 Converter::convert(enum gl_access_qualifier access)
2928 {
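// volatile accesses must bypass the caches (CV), coherent ones may only be
// cached globally (CG), everything else uses the default cache-all mode (CA)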
2929 if (access & ACCESS_VOLATILE)
2930 return CACHE_CV;
2931 if (access & ACCESS_COHERENT)
2932 return CACHE_CG;
2933 return CACHE_CA;
2934 }
2935
2936 bool
2937 Converter::visit(nir_tex_instr *insn)
2938 {
2939 switch (insn->op) {
2940 case nir_texop_lod:
2941 case nir_texop_query_levels:
2942 case nir_texop_tex:
2943 case nir_texop_texture_samples:
2944 case nir_texop_tg4:
2945 case nir_texop_txb:
2946 case nir_texop_txd:
2947 case nir_texop_txf:
2948 case nir_texop_txf_ms:
2949 case nir_texop_txl:
2950 case nir_texop_txs: {
2951 LValues &newDefs = convert(&insn->dest);
2952 std::vector<Value*> srcs;
2953 std::vector<Value*> defs;
2954 std::vector<nir_src*> offsets;
2955 uint8_t mask = 0;
2956 bool lz = false;
2957 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
2958 operation op = getOperation(insn->op);
2959
2960 int r, s;
2961 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
2962 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
2963 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
2964 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
2965 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
2966 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
2967 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
2968 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
2969 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
2970 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
2971 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
2972 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
2973
2974 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
2975 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
2976
2977 srcs.resize(insn->coord_components);
2978 for (uint8_t i = 0u; i < insn->coord_components; ++i)
2979 srcs[i] = getSrc(&insn->src[coordsIdx].src, i);
2980
2981 // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
2982 if (insn->coord_components) {
2983 uint32_t argCount = target.getArgCount();
2984
2985 if (target.isMS())
2986 argCount -= 1;
2987
2988 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
2989 srcs.push_back(getSSA());
2990 }
2991
2992 if (insn->op == nir_texop_texture_samples)
2993 srcs.push_back(zero);
2994 else if (!insn->num_srcs)
2995 srcs.push_back(loadImm(NULL, 0));
2996 if (biasIdx != -1)
2997 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
2998 if (lodIdx != -1)
2999 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
3000 else if (op == OP_TXF)
3001 lz = true;
3002 if (msIdx != -1)
3003 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
3004 if (offsetIdx != -1)
3005 offsets.push_back(&insn->src[offsetIdx].src);
3006 if (compIdx != -1)
3007 srcs.push_back(getSrc(&insn->src[compIdx].src, 0));
3008 if (texOffIdx != -1) {
3009 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3010 texOffIdx = srcs.size() - 1;
3011 }
3012 if (sampOffIdx != -1) {
3013 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3014 sampOffIdx = srcs.size() - 1;
3015 }
3016 if (bindless) {
3017 // currently we use only the low 32 bits of the 64-bit bindless handle
3018 Value *split[2];
3019 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3020
3021 mkSplit(split, 4, handle);
3022
3023 srcs.push_back(split[0]);
3024 texOffIdx = srcs.size() - 1;
3025 }
3026
3027 r = bindless ? 0xff : insn->texture_index;
3028 s = bindless ? 0x1f : insn->sampler_index;
3029
3030 defs.resize(newDefs.size());
3031 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3032 defs[d] = newDefs[d];
3033 mask |= 1 << d;
3034 }
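// Implicit derivatives exist only in fragment shaders, and MS textures have
// no mip levels, so force level zero in both cases.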
3035 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3036 lz = true;
3037
3038 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3039 texi->tex.levelZero = lz;
3040 texi->tex.mask = mask;
3041 texi->tex.bindless = bindless;
3042
3043 if (texOffIdx != -1)
3044 texi->tex.rIndirectSrc = texOffIdx;
3045 if (sampOffIdx != -1)
3046 texi->tex.sIndirectSrc = sampOffIdx;
3047
3048 switch (insn->op) {
3049 case nir_texop_tg4:
3050 if (!target.isShadow())
3051 texi->tex.gatherComp = insn->component;
3052 break;
3053 case nir_texop_txs:
3054 texi->tex.query = TXQ_DIMS;
3055 break;
3056 case nir_texop_texture_samples:
3057 texi->tex.mask = 0x4;
3058 texi->tex.query = TXQ_TYPE;
3059 break;
3060 case nir_texop_query_levels:
3061 texi->tex.mask = 0x8;
3062 texi->tex.query = TXQ_DIMS;
3063 break;
3064 default:
3065 break;
3066 }
3067
3068 texi->tex.useOffsets = offsets.size();
3069 if (texi->tex.useOffsets) {
3070 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3071 for (uint32_t c = 0u; c < 3; ++c) {
3072 uint8_t s2 = std::min(c, target.getDim() - 1);
3073 texi->offset[s][c].set(getSrc(offsets[s], s2));
3074 texi->offset[s][c].setInsn(texi);
3075 }
3076 }
3077 }
3078
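// textureGatherOffsets: NIR carries the four per-texel offsets in the
// instruction itself, so materialize them as immediates on the TXG.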
3079 if (op == OP_TXG && offsetIdx == -1) {
3080 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3081 texi->tex.useOffsets = 4;
3082 setPosition(texi, false);
3083 for (uint8_t i = 0; i < 4; ++i) {
3084 for (uint8_t j = 0; j < 2; ++j) {
3085 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3086 texi->offset[i][j].setInsn(texi);
3087 }
3088 }
3089 setPosition(texi, true);
3090 }
3091 }
3092
3093 if (ddxIdx != -1 && ddyIdx != -1) {
3094 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3095 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3096 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3097 }
3098 }
3099
3100 break;
3101 }
3102 default:
3103 ERROR("unknown nir_texop %u\n", insn->op);
3104 return false;
3105 }
3106 return true;
3107 }
3108
3109 bool
3110 Converter::run()
3111 {
3112 bool progress;
3113
3114 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3115 nir_print_shader(nir, stderr);
3116
3117 struct nir_lower_subgroups_options subgroup_options = {};
3118 subgroup_options.subgroup_size = 32;
3119 subgroup_options.ballot_bit_size = 32;
3120 subgroup_options.ballot_components = 1;
3121 subgroup_options.lower_elect = true;
3122
3123 /* prepare for IO lowering */
3124 NIR_PASS_V(nir, nir_opt_deref);
3125 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3126 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3127
3128 /* codegen assumes vec4 alignment for memory */
3129 NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp, function_temp_type_info);
3130 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
3131 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3132
3133 NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
3134 type_size, (nir_lower_io_options)0);
3135
3136 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3137
3138 struct nir_lower_tex_options tex_options = {};
3139 tex_options.lower_txp = ~0;
3140
3141 NIR_PASS_V(nir, nir_lower_tex, &tex_options);
3142
3143 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3144 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3145 NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);
3146
3147 /* TODO: improve this lowering/optimisation loop so that we can use
3148 * nir_opt_idiv_const effectively before this.
3149 */
3150 nir_lower_idiv_options idiv_options = {
3151 .imprecise_32bit_lowering = false,
3152 .allow_fp16 = true,
3153 };
3154 NIR_PASS(progress, nir, nir_lower_idiv, &idiv_options);
3155
3156 do {
3157 progress = false;
3158 NIR_PASS(progress, nir, nir_copy_prop);
3159 NIR_PASS(progress, nir, nir_opt_remove_phis);
3160 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3161 NIR_PASS(progress, nir, nir_opt_cse);
3162 NIR_PASS(progress, nir, nir_opt_algebraic);
3163 NIR_PASS(progress, nir, nir_opt_constant_folding);
3164 NIR_PASS(progress, nir, nir_copy_prop);
3165 NIR_PASS(progress, nir, nir_opt_dce);
3166 NIR_PASS(progress, nir, nir_opt_dead_cf);
3167 } while (progress);
3168
3169 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3170 NIR_PASS_V(nir, nir_convert_from_ssa, true);
3171
3172 // Garbage collect dead instructions
3173 nir_sweep(nir);
3174
3175 if (!parseNIR()) {
3176 ERROR("Couldn't parse NIR!\n");
3177 return false;
3178 }
3179
3180 if (!assignSlots()) {
3181 ERROR("Couldn't assign slots!\n");
3182 return false;
3183 }
3184
3185 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3186 nir_print_shader(nir, stderr);
3187
3188 nir_foreach_function(function, nir) {
3189 if (!visit(function))
3190 return false;
3191 }
3192
3193 return true;
3194 }
3195
3196 } // unnamed namespace
3197
3198 namespace nv50_ir {
3199
3200 bool
3201 Program::makeFromNIR(struct nv50_ir_prog_info *info,
3202 struct nv50_ir_prog_info_out *info_out)
3203 {
3204 nir_shader *nir = (nir_shader*)info->bin.source;
3205 Converter converter(this, nir, info, info_out);
3206 bool result = converter.run();
3207 if (!result)
3208 return result;
3209 LoweringHelper lowering;
3210 lowering.run(this);
3211 tlsSize = info_out->bin.tlsSpace;
3212 return result;
3213 }
3214
3215 } // namespace nv50_ir
3216
3217 static nir_shader_compiler_options
3218 nvir_nir_shader_compiler_options(int chipset)
3219 {
3220 nir_shader_compiler_options op = {};
3221 op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
3222 op.lower_ffma16 = false;
3223 op.lower_ffma32 = false;
3224 op.lower_ffma64 = false;
3225 op.fuse_ffma16 = false; /* nir doesn't track mad vs fma */
3226 op.fuse_ffma32 = false; /* nir doesn't track mad vs fma */
3227 op.fuse_ffma64 = false; /* nir doesn't track mad vs fma */
3228 op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
3229 op.lower_flrp32 = true;
3230 op.lower_flrp64 = true;
3231 op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
3232 op.lower_fsat = false;
3233 op.lower_fsqrt = false; // TODO: only before gm200
3234 op.lower_sincos = false;
3235 op.lower_fmod = true;
3236 op.lower_bitfield_extract = false;
3237 op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3238 op.lower_bitfield_insert = false;
3239 op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3240 op.lower_bitfield_insert_to_bitfield_select = false;
3241 op.lower_bitfield_reverse = false;
3242 op.lower_bit_count = false;
3243 op.lower_ifind_msb = false;
3244 op.lower_find_lsb = false;
3245 op.lower_uadd_carry = true; // TODO
3246 op.lower_usub_borrow = true; // TODO
3247 op.lower_mul_high = false;
3248 op.lower_fneg = false;
3249 op.lower_ineg = false;
3250 op.lower_scmp = true; // TODO: not implemented yet
3251 op.lower_vector_cmp = false;
3252 op.lower_bitops = false;
3253 op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
3254 op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
3255 op.lower_fdph = false;
3256 op.lower_fdot = false;
3257 op.fdot_replicates = false; // TODO
3258 op.lower_ffloor = false; // TODO
3259 op.lower_ffract = true;
3260 op.lower_fceil = false; // TODO
3261 op.lower_ftrunc = false;
3262 op.lower_ldexp = true;
3263 op.lower_pack_half_2x16 = true;
3264 op.lower_pack_unorm_2x16 = true;
3265 op.lower_pack_snorm_2x16 = true;
3266 op.lower_pack_unorm_4x8 = true;
3267 op.lower_pack_snorm_4x8 = true;
3268 op.lower_unpack_half_2x16 = true;
3269 op.lower_unpack_unorm_2x16 = true;
3270 op.lower_unpack_snorm_2x16 = true;
3271 op.lower_unpack_unorm_4x8 = true;
3272 op.lower_unpack_snorm_4x8 = true;
3273 op.lower_pack_split = false;
3274 op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
3275 op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
3276 op.lower_insert_byte = true;
3277 op.lower_insert_word = true;
3278 op.lower_all_io_to_temps = false;
3279 op.lower_all_io_to_elements = false;
3280 op.vertex_id_zero_based = false;
3281 op.lower_base_vertex = false;
3282 op.lower_helper_invocation = false;
3283 op.optimize_sample_mask_in = false;
3284 op.lower_cs_local_index_from_id = true;
3285 op.lower_cs_local_id_from_index = false;
3286 op.lower_device_index_to_zero = false; // TODO
3287 op.lower_wpos_pntc = false; // TODO
3288 op.lower_hadd = true; // TODO
3289 op.lower_uadd_sat = true; // TODO
3290 op.lower_iadd_sat = true; // TODO
3291 op.vectorize_io = false;
3292 op.lower_to_scalar = false;
3293 op.unify_interfaces = false;
3294 op.use_interpolated_input_intrinsics = true;
3295 op.lower_mul_2x32_64 = true; // TODO
3296 op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
3297 op.has_imul24 = false;
3298 op.intel_vec4 = false;
3299 op.max_unroll_iterations = 32;
3300 op.lower_int64_options = (nir_lower_int64_options) (
3301 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
3302 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
3303 nir_lower_divmod64 |
3304 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
3305 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
3306 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
3307 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
3308 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
3309 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
3310 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
3311 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
3312 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
3313 ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
3314 nir_lower_ufind_msb64
3315 );
3316 op.lower_doubles_options = (nir_lower_doubles_options) (
3317 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
3318 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
3319 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
3320 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
3321 nir_lower_dmod |
3322 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
3323 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
3324 );
3325 return op;
3326 }
3327
3328 static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
3329 nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
3330 static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
3331 nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
3332 static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
3333 nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);
3334
3335 const nir_shader_compiler_options *
3336 nv50_ir_nir_shader_compiler_options(int chipset)
3337 {
3338 if (chipset >= NVISA_GV100_CHIPSET)
3339 return &gv100_nir_shader_compiler_options;
3340 if (chipset >= NVISA_GM107_CHIPSET)
3341 return &gm107_nir_shader_compiler_options;
3342 return &gf100_nir_shader_compiler_options;
3343 }
3344