/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Karol Herbst <kherbst@redhat.com>
 */

#include "compiler/nir/nir.h"

#include "util/u_debug.h"

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_from_common.h"
#include "codegen/nv50_ir_lowering_helper.h"
#include "codegen/nv50_ir_util.h"
#include "tgsi/tgsi_from_mesa.h"

#if __cplusplus >= 201103L
#include <unordered_map>
#else
#include <tr1/unordered_map>
#endif
#include <cstring>
#include <list>
#include <vector>

namespace {

#if __cplusplus >= 201103L
using std::hash;
using std::unordered_map;
#else
using std::tr1::hash;
using std::tr1::unordered_map;
#endif

using namespace nv50_ir;

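// Type-size callback for NIR's I/O lowering (assumption: registered with
// nir_lower_io elsewhere in this file): every variable occupies
// glsl_count_attribute_slots() slots; the bindless flag is unused here.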
int
type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

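// Size/alignment callback for function_temp variables: booleans count as
// 32-bit, everything else uses its natural byte size, and every allocation
// is aligned to 16 bytes.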
static void
function_temp_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);

   *size = comp_size * length;
   *align = 0x10;
}

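// Translates a NIR shader into nv50 IR, filling in nv50_ir_prog_info_out
// along the way; one Converter instance handles a single shader.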
class Converter : public ConverterCommon
{
public:
   Converter(Program *, nir_shader *, nv50_ir_prog_info *, nv50_ir_prog_info_out *);

   bool run();
private:
   typedef std::vector<LValue*> LValues;
   typedef unordered_map<unsigned, LValues> NirDefMap;
   typedef unordered_map<unsigned, nir_load_const_instr*> ImmediateMap;
   typedef unordered_map<unsigned, BasicBlock*> NirBlockMap;

   CacheMode convert(enum gl_access_qualifier);
   TexTarget convert(glsl_sampler_dim, bool isArray, bool isShadow);
   LValues& convert(nir_alu_dest *);
   BasicBlock* convert(nir_block *);
   LValues& convert(nir_dest *);
   SVSemantic convert(nir_intrinsic_op);
   Value* convert(nir_load_const_instr*, uint8_t);
   LValues& convert(nir_register *);
   LValues& convert(nir_ssa_def *);

   Value* getSrc(nir_alu_src *, uint8_t component = 0);
   Value* getSrc(nir_register *, uint8_t);
   Value* getSrc(nir_src *, uint8_t, bool indirect = false);
   Value* getSrc(nir_ssa_def *, uint8_t);

   // The returned value is the constant part of the given source (either the
   // nir_src or the selected source component of an intrinsic). Even though
   // this is mostly an optimization to be able to skip indirects in a few
   // cases, sometimes we require immediate values or have to set some fields
   // on instructions (e.g. tex) in order for codegen to consume those.
   // If the found value does not have a constant part, the Value is returned
   // through the Value parameter.
   uint32_t getIndirect(nir_src *, uint8_t, Value *&);
   // isScalar indicates that the addressing is scalar, vec4 addressing is
   // assumed otherwise
   uint32_t getIndirect(nir_intrinsic_instr *, uint8_t s, uint8_t c, Value *&,
                        bool isScalar = false);

   uint32_t getSlotAddress(nir_intrinsic_instr *, uint8_t idx, uint8_t slot);

   void setInterpolate(nv50_ir_varying *,
                       uint8_t,
                       bool centroid,
                       unsigned semantics);

   Instruction *loadFrom(DataFile, uint8_t, DataType, Value *def, uint32_t base,
                         uint8_t c, Value *indirect0 = NULL,
                         Value *indirect1 = NULL, bool patch = false);
   void storeTo(nir_intrinsic_instr *, DataFile, operation, DataType,
                Value *src, uint8_t idx, uint8_t c, Value *indirect0 = NULL,
                Value *indirect1 = NULL);

   bool isFloatType(nir_alu_type);
   bool isSignedType(nir_alu_type);
   bool isResultFloat(nir_op);
   bool isResultSigned(nir_op);

   DataType getDType(nir_alu_instr *);
   DataType getDType(nir_intrinsic_instr *);
   DataType getDType(nir_intrinsic_instr *, bool isSigned);
   DataType getDType(nir_op, uint8_t);

   DataFile getFile(nir_intrinsic_op);

   std::vector<DataType> getSTypes(nir_alu_instr *);
   DataType getSType(nir_src &, bool isFloat, bool isSigned);

   operation getOperation(nir_intrinsic_op);
   operation getOperation(nir_op);
   operation getOperation(nir_texop);
   operation preOperationNeeded(nir_op);

   int getSubOp(nir_intrinsic_op);
   int getSubOp(nir_op);

   CondCode getCondCode(nir_op);

   bool assignSlots();
   bool parseNIR();

   bool visit(nir_alu_instr *);
   bool visit(nir_block *);
   bool visit(nir_cf_node *);
   bool visit(nir_function *);
   bool visit(nir_if *);
   bool visit(nir_instr *);
   bool visit(nir_intrinsic_instr *);
   bool visit(nir_jump_instr *);
   bool visit(nir_load_const_instr*);
   bool visit(nir_loop *);
   bool visit(nir_ssa_undef_instr *);
   bool visit(nir_tex_instr *);

   // tex stuff
   Value* applyProjection(Value *src, Value *proj);
   unsigned int getNIRArgCount(TexInstruction::Target&);

   nir_shader *nir;

   NirDefMap ssaDefs;
   NirDefMap regDefs;
   ImmediateMap immediates;
   NirBlockMap blocks;
   unsigned int curLoopDepth;
   unsigned int curIfDepth;

   BasicBlock *exit;
   Value *zero;
   Instruction *immInsertPos;

   int clipVertexOutput;

   union {
      struct {
         Value *position;
      } fp;
   };
};

Converter::Converter(Program *prog, nir_shader *nir, nv50_ir_prog_info *info,
                     nv50_ir_prog_info_out *info_out)
   : ConverterCommon(prog, info, info_out),
     nir(nir),
     curLoopDepth(0),
     curIfDepth(0),
     exit(NULL),
     immInsertPos(NULL),
     clipVertexOutput(-1)
{
   zero = mkImm((uint32_t)0);
}

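// Returns the BasicBlock for a nir_block, creating and caching it on first
// use so that control-flow edges can refer to blocks not yet visited.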
BasicBlock *
Converter::convert(nir_block *block)
{
   NirBlockMap::iterator it = blocks.find(block->index);
   if (it != blocks.end())
      return it->second;

   BasicBlock *bb = new BasicBlock(func);
   blocks[block->index] = bb;
   return bb;
}

bool
Converter::isFloatType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_float;
}

bool
Converter::isSignedType(nir_alu_type type)
{
   return nir_alu_type_get_base_type(type) == nir_type_int;
}

bool
Converter::isResultFloat(nir_op op)
{
   const nir_op_info &info = nir_op_infos[op];
   if (info.output_type != nir_type_invalid)
      return isFloatType(info.output_type);

   ERROR("isResultFloat not implemented for %s\n", nir_op_infos[op].name);
   assert(false);
   return true;
}

bool
Converter::isResultSigned(nir_op op)
{
   switch (op) {
   // there is no umul and we get wrong results if we treat all muls as signed
   case nir_op_imul:
   case nir_op_inot:
      return false;
   default:
      const nir_op_info &info = nir_op_infos[op];
      if (info.output_type != nir_type_invalid)
         return isSignedType(info.output_type);
      ERROR("isResultSigned not implemented for %s\n", nir_op_infos[op].name);
      assert(false);
      return true;
   }
}

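// The getDType() overloads derive the nv50 IR destination type from the NIR
// destination's bit size together with the op's float/signed classification.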
DataType
Converter::getDType(nir_alu_instr *insn)
{
   if (insn->dest.dest.is_ssa)
      return getDType(insn->op, insn->dest.dest.ssa.bit_size);
   else
      return getDType(insn->op, insn->dest.dest.reg.reg->bit_size);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn)
{
   bool isSigned;
   switch (insn->intrinsic) {
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imin:
      isSigned = true;
      break;
   default:
      isSigned = false;
      break;
   }

   return getDType(insn, isSigned);
}

DataType
Converter::getDType(nir_intrinsic_instr *insn, bool isSigned)
{
   if (insn->dest.is_ssa)
      return typeOfSize(insn->dest.ssa.bit_size / 8, false, isSigned);
   else
      return typeOfSize(insn->dest.reg.reg->bit_size / 8, false, isSigned);
}

DataType
Converter::getDType(nir_op op, uint8_t bitSize)
{
   DataType ty = typeOfSize(bitSize / 8, isResultFloat(op), isResultSigned(op));
   if (ty == TYPE_NONE) {
      ERROR("couldn't get Type for op %s with bitSize %u\n", nir_op_infos[op].name, bitSize);
      assert(false);
   }
   return ty;
}

std::vector<DataType>
Converter::getSTypes(nir_alu_instr *insn)
{
   const nir_op_info &info = nir_op_infos[insn->op];
   std::vector<DataType> res(info.num_inputs);

   for (uint8_t i = 0; i < info.num_inputs; ++i) {
      if (info.input_types[i] != nir_type_invalid) {
         res[i] = getSType(insn->src[i].src, isFloatType(info.input_types[i]), isSignedType(info.input_types[i]));
      } else {
         ERROR("getSType not implemented for %s idx %u\n", info.name, i);
         assert(false);
         res[i] = TYPE_NONE;
         break;
      }
   }

   return res;
}

DataType
Converter::getSType(nir_src &src, bool isFloat, bool isSigned)
{
   uint8_t bitSize;
   if (src.is_ssa)
      bitSize = src.ssa->bit_size;
   else
      bitSize = src.reg.reg->bit_size;

   DataType ty = typeOfSize(bitSize / 8, isFloat, isSigned);
   if (ty == TYPE_NONE) {
      const char *str;
      if (isFloat)
         str = "float";
      else if (isSigned)
         str = "int";
      else
         str = "uint";
      ERROR("couldn't get Type for %s with bitSize %u\n", str, bitSize);
      assert(false);
   }
   return ty;
}

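// Maps memory intrinsics to the nv50 IR data file they operate on (global,
// local/scratch, shared or kernel input space).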
DataFile
Converter::getFile(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_load_global:
   case nir_intrinsic_store_global:
   case nir_intrinsic_load_global_constant:
      return FILE_MEMORY_GLOBAL;
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_store_scratch:
      return FILE_MEMORY_LOCAL;
   case nir_intrinsic_load_shared:
   case nir_intrinsic_store_shared:
      return FILE_MEMORY_SHARED;
   case nir_intrinsic_load_kernel_input:
      return FILE_SHADER_INPUT;
   default:
      ERROR("couldn't get DataFile for op %s\n", nir_intrinsic_infos[op].name);
      assert(false);
   }
   return FILE_NULL;
}

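// Maps NIR ALU ops onto nv50 IR operations; ops that additionally need a
// sub-op or condition code are refined via getSubOp()/getCondCode().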
operation
Converter::getOperation(nir_op op)
{
   switch (op) {
   // basic ops with float and int variants
   case nir_op_fabs:
   case nir_op_iabs:
      return OP_ABS;
   case nir_op_fadd:
   case nir_op_iadd:
      return OP_ADD;
   case nir_op_iand:
      return OP_AND;
   case nir_op_ifind_msb:
   case nir_op_ufind_msb:
      return OP_BFIND;
   case nir_op_fceil:
      return OP_CEIL;
   case nir_op_fcos:
      return OP_COS;
   case nir_op_f2f32:
   case nir_op_f2f64:
   case nir_op_f2i32:
   case nir_op_f2i64:
   case nir_op_f2u32:
   case nir_op_f2u64:
   case nir_op_i2f32:
   case nir_op_i2f64:
   case nir_op_i2i32:
   case nir_op_i2i64:
   case nir_op_u2f32:
   case nir_op_u2f64:
   case nir_op_u2u32:
   case nir_op_u2u64:
      return OP_CVT;
   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
      return OP_DFDX;
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      return OP_DFDY;
   case nir_op_fdiv:
   case nir_op_idiv:
   case nir_op_udiv:
      return OP_DIV;
   case nir_op_fexp2:
      return OP_EX2;
   case nir_op_ffloor:
      return OP_FLOOR;
   case nir_op_ffma:
      return OP_FMA;
   case nir_op_flog2:
      return OP_LG2;
   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      return OP_MAX;
   case nir_op_pack_64_2x32_split:
      return OP_MERGE;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      return OP_MIN;
   case nir_op_fmod:
   case nir_op_imod:
   case nir_op_umod:
   case nir_op_frem:
   case nir_op_irem:
      return OP_MOD;
   case nir_op_fmul:
   case nir_op_imul:
   case nir_op_imul_high:
   case nir_op_umul_high:
      return OP_MUL;
   case nir_op_fneg:
   case nir_op_ineg:
      return OP_NEG;
   case nir_op_inot:
      return OP_NOT;
   case nir_op_ior:
      return OP_OR;
   case nir_op_fpow:
      return OP_POW;
   case nir_op_frcp:
      return OP_RCP;
   case nir_op_frsq:
      return OP_RSQ;
   case nir_op_fsat:
      return OP_SAT;
   case nir_op_feq32:
   case nir_op_ieq32:
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_fneu32:
   case nir_op_ine32:
      return OP_SET;
   case nir_op_ishl:
      return OP_SHL;
   case nir_op_ishr:
   case nir_op_ushr:
      return OP_SHR;
   case nir_op_fsin:
      return OP_SIN;
   case nir_op_fsqrt:
      return OP_SQRT;
   case nir_op_ftrunc:
      return OP_TRUNC;
   case nir_op_ixor:
      return OP_XOR;
   default:
      ERROR("couldn't get operation for op %s\n", nir_op_infos[op].name);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_texop op)
{
   switch (op) {
   case nir_texop_tex:
      return OP_TEX;
   case nir_texop_lod:
      return OP_TXLQ;
   case nir_texop_txb:
      return OP_TXB;
   case nir_texop_txd:
      return OP_TXD;
   case nir_texop_txf:
   case nir_texop_txf_ms:
      return OP_TXF;
   case nir_texop_tg4:
      return OP_TXG;
   case nir_texop_txl:
      return OP_TXL;
   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_txs:
      return OP_TXQ;
   default:
      ERROR("couldn't get operation for nir_texop %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

operation
Converter::getOperation(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_emit_vertex:
      return OP_EMIT;
   case nir_intrinsic_end_primitive:
      return OP_RESTART;
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return OP_SUREDP;
   case nir_intrinsic_bindless_image_load:
   case nir_intrinsic_image_load:
      return OP_SULDP;
   case nir_intrinsic_bindless_image_samples:
   case nir_intrinsic_image_samples:
   case nir_intrinsic_bindless_image_size:
   case nir_intrinsic_image_size:
      return OP_SUQ;
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_image_store:
      return OP_SUSTP;
   default:
      ERROR("couldn't get operation for nir_intrinsic_op %u\n", op);
      assert(false);
      return OP_NOP;
   }
}

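// Some ops need a preparatory instruction first: OP_PRESIN performs the
// range reduction required before OP_SIN/OP_COS on this hardware.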
operation
Converter::preOperationNeeded(nir_op op)
{
   switch (op) {
   case nir_op_fcos:
   case nir_op_fsin:
      return OP_PRESIN;
   default:
      return OP_NOP;
   }
}

int
Converter::getSubOp(nir_op op)
{
   switch (op) {
   case nir_op_imul_high:
   case nir_op_umul_high:
      return NV50_IR_SUBOP_MUL_HIGH;
   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr:
      return NV50_IR_SUBOP_SHIFT_WRAP;
   default:
      return 0;
   }
}

int
Converter::getSubOp(nir_intrinsic_op op)
{
   switch (op) {
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_shared_atomic_add:
   case nir_intrinsic_ssbo_atomic_add:
      return NV50_IR_SUBOP_ATOM_ADD;
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_shared_atomic_and:
   case nir_intrinsic_ssbo_atomic_and:
      return NV50_IR_SUBOP_ATOM_AND;
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_shared_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return NV50_IR_SUBOP_ATOM_CAS;
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_shared_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange:
      return NV50_IR_SUBOP_ATOM_EXCH;
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_shared_atomic_or:
   case nir_intrinsic_ssbo_atomic_or:
      return NV50_IR_SUBOP_ATOM_OR;
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_shared_atomic_imax:
   case nir_intrinsic_shared_atomic_umax:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
      return NV50_IR_SUBOP_ATOM_MAX;
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_shared_atomic_imin:
   case nir_intrinsic_shared_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
      return NV50_IR_SUBOP_ATOM_MIN;
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_shared_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor:
      return NV50_IR_SUBOP_ATOM_XOR;
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_inc_wrap:
      return NV50_IR_SUBOP_ATOM_INC;
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_dec_wrap:
      return NV50_IR_SUBOP_ATOM_DEC;

   case nir_intrinsic_group_memory_barrier:
   case nir_intrinsic_memory_barrier:
   case nir_intrinsic_memory_barrier_buffer:
   case nir_intrinsic_memory_barrier_image:
      return NV50_IR_SUBOP_MEMBAR(M, GL);
   case nir_intrinsic_memory_barrier_shared:
      return NV50_IR_SUBOP_MEMBAR(M, CTA);

   case nir_intrinsic_vote_all:
      return NV50_IR_SUBOP_VOTE_ALL;
   case nir_intrinsic_vote_any:
      return NV50_IR_SUBOP_VOTE_ANY;
   case nir_intrinsic_vote_ieq:
      return NV50_IR_SUBOP_VOTE_UNI;
   default:
      return 0;
   }
}

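// Maps NIR 32-bit comparison ops to nv50 IR condition codes; fneu32 uses the
// unordered CC_NEU so NaN operands compare as not-equal.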
CondCode
Converter::getCondCode(nir_op op)
{
   switch (op) {
   case nir_op_feq32:
   case nir_op_ieq32:
      return CC_EQ;
   case nir_op_fge32:
   case nir_op_ige32:
   case nir_op_uge32:
      return CC_GE;
   case nir_op_flt32:
   case nir_op_ilt32:
   case nir_op_ult32:
      return CC_LT;
   case nir_op_fneu32:
      return CC_NEU;
   case nir_op_ine32:
      return CC_NE;
   default:
      ERROR("couldn't get CondCode for op %s\n", nir_op_infos[op].name);
      assert(false);
      return CC_FL;
   }
}

Converter::LValues&
Converter::convert(nir_alu_dest *dest)
{
   return convert(&dest->dest);
}

Converter::LValues&
Converter::convert(nir_dest *dest)
{
   if (dest->is_ssa)
      return convert(&dest->ssa);
   if (dest->reg.indirect) {
      ERROR("no support for indirects.");
      assert(false);
   }
   return convert(dest->reg.reg);
}

Converter::LValues&
Converter::convert(nir_register *reg)
{
   assert(!reg->num_array_elems);

   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it != regDefs.end())
      return it->second;

   LValues newDef(reg->num_components);
   for (uint8_t i = 0; i < reg->num_components; i++)
      newDef[i] = getScratch(std::max(4, reg->bit_size / 8));
   return regDefs[reg->index] = newDef;
}

Converter::LValues&
Converter::convert(nir_ssa_def *def)
{
   NirDefMap::iterator it = ssaDefs.find(def->index);
   if (it != ssaDefs.end())
      return it->second;

   LValues newDef(def->num_components);
   for (uint8_t i = 0; i < def->num_components; i++)
      newDef[i] = getSSA(std::max(4, def->bit_size / 8));
   return ssaDefs[def->index] = newDef;
}

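// The getSrc() overloads resolve a NIR source to the Value created for it;
// sources backed by a load_const are materialized as immediates on demand.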
Value*
Converter::getSrc(nir_alu_src *src, uint8_t component)
{
   if (src->abs || src->negate) {
      ERROR("modifiers currently not supported on nir_alu_src\n");
      assert(false);
   }
   return getSrc(&src->src, src->swizzle[component]);
}

Value*
Converter::getSrc(nir_register *reg, uint8_t idx)
{
   NirDefMap::iterator it = regDefs.find(reg->index);
   if (it == regDefs.end())
      return convert(reg)[idx];
   return it->second[idx];
}

Value*
Converter::getSrc(nir_src *src, uint8_t idx, bool indirect)
{
   if (src->is_ssa)
      return getSrc(src->ssa, idx);

   if (src->reg.indirect) {
      if (indirect)
         return getSrc(src->reg.indirect, idx);
      ERROR("no support for indirects.");
      assert(false);
      return NULL;
   }

   return getSrc(src->reg.reg, idx);
}

Value*
Converter::getSrc(nir_ssa_def *src, uint8_t idx)
{
   ImmediateMap::iterator iit = immediates.find(src->index);
   if (iit != immediates.end())
      return convert((*iit).second, idx);

   NirDefMap::iterator it = ssaDefs.find(src->index);
   if (it == ssaDefs.end()) {
      ERROR("SSA value %u not found\n", src->index);
      assert(false);
      return NULL;
   }
   return it->second[idx];
}

uint32_t
Converter::getIndirect(nir_src *src, uint8_t idx, Value *&indirect)
{
   nir_const_value *offset = nir_src_as_const_value(*src);

   if (offset) {
      indirect = NULL;
      return offset[0].u32;
   }

   indirect = getSrc(src, idx, true);
   return 0;
}

uint32_t
Converter::getIndirect(nir_intrinsic_instr *insn, uint8_t s, uint8_t c, Value *&indirect, bool isScalar)
{
   int32_t idx = nir_intrinsic_base(insn) + getIndirect(&insn->src[s], c, indirect);
   if (indirect && !isScalar)
      indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));
   return idx;
}

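// Maps a GL vertex attribute slot to a TGSI semantic name/index pair; the
// generic and texcoord ranges are handled before the switch.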
static void
vert_attrib_to_tgsi_semantic(gl_vert_attrib slot, unsigned *name, unsigned *index)
{
   assert(name && index);

   if (slot >= VERT_ATTRIB_MAX) {
      ERROR("invalid varying slot %u\n", slot);
      assert(false);
      return;
   }

   if (slot >= VERT_ATTRIB_GENERIC0 &&
       slot < VERT_ATTRIB_GENERIC0 + VERT_ATTRIB_GENERIC_MAX) {
      *name = TGSI_SEMANTIC_GENERIC;
      *index = slot - VERT_ATTRIB_GENERIC0;
      return;
   }

   if (slot >= VERT_ATTRIB_TEX0 &&
       slot < VERT_ATTRIB_TEX0 + VERT_ATTRIB_TEX_MAX) {
      *name = TGSI_SEMANTIC_TEXCOORD;
      *index = slot - VERT_ATTRIB_TEX0;
      return;
   }

   switch (slot) {
   case VERT_ATTRIB_COLOR0:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 0;
      break;
   case VERT_ATTRIB_COLOR1:
      *name = TGSI_SEMANTIC_COLOR;
      *index = 1;
      break;
   case VERT_ATTRIB_EDGEFLAG:
      *name = TGSI_SEMANTIC_EDGEFLAG;
      *index = 0;
      break;
   case VERT_ATTRIB_FOG:
      *name = TGSI_SEMANTIC_FOG;
      *index = 0;
      break;
   case VERT_ATTRIB_NORMAL:
      *name = TGSI_SEMANTIC_NORMAL;
      *index = 0;
      break;
   case VERT_ATTRIB_POS:
      *name = TGSI_SEMANTIC_POSITION;
      *index = 0;
      break;
   case VERT_ATTRIB_POINT_SIZE:
      *name = TGSI_SEMANTIC_PSIZE;
      *index = 0;
      break;
   default:
      ERROR("unknown vert attrib slot %u\n", slot);
      assert(false);
      break;
   }
}

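// Fills in the interpolation bits of a varying from the NIR interpolation
// mode; INTERP_MODE_NONE defers to API state: colors get the sc bit
// (presumably shade-model controlled interpolation) and position is forced
// linear.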
void
Converter::setInterpolate(nv50_ir_varying *var,
                          uint8_t mode,
                          bool centroid,
                          unsigned semantic)
{
   switch (mode) {
   case INTERP_MODE_FLAT:
      var->flat = 1;
      break;
   case INTERP_MODE_NONE:
      if (semantic == TGSI_SEMANTIC_COLOR)
         var->sc = 1;
      else if (semantic == TGSI_SEMANTIC_POSITION)
         var->linear = 1;
      break;
   case INTERP_MODE_NOPERSPECTIVE:
      var->linear = 1;
      break;
   case INTERP_MODE_SMOOTH:
      break;
   }
   var->centroid = centroid;
}

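// Computes how many I/O slots a variable occupies; for geometry and
// tessellation stages the implicit per-vertex array dimension is stripped.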
static uint16_t
calcSlots(const glsl_type *type, Program::Type stage, const shader_info &info,
          bool input, const nir_variable *var)
{
   if (!type->is_array())
      return type->count_attribute_slots(false);

   uint16_t slots;
   switch (stage) {
   case Program::TYPE_GEOMETRY:
      slots = type->count_attribute_slots(false);
      if (input)
         slots /= info.gs.vertices_in;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      // remove first dimension
      if (var->data.patch || (!input && stage == Program::TYPE_TESSELLATION_EVAL))
         slots = type->count_attribute_slots(false);
      else
         slots = type->fields.array->count_attribute_slots(false);
      break;
   default:
      slots = type->count_attribute_slots(false);
      break;
   }

   return slots;
}

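// Builds the component write mask for one slot of a variable; 64-bit types
// occupy two components each and may spill over into a second slot.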
static uint8_t
getMaskForType(const glsl_type *type, uint8_t slot) {
   uint16_t comp = type->without_array()->components();
   comp = comp ? comp : 4;

   if (glsl_base_type_is_64bit(type->without_array()->base_type)) {
      comp *= 2;
      if (comp > 4) {
         if (slot % 2)
            comp -= 4;
         else
            comp = 4;
      }
   }

   return (1 << comp) - 1;
}

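// Walks system values, inputs and outputs of the shader and fills the
// nv50_ir_prog_info_out tables: semantic names/indices, component masks and
// special slots such as edge flags and generated clip distances.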
bool Converter::assignSlots() {
   unsigned name;
   unsigned index;

   info->io.viewportId = -1;
   info_out->numInputs = 0;
   info_out->numOutputs = 0;
   info_out->numSysVals = 0;

   uint8_t i;
   BITSET_FOREACH_SET(i, nir->info.system_values_read, SYSTEM_VALUE_MAX) {
      info_out->sv[info_out->numSysVals].sn = tgsi_get_sysval_semantic(i);
      info_out->sv[info_out->numSysVals].si = 0;
      info_out->sv[info_out->numSysVals].input = 0; // TODO inferSysValDirection(sn);

      switch (i) {
      case SYSTEM_VALUE_INSTANCE_ID:
         info_out->io.instanceId = info_out->numSysVals;
         break;
      case SYSTEM_VALUE_TESS_LEVEL_INNER:
      case SYSTEM_VALUE_TESS_LEVEL_OUTER:
         info_out->sv[info_out->numSysVals].patch = 1;
         break;
      case SYSTEM_VALUE_VERTEX_ID:
         info_out->io.vertexId = info_out->numSysVals;
         break;
      default:
         break;
      }

      info_out->numSysVals += 1;
   }

   if (prog->getType() == Program::TYPE_COMPUTE)
      return true;

   nir_foreach_shader_in_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, true, var);
      uint32_t vary = var->data.driver_location;

      assert(vary + slots <= PIPE_MAX_SHADER_INPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         for (uint16_t i = 0; i < slots; ++i) {
            setInterpolate(&info_out->in[vary + i], var->data.interpolation,
                           var->data.centroid | var->data.sample, name);
         }
         break;
      case Program::TYPE_GEOMETRY:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         break;
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);
         if (var->data.patch && name == TGSI_SEMANTIC_PATCH)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);
         break;
      case Program::TYPE_VERTEX:
         if (slot >= VERT_ATTRIB_GENERIC0)
            slot = VERT_ATTRIB_GENERIC0 + vary;
         vert_attrib_to_tgsi_semantic((gl_vert_attrib)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagIn = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->in[vary];

         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;
      }
      info_out->numInputs = std::max<uint8_t>(info_out->numInputs, vary);
   }

   nir_foreach_shader_out_variable(var, nir) {
      const glsl_type *type = var->type;
      int slot = var->data.location;
      uint16_t slots = calcSlots(type, prog->getType(), nir->info, false, var);
      uint32_t vary = var->data.driver_location;

      assert(vary < PIPE_MAX_SHADER_OUTPUTS);

      switch(prog->getType()) {
      case Program::TYPE_FRAGMENT:
         tgsi_get_gl_frag_result_semantic((gl_frag_result)slot, &name, &index);
         switch (name) {
         case TGSI_SEMANTIC_COLOR:
            if (!var->data.fb_fetch_output)
               info_out->prop.fp.numColourResults++;
            if (var->data.location == FRAG_RESULT_COLOR &&
                nir->info.outputs_written & BITFIELD64_BIT(var->data.location))
               info_out->prop.fp.separateFragData = true;
            // sometimes we get FRAG_RESULT_DATAX with data.index 0
            // sometimes we get FRAG_RESULT_DATA0 with data.index X
            index = index == 0 ? var->data.index : index;
            break;
         case TGSI_SEMANTIC_POSITION:
            info_out->io.fragDepth = vary;
            info_out->prop.fp.writesDepth = true;
            break;
         case TGSI_SEMANTIC_SAMPLEMASK:
            info_out->io.sampleMask = vary;
            break;
         default:
            break;
         }
         break;
      case Program::TYPE_GEOMETRY:
      case Program::TYPE_TESSELLATION_CONTROL:
      case Program::TYPE_TESSELLATION_EVAL:
      case Program::TYPE_VERTEX:
         tgsi_get_gl_varying_semantic((gl_varying_slot)slot, true,
                                      &name, &index);

         if (var->data.patch && name != TGSI_SEMANTIC_TESSINNER &&
             name != TGSI_SEMANTIC_TESSOUTER)
            info_out->numPatchConstants = MAX2(info_out->numPatchConstants, index + slots);

         switch (name) {
         case TGSI_SEMANTIC_CLIPDIST:
            info_out->io.genUserClip = -1;
            break;
         case TGSI_SEMANTIC_CLIPVERTEX:
            clipVertexOutput = vary;
            break;
         case TGSI_SEMANTIC_EDGEFLAG:
            info_out->io.edgeFlagOut = vary;
            break;
         case TGSI_SEMANTIC_POSITION:
            if (clipVertexOutput < 0)
               clipVertexOutput = vary;
            break;
         default:
            break;
         }
         break;
      default:
         ERROR("unknown shader type %u in assignSlots\n", prog->getType());
         return false;
      }

      for (uint16_t i = 0u; i < slots; ++i, ++vary) {
         nv50_ir_varying *v = &info_out->out[vary];
         v->patch = var->data.patch;
         v->sn = name;
         v->si = index + i;
         v->mask |= getMaskForType(type, i) << var->data.location_frac;

         if (nir->info.outputs_read & 1ull << slot)
            v->oread = 1;
      }
      info_out->numOutputs = std::max<uint8_t>(info_out->numOutputs, vary);
   }

   if (info_out->io.genUserClip > 0) {
      info_out->io.clipDistances = info_out->io.genUserClip;

      const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;

      for (unsigned int n = 0; n < nOut; ++n) {
         unsigned int i = info_out->numOutputs++;
         info_out->out[i].id = i;
         info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
         info_out->out[i].si = n;
         info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
      }
   }

   return info->assignSlots(info_out) == 0;
}

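// Translates an I/O intrinsic plus component index into a byte address in
// the shader's input or output space; 64-bit types occupy two 32-bit slots.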
uint32_t
Converter::getSlotAddress(nir_intrinsic_instr *insn, uint8_t idx, uint8_t slot)
{
   DataType ty;
   int offset = nir_intrinsic_component(insn);
   bool input;

   if (nir_intrinsic_infos[insn->intrinsic].has_dest)
      ty = getDType(insn);
   else
      ty = getSType(insn->src[0], false, false);

   switch (insn->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_per_vertex_input:
      input = true;
      break;
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      input = false;
      break;
   default:
      ERROR("unknown intrinsic in getSlotAddress %s",
            nir_intrinsic_infos[insn->intrinsic].name);
      input = false;
      assert(false);
      break;
   }

   if (typeSizeof(ty) == 8) {
      slot *= 2;
      slot += offset;
      if (slot >= 4) {
         idx += 1;
         slot -= 4;
      }
   } else {
      slot += offset;
   }

   assert(slot < 4);
   assert(!input || idx < PIPE_MAX_SHADER_INPUTS);
   assert(input || idx < PIPE_MAX_SHADER_OUTPUTS);

   const nv50_ir_varying *vary = input ? info_out->in : info_out->out;
   return vary[idx].slot[slot] * 4;
}

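// Emits a load from the given data file; 64-bit values that live in
// constant/buffer memory or are addressed indirectly get split into two
// 32-bit loads followed by a merge.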
Instruction *
Converter::loadFrom(DataFile file, uint8_t i, DataType ty, Value *def,
                    uint32_t base, uint8_t c, Value *indirect0,
                    Value *indirect1, bool patch)
{
   unsigned int tySize = typeSizeof(ty);

   if (tySize == 8 &&
       (file == FILE_MEMORY_CONST || file == FILE_MEMORY_BUFFER || indirect0)) {
      Value *lo = getSSA();
      Value *hi = getSSA();

      Instruction *loi =
         mkLoad(TYPE_U32, lo,
                mkSymbol(file, i, TYPE_U32, base + c * tySize),
                indirect0);
      loi->setIndirect(0, 1, indirect1);
      loi->perPatch = patch;

      Instruction *hii =
         mkLoad(TYPE_U32, hi,
                mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),
                indirect0);
      hii->setIndirect(0, 1, indirect1);
      hii->perPatch = patch;

      return mkOp2(OP_MERGE, ty, def, lo, hi);
   } else {
      Instruction *ld =
         mkLoad(ty, def, mkSymbol(file, i, ty, base + c * tySize), indirect0);
      ld->setIndirect(0, 1, indirect1);
      ld->perPatch = patch;
      return ld;
   }
}

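// Emits a store (or export) of a single component to a shader output;
// 64-bit indirect stores are likewise split into two 32-bit halves.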
void
Converter::storeTo(nir_intrinsic_instr *insn, DataFile file, operation op,
                   DataType ty, Value *src, uint8_t idx, uint8_t c,
                   Value *indirect0, Value *indirect1)
{
   uint8_t size = typeSizeof(ty);
   uint32_t address = getSlotAddress(insn, idx, c);

   if (size == 8 && indirect0) {
      Value *split[2];
      mkSplit(split, 4, src);

      if (op == OP_EXPORT) {
         split[0] = mkMov(getSSA(), split[0], ty)->getDef(0);
         split[1] = mkMov(getSSA(), split[1], ty)->getDef(0);
      }

      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,
              split[0])->perPatch = info_out->out[idx].patch;
      mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,
              split[1])->perPatch = info_out->out[idx].patch;
   } else {
      if (op == OP_EXPORT)
         src = mkMov(getSSA(size), src, ty)->getDef(0);
      mkStore(op, ty, mkSymbol(file, 0, ty, address), indirect0,
              src)->perPatch = info_out->out[idx].patch;
   }
}

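// Copies shader-wide properties from the NIR shader info into the nv50 IR
// program info: compute block size, FS flags, GS/tessellation layout, etc.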
bool
Converter::parseNIR()
{
   info_out->bin.tlsSpace = nir->scratch_size;
   info_out->io.clipDistances = nir->info.clip_distance_array_size;
   info_out->io.cullDistances = nir->info.cull_distance_array_size;
   info_out->io.layer_viewport_relative = nir->info.layer_viewport_relative;

   switch(prog->getType()) {
   case Program::TYPE_COMPUTE:
      info->prop.cp.numThreads[0] = nir->info.cs.local_size[0];
      info->prop.cp.numThreads[1] = nir->info.cs.local_size[1];
      info->prop.cp.numThreads[2] = nir->info.cs.local_size[2];
      info_out->bin.smemSize += nir->info.cs.shared_size;
      break;
   case Program::TYPE_FRAGMENT:
      info_out->prop.fp.earlyFragTests = nir->info.fs.early_fragment_tests;
      prog->persampleInvocation =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
      info_out->prop.fp.postDepthCoverage = nir->info.fs.post_depth_coverage;
      info_out->prop.fp.readsSampleLocations =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
      info_out->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
      info_out->prop.fp.usesSampleMaskIn =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
      break;
   case Program::TYPE_GEOMETRY:
      info_out->prop.gp.instanceCount = nir->info.gs.invocations;
      info_out->prop.gp.maxVertices = nir->info.gs.vertices_out;
      info_out->prop.gp.outputPrim = nir->info.gs.output_primitive;
      break;
   case Program::TYPE_TESSELLATION_CONTROL:
   case Program::TYPE_TESSELLATION_EVAL:
      if (nir->info.tess.primitive_mode == GL_ISOLINES)
         info_out->prop.tp.domain = GL_LINES;
      else
         info_out->prop.tp.domain = nir->info.tess.primitive_mode;
      info_out->prop.tp.outputPatchSize = nir->info.tess.tcs_vertices_out;
      info_out->prop.tp.outputPrim =
         nir->info.tess.point_mode ? PIPE_PRIM_POINTS : PIPE_PRIM_TRIANGLES;
      info_out->prop.tp.partitioning = (nir->info.tess.spacing + 1) % 3;
      info_out->prop.tp.winding = !nir->info.tess.ccw;
      break;
   case Program::TYPE_VERTEX:
      info_out->prop.vp.usesDrawParameters =
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_VERTEX) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
         BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_DRAW_ID);
      break;
   default:
      break;
   }

   return true;
}

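// Converts a function body; only "main" is handled specially here: entry and
// exit blocks are created up front, and per-stage setup (TCS outBase, the
// fragment position reciprocal) is emitted before walking the CFG.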
bool
Converter::visit(nir_function *function)
{
   assert(function->impl);

   // usually the blocks will set everything up, but main is special
   BasicBlock *entry = new BasicBlock(prog->main);
   exit = new BasicBlock(prog->main);
   blocks[nir_start_block(function->impl)->index] = entry;
   prog->main->setEntry(entry);
   prog->main->setExit(exit);

   setPosition(entry, true);

   if (info_out->io.genUserClip > 0) {
      for (int c = 0; c < 4; ++c)
         clipVtx[c] = getScratch();
   }

   switch (prog->getType()) {
   case Program::TYPE_TESSELLATION_CONTROL:
      outBase = mkOp2v(
         OP_SUB, TYPE_U32, getSSA(),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
         mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
      break;
   case Program::TYPE_FRAGMENT: {
      Symbol *sv = mkSysVal(SV_POSITION, 3);
      fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
      fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
      break;
   }
   default:
      break;
   }

   nir_index_ssa_defs(function->impl);
   foreach_list_typed(nir_cf_node, node, node, &function->impl->body) {
      if (!visit(node))
         return false;
   }

   bb->cfg.attach(&exit->cfg, Graph::Edge::TREE);
   setPosition(exit, true);

   if ((prog->getType() == Program::TYPE_VERTEX ||
        prog->getType() == Program::TYPE_TESSELLATION_EVAL)
       && info_out->io.genUserClip > 0)
      handleUserClipPlanes();

   // TODO: for non-main functions this needs to be an OP_RETURN
   mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
   return true;
}

bool
Converter::visit(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_block:
      return visit(nir_cf_node_as_block(node));
   case nir_cf_node_if:
      return visit(nir_cf_node_as_if(node));
   case nir_cf_node_loop:
      return visit(nir_cf_node_as_loop(node));
   default:
      ERROR("unknown nir_cf_node type %u\n", node->type);
      return false;
   }
}

bool
Converter::visit(nir_block *block)
{
   if (!block->predecessors->entries && block->instr_list.is_empty())
      return true;

   BasicBlock *bb = convert(block);

   setPosition(bb, true);
   nir_foreach_instr(insn, block) {
      if (!visit(insn))
         return false;
   }
   return true;
}

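// Lowers a nir_if to conditional branches; when both sides reconverge on the
// same block and this is the outermost if, JOINAT/JOIN instructions are
// added so divergent threads re-unify there.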
bool
Converter::visit(nir_if *nif)
{
   curIfDepth++;

   DataType sType = getSType(nif->condition, false, false);
   Value *src = getSrc(&nif->condition, 0);

   nir_block *lastThen = nir_if_last_then_block(nif);
   nir_block *lastElse = nir_if_last_else_block(nif);

   BasicBlock *headBB = bb;
   BasicBlock *ifBB = convert(nir_if_first_then_block(nif));
   BasicBlock *elseBB = convert(nir_if_first_else_block(nif));

   bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
   bb->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);

   bool insertJoins = lastThen->successors[0] == lastElse->successors[0];
   mkFlow(OP_BRA, elseBB, CC_EQ, src)->setType(sType);

   foreach_list_typed(nir_cf_node, node, node, &nif->then_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastThen), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastThen->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   foreach_list_typed(nir_cf_node, node, node, &nif->else_list) {
      if (!visit(node))
         return false;
   }

   setPosition(convert(lastElse), true);
   if (!bb->isTerminated()) {
      BasicBlock *tailBB = convert(lastElse->successors[0]);
      mkFlow(OP_BRA, tailBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&tailBB->cfg, Graph::Edge::FORWARD);
   } else {
      insertJoins = insertJoins && bb->getExit()->op == OP_BRA;
   }

   /* only insert joins for the most outer if */
   if (--curIfDepth)
      insertJoins = false;

   /* we made sure that all threads would converge at the same block */
   if (insertJoins) {
      BasicBlock *conv = convert(lastThen->successors[0]);
      setPosition(headBB->getExit(), false);
      headBB->joinAt = mkFlow(OP_JOINAT, conv, CC_ALWAYS, NULL);
      setPosition(conv, false);
      mkFlow(OP_JOIN, NULL, CC_ALWAYS, NULL)->fixed = 1;
   }

   return true;
}

// TODO: add convergence
bool
Converter::visit(nir_loop *loop)
{
   curLoopDepth += 1;
   func->loopNestingBound = std::max(func->loopNestingBound, curLoopDepth);

   BasicBlock *loopBB = convert(nir_loop_first_block(loop));
   BasicBlock *tailBB = convert(nir_cf_node_as_block(nir_cf_node_next(&loop->cf_node)));

   bb->cfg.attach(&loopBB->cfg, Graph::Edge::TREE);

   mkFlow(OP_PREBREAK, tailBB, CC_ALWAYS, NULL);
   setPosition(loopBB, false);
   mkFlow(OP_PRECONT, loopBB, CC_ALWAYS, NULL);

   foreach_list_typed(nir_cf_node, node, node, &loop->body) {
      if (!visit(node))
         return false;
   }

   if (!bb->isTerminated()) {
      mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
      bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
   }

   if (tailBB->cfg.incidentCount() == 0)
      loopBB->cfg.attach(&tailBB->cfg, Graph::Edge::TREE);

   curLoopDepth -= 1;

   return true;
}

bool
Converter::visit(nir_instr *insn)
{
   // we need an insertion point for on-the-fly generated immediate loads
   immInsertPos = bb->getExit();
   switch (insn->type) {
   case nir_instr_type_alu:
      return visit(nir_instr_as_alu(insn));
   case nir_instr_type_intrinsic:
      return visit(nir_instr_as_intrinsic(insn));
   case nir_instr_type_jump:
      return visit(nir_instr_as_jump(insn));
   case nir_instr_type_load_const:
      return visit(nir_instr_as_load_const(insn));
   case nir_instr_type_ssa_undef:
      return visit(nir_instr_as_ssa_undef(insn));
   case nir_instr_type_tex:
      return visit(nir_instr_as_tex(insn));
   default:
      ERROR("unknown nir_instr type %u\n", insn->type);
      return false;
   }
   return true;
}

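// Maps system-value load intrinsics onto the corresponding nv50 IR
// SVSemantic read via OP_RDSV.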
SVSemantic
Converter::convert(nir_intrinsic_op intr)
{
   switch (intr) {
   case nir_intrinsic_load_base_vertex:
      return SV_BASEVERTEX;
   case nir_intrinsic_load_base_instance:
      return SV_BASEINSTANCE;
   case nir_intrinsic_load_draw_id:
      return SV_DRAWID;
   case nir_intrinsic_load_front_face:
      return SV_FACE;
   case nir_intrinsic_is_helper_invocation:
   case nir_intrinsic_load_helper_invocation:
      return SV_THREAD_KILL;
   case nir_intrinsic_load_instance_id:
      return SV_INSTANCE_ID;
   case nir_intrinsic_load_invocation_id:
      return SV_INVOCATION_ID;
   case nir_intrinsic_load_local_group_size:
      return SV_NTID;
   case nir_intrinsic_load_local_invocation_id:
      return SV_TID;
   case nir_intrinsic_load_num_work_groups:
      return SV_NCTAID;
   case nir_intrinsic_load_patch_vertices_in:
      return SV_VERTEX_COUNT;
   case nir_intrinsic_load_primitive_id:
      return SV_PRIMITIVE_ID;
   case nir_intrinsic_load_sample_id:
      return SV_SAMPLE_INDEX;
   case nir_intrinsic_load_sample_mask_in:
      return SV_SAMPLE_MASK;
   case nir_intrinsic_load_sample_pos:
      return SV_SAMPLE_POS;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SV_LANEMASK_EQ;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SV_LANEMASK_GE;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SV_LANEMASK_GT;
   case nir_intrinsic_load_subgroup_le_mask:
      return SV_LANEMASK_LE;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SV_LANEMASK_LT;
   case nir_intrinsic_load_subgroup_invocation:
      return SV_LANEID;
   case nir_intrinsic_load_tess_coord:
      return SV_TESS_COORD;
   case nir_intrinsic_load_tess_level_inner:
      return SV_TESS_INNER;
   case nir_intrinsic_load_tess_level_outer:
      return SV_TESS_OUTER;
   case nir_intrinsic_load_vertex_id:
      return SV_VERTEX_ID;
   case nir_intrinsic_load_work_group_id:
      return SV_CTAID;
   case nir_intrinsic_load_work_dim:
      return SV_WORK_DIM;
   default:
      ERROR("unknown SVSemantic for nir_intrinsic_op %s\n",
            nir_intrinsic_infos[intr].name);
      assert(false);
      return SV_LAST;
   }
}

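// The main intrinsic dispatcher: converts each nir_intrinsic_instr into an
// equivalent nv50 IR sequence.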
1616
1617 bool
visit(nir_intrinsic_instr * insn)1618 Converter::visit(nir_intrinsic_instr *insn)
1619 {
1620 nir_intrinsic_op op = insn->intrinsic;
1621 const nir_intrinsic_info &opInfo = nir_intrinsic_infos[op];
1622 unsigned dest_components = nir_intrinsic_dest_components(insn);
1623
1624 switch (op) {
1625 case nir_intrinsic_load_uniform: {
1626 LValues &newDefs = convert(&insn->dest);
1627 const DataType dType = getDType(insn);
1628 Value *indirect;
1629 uint32_t coffset = getIndirect(insn, 0, 0, indirect);
1630 for (uint8_t i = 0; i < dest_components; ++i) {
1631 loadFrom(FILE_MEMORY_CONST, 0, dType, newDefs[i], 16 * coffset, i, indirect);
1632 }
1633 break;
1634 }
1635 case nir_intrinsic_store_output:
1636 case nir_intrinsic_store_per_vertex_output: {
1637 Value *indirect;
1638 DataType dType = getSType(insn->src[0], false, false);
1639 uint32_t idx = getIndirect(insn, op == nir_intrinsic_store_output ? 1 : 2, 0, indirect);
1640
1641 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
1642 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
1643 continue;
1644
1645 uint8_t offset = 0;
1646 Value *src = getSrc(&insn->src[0], i);
1647 switch (prog->getType()) {
1648 case Program::TYPE_FRAGMENT: {
1649 if (info_out->out[idx].sn == TGSI_SEMANTIC_POSITION) {
               // TGSI uses a different interface than NIR: TGSI stores the
               // value in the z component, NIR in x
1652 offset += 2;
1653 src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);
1654 }
1655 break;
1656 }
1657 case Program::TYPE_GEOMETRY:
1658 case Program::TYPE_TESSELLATION_EVAL:
1659 case Program::TYPE_VERTEX: {
1660 if (info_out->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
1661 mkMov(clipVtx[i], src);
1662 src = clipVtx[i];
1663 }
1664 break;
1665 }
1666 default:
1667 break;
1668 }
1669
1670 storeTo(insn, FILE_SHADER_OUTPUT, OP_EXPORT, dType, src, idx, i + offset, indirect);
1671 }
1672 break;
1673 }
1674 case nir_intrinsic_load_input:
1675 case nir_intrinsic_load_interpolated_input:
1676 case nir_intrinsic_load_output: {
1677 LValues &newDefs = convert(&insn->dest);
1678
1679 // FBFetch
1680 if (prog->getType() == Program::TYPE_FRAGMENT &&
1681 op == nir_intrinsic_load_output) {
1682 std::vector<Value*> defs, srcs;
1683 uint8_t mask = 0;
1684
1685 srcs.push_back(getSSA());
1686 srcs.push_back(getSSA());
1687 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));
1688 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));
1689 mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;
1690 mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;
1691
1692 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LAYER, 0)));
1693 srcs.push_back(mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_SAMPLE_INDEX, 0)));
1694
1695 for (uint8_t i = 0u; i < dest_components; ++i) {
1696 defs.push_back(newDefs[i]);
1697 mask |= 1 << i;
1698 }
1699
1700 TexInstruction *texi = mkTex(OP_TXF, TEX_TARGET_2D_MS_ARRAY, 0, 0, defs, srcs);
1701 texi->tex.levelZero = 1;
1702 texi->tex.mask = mask;
1703 texi->tex.useOffsets = 0;
1704 texi->tex.r = 0xffff;
1705 texi->tex.s = 0xffff;
1706
1707 info_out->prop.fp.readsFramebuffer = true;
1708 break;
1709 }
1710
1711 const DataType dType = getDType(insn);
1712 Value *indirect;
1713 bool input = op != nir_intrinsic_load_output;
1714 operation nvirOp;
1715 uint32_t mode = 0;
1716
1717 uint32_t idx = getIndirect(insn, op == nir_intrinsic_load_interpolated_input ? 1 : 0, 0, indirect);
1718 nv50_ir_varying& vary = input ? info_out->in[idx] : info_out->out[idx];
1719
1720 // see load_barycentric_* handling
1721 if (prog->getType() == Program::TYPE_FRAGMENT) {
1722 if (op == nir_intrinsic_load_interpolated_input) {
1723 ImmediateValue immMode;
1724 if (getSrc(&insn->src[0], 1)->getUniqueInsn()->src(0).getImmediate(immMode))
1725 mode = immMode.reg.data.u32;
1726 }
1727 if (mode == NV50_IR_INTERP_DEFAULT)
1728 mode |= translateInterpMode(&vary, nvirOp);
1729 else {
1730 if (vary.linear) {
1731 nvirOp = OP_LINTERP;
1732 mode |= NV50_IR_INTERP_LINEAR;
1733 } else {
1734 nvirOp = OP_PINTERP;
1735 mode |= NV50_IR_INTERP_PERSPECTIVE;
1736 }
1737 }
1738 }
1739
1740 for (uint8_t i = 0u; i < dest_components; ++i) {
1741 uint32_t address = getSlotAddress(insn, idx, i);
1742 Symbol *sym = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address);
1743 if (prog->getType() == Program::TYPE_FRAGMENT) {
1744 int s = 1;
1745 if (typeSizeof(dType) == 8) {
1746 Value *lo = getSSA();
1747 Value *hi = getSSA();
1748 Instruction *interp;
1749
1750 interp = mkOp1(nvirOp, TYPE_U32, lo, sym);
1751 if (nvirOp == OP_PINTERP)
1752 interp->setSrc(s++, fp.position);
1753 if (mode & NV50_IR_INTERP_OFFSET)
1754 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1755 interp->setInterpolate(mode);
1756 interp->setIndirect(0, 0, indirect);
1757
1758 Symbol *sym1 = mkSymbol(input ? FILE_SHADER_INPUT : FILE_SHADER_OUTPUT, 0, dType, address + 4);
1759 interp = mkOp1(nvirOp, TYPE_U32, hi, sym1);
1760 if (nvirOp == OP_PINTERP)
1761 interp->setSrc(s++, fp.position);
1762 if (mode & NV50_IR_INTERP_OFFSET)
1763 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1764 interp->setInterpolate(mode);
1765 interp->setIndirect(0, 0, indirect);
1766
1767 mkOp2(OP_MERGE, dType, newDefs[i], lo, hi);
1768 } else {
1769 Instruction *interp = mkOp1(nvirOp, dType, newDefs[i], sym);
1770 if (nvirOp == OP_PINTERP)
1771 interp->setSrc(s++, fp.position);
1772 if (mode & NV50_IR_INTERP_OFFSET)
1773 interp->setSrc(s++, getSrc(&insn->src[0], 0));
1774 interp->setInterpolate(mode);
1775 interp->setIndirect(0, 0, indirect);
1776 }
1777 } else {
1778 mkLoad(dType, newDefs[i], sym, indirect)->perPatch = vary.patch;
1779 }
1780 }
1781 break;
1782 }
1783 case nir_intrinsic_load_barycentric_at_offset:
1784 case nir_intrinsic_load_barycentric_at_sample:
1785 case nir_intrinsic_load_barycentric_centroid:
1786 case nir_intrinsic_load_barycentric_pixel:
1787 case nir_intrinsic_load_barycentric_sample: {
1788 LValues &newDefs = convert(&insn->dest);
1789 uint32_t mode;
1790
1791 if (op == nir_intrinsic_load_barycentric_centroid ||
1792 op == nir_intrinsic_load_barycentric_sample) {
1793 mode = NV50_IR_INTERP_CENTROID;
1794 } else if (op == nir_intrinsic_load_barycentric_at_offset) {
1795 Value *offs[2];
1796 for (uint8_t c = 0; c < 2; c++) {
1797 offs[c] = getScratch();
1798 mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));
1799 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
1800 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
1801 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
1802 }
1803 mkOp3v(OP_INSBF, TYPE_U32, newDefs[0], offs[1], mkImm(0x1010), offs[0]);
1804
1805 mode = NV50_IR_INTERP_OFFSET;
1806 } else if (op == nir_intrinsic_load_barycentric_pixel) {
1807 mode = NV50_IR_INTERP_DEFAULT;
1808 } else if (op == nir_intrinsic_load_barycentric_at_sample) {
1809 info_out->prop.fp.readsSampleLocations = true;
1810 mkOp1(OP_PIXLD, TYPE_U32, newDefs[0], getSrc(&insn->src[0], 0))->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
1811 mode = NV50_IR_INTERP_OFFSET;
1812 } else {
1813 unreachable("all intrinsics already handled above");
1814 }
1815
1816 loadImm(newDefs[1], mode);
1817 break;
1818 }
1819 case nir_intrinsic_demote:
1820 case nir_intrinsic_discard:
1821 mkOp(OP_DISCARD, TYPE_NONE, NULL);
1822 break;
1823 case nir_intrinsic_demote_if:
1824 case nir_intrinsic_discard_if: {
1825 Value *pred = getSSA(1, FILE_PREDICATE);
1826 if (insn->num_components > 1) {
1827 ERROR("nir_intrinsic_discard_if only with 1 component supported!\n");
1828 assert(false);
1829 return false;
1830 }
1831 mkCmp(OP_SET, CC_NE, TYPE_U8, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1832 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, pred);
1833 break;
1834 }
1835 case nir_intrinsic_load_base_vertex:
1836 case nir_intrinsic_load_base_instance:
1837 case nir_intrinsic_load_draw_id:
1838 case nir_intrinsic_load_front_face:
1839 case nir_intrinsic_is_helper_invocation:
1840 case nir_intrinsic_load_helper_invocation:
1841 case nir_intrinsic_load_instance_id:
1842 case nir_intrinsic_load_invocation_id:
1843 case nir_intrinsic_load_local_group_size:
1844 case nir_intrinsic_load_local_invocation_id:
1845 case nir_intrinsic_load_num_work_groups:
1846 case nir_intrinsic_load_patch_vertices_in:
1847 case nir_intrinsic_load_primitive_id:
1848 case nir_intrinsic_load_sample_id:
1849 case nir_intrinsic_load_sample_mask_in:
1850 case nir_intrinsic_load_sample_pos:
1851 case nir_intrinsic_load_subgroup_eq_mask:
1852 case nir_intrinsic_load_subgroup_ge_mask:
1853 case nir_intrinsic_load_subgroup_gt_mask:
1854 case nir_intrinsic_load_subgroup_le_mask:
1855 case nir_intrinsic_load_subgroup_lt_mask:
1856 case nir_intrinsic_load_subgroup_invocation:
1857 case nir_intrinsic_load_tess_coord:
1858 case nir_intrinsic_load_tess_level_inner:
1859 case nir_intrinsic_load_tess_level_outer:
1860 case nir_intrinsic_load_vertex_id:
1861 case nir_intrinsic_load_work_group_id:
1862 case nir_intrinsic_load_work_dim: {
1863 const DataType dType = getDType(insn);
1864 SVSemantic sv = convert(op);
1865 LValues &newDefs = convert(&insn->dest);
1866
1867 for (uint8_t i = 0u; i < nir_intrinsic_dest_components(insn); ++i) {
1868 Value *def;
1869 if (typeSizeof(dType) == 8)
1870 def = getSSA();
1871 else
1872 def = newDefs[i];
1873
1874 if (sv == SV_TID && info->prop.cp.numThreads[i] == 1) {
1875 loadImm(def, 0u);
1876 } else {
1877 Symbol *sym = mkSysVal(sv, i);
1878 Instruction *rdsv = mkOp1(OP_RDSV, TYPE_U32, def, sym);
1879 if (sv == SV_TESS_OUTER || sv == SV_TESS_INNER)
1880 rdsv->perPatch = 1;
1881 }
1882
1883 if (typeSizeof(dType) == 8)
1884 mkOp2(OP_MERGE, dType, newDefs[i], def, loadImm(getSSA(), 0u));
1885 }
1886 break;
1887 }
1888 // constants
1889 case nir_intrinsic_load_subgroup_size: {
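// warps on NVIDIA hardware are 32 threads wide, so the subgroup size is a
// compile-time constant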
1890 LValues &newDefs = convert(&insn->dest);
1891 loadImm(newDefs[0], 32u);
1892 break;
1893 }
1894 case nir_intrinsic_vote_all:
1895 case nir_intrinsic_vote_any:
1896 case nir_intrinsic_vote_ieq: {
1897 LValues &newDefs = convert(&insn->dest);
1898 Value *pred = getScratch(1, FILE_PREDICATE);
1899 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1900 mkOp1(OP_VOTE, TYPE_U32, pred, pred)->subOp = getSubOp(op);
1901 mkCvt(OP_CVT, TYPE_U32, newDefs[0], TYPE_U8, pred);
1902 break;
1903 }
1904 case nir_intrinsic_ballot: {
1905 LValues &newDefs = convert(&insn->dest);
1906 Value *pred = getSSA(1, FILE_PREDICATE);
1907 mkCmp(OP_SET, CC_NE, TYPE_U32, pred, TYPE_U32, getSrc(&insn->src[0], 0), zero);
1908 mkOp1(OP_VOTE, TYPE_U32, newDefs[0], pred)->subOp = NV50_IR_SUBOP_VOTE_ANY;
1909 break;
1910 }
1911 case nir_intrinsic_read_first_invocation:
1912 case nir_intrinsic_read_invocation: {
1913 LValues &newDefs = convert(&insn->dest);
1914 const DataType dType = getDType(insn);
1915 Value *tmp = getScratch();
1916
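// For read_first_invocation, derive the lowest active lane id: VOTE ANY of
// an all-true predicate yields the active-thread mask, and BREV + BFIND
// (in shift-amount mode) returns the index of its lowest set bit. That
// lane id then feeds the indexed SHFL below.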
1917 if (op == nir_intrinsic_read_first_invocation) {
1918 mkOp1(OP_VOTE, TYPE_U32, tmp, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
1919 mkOp1(OP_BREV, TYPE_U32, tmp, tmp);
1920 mkOp1(OP_BFIND, TYPE_U32, tmp, tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
1921 } else
1922 tmp = getSrc(&insn->src[1], 0);
1923
1924 for (uint8_t i = 0; i < dest_components; ++i) {
1925 mkOp3(OP_SHFL, dType, newDefs[i], getSrc(&insn->src[0], i), tmp, mkImm(0x1f))
1926 ->subOp = NV50_IR_SUBOP_SHFL_IDX;
1927 }
1928 break;
1929 }
1930 case nir_intrinsic_load_per_vertex_input: {
1931 const DataType dType = getDType(insn);
1932 LValues &newDefs = convert(&insn->dest);
1933 Value *indirectVertex;
1934 Value *indirectOffset;
1935 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1936 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1937
1938 Value *vtxBase = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1939 mkImm(baseVertex), indirectVertex);
1940 for (uint8_t i = 0u; i < dest_components; ++i) {
1941 uint32_t address = getSlotAddress(insn, idx, i);
1942 loadFrom(FILE_SHADER_INPUT, 0, dType, newDefs[i], address, 0,
1943 indirectOffset, vtxBase, info_out->in[idx].patch);
1944 }
1945 break;
1946 }
1947 case nir_intrinsic_load_per_vertex_output: {
1948 const DataType dType = getDType(insn);
1949 LValues &newDefs = convert(&insn->dest);
1950 Value *indirectVertex;
1951 Value *indirectOffset;
1952 uint32_t baseVertex = getIndirect(&insn->src[0], 0, indirectVertex);
1953 uint32_t idx = getIndirect(insn, 1, 0, indirectOffset);
1954 Value *vtxBase = NULL;
1955
1956 if (indirectVertex)
1957 vtxBase = indirectVertex;
1958 else
1959 vtxBase = loadImm(NULL, baseVertex);
1960
1961 vtxBase = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, FILE_ADDRESS), outBase, vtxBase);
1962
1963 for (uint8_t i = 0u; i < dest_components; ++i) {
1964 uint32_t address = getSlotAddress(insn, idx, i);
1965 loadFrom(FILE_SHADER_OUTPUT, 0, dType, newDefs[i], address, 0,
1966 indirectOffset, vtxBase, info_out->in[idx].patch);
1967 }
1968 break;
1969 }
1970 case nir_intrinsic_emit_vertex: {
1971 if (info_out->io.genUserClip > 0)
1972 handleUserClipPlanes();
1973 uint32_t idx = nir_intrinsic_stream_id(insn);
1974 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1975 break;
1976 }
1977 case nir_intrinsic_end_primitive: {
1978 uint32_t idx = nir_intrinsic_stream_id(insn);
1979 if (idx)
1980 break;
1981 mkOp1(getOperation(op), TYPE_U32, NULL, mkImm(idx))->fixed = 1;
1982 break;
1983 }
1984 case nir_intrinsic_load_ubo: {
1985 const DataType dType = getDType(insn);
1986 LValues &newDefs = convert(&insn->dest);
1987 Value *indirectIndex;
1988 Value *indirectOffset;
1989 uint32_t index = getIndirect(&insn->src[0], 0, indirectIndex) + 1;
1990 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
1991
1992 for (uint8_t i = 0u; i < dest_components; ++i) {
1993 loadFrom(FILE_MEMORY_CONST, index, dType, newDefs[i], offset, i,
1994 indirectOffset, indirectIndex);
1995 }
1996 break;
1997 }
1998 case nir_intrinsic_get_ssbo_size: {
1999 LValues &newDefs = convert(&insn->dest);
2000 const DataType dType = getDType(insn);
2001 Value *indirectBuffer;
2002 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2003
2004 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, 0);
2005 mkOp1(OP_BUFQ, dType, newDefs[0], sym)->setIndirect(0, 0, indirectBuffer);
2006 break;
2007 }
2008 case nir_intrinsic_store_ssbo: {
2009 DataType sType = getSType(insn->src[0], false, false);
2010 Value *indirectBuffer;
2011 Value *indirectOffset;
2012 uint32_t buffer = getIndirect(&insn->src[1], 0, indirectBuffer);
2013 uint32_t offset = getIndirect(&insn->src[2], 0, indirectOffset);
2014
2015 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2016 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2017 continue;
2018 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, sType,
2019 offset + i * typeSizeof(sType));
2020 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i))
2021 ->setIndirect(0, 1, indirectBuffer);
2022 }
2023 info_out->io.globalAccess |= 0x2;
2024 break;
2025 }
2026 case nir_intrinsic_load_ssbo: {
2027 const DataType dType = getDType(insn);
2028 LValues &newDefs = convert(&insn->dest);
2029 Value *indirectBuffer;
2030 Value *indirectOffset;
2031 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2032 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2033
2034 for (uint8_t i = 0u; i < dest_components; ++i)
2035 loadFrom(FILE_MEMORY_BUFFER, buffer, dType, newDefs[i], offset, i,
2036 indirectOffset, indirectBuffer);
2037
2038 info_out->io.globalAccess |= 0x1;
2039 break;
2040 }
2041 case nir_intrinsic_shared_atomic_add:
2042 case nir_intrinsic_shared_atomic_and:
2043 case nir_intrinsic_shared_atomic_comp_swap:
2044 case nir_intrinsic_shared_atomic_exchange:
2045 case nir_intrinsic_shared_atomic_or:
2046 case nir_intrinsic_shared_atomic_imax:
2047 case nir_intrinsic_shared_atomic_imin:
2048 case nir_intrinsic_shared_atomic_umax:
2049 case nir_intrinsic_shared_atomic_umin:
2050 case nir_intrinsic_shared_atomic_xor: {
2051 const DataType dType = getDType(insn);
2052 LValues &newDefs = convert(&insn->dest);
2053 Value *indirectOffset;
2054 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2055 Symbol *sym = mkSymbol(FILE_MEMORY_SHARED, 0, dType, offset);
2056 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2057 if (op == nir_intrinsic_shared_atomic_comp_swap)
2058 atom->setSrc(2, getSrc(&insn->src[2], 0));
2059 atom->setIndirect(0, 0, indirectOffset);
2060 atom->subOp = getSubOp(op);
2061 break;
2062 }
2063 case nir_intrinsic_ssbo_atomic_add:
2064 case nir_intrinsic_ssbo_atomic_and:
2065 case nir_intrinsic_ssbo_atomic_comp_swap:
2066 case nir_intrinsic_ssbo_atomic_exchange:
2067 case nir_intrinsic_ssbo_atomic_or:
2068 case nir_intrinsic_ssbo_atomic_imax:
2069 case nir_intrinsic_ssbo_atomic_imin:
2070 case nir_intrinsic_ssbo_atomic_umax:
2071 case nir_intrinsic_ssbo_atomic_umin:
2072 case nir_intrinsic_ssbo_atomic_xor: {
2073 const DataType dType = getDType(insn);
2074 LValues &newDefs = convert(&insn->dest);
2075 Value *indirectBuffer;
2076 Value *indirectOffset;
2077 uint32_t buffer = getIndirect(&insn->src[0], 0, indirectBuffer);
2078 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2079
2080 Symbol *sym = mkSymbol(FILE_MEMORY_BUFFER, buffer, dType, offset);
2081 Instruction *atom = mkOp2(OP_ATOM, dType, newDefs[0], sym,
2082 getSrc(&insn->src[2], 0));
2083 if (op == nir_intrinsic_ssbo_atomic_comp_swap)
2084 atom->setSrc(2, getSrc(&insn->src[3], 0));
2085 atom->setIndirect(0, 0, indirectOffset);
2086 atom->setIndirect(0, 1, indirectBuffer);
2087 atom->subOp = getSubOp(op);
2088
2089 info_out->io.globalAccess |= 0x2;
2090 break;
2091 }
2092 case nir_intrinsic_global_atomic_add:
2093 case nir_intrinsic_global_atomic_and:
2094 case nir_intrinsic_global_atomic_comp_swap:
2095 case nir_intrinsic_global_atomic_exchange:
2096 case nir_intrinsic_global_atomic_or:
2097 case nir_intrinsic_global_atomic_imax:
2098 case nir_intrinsic_global_atomic_imin:
2099 case nir_intrinsic_global_atomic_umax:
2100 case nir_intrinsic_global_atomic_umin:
2101 case nir_intrinsic_global_atomic_xor: {
2102 const DataType dType = getDType(insn);
2103 LValues &newDefs = convert(&insn->dest);
2104 Value *address;
2105 uint32_t offset = getIndirect(&insn->src[0], 0, address);
2106
2107 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, dType, offset);
2108 Instruction *atom =
2109 mkOp2(OP_ATOM, dType, newDefs[0], sym, getSrc(&insn->src[1], 0));
2110 if (op == nir_intrinsic_global_atomic_comp_swap)
2111 atom->setSrc(2, getSrc(&insn->src[2], 0));
2112 atom->setIndirect(0, 0, address);
2113 atom->subOp = getSubOp(op);
2114
2115 info_out->io.globalAccess |= 0x2;
2116 break;
2117 }
2118 case nir_intrinsic_bindless_image_atomic_add:
2119 case nir_intrinsic_bindless_image_atomic_and:
2120 case nir_intrinsic_bindless_image_atomic_comp_swap:
2121 case nir_intrinsic_bindless_image_atomic_exchange:
2122 case nir_intrinsic_bindless_image_atomic_imax:
2123 case nir_intrinsic_bindless_image_atomic_umax:
2124 case nir_intrinsic_bindless_image_atomic_imin:
2125 case nir_intrinsic_bindless_image_atomic_umin:
2126 case nir_intrinsic_bindless_image_atomic_or:
2127 case nir_intrinsic_bindless_image_atomic_xor:
2128 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2129 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2130 case nir_intrinsic_bindless_image_load:
2131 case nir_intrinsic_bindless_image_samples:
2132 case nir_intrinsic_bindless_image_size:
2133 case nir_intrinsic_bindless_image_store:
2134 case nir_intrinsic_image_atomic_add:
2135 case nir_intrinsic_image_atomic_and:
2136 case nir_intrinsic_image_atomic_comp_swap:
2137 case nir_intrinsic_image_atomic_exchange:
2138 case nir_intrinsic_image_atomic_imax:
2139 case nir_intrinsic_image_atomic_umax:
2140 case nir_intrinsic_image_atomic_imin:
2141 case nir_intrinsic_image_atomic_umin:
2142 case nir_intrinsic_image_atomic_or:
2143 case nir_intrinsic_image_atomic_xor:
2144 case nir_intrinsic_image_atomic_inc_wrap:
2145 case nir_intrinsic_image_atomic_dec_wrap:
2146 case nir_intrinsic_image_load:
2147 case nir_intrinsic_image_samples:
2148 case nir_intrinsic_image_size:
2149 case nir_intrinsic_image_store: {
2150 std::vector<Value*> srcs, defs;
2151 Value *indirect;
2152 DataType ty;
2153
2154 uint32_t mask = 0;
2155 TexInstruction::Target target =
2156 convert(nir_intrinsic_image_dim(insn), !!nir_intrinsic_image_array(insn), false);
2157 unsigned int argCount = getNIRArgCount(target);
2158 uint16_t location = 0;
2159
2160 if (opInfo.has_dest) {
2161 LValues &newDefs = convert(&insn->dest);
2162 for (uint8_t i = 0u; i < newDefs.size(); ++i) {
2163 defs.push_back(newDefs[i]);
2164 mask |= 1 << i;
2165 }
2166 }
2167
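// lod_src marks which NIR source slot carries the LOD for image load/store
// so the generic src-gathering loops further down can skip that slot.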
2168 int lod_src = -1;
2169 bool bindless = false;
2170 switch (op) {
2171 case nir_intrinsic_bindless_image_atomic_add:
2172 case nir_intrinsic_bindless_image_atomic_and:
2173 case nir_intrinsic_bindless_image_atomic_comp_swap:
2174 case nir_intrinsic_bindless_image_atomic_exchange:
2175 case nir_intrinsic_bindless_image_atomic_imax:
2176 case nir_intrinsic_bindless_image_atomic_umax:
2177 case nir_intrinsic_bindless_image_atomic_imin:
2178 case nir_intrinsic_bindless_image_atomic_umin:
2179 case nir_intrinsic_bindless_image_atomic_or:
2180 case nir_intrinsic_bindless_image_atomic_xor:
2181 case nir_intrinsic_bindless_image_atomic_inc_wrap:
2182 case nir_intrinsic_bindless_image_atomic_dec_wrap:
2183 ty = getDType(insn);
2184 bindless = true;
2185 info_out->io.globalAccess |= 0x2;
2186 mask = 0x1;
2187 break;
2188 case nir_intrinsic_image_atomic_add:
2189 case nir_intrinsic_image_atomic_and:
2190 case nir_intrinsic_image_atomic_comp_swap:
2191 case nir_intrinsic_image_atomic_exchange:
2192 case nir_intrinsic_image_atomic_imax:
2193 case nir_intrinsic_image_atomic_umax:
2194 case nir_intrinsic_image_atomic_imin:
2195 case nir_intrinsic_image_atomic_umin:
2196 case nir_intrinsic_image_atomic_or:
2197 case nir_intrinsic_image_atomic_xor:
2198 case nir_intrinsic_image_atomic_inc_wrap:
2199 case nir_intrinsic_image_atomic_dec_wrap:
2200 ty = getDType(insn);
2201 bindless = false;
2202 info_out->io.globalAccess |= 0x2;
2203 mask = 0x1;
2204 break;
2205 case nir_intrinsic_bindless_image_load:
2206 case nir_intrinsic_image_load:
2207 ty = TYPE_U32;
2208 bindless = op == nir_intrinsic_bindless_image_load;
2209 info_out->io.globalAccess |= 0x1;
2210 lod_src = 4;
2211 break;
2212 case nir_intrinsic_bindless_image_store:
2213 case nir_intrinsic_image_store:
2214 ty = TYPE_U32;
2215 bindless = op == nir_intrinsic_bindless_image_store;
2216 info_out->io.globalAccess |= 0x2;
2217 lod_src = 5;
2218 mask = 0xf;
2219 break;
2220 case nir_intrinsic_bindless_image_samples:
2221 mask = 0x8;
2222 /* fallthrough */
2223 case nir_intrinsic_image_samples:
2224 ty = TYPE_U32;
2225 bindless = op == nir_intrinsic_bindless_image_samples;
2226 mask = 0x8;
2227 break;
2228 case nir_intrinsic_bindless_image_size:
2229 case nir_intrinsic_image_size:
2230 assert(nir_src_as_uint(insn->src[1]) == 0);
2231 ty = TYPE_U32;
2232 bindless = op == nir_intrinsic_bindless_image_size;
2233 break;
2234 default:
2235 unreachable("unhandled image opcode");
2236 break;
2237 }
2238
2239 if (bindless)
2240 indirect = getSrc(&insn->src[0], 0);
2241 else
2242 location = getIndirect(&insn->src[0], 0, indirect);
2243
2244 // coords
2245 if (opInfo.num_srcs >= 2)
2246 for (unsigned int i = 0u; i < argCount; ++i)
2247 srcs.push_back(getSrc(&insn->src[1], i));
2248
2249 // for MS images, the sample index is just another src added after the coords
2250 if (opInfo.num_srcs >= 3 && target.isMS())
2251 srcs.push_back(getSrc(&insn->src[2], 0));
2252
2253 if (opInfo.num_srcs >= 4 && lod_src != 4) {
2254 unsigned components = opInfo.src_components[3] ? opInfo.src_components[3] : insn->num_components;
2255 for (uint8_t i = 0u; i < components; ++i)
2256 srcs.push_back(getSrc(&insn->src[3], i));
2257 }
2258
2259 if (opInfo.num_srcs >= 5 && lod_src != 5)
2260 // 1 for atomic swap
2261 for (uint8_t i = 0u; i < opInfo.src_components[4]; ++i)
2262 srcs.push_back(getSrc(&insn->src[4], i));
2263
2264 TexInstruction *texi = mkTex(getOperation(op), target.getEnum(), location, 0, defs, srcs);
2265 texi->tex.bindless = bindless;
2266 texi->tex.format = nv50_ir::TexInstruction::translateImgFormat(nir_intrinsic_format(insn));
2267 texi->tex.mask = mask;
2268 texi->cache = convert(nir_intrinsic_access(insn));
2269 texi->setType(ty);
2270 texi->subOp = getSubOp(op);
2271
2272 if (indirect)
2273 texi->setIndirectR(indirect);
2274
2275 break;
2276 }
2277 case nir_intrinsic_store_scratch:
2278 case nir_intrinsic_store_shared: {
2279 DataType sType = getSType(insn->src[0], false, false);
2280 Value *indirectOffset;
2281 uint32_t offset = getIndirect(&insn->src[1], 0, indirectOffset);
2282
2283 for (uint8_t i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2284 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2285 continue;
2286 Symbol *sym = mkSymbol(getFile(op), 0, sType, offset + i * typeSizeof(sType));
2287 mkStore(OP_STORE, sType, sym, indirectOffset, getSrc(&insn->src[0], i));
2288 }
2289 break;
2290 }
2291 case nir_intrinsic_load_kernel_input:
2292 case nir_intrinsic_load_scratch:
2293 case nir_intrinsic_load_shared: {
2294 const DataType dType = getDType(insn);
2295 LValues &newDefs = convert(&insn->dest);
2296 Value *indirectOffset;
2297 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2298
2299 for (uint8_t i = 0u; i < dest_components; ++i)
2300 loadFrom(getFile(op), 0, dType, newDefs[i], offset, i, indirectOffset);
2301
2302 break;
2303 }
2304 case nir_intrinsic_control_barrier: {
2305 // TODO: add flag to shader_info
2306 info_out->numBarriers = 1;
2307 Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
2308 bar->fixed = 1;
2309 bar->subOp = NV50_IR_SUBOP_BAR_SYNC;
2310 break;
2311 }
2312 case nir_intrinsic_group_memory_barrier:
2313 case nir_intrinsic_memory_barrier:
2314 case nir_intrinsic_memory_barrier_buffer:
2315 case nir_intrinsic_memory_barrier_image:
2316 case nir_intrinsic_memory_barrier_shared: {
2317 Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
2318 bar->fixed = 1;
2319 bar->subOp = getSubOp(op);
2320 break;
2321 }
2322 case nir_intrinsic_memory_barrier_tcs_patch:
2323 break;
2324 case nir_intrinsic_shader_clock: {
2325 const DataType dType = getDType(insn);
2326 LValues &newDefs = convert(&insn->dest);
2327
2328 loadImm(newDefs[0], 0u);
2329 mkOp1(OP_RDSV, dType, newDefs[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
2330 break;
2331 }
2332 case nir_intrinsic_load_global:
2333 case nir_intrinsic_load_global_constant: {
2334 const DataType dType = getDType(insn);
2335 LValues &newDefs = convert(&insn->dest);
2336 Value *indirectOffset;
2337 uint32_t offset = getIndirect(&insn->src[0], 0, indirectOffset);
2338
2339 for (auto i = 0u; i < dest_components; ++i)
2340 loadFrom(FILE_MEMORY_GLOBAL, 0, dType, newDefs[i], offset, i, indirectOffset);
2341
2342 info_out->io.globalAccess |= 0x1;
2343 break;
2344 }
2345 case nir_intrinsic_store_global: {
2346 DataType sType = getSType(insn->src[0], false, false);
2347
2348 for (auto i = 0u; i < nir_intrinsic_src_components(insn, 0); ++i) {
2349 if (!((1u << i) & nir_intrinsic_write_mask(insn)))
2350 continue;
2351 if (typeSizeof(sType) == 8) {
2352 Value *split[2];
2353 mkSplit(split, 4, getSrc(&insn->src[0], i));
2354
2355 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType));
2356 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[0]);
2357
2358 sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, TYPE_U32, i * typeSizeof(sType) + 4);
2359 mkStore(OP_STORE, TYPE_U32, sym, getSrc(&insn->src[1], 0), split[1]);
2360 } else {
2361 Symbol *sym = mkSymbol(FILE_MEMORY_GLOBAL, 0, sType, i * typeSizeof(sType));
2362 mkStore(OP_STORE, sType, sym, getSrc(&insn->src[1], 0), getSrc(&insn->src[0], i));
2363 }
2364 }
2365
2366 info_out->io.globalAccess |= 0x2;
2367 break;
2368 }
2369 default:
2370 ERROR("unknown nir_intrinsic_op %s\n", nir_intrinsic_infos[op].name);
2371 return false;
2372 }
2373
2374 return true;
2375 }
2376
2377 bool
2378 Converter::visit(nir_jump_instr *insn)
2379 {
2380 switch (insn->type) {
2381 case nir_jump_return:
2382 // TODO: this only works in the main function
2383 mkFlow(OP_BRA, exit, CC_ALWAYS, NULL);
2384 bb->cfg.attach(&exit->cfg, Graph::Edge::CROSS);
2385 break;
2386 case nir_jump_break:
2387 case nir_jump_continue: {
2388 bool isBreak = insn->type == nir_jump_break;
2389 nir_block *block = insn->instr.block;
2390 BasicBlock *target = convert(block->successors[0]);
2391 mkFlow(isBreak ? OP_BREAK : OP_CONT, target, CC_ALWAYS, NULL);
2392 bb->cfg.attach(&target->cfg, isBreak ? Graph::Edge::CROSS : Graph::Edge::BACK);
2393 break;
2394 }
2395 default:
2396 ERROR("unknown nir_jump_type %u\n", insn->type);
2397 return false;
2398 }
2399
2400 return true;
2401 }
2402
2403 Value*
2404 Converter::convert(nir_load_const_instr *insn, uint8_t idx)
2405 {
2406 Value *val;
2407
2408 if (immInsertPos)
2409 setPosition(immInsertPos, true);
2410 else
2411 setPosition(bb, false);
2412
2413 switch (insn->def.bit_size) {
2414 case 64:
2415 val = loadImm(getSSA(8), insn->value[idx].u64);
2416 break;
2417 case 32:
2418 val = loadImm(getSSA(4), insn->value[idx].u32);
2419 break;
2420 case 16:
2421 val = loadImm(getSSA(2), insn->value[idx].u16);
2422 break;
2423 case 8:
2424 val = loadImm(getSSA(1), insn->value[idx].u8);
2425 break;
2426 default:
2427 unreachable("unhandled bit size!\n");
2428 }
2429 setPosition(bb, true);
2430 return val;
2431 }
2432
2433 bool
2434 Converter::visit(nir_load_const_instr *insn)
2435 {
2436 assert(insn->def.bit_size <= 64);
2437 immediates[insn->def.index] = insn;
2438 return true;
2439 }
2440
2441 #define DEFAULT_CHECKS \
2442 if (insn->dest.dest.ssa.num_components > 1) { \
2443 ERROR("nir_alu_instr only supported with 1 component!\n"); \
2444 return false; \
2445 } \
2446 if (insn->dest.write_mask != 1) { \
2447 ERROR("nir_alu_instr only with write_mask of 1 supported!\n"); \
2448 return false; \
2449 }
2450 bool
2451 Converter::visit(nir_alu_instr *insn)
2452 {
2453 const nir_op op = insn->op;
2454 const nir_op_info &info = nir_op_infos[op];
2455 DataType dType = getDType(insn);
2456 const std::vector<DataType> sTypes = getSTypes(insn);
2457
2458 Instruction *oldPos = this->bb->getExit();
2459
2460 switch (op) {
2461 case nir_op_fabs:
2462 case nir_op_iabs:
2463 case nir_op_fadd:
2464 case nir_op_iadd:
2465 case nir_op_iand:
2466 case nir_op_fceil:
2467 case nir_op_fcos:
2468 case nir_op_fddx:
2469 case nir_op_fddx_coarse:
2470 case nir_op_fddx_fine:
2471 case nir_op_fddy:
2472 case nir_op_fddy_coarse:
2473 case nir_op_fddy_fine:
2474 case nir_op_fdiv:
2475 case nir_op_idiv:
2476 case nir_op_udiv:
2477 case nir_op_fexp2:
2478 case nir_op_ffloor:
2479 case nir_op_ffma:
2480 case nir_op_flog2:
2481 case nir_op_fmax:
2482 case nir_op_imax:
2483 case nir_op_umax:
2484 case nir_op_fmin:
2485 case nir_op_imin:
2486 case nir_op_umin:
2487 case nir_op_fmod:
2488 case nir_op_imod:
2489 case nir_op_umod:
2490 case nir_op_fmul:
2491 case nir_op_imul:
2492 case nir_op_imul_high:
2493 case nir_op_umul_high:
2494 case nir_op_fneg:
2495 case nir_op_ineg:
2496 case nir_op_inot:
2497 case nir_op_ior:
2498 case nir_op_pack_64_2x32_split:
2499 case nir_op_fpow:
2500 case nir_op_frcp:
2501 case nir_op_frem:
2502 case nir_op_irem:
2503 case nir_op_frsq:
2504 case nir_op_fsat:
2505 case nir_op_ishr:
2506 case nir_op_ushr:
2507 case nir_op_fsin:
2508 case nir_op_fsqrt:
2509 case nir_op_ftrunc:
2510 case nir_op_ishl:
2511 case nir_op_ixor: {
2512 DEFAULT_CHECKS;
2513 LValues &newDefs = convert(&insn->dest);
2514 operation preOp = preOperationNeeded(op);
2515 if (preOp != OP_NOP) {
2516 assert(info.num_inputs < 2);
2517 Value *tmp = getSSA(typeSizeof(dType));
2518 Instruction *i0 = mkOp(preOp, dType, tmp);
2519 Instruction *i1 = mkOp(getOperation(op), dType, newDefs[0]);
2520 if (info.num_inputs) {
2521 i0->setSrc(0, getSrc(&insn->src[0]));
2522 i1->setSrc(0, tmp);
2523 }
2524 i1->subOp = getSubOp(op);
2525 } else {
2526 Instruction *i = mkOp(getOperation(op), dType, newDefs[0]);
2527 for (unsigned s = 0u; s < info.num_inputs; ++s) {
2528 i->setSrc(s, getSrc(&insn->src[s]));
2529 }
2530 i->subOp = getSubOp(op);
2531 }
2532 break;
2533 }
2534 case nir_op_ifind_msb:
2535 case nir_op_ufind_msb: {
2536 DEFAULT_CHECKS;
2537 LValues &newDefs = convert(&insn->dest);
2538 dType = sTypes[0];
2539 mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2540 break;
2541 }
2542 case nir_op_fround_even: {
2543 DEFAULT_CHECKS;
2544 LValues &newDefs = convert(&insn->dest);
2545 mkCvt(OP_CVT, dType, newDefs[0], dType, getSrc(&insn->src[0]))->rnd = ROUND_NI;
2546 break;
2547 }
2548 // convert instructions
2549 case nir_op_f2f32:
2550 case nir_op_f2i32:
2551 case nir_op_f2u32:
2552 case nir_op_i2f32:
2553 case nir_op_i2i32:
2554 case nir_op_u2f32:
2555 case nir_op_u2u32:
2556 case nir_op_f2f64:
2557 case nir_op_f2i64:
2558 case nir_op_f2u64:
2559 case nir_op_i2f64:
2560 case nir_op_i2i64:
2561 case nir_op_u2f64:
2562 case nir_op_u2u64: {
2563 DEFAULT_CHECKS;
2564 LValues &newDefs = convert(&insn->dest);
2565 Instruction *i = mkOp1(getOperation(op), dType, newDefs[0], getSrc(&insn->src[0]));
2566 if (op == nir_op_f2i32 || op == nir_op_f2i64 || op == nir_op_f2u32 || op == nir_op_f2u64)
2567 i->rnd = ROUND_Z;
2568 i->sType = sTypes[0];
2569 break;
2570 }
2571 // compare instructions
2572 case nir_op_feq32:
2573 case nir_op_ieq32:
2574 case nir_op_fge32:
2575 case nir_op_ige32:
2576 case nir_op_uge32:
2577 case nir_op_flt32:
2578 case nir_op_ilt32:
2579 case nir_op_ult32:
2580 case nir_op_fneu32:
2581 case nir_op_ine32: {
2582 DEFAULT_CHECKS;
2583 LValues &newDefs = convert(&insn->dest);
2584 Instruction *i = mkCmp(getOperation(op),
2585 getCondCode(op),
2586 dType,
2587 newDefs[0],
2588 dType,
2589 getSrc(&insn->src[0]),
2590 getSrc(&insn->src[1]));
2591 if (info.num_inputs == 3)
2592 i->setSrc(2, getSrc(&insn->src[2]));
2593 i->sType = sTypes[0];
2594 break;
2595 }
2596 case nir_op_mov:
2597 case nir_op_vec2:
2598 case nir_op_vec3:
2599 case nir_op_vec4:
2600 case nir_op_vec8:
2601 case nir_op_vec16: {
2602 LValues &newDefs = convert(&insn->dest);
2603 for (LValues::size_type c = 0u; c < newDefs.size(); ++c) {
2604 mkMov(newDefs[c], getSrc(&insn->src[c]), dType);
2605 }
2606 break;
2607 }
2608 // (un)pack
2609 case nir_op_pack_64_2x32: {
2610 LValues &newDefs = convert(&insn->dest);
2611 Instruction *merge = mkOp(OP_MERGE, dType, newDefs[0]);
2612 merge->setSrc(0, getSrc(&insn->src[0], 0));
2613 merge->setSrc(1, getSrc(&insn->src[0], 1));
2614 break;
2615 }
2616 case nir_op_pack_half_2x16_split: {
2617 LValues &newDefs = convert(&insn->dest);
2618 Value *tmpH = getSSA();
2619 Value *tmpL = getSSA();
2620
2621 mkCvt(OP_CVT, TYPE_F16, tmpL, TYPE_F32, getSrc(&insn->src[0]));
2622 mkCvt(OP_CVT, TYPE_F16, tmpH, TYPE_F32, getSrc(&insn->src[1]));
2623 mkOp3(OP_INSBF, TYPE_U32, newDefs[0], tmpH, mkImm(0x1010), tmpL);
2624 break;
2625 }
2626 case nir_op_unpack_half_2x16_split_x:
2627 case nir_op_unpack_half_2x16_split_y: {
2628 LValues &newDefs = convert(&insn->dest);
2629 Instruction *cvt = mkCvt(OP_CVT, TYPE_F32, newDefs[0], TYPE_F16, getSrc(&insn->src[0]));
2630 if (op == nir_op_unpack_half_2x16_split_y)
2631 cvt->subOp = 1;
2632 break;
2633 }
2634 case nir_op_unpack_64_2x32: {
2635 LValues &newDefs = convert(&insn->dest);
2636 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, newDefs[1]);
2637 break;
2638 }
2639 case nir_op_unpack_64_2x32_split_x: {
2640 LValues &newDefs = convert(&insn->dest);
2641 mkOp1(OP_SPLIT, dType, newDefs[0], getSrc(&insn->src[0]))->setDef(1, getSSA());
2642 break;
2643 }
2644 case nir_op_unpack_64_2x32_split_y: {
2645 LValues &newDefs = convert(&insn->dest);
2646 mkOp1(OP_SPLIT, dType, getSSA(), getSrc(&insn->src[0]))->setDef(1, newDefs[0]);
2647 break;
2648 }
2649 // special instructions
2650 case nir_op_fsign:
2651 case nir_op_isign: {
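// Emulate sign(x) as (x > 0) - (x < 0). SET produces 1.0f for true in the
// float case but ~0 (-1) in the integer case, which is presumably why the
// operand order of the final SUB is swapped between the two paths.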
2652 DEFAULT_CHECKS;
2653 DataType iType;
2654 if (::isFloatType(dType))
2655 iType = TYPE_F32;
2656 else
2657 iType = TYPE_S32;
2658
2659 LValues &newDefs = convert(&insn->dest);
2660 LValue *val0 = getScratch();
2661 LValue *val1 = getScratch();
2662 mkCmp(OP_SET, CC_GT, iType, val0, dType, getSrc(&insn->src[0]), zero);
2663 mkCmp(OP_SET, CC_LT, iType, val1, dType, getSrc(&insn->src[0]), zero);
2664
2665 if (dType == TYPE_F64) {
2666 mkOp2(OP_SUB, iType, val0, val0, val1);
2667 mkCvt(OP_CVT, TYPE_F64, newDefs[0], iType, val0);
2668 } else if (dType == TYPE_S64 || dType == TYPE_U64) {
2669 mkOp2(OP_SUB, iType, val0, val1, val0);
2670 mkOp2(OP_SHR, iType, val1, val0, loadImm(NULL, 31));
2671 mkOp2(OP_MERGE, dType, newDefs[0], val0, val1);
2672 } else if (::isFloatType(dType))
2673 mkOp2(OP_SUB, iType, newDefs[0], val0, val1);
2674 else
2675 mkOp2(OP_SUB, iType, newDefs[0], val1, val0);
2676 break;
2677 }
2678 case nir_op_fcsel:
2679 case nir_op_b32csel: {
2680 DEFAULT_CHECKS;
2681 LValues &newDefs = convert(&insn->dest);
2682 mkCmp(OP_SLCT, CC_NE, dType, newDefs[0], sTypes[0], getSrc(&insn->src[1]), getSrc(&insn->src[2]), getSrc(&insn->src[0]));
2683 break;
2684 }
2685 case nir_op_ibitfield_extract:
2686 case nir_op_ubitfield_extract: {
2687 DEFAULT_CHECKS;
2688 Value *tmp = getSSA();
2689 LValues &newDefs = convert(&insn->dest);
2690 mkOp3(OP_INSBF, dType, tmp, getSrc(&insn->src[2]), loadImm(NULL, 0x808), getSrc(&insn->src[1]));
2691 mkOp2(OP_EXTBF, dType, newDefs[0], getSrc(&insn->src[0]), tmp);
2692 break;
2693 }
2694 case nir_op_bfm: {
2695 DEFAULT_CHECKS;
2696 LValues &newDefs = convert(&insn->dest);
2697 mkOp2(OP_BMSK, dType, newDefs[0], getSrc(&insn->src[1]), getSrc(&insn->src[0]))->subOp = NV50_IR_SUBOP_BMSK_W;
2698 break;
2699 }
2700 case nir_op_bitfield_insert: {
2701 DEFAULT_CHECKS;
2702 LValues &newDefs = convert(&insn->dest);
2703 LValue *temp = getSSA();
2704 mkOp3(OP_INSBF, TYPE_U32, temp, getSrc(&insn->src[3]), mkImm(0x808), getSrc(&insn->src[2]));
2705 mkOp3(OP_INSBF, dType, newDefs[0], getSrc(&insn->src[1]), temp, getSrc(&insn->src[0]));
2706 break;
2707 }
2708 case nir_op_bit_count: {
2709 DEFAULT_CHECKS;
2710 LValues &newDefs = convert(&insn->dest);
2711 mkOp2(OP_POPCNT, dType, newDefs[0], getSrc(&insn->src[0]), getSrc(&insn->src[0]));
2712 break;
2713 }
2714 case nir_op_bitfield_reverse: {
2715 DEFAULT_CHECKS;
2716 LValues &newDefs = convert(&insn->dest);
2717 mkOp1(OP_BREV, TYPE_U32, newDefs[0], getSrc(&insn->src[0]));
2718 break;
2719 }
2720 case nir_op_find_lsb: {
2721 DEFAULT_CHECKS;
2722 LValues &newDefs = convert(&insn->dest);
2723 Value *tmp = getSSA();
2724 mkOp1(OP_BREV, TYPE_U32, tmp, getSrc(&insn->src[0]));
2725 mkOp1(OP_BFIND, TYPE_U32, newDefs[0], tmp)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
2726 break;
2727 }
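// The extract_[iu](8|16) cases below build PRMT byte selectors: each result
// nibble picks a source byte (4..7 address the zero immediate, giving
// zero-extension), and +8 in a nibble makes PRMT replicate that byte's sign
// bit instead (sign-extension); this reading of the magic constants follows
// the PRMT/PERMT encoding.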
2728 case nir_op_extract_u8: {
2729 DEFAULT_CHECKS;
2730 LValues &newDefs = convert(&insn->dest);
2731 Value *prmt = getSSA();
2732 mkOp2(OP_OR, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x4440));
2733 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2734 break;
2735 }
2736 case nir_op_extract_i8: {
2737 DEFAULT_CHECKS;
2738 LValues &newDefs = convert(&insn->dest);
2739 Value *prmt = getSSA();
2740 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x1111), loadImm(NULL, 0x8880));
2741 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2742 break;
2743 }
2744 case nir_op_extract_u16: {
2745 DEFAULT_CHECKS;
2746 LValues &newDefs = convert(&insn->dest);
2747 Value *prmt = getSSA();
2748 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x22), loadImm(NULL, 0x4410));
2749 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2750 break;
2751 }
2752 case nir_op_extract_i16: {
2753 DEFAULT_CHECKS;
2754 LValues &newDefs = convert(&insn->dest);
2755 Value *prmt = getSSA();
2756 mkOp3(OP_MAD, TYPE_U32, prmt, getSrc(&insn->src[1]), loadImm(NULL, 0x2222), loadImm(NULL, 0x9910));
2757 mkOp3(OP_PERMT, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), prmt, loadImm(NULL, 0));
2758 break;
2759 }
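// urol/uror lower to SHF funnel shifts with the same register in both
// halves, so bits shifted out of one end re-enter at the other, i.e. a
// rotate.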
2760 case nir_op_urol: {
2761 DEFAULT_CHECKS;
2762 LValues &newDefs = convert(&insn->dest);
2763 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2764 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2765 ->subOp = NV50_IR_SUBOP_SHF_L |
2766 NV50_IR_SUBOP_SHF_W |
2767 NV50_IR_SUBOP_SHF_HI;
2768 break;
2769 }
2770 case nir_op_uror: {
2771 DEFAULT_CHECKS;
2772 LValues &newDefs = convert(&insn->dest);
2773 mkOp3(OP_SHF, TYPE_U32, newDefs[0], getSrc(&insn->src[0]),
2774 getSrc(&insn->src[1]), getSrc(&insn->src[0]))
2775 ->subOp = NV50_IR_SUBOP_SHF_R |
2776 NV50_IR_SUBOP_SHF_W |
2777 NV50_IR_SUBOP_SHF_LO;
2778 break;
2779 }
2780 // boolean conversions
2781 case nir_op_b2f32: {
2782 DEFAULT_CHECKS;
2783 LValues &newDefs = convert(&insn->dest);
2784 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1.0f));
2785 break;
2786 }
2787 case nir_op_b2f64: {
2788 DEFAULT_CHECKS;
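// 0x3ff00000 is the high word of the IEEE-754 double 1.0; ANDing it with
// the 0/~0 boolean and merging in a zero low word yields exactly 0.0 or 1.0.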
2789 LValues &newDefs = convert(&insn->dest);
2790 Value *tmp = getSSA(4);
2791 mkOp2(OP_AND, TYPE_U32, tmp, getSrc(&insn->src[0]), loadImm(NULL, 0x3ff00000));
2792 mkOp2(OP_MERGE, TYPE_U64, newDefs[0], loadImm(NULL, 0), tmp);
2793 break;
2794 }
2795 case nir_op_f2b32:
2796 case nir_op_i2b32: {
2797 DEFAULT_CHECKS;
2798 LValues &newDefs = convert(&insn->dest);
2799 Value *src1;
2800 if (typeSizeof(sTypes[0]) == 8) {
2801 src1 = loadImm(getSSA(8), 0.0);
2802 } else {
2803 src1 = zero;
2804 }
2805 CondCode cc = op == nir_op_f2b32 ? CC_NEU : CC_NE;
2806 mkCmp(OP_SET, cc, TYPE_U32, newDefs[0], sTypes[0], getSrc(&insn->src[0]), src1);
2807 break;
2808 }
2809 case nir_op_b2i32: {
2810 DEFAULT_CHECKS;
2811 LValues &newDefs = convert(&insn->dest);
2812 mkOp2(OP_AND, TYPE_U32, newDefs[0], getSrc(&insn->src[0]), loadImm(NULL, 1));
2813 break;
2814 }
2815 case nir_op_b2i64: {
2816 DEFAULT_CHECKS;
2817 LValues &newDefs = convert(&insn->dest);
2818 LValue *def = getScratch();
2819 mkOp2(OP_AND, TYPE_U32, def, getSrc(&insn->src[0]), loadImm(NULL, 1));
2820 mkOp2(OP_MERGE, TYPE_S64, newDefs[0], def, loadImm(NULL, 0));
2821 break;
2822 }
2823 default:
2824 ERROR("unknown nir_op %s\n", info.name);
2825 assert(false);
2826 return false;
2827 }
2828
2829 if (!oldPos) {
2830 oldPos = this->bb->getEntry();
2831 oldPos->precise = insn->exact;
2832 }
2833
2834 if (unlikely(!oldPos))
2835 return true;
2836
2837 while (oldPos->next) {
2838 oldPos = oldPos->next;
2839 oldPos->precise = insn->exact;
2840 }
2841 oldPos->saturate = insn->dest.saturate;
2842
2843 return true;
2844 }
2845 #undef DEFAULT_CHECKS
2846
2847 bool
2848 Converter::visit(nir_ssa_undef_instr *insn)
2849 {
2850 LValues &newDefs = convert(&insn->def);
2851 for (uint8_t i = 0u; i < insn->def.num_components; ++i) {
2852 mkOp(OP_NOP, TYPE_NONE, newDefs[i]);
2853 }
2854 return true;
2855 }
2856
2857 #define CASE_SAMPLER(ty) \
2858 case GLSL_SAMPLER_DIM_ ## ty : \
2859 if (isArray && !isShadow) \
2860 return TEX_TARGET_ ## ty ## _ARRAY; \
2861 else if (!isArray && isShadow) \
2862 return TEX_TARGET_## ty ## _SHADOW; \
2863 else if (isArray && isShadow) \
2864 return TEX_TARGET_## ty ## _ARRAY_SHADOW; \
2865 else \
2866 return TEX_TARGET_ ## ty
2867
2868 TexTarget
2869 Converter::convert(glsl_sampler_dim dim, bool isArray, bool isShadow)
2870 {
2871 switch (dim) {
2872 CASE_SAMPLER(1D);
2873 CASE_SAMPLER(2D);
2874 CASE_SAMPLER(CUBE);
2875 case GLSL_SAMPLER_DIM_3D:
2876 return TEX_TARGET_3D;
2877 case GLSL_SAMPLER_DIM_MS:
2878 if (isArray)
2879 return TEX_TARGET_2D_MS_ARRAY;
2880 return TEX_TARGET_2D_MS;
2881 case GLSL_SAMPLER_DIM_RECT:
2882 if (isShadow)
2883 return TEX_TARGET_RECT_SHADOW;
2884 return TEX_TARGET_RECT;
2885 case GLSL_SAMPLER_DIM_BUF:
2886 return TEX_TARGET_BUFFER;
2887 case GLSL_SAMPLER_DIM_EXTERNAL:
2888 return TEX_TARGET_2D;
2889 default:
2890 ERROR("unknown glsl_sampler_dim %u\n", dim);
2891 assert(false);
2892 return TEX_TARGET_COUNT;
2893 }
2894 }
2895 #undef CASE_SAMPLER
2896
2897 Value*
2898 Converter::applyProjection(Value *src, Value *proj)
2899 {
2900 if (!proj)
2901 return src;
2902 return mkOp2v(OP_MUL, TYPE_F32, getScratch(), src, proj);
2903 }
2904
2905 unsigned int
2906 Converter::getNIRArgCount(TexInstruction::Target& target)
2907 {
2908 unsigned int result = target.getArgCount();
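// codegen counts more coordinate arguments than NIR supplies for cube
// arrays and for MS targets (the sample index arrives as a separate
// source), so trim those slots from the count.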
2909 if (target.isCube() && target.isArray())
2910 result--;
2911 if (target.isMS())
2912 result--;
2913 return result;
2914 }
2915
2916 CacheMode
2917 Converter::convert(enum gl_access_qualifier access)
2918 {
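// Map NIR access qualifiers onto cache modes: volatile bypasses the caches
// (CV), coherent caches only at the global level (CG), and everything else
// may use the full cache hierarchy (CA).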
2919 if (access & ACCESS_VOLATILE)
2920 return CACHE_CV;
2921 if (access & ACCESS_COHERENT)
2922 return CACHE_CG;
2923 return CACHE_CA;
2924 }
2925
2926 bool
2927 Converter::visit(nir_tex_instr *insn)
2928 {
2929 switch (insn->op) {
2930 case nir_texop_lod:
2931 case nir_texop_query_levels:
2932 case nir_texop_tex:
2933 case nir_texop_texture_samples:
2934 case nir_texop_tg4:
2935 case nir_texop_txb:
2936 case nir_texop_txd:
2937 case nir_texop_txf:
2938 case nir_texop_txf_ms:
2939 case nir_texop_txl:
2940 case nir_texop_txs: {
2941 LValues &newDefs = convert(&insn->dest);
2942 std::vector<Value*> srcs;
2943 std::vector<Value*> defs;
2944 std::vector<nir_src*> offsets;
2945 uint8_t mask = 0;
2946 bool lz = false;
2947 Value *proj = NULL;
2948 TexInstruction::Target target = convert(insn->sampler_dim, insn->is_array, insn->is_shadow);
2949 operation op = getOperation(insn->op);
2950
2951 int r, s;
2952 int biasIdx = nir_tex_instr_src_index(insn, nir_tex_src_bias);
2953 int compIdx = nir_tex_instr_src_index(insn, nir_tex_src_comparator);
2954 int coordsIdx = nir_tex_instr_src_index(insn, nir_tex_src_coord);
2955 int ddxIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddx);
2956 int ddyIdx = nir_tex_instr_src_index(insn, nir_tex_src_ddy);
2957 int msIdx = nir_tex_instr_src_index(insn, nir_tex_src_ms_index);
2958 int lodIdx = nir_tex_instr_src_index(insn, nir_tex_src_lod);
2959 int offsetIdx = nir_tex_instr_src_index(insn, nir_tex_src_offset);
2960 int projIdx = nir_tex_instr_src_index(insn, nir_tex_src_projector);
2961 int sampOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_offset);
2962 int texOffIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_offset);
2963 int sampHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_sampler_handle);
2964 int texHandleIdx = nir_tex_instr_src_index(insn, nir_tex_src_texture_handle);
2965
2966 bool bindless = sampHandleIdx != -1 || texHandleIdx != -1;
2967 assert((sampHandleIdx != -1) == (texHandleIdx != -1));
2968
2969 if (projIdx != -1)
2970 proj = mkOp1v(OP_RCP, TYPE_F32, getScratch(), getSrc(&insn->src[projIdx].src, 0));
2971
2972 srcs.resize(insn->coord_components);
2973 for (uint8_t i = 0u; i < insn->coord_components; ++i)
2974 srcs[i] = applyProjection(getSrc(&insn->src[coordsIdx].src, i), proj);
2975
2976 // sometimes we get fewer args than target.getArgCount, but codegen expects the latter
2977 if (insn->coord_components) {
2978 uint32_t argCount = target.getArgCount();
2979
2980 if (target.isMS())
2981 argCount -= 1;
2982
2983 for (uint32_t i = 0u; i < (argCount - insn->coord_components); ++i)
2984 srcs.push_back(getSSA());
2985 }
2986
2987 if (insn->op == nir_texop_texture_samples)
2988 srcs.push_back(zero);
2989 else if (!insn->num_srcs)
2990 srcs.push_back(loadImm(NULL, 0));
2991 if (biasIdx != -1)
2992 srcs.push_back(getSrc(&insn->src[biasIdx].src, 0));
2993 if (lodIdx != -1)
2994 srcs.push_back(getSrc(&insn->src[lodIdx].src, 0));
2995 else if (op == OP_TXF)
2996 lz = true;
2997 if (msIdx != -1)
2998 srcs.push_back(getSrc(&insn->src[msIdx].src, 0));
2999 if (offsetIdx != -1)
3000 offsets.push_back(&insn->src[offsetIdx].src);
3001 if (compIdx != -1)
3002 srcs.push_back(applyProjection(getSrc(&insn->src[compIdx].src, 0), proj));
3003 if (texOffIdx != -1) {
3004 srcs.push_back(getSrc(&insn->src[texOffIdx].src, 0));
3005 texOffIdx = srcs.size() - 1;
3006 }
3007 if (sampOffIdx != -1) {
3008 srcs.push_back(getSrc(&insn->src[sampOffIdx].src, 0));
3009 sampOffIdx = srcs.size() - 1;
3010 }
3011 if (bindless) {
3012 // currently we use the lower bits
3013 Value *split[2];
3014 Value *handle = getSrc(&insn->src[sampHandleIdx].src, 0);
3015
3016 mkSplit(split, 4, handle);
3017
3018 srcs.push_back(split[0]);
3019 texOffIdx = srcs.size() - 1;
3020 }
3021
3022 r = bindless ? 0xff : insn->texture_index;
3023 s = bindless ? 0x1f : insn->sampler_index;
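// For bindless, 0xff/0x1f appear to act as "all ones" sentinel indices;
// the real handle travels as the extra source pushed above.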
3024
3025 defs.resize(newDefs.size());
3026 for (uint8_t d = 0u; d < newDefs.size(); ++d) {
3027 defs[d] = newDefs[d];
3028 mask |= 1 << d;
3029 }
3030 if (target.isMS() || (op == OP_TEX && prog->getType() != Program::TYPE_FRAGMENT))
3031 lz = true;
3032
3033 TexInstruction *texi = mkTex(op, target.getEnum(), r, s, defs, srcs);
3034 texi->tex.levelZero = lz;
3035 texi->tex.mask = mask;
3036 texi->tex.bindless = bindless;
3037
3038 if (texOffIdx != -1)
3039 texi->tex.rIndirectSrc = texOffIdx;
3040 if (sampOffIdx != -1)
3041 texi->tex.sIndirectSrc = sampOffIdx;
3042
3043 switch (insn->op) {
3044 case nir_texop_tg4:
3045 if (!target.isShadow())
3046 texi->tex.gatherComp = insn->component;
3047 break;
3048 case nir_texop_txs:
3049 texi->tex.query = TXQ_DIMS;
3050 break;
3051 case nir_texop_texture_samples:
3052 texi->tex.mask = 0x4;
3053 texi->tex.query = TXQ_TYPE;
3054 break;
3055 case nir_texop_query_levels:
3056 texi->tex.mask = 0x8;
3057 texi->tex.query = TXQ_DIMS;
3058 break;
3059 default:
3060 break;
3061 }
3062
3063 texi->tex.useOffsets = offsets.size();
3064 if (texi->tex.useOffsets) {
3065 for (uint8_t s = 0; s < texi->tex.useOffsets; ++s) {
3066 for (uint32_t c = 0u; c < 3; ++c) {
3067 uint8_t s2 = std::min(c, target.getDim() - 1);
3068 texi->offset[s][c].set(getSrc(offsets[s], s2));
3069 texi->offset[s][c].setInsn(texi);
3070 }
3071 }
3072 }
3073
3074 if (op == OP_TXG && offsetIdx == -1) {
3075 if (nir_tex_instr_has_explicit_tg4_offsets(insn)) {
3076 texi->tex.useOffsets = 4;
3077 setPosition(texi, false);
3078 for (uint8_t i = 0; i < 4; ++i) {
3079 for (uint8_t j = 0; j < 2; ++j) {
3080 texi->offset[i][j].set(loadImm(NULL, insn->tg4_offsets[i][j]));
3081 texi->offset[i][j].setInsn(texi);
3082 }
3083 }
3084 setPosition(texi, true);
3085 }
3086 }
3087
3088 if (ddxIdx != -1 && ddyIdx != -1) {
3089 for (uint8_t c = 0u; c < target.getDim() + target.isCube(); ++c) {
3090 texi->dPdx[c].set(getSrc(&insn->src[ddxIdx].src, c));
3091 texi->dPdy[c].set(getSrc(&insn->src[ddyIdx].src, c));
3092 }
3093 }
3094
3095 break;
3096 }
3097 default:
3098 ERROR("unknown nir_texop %u\n", insn->op);
3099 return false;
3100 }
3101 return true;
3102 }
3103
3104 bool
3105 Converter::run()
3106 {
3107 bool progress;
3108
3109 if (prog->dbgFlags & NV50_IR_DEBUG_VERBOSE)
3110 nir_print_shader(nir, stderr);
3111
3112 struct nir_lower_subgroups_options subgroup_options = {};
3113 subgroup_options.subgroup_size = 32;
3114 subgroup_options.ballot_bit_size = 32;
3115 subgroup_options.lower_elect = true;
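// subgroups map onto 32-wide warps; elect has no direct hardware op here,
// so let NIR lower it to more primitive subgroup ops.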
3116
3117 /* prepare for IO lowering */
3118 NIR_PASS_V(nir, nir_opt_deref);
3119 NIR_PASS_V(nir, nir_lower_regs_to_ssa);
3120 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
3121
3122 /* codegen assumes vec4 alignment for memory */
3123 NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_function_temp, function_temp_type_info);
3124 NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp, nir_address_format_32bit_offset);
3125 NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
3126
3127 NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
3128 type_size, (nir_lower_io_options)0);
3129
3130 NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
3131
3132 NIR_PASS_V(nir, nir_lower_load_const_to_scalar);
3133 NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
3134 NIR_PASS_V(nir, nir_lower_phis_to_scalar);
3135
3136 /* TODO: improve this lowering/optimisation loop so that we can use
3137 * nir_opt_idiv_const effectively before this.
3138 */
3139 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_precise);
3140
3141 do {
3142 progress = false;
3143 NIR_PASS(progress, nir, nir_copy_prop);
3144 NIR_PASS(progress, nir, nir_opt_remove_phis);
3145 NIR_PASS(progress, nir, nir_opt_trivial_continues);
3146 NIR_PASS(progress, nir, nir_opt_cse);
3147 NIR_PASS(progress, nir, nir_opt_algebraic);
3148 NIR_PASS(progress, nir, nir_opt_constant_folding);
3149 NIR_PASS(progress, nir, nir_copy_prop);
3150 NIR_PASS(progress, nir, nir_opt_dce);
3151 NIR_PASS(progress, nir, nir_opt_dead_cf);
3152 } while (progress);
3153
3154 NIR_PASS_V(nir, nir_lower_bool_to_int32);
3155 NIR_PASS_V(nir, nir_convert_from_ssa, true);
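// codegen expects booleans as 32-bit 0/~0 integers, and phi nodes are
// rewritten into registers (phi webs only) since the converter maps NIR
// registers, not phis.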
3156
3157 // Garbage collect dead instructions
3158 nir_sweep(nir);
3159
3160 if (!parseNIR()) {
3161 ERROR("Couldn't prase NIR!\n");
3162 return false;
3163 }
3164
3165 if (!assignSlots()) {
3166 ERROR("Couldn't assign slots!\n");
3167 return false;
3168 }
3169
3170 if (prog->dbgFlags & NV50_IR_DEBUG_BASIC)
3171 nir_print_shader(nir, stderr);
3172
3173 nir_foreach_function(function, nir) {
3174 if (!visit(function))
3175 return false;
3176 }
3177
3178 return true;
3179 }
3180
3181 } // unnamed namespace
3182
3183 namespace nv50_ir {
3184
3185 bool
3186 Program::makeFromNIR(struct nv50_ir_prog_info *info,
3187 struct nv50_ir_prog_info_out *info_out)
3188 {
3189 nir_shader *nir = (nir_shader*)info->bin.source;
3190 Converter converter(this, nir, info, info_out);
3191 bool result = converter.run();
3192 if (!result)
3193 return result;
3194 LoweringHelper lowering;
3195 lowering.run(this);
3196 tlsSize = info_out->bin.tlsSpace;
3197 return result;
3198 }
3199
3200 } // namespace nv50_ir
3201
3202 static nir_shader_compiler_options
3203 nvir_nir_shader_compiler_options(int chipset)
3204 {
3205 nir_shader_compiler_options op = {};
3206 op.lower_fdiv = (chipset >= NVISA_GV100_CHIPSET);
3207 op.lower_ffma16 = false;
3208 op.lower_ffma32 = false;
3209 op.lower_ffma64 = false;
3210 op.fuse_ffma16 = false; /* nir doesn't track mad vs fma */
3211 op.fuse_ffma32 = false; /* nir doesn't track mad vs fma */
3212 op.fuse_ffma64 = false; /* nir doesn't track mad vs fma */
3213 op.lower_flrp16 = (chipset >= NVISA_GV100_CHIPSET);
3214 op.lower_flrp32 = true;
3215 op.lower_flrp64 = true;
3216 op.lower_fpow = false; // TODO: nir's lowering is broken, or we could use it
3217 op.lower_fsat = false;
3218 op.lower_fsqrt = false; // TODO: only before gm200
3219 op.lower_sincos = false;
3220 op.lower_fmod = true;
3221 op.lower_bitfield_extract = false;
3222 op.lower_bitfield_extract_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3223 op.lower_bitfield_insert = false;
3224 op.lower_bitfield_insert_to_shifts = (chipset >= NVISA_GV100_CHIPSET);
3225 op.lower_bitfield_insert_to_bitfield_select = false;
3226 op.lower_bitfield_reverse = false;
3227 op.lower_bit_count = false;
3228 op.lower_ifind_msb = false;
3229 op.lower_find_lsb = false;
3230 op.lower_uadd_carry = true; // TODO
3231 op.lower_usub_borrow = true; // TODO
3232 op.lower_mul_high = false;
3233 op.lower_negate = false;
3234 op.lower_sub = true;
3235 op.lower_scmp = true; // TODO: not implemented yet
3236 op.lower_vector_cmp = false;
3237 op.lower_bitops = false;
3238 op.lower_isign = (chipset >= NVISA_GV100_CHIPSET);
3239 op.lower_fsign = (chipset >= NVISA_GV100_CHIPSET);
3240 op.lower_fdph = false;
3241 op.lower_fdot = false;
3242 op.fdot_replicates = false; // TODO
3243 op.lower_ffloor = false; // TODO
3244 op.lower_ffract = true;
3245 op.lower_fceil = false; // TODO
3246 op.lower_ftrunc = false;
3247 op.lower_ldexp = true;
3248 op.lower_pack_half_2x16 = true;
3249 op.lower_pack_unorm_2x16 = true;
3250 op.lower_pack_snorm_2x16 = true;
3251 op.lower_pack_unorm_4x8 = true;
3252 op.lower_pack_snorm_4x8 = true;
3253 op.lower_unpack_half_2x16 = true;
3254 op.lower_unpack_unorm_2x16 = true;
3255 op.lower_unpack_snorm_2x16 = true;
3256 op.lower_unpack_unorm_4x8 = true;
3257 op.lower_unpack_snorm_4x8 = true;
3258 op.lower_pack_split = false;
3259 op.lower_extract_byte = (chipset < NVISA_GM107_CHIPSET);
3260 op.lower_extract_word = (chipset < NVISA_GM107_CHIPSET);
3261 op.lower_all_io_to_temps = false;
3262 op.lower_all_io_to_elements = false;
3263 op.vertex_id_zero_based = false;
3264 op.lower_base_vertex = false;
3265 op.lower_helper_invocation = false;
3266 op.optimize_sample_mask_in = false;
3267 op.lower_cs_local_index_from_id = true;
3268 op.lower_cs_local_id_from_index = false;
3269 op.lower_device_index_to_zero = false; // TODO
3270 op.lower_wpos_pntc = false; // TODO
3271 op.lower_hadd = true; // TODO
3272 op.lower_add_sat = true; // TODO
3273 op.vectorize_io = false;
3274 op.lower_to_scalar = false;
3275 op.unify_interfaces = false;
3276 op.use_interpolated_input_intrinsics = true;
3277 op.lower_mul_2x32_64 = true; // TODO
3278 op.lower_rotate = (chipset < NVISA_GV100_CHIPSET);
3279 op.has_imul24 = false;
3280 op.intel_vec4 = false;
3281 op.max_unroll_iterations = 32;
3282 op.lower_int64_options = (nir_lower_int64_options) (
3283 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul64 : 0) |
3284 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_isign64 : 0) |
3285 nir_lower_divmod64 |
3286 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_high64 : 0) |
3287 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_mov64 : 0) |
3288 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_icmp64 : 0) |
3289 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_iabs64 : 0) |
3290 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ineg64 : 0) |
3291 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_logic64 : 0) |
3292 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_minmax64 : 0) |
3293 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_shift64 : 0) |
3294 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_imul_2x32_64 : 0) |
3295 ((chipset >= NVISA_GM107_CHIPSET) ? nir_lower_extract64 : 0) |
3296 nir_lower_ufind_msb64
3297 );
3298 op.lower_doubles_options = (nir_lower_doubles_options) (
3299 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drcp : 0) |
3300 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsqrt : 0) |
3301 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_drsq : 0) |
3302 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dfract : 0) |
3303 nir_lower_dmod |
3304 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_dsub : 0) |
3305 ((chipset >= NVISA_GV100_CHIPSET) ? nir_lower_ddiv : 0)
3306 );
3307 return op;
3308 }
3309
3310 static const nir_shader_compiler_options gf100_nir_shader_compiler_options =
3311 nvir_nir_shader_compiler_options(NVISA_GF100_CHIPSET);
3312 static const nir_shader_compiler_options gm107_nir_shader_compiler_options =
3313 nvir_nir_shader_compiler_options(NVISA_GM107_CHIPSET);
3314 static const nir_shader_compiler_options gv100_nir_shader_compiler_options =
3315 nvir_nir_shader_compiler_options(NVISA_GV100_CHIPSET);
3316
3317 const nir_shader_compiler_options *
3318 nv50_ir_nir_shader_compiler_options(int chipset)
3319 {
3320 if (chipset >= NVISA_GV100_CHIPSET)
3321 return &gv100_nir_shader_compiler_options;
3322 if (chipset >= NVISA_GM107_CHIPSET)
3323 return &gm107_nir_shader_compiler_options;
3324 return &gf100_nir_shader_compiler_options;
3325 }
3326