1 /*
2 * Copyright 2011 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "tgsi/tgsi_build.h"
24 #include "tgsi/tgsi_dump.h"
25 #include "tgsi/tgsi_scan.h"
26 #include "tgsi/tgsi_util.h"
27
28 #include <set>
29
30 #include "nv50_ir.h"
31 #include "nv50_ir_from_common.h"
32 #include "nv50_ir_util.h"
33
34 namespace tgsi {
35
36 class Source;
37
38 static nv50_ir::operation translateOpcode(uint opcode);
39 static nv50_ir::DataFile translateFile(uint file);
40 static nv50_ir::TexTarget translateTexture(uint texTarg);
41 static nv50_ir::SVSemantic translateSysVal(uint sysval);
42 static nv50_ir::CacheMode translateCacheMode(uint qualifier);
43
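// Thin wrappers around the TGSI instruction and register structs that expose
// sources, destinations and texture offsets in a form convenient for the
// translation below.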
44 class Instruction
45 {
46 public:
47 Instruction(const struct tgsi_full_instruction *inst) : insn(inst) { }
48
49 class SrcRegister
50 {
51 public:
52 SrcRegister(const struct tgsi_full_src_register *src)
53 : reg(src->Register),
54 fsr(src)
55 { }
56
57 SrcRegister(const struct tgsi_src_register& src) : reg(src), fsr(NULL) { }
58
59 SrcRegister(const struct tgsi_ind_register& ind)
60 : reg(tgsi_util_get_src_from_ind(&ind)),
61 fsr(NULL)
62 { }
63
64 struct tgsi_src_register offsetToSrc(struct tgsi_texture_offset off)
65 {
66 struct tgsi_src_register reg;
67 memset(&reg, 0, sizeof(reg));
68 reg.Index = off.Index;
69 reg.File = off.File;
70 reg.SwizzleX = off.SwizzleX;
71 reg.SwizzleY = off.SwizzleY;
72 reg.SwizzleZ = off.SwizzleZ;
73 return reg;
74 }
75
76 SrcRegister(const struct tgsi_texture_offset& off) :
77 reg(offsetToSrc(off)),
78 fsr(NULL)
79 { }
80
81 uint getFile() const { return reg.File; }
82
83 bool is2D() const { return reg.Dimension; }
84
85 bool isIndirect(int dim) const
86 {
87 return (dim && fsr) ? fsr->Dimension.Indirect : reg.Indirect;
88 }
89
90 int getIndex(int dim) const
91 {
92 return (dim && fsr) ? fsr->Dimension.Index : reg.Index;
93 }
94
95 int getSwizzle(int chan) const
96 {
97 return tgsi_util_get_src_register_swizzle(&reg, chan);
98 }
99
100 int getArrayId() const
101 {
102 if (isIndirect(0))
103 return fsr->Indirect.ArrayID;
104 return 0;
105 }
106
107 nv50_ir::Modifier getMod(int chan) const;
108
109 SrcRegister getIndirect(int dim) const
110 {
111 assert(fsr && isIndirect(dim));
112 if (dim)
113 return SrcRegister(fsr->DimIndirect);
114 return SrcRegister(fsr->Indirect);
115 }
116
117 uint32_t getValueU32(int c, const uint32_t *data) const
118 {
119 assert(reg.File == TGSI_FILE_IMMEDIATE);
120 assert(!reg.Absolute);
121 assert(!reg.Negate);
122 return data[reg.Index * 4 + getSwizzle(c)];
123 }
124
125 private:
126 const struct tgsi_src_register reg;
127 const struct tgsi_full_src_register *fsr;
128 };
129
130 class DstRegister
131 {
132 public:
133 DstRegister(const struct tgsi_full_dst_register *dst)
134 : reg(dst->Register),
135 fdr(dst)
136 { }
137
138 DstRegister(const struct tgsi_dst_register& dst) : reg(dst), fdr(NULL) { }
139
140 uint getFile() const { return reg.File; }
141
142 bool is2D() const { return reg.Dimension; }
143
144 bool isIndirect(int dim) const
145 {
146 return (dim && fdr) ? fdr->Dimension.Indirect : reg.Indirect;
147 }
148
149 int getIndex(int dim) const
150 {
151 return (dim && fdr) ? fdr->Dimension.Index : reg.Index;
152 }
153
154 unsigned int getMask() const { return reg.WriteMask; }
155
156 bool isMasked(int chan) const { return !(getMask() & (1 << chan)); }
157
158 SrcRegister getIndirect(int dim) const
159 {
160 assert(fdr && isIndirect(dim));
161 if (dim)
162 return SrcRegister(fdr->DimIndirect);
163 return SrcRegister(fdr->Indirect);
164 }
165
166 struct tgsi_full_src_register asSrc()
167 {
168 assert(fdr);
169 return tgsi_full_src_register_from_dst(fdr);
170 }
171
172 int getArrayId() const
173 {
174 if (isIndirect(0))
175 return fdr->Indirect.ArrayID;
176 return 0;
177 }
178
179 private:
180 const struct tgsi_dst_register reg;
181 const struct tgsi_full_dst_register *fdr;
182 };
183
184 inline uint getOpcode() const { return insn->Instruction.Opcode; }
185
186 unsigned int srcCount() const { return insn->Instruction.NumSrcRegs; }
187 unsigned int dstCount() const { return insn->Instruction.NumDstRegs; }
188
189 // mask of used components of source s
190 unsigned int srcMask(unsigned int s) const;
191 unsigned int texOffsetMask() const;
192
193 SrcRegister getSrc(unsigned int s) const
194 {
195 assert(s < srcCount());
196 return SrcRegister(&insn->Src[s]);
197 }
198
199 DstRegister getDst(unsigned int d) const
200 {
201 assert(d < dstCount());
202 return DstRegister(&insn->Dst[d]);
203 }
204
205 SrcRegister getTexOffset(unsigned int i) const
206 {
207 assert(i < TGSI_FULL_MAX_TEX_OFFSETS);
208 return SrcRegister(insn->TexOffsets[i]);
209 }
210
211 unsigned int getNumTexOffsets() const { return insn->Texture.NumOffsets; }
212
213 bool checkDstSrcAliasing() const;
214
215 inline nv50_ir::operation getOP() const {
216 return translateOpcode(getOpcode()); }
217
218 nv50_ir::DataType inferSrcType() const;
219 nv50_ir::DataType inferDstType() const;
220
221 nv50_ir::CondCode getSetCond() const;
222
223 nv50_ir::TexInstruction::Target getTexture(const Source *, int s) const;
224
225 const nv50_ir::TexInstruction::ImgFormatDesc *getImageFormat() const {
226 return nv50_ir::TexInstruction::translateImgFormat((enum pipe_format)insn->Memory.Format);
227 }
228
229 nv50_ir::TexTarget getImageTarget() const {
230 return translateTexture(insn->Memory.Texture);
231 }
232
233 nv50_ir::CacheMode getCacheMode() const {
234 if (!insn->Instruction.Memory)
235 return nv50_ir::CACHE_CA;
236 return translateCacheMode(insn->Memory.Qualifier);
237 }
238
239 inline uint getLabel() { return insn->Label.Label; }
240
241 unsigned getSaturate() const { return insn->Instruction.Saturate; }
242
243 void print() const
244 {
245 tgsi_dump_instruction(insn, 1);
246 }
247
248 private:
249 const struct tgsi_full_instruction *insn;
250 };
251
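// Mask of coordinate components that a texel offset applies to, derived from
// the dimensionality of the texture target.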
252 unsigned int Instruction::texOffsetMask() const
253 {
254 const struct tgsi_instruction_texture *tex = &insn->Texture;
255 assert(insn->Instruction.Texture);
256
257 switch (tex->Texture) {
258 case TGSI_TEXTURE_BUFFER:
259 case TGSI_TEXTURE_1D:
260 case TGSI_TEXTURE_SHADOW1D:
261 case TGSI_TEXTURE_1D_ARRAY:
262 case TGSI_TEXTURE_SHADOW1D_ARRAY:
263 return 0x1;
264 case TGSI_TEXTURE_2D:
265 case TGSI_TEXTURE_SHADOW2D:
266 case TGSI_TEXTURE_2D_ARRAY:
267 case TGSI_TEXTURE_SHADOW2D_ARRAY:
268 case TGSI_TEXTURE_RECT:
269 case TGSI_TEXTURE_SHADOWRECT:
270 case TGSI_TEXTURE_2D_MSAA:
271 case TGSI_TEXTURE_2D_ARRAY_MSAA:
272 return 0x3;
273 case TGSI_TEXTURE_3D:
274 return 0x7;
275 default:
276 assert(!"Unexpected texture target");
277 return 0xf;
278 }
279 }
280
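// Mask of components of source s that are actually read, starting from the
// destination write mask and adjusted according to the opcode's semantics
// (dot products, texture coordinates, 64-bit component pairs, etc.).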
281 unsigned int Instruction::srcMask(unsigned int s) const
282 {
283 unsigned int mask = insn->Dst[0].Register.WriteMask;
284
285 switch (insn->Instruction.Opcode) {
286 case TGSI_OPCODE_COS:
287 case TGSI_OPCODE_SIN:
288 return (mask & 0x8) | ((mask & 0x7) ? 0x1 : 0x0);
289 case TGSI_OPCODE_DP2:
290 return 0x3;
291 case TGSI_OPCODE_DP3:
292 return 0x7;
293 case TGSI_OPCODE_DP4:
294 case TGSI_OPCODE_KILL_IF: /* WriteMask ignored */
295 return 0xf;
296 case TGSI_OPCODE_DST:
297 return mask & (s ? 0xa : 0x6);
298 case TGSI_OPCODE_EX2:
299 case TGSI_OPCODE_EXP:
300 case TGSI_OPCODE_LG2:
301 case TGSI_OPCODE_LOG:
302 case TGSI_OPCODE_POW:
303 case TGSI_OPCODE_RCP:
304 case TGSI_OPCODE_RSQ:
305 return 0x1;
306 case TGSI_OPCODE_IF:
307 case TGSI_OPCODE_UIF:
308 return 0x1;
309 case TGSI_OPCODE_LIT:
310 return 0xb;
311 case TGSI_OPCODE_TEX2:
312 case TGSI_OPCODE_TXB2:
313 case TGSI_OPCODE_TXL2:
314 return (s == 0) ? 0xf : 0x3;
315 case TGSI_OPCODE_TEX:
316 case TGSI_OPCODE_TXB:
317 case TGSI_OPCODE_TXD:
318 case TGSI_OPCODE_TXL:
319 case TGSI_OPCODE_TXP:
320 case TGSI_OPCODE_TXF:
321 case TGSI_OPCODE_TG4:
322 case TGSI_OPCODE_TEX_LZ:
323 case TGSI_OPCODE_TXF_LZ:
324 case TGSI_OPCODE_LODQ:
325 {
326 const struct tgsi_instruction_texture *tex = &insn->Texture;
327
328 assert(insn->Instruction.Texture);
329
330 mask = 0x7;
331 if (insn->Instruction.Opcode != TGSI_OPCODE_TEX &&
332 insn->Instruction.Opcode != TGSI_OPCODE_TEX_LZ &&
333 insn->Instruction.Opcode != TGSI_OPCODE_TXF_LZ &&
334 insn->Instruction.Opcode != TGSI_OPCODE_TXD)
335 mask |= 0x8; /* bias, lod or proj */
336
337 switch (tex->Texture) {
338 case TGSI_TEXTURE_1D:
339 mask &= 0x9;
340 break;
341 case TGSI_TEXTURE_SHADOW1D:
342 mask &= 0xd;
343 break;
344 case TGSI_TEXTURE_1D_ARRAY:
345 case TGSI_TEXTURE_2D:
346 case TGSI_TEXTURE_RECT:
347 mask &= 0xb;
348 break;
349 case TGSI_TEXTURE_CUBE_ARRAY:
350 case TGSI_TEXTURE_SHADOW2D_ARRAY:
351 case TGSI_TEXTURE_SHADOWCUBE:
352 case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
353 mask |= 0x8;
354 break;
355 default:
356 break;
357 }
358 }
359 return mask;
360 case TGSI_OPCODE_TXQ:
361 return 1;
362 case TGSI_OPCODE_D2I:
363 case TGSI_OPCODE_D2U:
364 case TGSI_OPCODE_D2F:
365 case TGSI_OPCODE_DSLT:
366 case TGSI_OPCODE_DSGE:
367 case TGSI_OPCODE_DSEQ:
368 case TGSI_OPCODE_DSNE:
369 case TGSI_OPCODE_U64SEQ:
370 case TGSI_OPCODE_U64SNE:
371 case TGSI_OPCODE_I64SLT:
372 case TGSI_OPCODE_U64SLT:
373 case TGSI_OPCODE_I64SGE:
374 case TGSI_OPCODE_U64SGE:
375 case TGSI_OPCODE_I642F:
376 case TGSI_OPCODE_U642F:
377 switch (util_bitcount(mask)) {
378 case 1: return 0x3;
379 case 2: return 0xf;
380 default:
381 assert(!"unexpected mask");
382 return 0xf;
383 }
384 case TGSI_OPCODE_I2D:
385 case TGSI_OPCODE_U2D:
386 case TGSI_OPCODE_F2D: {
387 unsigned int x = 0;
388 if ((mask & 0x3) == 0x3)
389 x |= 1;
390 if ((mask & 0xc) == 0xc)
391 x |= 2;
392 return x;
393 }
394 case TGSI_OPCODE_PK2H:
395 return 0x3;
396 case TGSI_OPCODE_UP2H:
397 return 0x1;
398 default:
399 break;
400 }
401
402 return mask;
403 }
404
405 nv50_ir::Modifier Instruction::SrcRegister::getMod(int chan) const
406 {
407 nv50_ir::Modifier m(0);
408
409 if (reg.Absolute)
410 m = m | nv50_ir::Modifier(NV50_IR_MOD_ABS);
411 if (reg.Negate)
412 m = m | nv50_ir::Modifier(NV50_IR_MOD_NEG);
413 return m;
414 }
415
416 static nv50_ir::DataFile translateFile(uint file)
417 {
418 switch (file) {
419 case TGSI_FILE_CONSTANT: return nv50_ir::FILE_MEMORY_CONST;
420 case TGSI_FILE_INPUT: return nv50_ir::FILE_SHADER_INPUT;
421 case TGSI_FILE_OUTPUT: return nv50_ir::FILE_SHADER_OUTPUT;
422 case TGSI_FILE_TEMPORARY: return nv50_ir::FILE_GPR;
423 case TGSI_FILE_ADDRESS: return nv50_ir::FILE_ADDRESS;
424 case TGSI_FILE_IMMEDIATE: return nv50_ir::FILE_IMMEDIATE;
425 case TGSI_FILE_SYSTEM_VALUE: return nv50_ir::FILE_SYSTEM_VALUE;
426 case TGSI_FILE_BUFFER: return nv50_ir::FILE_MEMORY_BUFFER;
427 case TGSI_FILE_IMAGE: return nv50_ir::FILE_MEMORY_GLOBAL;
428 case TGSI_FILE_MEMORY: return nv50_ir::FILE_MEMORY_GLOBAL;
429 case TGSI_FILE_SAMPLER:
430 case TGSI_FILE_NULL:
431 default:
432 return nv50_ir::FILE_NULL;
433 }
434 }
435
436 static nv50_ir::SVSemantic translateSysVal(uint sysval)
437 {
438 switch (sysval) {
439 case TGSI_SEMANTIC_FACE: return nv50_ir::SV_FACE;
440 case TGSI_SEMANTIC_PSIZE: return nv50_ir::SV_POINT_SIZE;
441 case TGSI_SEMANTIC_PRIMID: return nv50_ir::SV_PRIMITIVE_ID;
442 case TGSI_SEMANTIC_INSTANCEID: return nv50_ir::SV_INSTANCE_ID;
443 case TGSI_SEMANTIC_VERTEXID: return nv50_ir::SV_VERTEX_ID;
444 case TGSI_SEMANTIC_GRID_SIZE: return nv50_ir::SV_NCTAID;
445 case TGSI_SEMANTIC_BLOCK_ID: return nv50_ir::SV_CTAID;
446 case TGSI_SEMANTIC_BLOCK_SIZE: return nv50_ir::SV_NTID;
447 case TGSI_SEMANTIC_THREAD_ID: return nv50_ir::SV_TID;
448 case TGSI_SEMANTIC_SAMPLEID: return nv50_ir::SV_SAMPLE_INDEX;
449 case TGSI_SEMANTIC_SAMPLEPOS: return nv50_ir::SV_SAMPLE_POS;
450 case TGSI_SEMANTIC_SAMPLEMASK: return nv50_ir::SV_SAMPLE_MASK;
451 case TGSI_SEMANTIC_INVOCATIONID: return nv50_ir::SV_INVOCATION_ID;
452 case TGSI_SEMANTIC_TESSCOORD: return nv50_ir::SV_TESS_COORD;
453 case TGSI_SEMANTIC_TESSOUTER: return nv50_ir::SV_TESS_OUTER;
454 case TGSI_SEMANTIC_TESSINNER: return nv50_ir::SV_TESS_INNER;
455 case TGSI_SEMANTIC_VERTICESIN: return nv50_ir::SV_VERTEX_COUNT;
456 case TGSI_SEMANTIC_HELPER_INVOCATION: return nv50_ir::SV_THREAD_KILL;
457 case TGSI_SEMANTIC_BASEVERTEX: return nv50_ir::SV_BASEVERTEX;
458 case TGSI_SEMANTIC_BASEINSTANCE: return nv50_ir::SV_BASEINSTANCE;
459 case TGSI_SEMANTIC_DRAWID: return nv50_ir::SV_DRAWID;
460 case TGSI_SEMANTIC_WORK_DIM: return nv50_ir::SV_WORK_DIM;
461 case TGSI_SEMANTIC_SUBGROUP_INVOCATION: return nv50_ir::SV_LANEID;
462 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK: return nv50_ir::SV_LANEMASK_EQ;
463 case TGSI_SEMANTIC_SUBGROUP_LT_MASK: return nv50_ir::SV_LANEMASK_LT;
464 case TGSI_SEMANTIC_SUBGROUP_LE_MASK: return nv50_ir::SV_LANEMASK_LE;
465 case TGSI_SEMANTIC_SUBGROUP_GT_MASK: return nv50_ir::SV_LANEMASK_GT;
466 case TGSI_SEMANTIC_SUBGROUP_GE_MASK: return nv50_ir::SV_LANEMASK_GE;
467 default:
468 assert(0);
469 return nv50_ir::SV_CLOCK;
470 }
471 }
472
473 #define NV50_IR_TEX_TARG_CASE(a, b) \
474 case TGSI_TEXTURE_##a: return nv50_ir::TEX_TARGET_##b;
475
476 static nv50_ir::TexTarget translateTexture(uint tex)
477 {
478 switch (tex) {
479 NV50_IR_TEX_TARG_CASE(1D, 1D);
480 NV50_IR_TEX_TARG_CASE(2D, 2D);
481 NV50_IR_TEX_TARG_CASE(2D_MSAA, 2D_MS);
482 NV50_IR_TEX_TARG_CASE(3D, 3D);
483 NV50_IR_TEX_TARG_CASE(CUBE, CUBE);
484 NV50_IR_TEX_TARG_CASE(RECT, RECT);
485 NV50_IR_TEX_TARG_CASE(1D_ARRAY, 1D_ARRAY);
486 NV50_IR_TEX_TARG_CASE(2D_ARRAY, 2D_ARRAY);
487 NV50_IR_TEX_TARG_CASE(2D_ARRAY_MSAA, 2D_MS_ARRAY);
488 NV50_IR_TEX_TARG_CASE(CUBE_ARRAY, CUBE_ARRAY);
489 NV50_IR_TEX_TARG_CASE(SHADOW1D, 1D_SHADOW);
490 NV50_IR_TEX_TARG_CASE(SHADOW2D, 2D_SHADOW);
491 NV50_IR_TEX_TARG_CASE(SHADOWCUBE, CUBE_SHADOW);
492 NV50_IR_TEX_TARG_CASE(SHADOWRECT, RECT_SHADOW);
493 NV50_IR_TEX_TARG_CASE(SHADOW1D_ARRAY, 1D_ARRAY_SHADOW);
494 NV50_IR_TEX_TARG_CASE(SHADOW2D_ARRAY, 2D_ARRAY_SHADOW);
495 NV50_IR_TEX_TARG_CASE(SHADOWCUBE_ARRAY, CUBE_ARRAY_SHADOW);
496 NV50_IR_TEX_TARG_CASE(BUFFER, BUFFER);
497
498 case TGSI_TEXTURE_UNKNOWN:
499 default:
500 assert(!"invalid texture target");
501 return nv50_ir::TEX_TARGET_2D;
502 }
503 }
504
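// Map TGSI memory qualifiers to cache modes: volatile accesses bypass caching
// (CV), coherent ones are cached globally only (CG), everything else uses the
// default cached mode (CA).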
505 static nv50_ir::CacheMode translateCacheMode(uint qualifier)
506 {
507 if (qualifier & TGSI_MEMORY_VOLATILE)
508 return nv50_ir::CACHE_CV;
509 if (qualifier & TGSI_MEMORY_COHERENT)
510 return nv50_ir::CACHE_CG;
511 return nv50_ir::CACHE_CA;
512 }
513
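// TGSI opcodes are largely untyped, so operand and result types have to be
// inferred from the opcode itself.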
514 nv50_ir::DataType Instruction::inferSrcType() const
515 {
516 switch (getOpcode()) {
517 case TGSI_OPCODE_UIF:
518 case TGSI_OPCODE_AND:
519 case TGSI_OPCODE_OR:
520 case TGSI_OPCODE_XOR:
521 case TGSI_OPCODE_NOT:
522 case TGSI_OPCODE_SHL:
523 case TGSI_OPCODE_U2F:
524 case TGSI_OPCODE_U2D:
525 case TGSI_OPCODE_U2I64:
526 case TGSI_OPCODE_UADD:
527 case TGSI_OPCODE_UDIV:
528 case TGSI_OPCODE_UMOD:
529 case TGSI_OPCODE_UMAD:
530 case TGSI_OPCODE_UMUL:
531 case TGSI_OPCODE_UMUL_HI:
532 case TGSI_OPCODE_UMAX:
533 case TGSI_OPCODE_UMIN:
534 case TGSI_OPCODE_USEQ:
535 case TGSI_OPCODE_USGE:
536 case TGSI_OPCODE_USLT:
537 case TGSI_OPCODE_USNE:
538 case TGSI_OPCODE_USHR:
539 case TGSI_OPCODE_ATOMUADD:
540 case TGSI_OPCODE_ATOMXCHG:
541 case TGSI_OPCODE_ATOMCAS:
542 case TGSI_OPCODE_ATOMAND:
543 case TGSI_OPCODE_ATOMOR:
544 case TGSI_OPCODE_ATOMXOR:
545 case TGSI_OPCODE_ATOMUMIN:
546 case TGSI_OPCODE_ATOMUMAX:
547 case TGSI_OPCODE_ATOMDEC_WRAP:
548 case TGSI_OPCODE_ATOMINC_WRAP:
549 case TGSI_OPCODE_UBFE:
550 case TGSI_OPCODE_UMSB:
551 case TGSI_OPCODE_UP2H:
552 case TGSI_OPCODE_VOTE_ALL:
553 case TGSI_OPCODE_VOTE_ANY:
554 case TGSI_OPCODE_VOTE_EQ:
555 return nv50_ir::TYPE_U32;
556 case TGSI_OPCODE_I2F:
557 case TGSI_OPCODE_I2D:
558 case TGSI_OPCODE_I2I64:
559 case TGSI_OPCODE_IDIV:
560 case TGSI_OPCODE_IMUL_HI:
561 case TGSI_OPCODE_IMAX:
562 case TGSI_OPCODE_IMIN:
563 case TGSI_OPCODE_IABS:
564 case TGSI_OPCODE_INEG:
565 case TGSI_OPCODE_ISGE:
566 case TGSI_OPCODE_ISHR:
567 case TGSI_OPCODE_ISLT:
568 case TGSI_OPCODE_ISSG:
569 case TGSI_OPCODE_MOD:
570 case TGSI_OPCODE_UARL:
571 case TGSI_OPCODE_ATOMIMIN:
572 case TGSI_OPCODE_ATOMIMAX:
573 case TGSI_OPCODE_IBFE:
574 case TGSI_OPCODE_IMSB:
575 return nv50_ir::TYPE_S32;
576 case TGSI_OPCODE_D2F:
577 case TGSI_OPCODE_D2I:
578 case TGSI_OPCODE_D2U:
579 case TGSI_OPCODE_D2I64:
580 case TGSI_OPCODE_D2U64:
581 case TGSI_OPCODE_DABS:
582 case TGSI_OPCODE_DNEG:
583 case TGSI_OPCODE_DADD:
584 case TGSI_OPCODE_DMUL:
585 case TGSI_OPCODE_DDIV:
586 case TGSI_OPCODE_DMAX:
587 case TGSI_OPCODE_DMIN:
588 case TGSI_OPCODE_DSLT:
589 case TGSI_OPCODE_DSGE:
590 case TGSI_OPCODE_DSEQ:
591 case TGSI_OPCODE_DSNE:
592 case TGSI_OPCODE_DRCP:
593 case TGSI_OPCODE_DSQRT:
594 case TGSI_OPCODE_DMAD:
595 case TGSI_OPCODE_DFMA:
596 case TGSI_OPCODE_DFRAC:
597 case TGSI_OPCODE_DRSQ:
598 case TGSI_OPCODE_DTRUNC:
599 case TGSI_OPCODE_DCEIL:
600 case TGSI_OPCODE_DFLR:
601 case TGSI_OPCODE_DROUND:
602 return nv50_ir::TYPE_F64;
603 case TGSI_OPCODE_U64SEQ:
604 case TGSI_OPCODE_U64SNE:
605 case TGSI_OPCODE_U64SLT:
606 case TGSI_OPCODE_U64SGE:
607 case TGSI_OPCODE_U64MIN:
608 case TGSI_OPCODE_U64MAX:
609 case TGSI_OPCODE_U64ADD:
610 case TGSI_OPCODE_U64MUL:
611 case TGSI_OPCODE_U64SHL:
612 case TGSI_OPCODE_U64SHR:
613 case TGSI_OPCODE_U64DIV:
614 case TGSI_OPCODE_U64MOD:
615 case TGSI_OPCODE_U642F:
616 case TGSI_OPCODE_U642D:
617 return nv50_ir::TYPE_U64;
618 case TGSI_OPCODE_I64ABS:
619 case TGSI_OPCODE_I64SSG:
620 case TGSI_OPCODE_I64NEG:
621 case TGSI_OPCODE_I64SLT:
622 case TGSI_OPCODE_I64SGE:
623 case TGSI_OPCODE_I64MIN:
624 case TGSI_OPCODE_I64MAX:
625 case TGSI_OPCODE_I64SHR:
626 case TGSI_OPCODE_I64DIV:
627 case TGSI_OPCODE_I64MOD:
628 case TGSI_OPCODE_I642F:
629 case TGSI_OPCODE_I642D:
630 return nv50_ir::TYPE_S64;
631 default:
632 return nv50_ir::TYPE_F32;
633 }
634 }
635
636 nv50_ir::DataType Instruction::inferDstType() const
637 {
638 switch (getOpcode()) {
639 case TGSI_OPCODE_D2U:
640 case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;
641 case TGSI_OPCODE_D2I:
642 case TGSI_OPCODE_F2I: return nv50_ir::TYPE_S32;
643 case TGSI_OPCODE_FSEQ:
644 case TGSI_OPCODE_FSGE:
645 case TGSI_OPCODE_FSLT:
646 case TGSI_OPCODE_FSNE:
647 case TGSI_OPCODE_DSEQ:
648 case TGSI_OPCODE_DSGE:
649 case TGSI_OPCODE_DSLT:
650 case TGSI_OPCODE_DSNE:
651 case TGSI_OPCODE_I64SLT:
652 case TGSI_OPCODE_I64SGE:
653 case TGSI_OPCODE_U64SEQ:
654 case TGSI_OPCODE_U64SNE:
655 case TGSI_OPCODE_U64SLT:
656 case TGSI_OPCODE_U64SGE:
657 case TGSI_OPCODE_PK2H:
658 return nv50_ir::TYPE_U32;
659 case TGSI_OPCODE_I2F:
660 case TGSI_OPCODE_U2F:
661 case TGSI_OPCODE_D2F:
662 case TGSI_OPCODE_I642F:
663 case TGSI_OPCODE_U642F:
664 case TGSI_OPCODE_UP2H:
665 return nv50_ir::TYPE_F32;
666 case TGSI_OPCODE_I2D:
667 case TGSI_OPCODE_U2D:
668 case TGSI_OPCODE_F2D:
669 case TGSI_OPCODE_I642D:
670 case TGSI_OPCODE_U642D:
671 return nv50_ir::TYPE_F64;
672 case TGSI_OPCODE_I2I64:
673 case TGSI_OPCODE_U2I64:
674 case TGSI_OPCODE_F2I64:
675 case TGSI_OPCODE_D2I64:
676 return nv50_ir::TYPE_S64;
677 case TGSI_OPCODE_F2U64:
678 case TGSI_OPCODE_D2U64:
679 return nv50_ir::TYPE_U64;
680 default:
681 return inferSrcType();
682 }
683 }
684
685 nv50_ir::CondCode Instruction::getSetCond() const
686 {
687 using namespace nv50_ir;
688
689 switch (getOpcode()) {
690 case TGSI_OPCODE_SLT:
691 case TGSI_OPCODE_ISLT:
692 case TGSI_OPCODE_USLT:
693 case TGSI_OPCODE_FSLT:
694 case TGSI_OPCODE_DSLT:
695 case TGSI_OPCODE_I64SLT:
696 case TGSI_OPCODE_U64SLT:
697 return CC_LT;
698 case TGSI_OPCODE_SLE:
699 return CC_LE;
700 case TGSI_OPCODE_SGE:
701 case TGSI_OPCODE_ISGE:
702 case TGSI_OPCODE_USGE:
703 case TGSI_OPCODE_FSGE:
704 case TGSI_OPCODE_DSGE:
705 case TGSI_OPCODE_I64SGE:
706 case TGSI_OPCODE_U64SGE:
707 return CC_GE;
708 case TGSI_OPCODE_SGT:
709 return CC_GT;
710 case TGSI_OPCODE_SEQ:
711 case TGSI_OPCODE_USEQ:
712 case TGSI_OPCODE_FSEQ:
713 case TGSI_OPCODE_DSEQ:
714 case TGSI_OPCODE_U64SEQ:
715 return CC_EQ;
716 case TGSI_OPCODE_SNE:
717 case TGSI_OPCODE_FSNE:
718 case TGSI_OPCODE_DSNE:
719 case TGSI_OPCODE_U64SNE:
720 return CC_NEU;
721 case TGSI_OPCODE_USNE:
722 return CC_NE;
723 default:
724 return CC_ALWAYS;
725 }
726 }
727
728 #define NV50_IR_OPCODE_CASE(a, b) case TGSI_OPCODE_##a: return nv50_ir::OP_##b
729
730 static nv50_ir::operation translateOpcode(uint opcode)
731 {
732 switch (opcode) {
733 NV50_IR_OPCODE_CASE(ARL, SHL);
734 NV50_IR_OPCODE_CASE(MOV, MOV);
735
736 NV50_IR_OPCODE_CASE(RCP, RCP);
737 NV50_IR_OPCODE_CASE(RSQ, RSQ);
738 NV50_IR_OPCODE_CASE(SQRT, SQRT);
739
740 NV50_IR_OPCODE_CASE(MUL, MUL);
741 NV50_IR_OPCODE_CASE(ADD, ADD);
742
743 NV50_IR_OPCODE_CASE(MIN, MIN);
744 NV50_IR_OPCODE_CASE(MAX, MAX);
745 NV50_IR_OPCODE_CASE(SLT, SET);
746 NV50_IR_OPCODE_CASE(SGE, SET);
747 NV50_IR_OPCODE_CASE(MAD, MAD);
748 NV50_IR_OPCODE_CASE(FMA, FMA);
749
750 NV50_IR_OPCODE_CASE(FLR, FLOOR);
751 NV50_IR_OPCODE_CASE(ROUND, CVT);
752 NV50_IR_OPCODE_CASE(EX2, EX2);
753 NV50_IR_OPCODE_CASE(LG2, LG2);
754 NV50_IR_OPCODE_CASE(POW, POW);
755
756 NV50_IR_OPCODE_CASE(COS, COS);
757 NV50_IR_OPCODE_CASE(DDX, DFDX);
758 NV50_IR_OPCODE_CASE(DDX_FINE, DFDX);
759 NV50_IR_OPCODE_CASE(DDY, DFDY);
760 NV50_IR_OPCODE_CASE(DDY_FINE, DFDY);
761 NV50_IR_OPCODE_CASE(KILL, DISCARD);
762 NV50_IR_OPCODE_CASE(DEMOTE, DISCARD);
763
764 NV50_IR_OPCODE_CASE(SEQ, SET);
765 NV50_IR_OPCODE_CASE(SGT, SET);
766 NV50_IR_OPCODE_CASE(SIN, SIN);
767 NV50_IR_OPCODE_CASE(SLE, SET);
768 NV50_IR_OPCODE_CASE(SNE, SET);
769 NV50_IR_OPCODE_CASE(TEX, TEX);
770 NV50_IR_OPCODE_CASE(TXD, TXD);
771 NV50_IR_OPCODE_CASE(TXP, TEX);
772
773 NV50_IR_OPCODE_CASE(CAL, CALL);
774 NV50_IR_OPCODE_CASE(RET, RET);
775 NV50_IR_OPCODE_CASE(CMP, SLCT);
776
777 NV50_IR_OPCODE_CASE(TXB, TXB);
778
779 NV50_IR_OPCODE_CASE(DIV, DIV);
780
781 NV50_IR_OPCODE_CASE(TXL, TXL);
782 NV50_IR_OPCODE_CASE(TEX_LZ, TXL);
783
784 NV50_IR_OPCODE_CASE(CEIL, CEIL);
785 NV50_IR_OPCODE_CASE(I2F, CVT);
786 NV50_IR_OPCODE_CASE(NOT, NOT);
787 NV50_IR_OPCODE_CASE(TRUNC, TRUNC);
788 NV50_IR_OPCODE_CASE(SHL, SHL);
789
790 NV50_IR_OPCODE_CASE(AND, AND);
791 NV50_IR_OPCODE_CASE(OR, OR);
792 NV50_IR_OPCODE_CASE(MOD, MOD);
793 NV50_IR_OPCODE_CASE(XOR, XOR);
794 NV50_IR_OPCODE_CASE(TXF, TXF);
795 NV50_IR_OPCODE_CASE(TXF_LZ, TXF);
796 NV50_IR_OPCODE_CASE(TXQ, TXQ);
797 NV50_IR_OPCODE_CASE(TXQS, TXQ);
798 NV50_IR_OPCODE_CASE(TG4, TXG);
799 NV50_IR_OPCODE_CASE(LODQ, TXLQ);
800
801 NV50_IR_OPCODE_CASE(EMIT, EMIT);
802 NV50_IR_OPCODE_CASE(ENDPRIM, RESTART);
803
804 NV50_IR_OPCODE_CASE(KILL_IF, DISCARD);
805
806 NV50_IR_OPCODE_CASE(F2I, CVT);
807 NV50_IR_OPCODE_CASE(FSEQ, SET);
808 NV50_IR_OPCODE_CASE(FSGE, SET);
809 NV50_IR_OPCODE_CASE(FSLT, SET);
810 NV50_IR_OPCODE_CASE(FSNE, SET);
811 NV50_IR_OPCODE_CASE(IDIV, DIV);
812 NV50_IR_OPCODE_CASE(IMAX, MAX);
813 NV50_IR_OPCODE_CASE(IMIN, MIN);
814 NV50_IR_OPCODE_CASE(IABS, ABS);
815 NV50_IR_OPCODE_CASE(INEG, NEG);
816 NV50_IR_OPCODE_CASE(ISGE, SET);
817 NV50_IR_OPCODE_CASE(ISHR, SHR);
818 NV50_IR_OPCODE_CASE(ISLT, SET);
819 NV50_IR_OPCODE_CASE(F2U, CVT);
820 NV50_IR_OPCODE_CASE(U2F, CVT);
821 NV50_IR_OPCODE_CASE(UADD, ADD);
822 NV50_IR_OPCODE_CASE(UDIV, DIV);
823 NV50_IR_OPCODE_CASE(UMAD, MAD);
824 NV50_IR_OPCODE_CASE(UMAX, MAX);
825 NV50_IR_OPCODE_CASE(UMIN, MIN);
826 NV50_IR_OPCODE_CASE(UMOD, MOD);
827 NV50_IR_OPCODE_CASE(UMUL, MUL);
828 NV50_IR_OPCODE_CASE(USEQ, SET);
829 NV50_IR_OPCODE_CASE(USGE, SET);
830 NV50_IR_OPCODE_CASE(USHR, SHR);
831 NV50_IR_OPCODE_CASE(USLT, SET);
832 NV50_IR_OPCODE_CASE(USNE, SET);
833
834 NV50_IR_OPCODE_CASE(DABS, ABS);
835 NV50_IR_OPCODE_CASE(DNEG, NEG);
836 NV50_IR_OPCODE_CASE(DADD, ADD);
837 NV50_IR_OPCODE_CASE(DMUL, MUL);
838 NV50_IR_OPCODE_CASE(DDIV, DIV);
839 NV50_IR_OPCODE_CASE(DMAX, MAX);
840 NV50_IR_OPCODE_CASE(DMIN, MIN);
841 NV50_IR_OPCODE_CASE(DSLT, SET);
842 NV50_IR_OPCODE_CASE(DSGE, SET);
843 NV50_IR_OPCODE_CASE(DSEQ, SET);
844 NV50_IR_OPCODE_CASE(DSNE, SET);
845 NV50_IR_OPCODE_CASE(DRCP, RCP);
846 NV50_IR_OPCODE_CASE(DSQRT, SQRT);
847 NV50_IR_OPCODE_CASE(DMAD, MAD);
848 NV50_IR_OPCODE_CASE(DFMA, FMA);
849 NV50_IR_OPCODE_CASE(D2I, CVT);
850 NV50_IR_OPCODE_CASE(D2U, CVT);
851 NV50_IR_OPCODE_CASE(I2D, CVT);
852 NV50_IR_OPCODE_CASE(U2D, CVT);
853 NV50_IR_OPCODE_CASE(DRSQ, RSQ);
854 NV50_IR_OPCODE_CASE(DTRUNC, TRUNC);
855 NV50_IR_OPCODE_CASE(DCEIL, CEIL);
856 NV50_IR_OPCODE_CASE(DFLR, FLOOR);
857 NV50_IR_OPCODE_CASE(DROUND, CVT);
858
859 NV50_IR_OPCODE_CASE(U64SEQ, SET);
860 NV50_IR_OPCODE_CASE(U64SNE, SET);
861 NV50_IR_OPCODE_CASE(U64SLT, SET);
862 NV50_IR_OPCODE_CASE(U64SGE, SET);
863 NV50_IR_OPCODE_CASE(I64SLT, SET);
864 NV50_IR_OPCODE_CASE(I64SGE, SET);
865 NV50_IR_OPCODE_CASE(I2I64, CVT);
866 NV50_IR_OPCODE_CASE(U2I64, CVT);
867 NV50_IR_OPCODE_CASE(F2I64, CVT);
868 NV50_IR_OPCODE_CASE(F2U64, CVT);
869 NV50_IR_OPCODE_CASE(D2I64, CVT);
870 NV50_IR_OPCODE_CASE(D2U64, CVT);
871 NV50_IR_OPCODE_CASE(I642F, CVT);
872 NV50_IR_OPCODE_CASE(U642F, CVT);
873 NV50_IR_OPCODE_CASE(I642D, CVT);
874 NV50_IR_OPCODE_CASE(U642D, CVT);
875
876 NV50_IR_OPCODE_CASE(I64MIN, MIN);
877 NV50_IR_OPCODE_CASE(U64MIN, MIN);
878 NV50_IR_OPCODE_CASE(I64MAX, MAX);
879 NV50_IR_OPCODE_CASE(U64MAX, MAX);
880 NV50_IR_OPCODE_CASE(I64ABS, ABS);
881 NV50_IR_OPCODE_CASE(I64NEG, NEG);
882 NV50_IR_OPCODE_CASE(U64ADD, ADD);
883 NV50_IR_OPCODE_CASE(U64MUL, MUL);
884 NV50_IR_OPCODE_CASE(U64SHL, SHL);
885 NV50_IR_OPCODE_CASE(I64SHR, SHR);
886 NV50_IR_OPCODE_CASE(U64SHR, SHR);
887
888 NV50_IR_OPCODE_CASE(IMUL_HI, MUL);
889 NV50_IR_OPCODE_CASE(UMUL_HI, MUL);
890
891 NV50_IR_OPCODE_CASE(SAMPLE, TEX);
892 NV50_IR_OPCODE_CASE(SAMPLE_B, TXB);
893 NV50_IR_OPCODE_CASE(SAMPLE_C, TEX);
894 NV50_IR_OPCODE_CASE(SAMPLE_C_LZ, TEX);
895 NV50_IR_OPCODE_CASE(SAMPLE_D, TXD);
896 NV50_IR_OPCODE_CASE(SAMPLE_L, TXL);
897 NV50_IR_OPCODE_CASE(SAMPLE_I, TXF);
898 NV50_IR_OPCODE_CASE(SAMPLE_I_MS, TXF);
899 NV50_IR_OPCODE_CASE(GATHER4, TXG);
900 NV50_IR_OPCODE_CASE(SVIEWINFO, TXQ);
901
902 NV50_IR_OPCODE_CASE(ATOMUADD, ATOM);
903 NV50_IR_OPCODE_CASE(ATOMXCHG, ATOM);
904 NV50_IR_OPCODE_CASE(ATOMCAS, ATOM);
905 NV50_IR_OPCODE_CASE(ATOMAND, ATOM);
906 NV50_IR_OPCODE_CASE(ATOMOR, ATOM);
907 NV50_IR_OPCODE_CASE(ATOMXOR, ATOM);
908 NV50_IR_OPCODE_CASE(ATOMUMIN, ATOM);
909 NV50_IR_OPCODE_CASE(ATOMUMAX, ATOM);
910 NV50_IR_OPCODE_CASE(ATOMIMIN, ATOM);
911 NV50_IR_OPCODE_CASE(ATOMIMAX, ATOM);
912 NV50_IR_OPCODE_CASE(ATOMFADD, ATOM);
913 NV50_IR_OPCODE_CASE(ATOMDEC_WRAP, ATOM);
914 NV50_IR_OPCODE_CASE(ATOMINC_WRAP, ATOM);
915
916 NV50_IR_OPCODE_CASE(TEX2, TEX);
917 NV50_IR_OPCODE_CASE(TXB2, TXB);
918 NV50_IR_OPCODE_CASE(TXL2, TXL);
919
920 NV50_IR_OPCODE_CASE(IBFE, EXTBF);
921 NV50_IR_OPCODE_CASE(UBFE, EXTBF);
922 NV50_IR_OPCODE_CASE(BFI, INSBF);
923 NV50_IR_OPCODE_CASE(BREV, EXTBF);
924 NV50_IR_OPCODE_CASE(POPC, POPCNT);
925 NV50_IR_OPCODE_CASE(LSB, BFIND);
926 NV50_IR_OPCODE_CASE(IMSB, BFIND);
927 NV50_IR_OPCODE_CASE(UMSB, BFIND);
928
929 NV50_IR_OPCODE_CASE(VOTE_ALL, VOTE);
930 NV50_IR_OPCODE_CASE(VOTE_ANY, VOTE);
931 NV50_IR_OPCODE_CASE(VOTE_EQ, VOTE);
932
933 NV50_IR_OPCODE_CASE(BALLOT, VOTE);
934 NV50_IR_OPCODE_CASE(READ_INVOC, SHFL);
935 NV50_IR_OPCODE_CASE(READ_FIRST, SHFL);
936
937 NV50_IR_OPCODE_CASE(END, EXIT);
938
939 default:
940 return nv50_ir::OP_NOP;
941 }
942 }
943
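// Select the nv50 IR sub-opcode for TGSI opcodes that all map to the same IR
// opcode (atomics, high multiplies, vote variants).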
944 static uint16_t opcodeToSubOp(uint opcode)
945 {
946 switch (opcode) {
947 case TGSI_OPCODE_ATOMUADD: return NV50_IR_SUBOP_ATOM_ADD;
948 case TGSI_OPCODE_ATOMXCHG: return NV50_IR_SUBOP_ATOM_EXCH;
949 case TGSI_OPCODE_ATOMCAS: return NV50_IR_SUBOP_ATOM_CAS;
950 case TGSI_OPCODE_ATOMAND: return NV50_IR_SUBOP_ATOM_AND;
951 case TGSI_OPCODE_ATOMOR: return NV50_IR_SUBOP_ATOM_OR;
952 case TGSI_OPCODE_ATOMXOR: return NV50_IR_SUBOP_ATOM_XOR;
953 case TGSI_OPCODE_ATOMUMIN: return NV50_IR_SUBOP_ATOM_MIN;
954 case TGSI_OPCODE_ATOMIMIN: return NV50_IR_SUBOP_ATOM_MIN;
955 case TGSI_OPCODE_ATOMUMAX: return NV50_IR_SUBOP_ATOM_MAX;
956 case TGSI_OPCODE_ATOMIMAX: return NV50_IR_SUBOP_ATOM_MAX;
957 case TGSI_OPCODE_ATOMFADD: return NV50_IR_SUBOP_ATOM_ADD;
958 case TGSI_OPCODE_ATOMDEC_WRAP: return NV50_IR_SUBOP_ATOM_DEC;
959 case TGSI_OPCODE_ATOMINC_WRAP: return NV50_IR_SUBOP_ATOM_INC;
960 case TGSI_OPCODE_IMUL_HI:
961 case TGSI_OPCODE_UMUL_HI:
962 return NV50_IR_SUBOP_MUL_HIGH;
963 case TGSI_OPCODE_VOTE_ALL: return NV50_IR_SUBOP_VOTE_ALL;
964 case TGSI_OPCODE_VOTE_ANY: return NV50_IR_SUBOP_VOTE_ANY;
965 case TGSI_OPCODE_VOTE_EQ: return NV50_IR_SUBOP_VOTE_UNI;
966 default:
967 return 0;
968 }
969 }
970
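// Report whether the destination register is also used as a source, ignoring
// indirect destinations, which go through memory anyway.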
971 bool Instruction::checkDstSrcAliasing() const
972 {
973 if (insn->Dst[0].Register.Indirect) // no danger if indirect, using memory
974 return false;
975
976 for (int s = 0; s < TGSI_FULL_MAX_SRC_REGISTERS; ++s) {
977 if (insn->Src[s].Register.File == TGSI_FILE_NULL)
978 break;
979 if (insn->Src[s].Register.File == insn->Dst[0].Register.File &&
980 insn->Src[s].Register.Index == insn->Dst[0].Register.Index)
981 return true;
982 }
983 return false;
984 }
985
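// First pass over the TGSI tokens: records declarations, immediates,
// properties and per-instruction register usage before the actual
// translation to nv50 IR is run.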
986 class Source
987 {
988 public:
989 Source(struct nv50_ir_prog_info *, struct nv50_ir_prog_info_out *, nv50_ir::Program *);
990 ~Source();
991
992 public:
993 bool scanSource();
994 unsigned fileSize(unsigned file) const { return scan.file_max[file] + 1; }
995
996 public:
997 struct tgsi_shader_info scan;
998 struct tgsi_full_instruction *insns;
999 const struct tgsi_token *tokens;
1000 struct nv50_ir_prog_info *info;
1001 struct nv50_ir_prog_info_out *info_out;
1002
1003 nv50_ir::DynArray tempArrays;
1004 nv50_ir::DynArray immdArrays;
1005
1006 typedef nv50_ir::BuildUtil::Location Location;
1007 // these registers are per-subroutine, cannot be used for parameter passing
1008 std::set<Location> locals;
1009
1010 std::set<int> indirectTempArrays;
1011 std::map<int, int> indirectTempOffsets;
1012 std::map<int, std::pair<int, int> > tempArrayInfo;
1013 std::vector<int> tempArrayId;
1014
1015 std::map<int, int> bufferIds;
1016 std::map<int, int> imageIds;
1017
1018 int clipVertexOutput;
1019
1020 struct TextureView {
1021 uint8_t target; // TGSI_TEXTURE_*
1022 };
1023 std::vector<TextureView> textureViews;
1024
1025 /*
1026 struct Resource {
1027 uint8_t target; // TGSI_TEXTURE_*
1028 bool raw;
1029 uint8_t slot; // $surface index
1030 };
1031 std::vector<Resource> resources;
1032 */
1033
1034 struct MemoryFile {
1035 uint8_t mem_type; // TGSI_MEMORY_TYPE_*
1036 };
1037 std::vector<MemoryFile> memoryFiles;
1038
1039 std::vector<bool> bufferAtomics;
1040
1041 struct {
1042 uint16_t count; /* count of inline immediates */
1043 uint32_t *data; /* inline immediate data */
1044 } immd;
1045
1046 private:
1047 int gmemSlot;
1048 nv50_ir::Program *prog;
1049 int inferSysValDirection(unsigned sn) const;
1050 bool scanDeclaration(const struct tgsi_full_declaration *);
1051 bool scanInstruction(const struct tgsi_full_instruction *);
1052 void scanInstructionSrc(const Instruction& insn,
1053 const Instruction::SrcRegister& src,
1054 unsigned mask);
1055 void scanProperty(const struct tgsi_full_property *);
1056 void scanImmediate(const struct tgsi_full_immediate *);
1057
1058 inline bool isEdgeFlagPassthrough(const Instruction&) const;
1059 };
1060
1061 Source::Source(struct nv50_ir_prog_info *info, struct nv50_ir_prog_info_out *info_out,
1062 nv50_ir::Program *prog)
1063 : insns(NULL), info(info), info_out(info_out), clipVertexOutput(-1),
1064 gmemSlot(0), prog(prog)
1065 {
1066 tokens = (const struct tgsi_token *)info->bin.source;
1067
1068 if (info->dbgFlags & NV50_IR_DEBUG_BASIC)
1069 tgsi_dump(tokens, 0);
1070
1071 tgsi_scan_shader(tokens, &scan);
1072
1073 immd.count = 0;
1074 immd.data = (uint32_t *)MALLOC(scan.immediate_count * 16);
1075 }
1076
1077 Source::~Source()
1078 {
1079 if (insns)
1080 FREE(insns);
1081
1082 if (immd.data)
1083 FREE(immd.data);
1084 }
1085
1086 bool Source::scanSource()
1087 {
1088 unsigned insnCount = 0;
1089 struct tgsi_parse_context parse;
1090
1091 insns = (struct tgsi_full_instruction *)MALLOC(scan.num_instructions *
1092 sizeof(insns[0]));
1093 if (!insns)
1094 return false;
1095
1096 textureViews.resize(scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1);
1097 //resources.resize(scan.file_max[TGSI_FILE_RESOURCE] + 1);
1098 tempArrayId.resize(scan.file_max[TGSI_FILE_TEMPORARY] + 1);
1099 memoryFiles.resize(scan.file_max[TGSI_FILE_MEMORY] + 1);
1100 bufferAtomics.resize(scan.file_max[TGSI_FILE_BUFFER] + 1);
1101
1102 info_out->numInputs = scan.file_max[TGSI_FILE_INPUT] + 1;
1103 info_out->numOutputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
1104 info_out->numSysVals = scan.file_max[TGSI_FILE_SYSTEM_VALUE] + 1;
1105
1106 if (info->type == PIPE_SHADER_FRAGMENT) {
1107 info_out->prop.fp.writesDepth = scan.writes_z;
1108 info_out->prop.fp.usesDiscard = scan.uses_kill || info->io.alphaRefBase;
1109 } else
1110 if (info->type == PIPE_SHADER_GEOMETRY) {
1111 info_out->prop.gp.instanceCount = 1; // default value
1112 }
1113
1114 info->io.viewportId = -1;
1115
1116 tgsi_parse_init(&parse, tokens);
1117 while (!tgsi_parse_end_of_tokens(&parse)) {
1118 tgsi_parse_token(&parse);
1119
1120 switch (parse.FullToken.Token.Type) {
1121 case TGSI_TOKEN_TYPE_IMMEDIATE:
1122 scanImmediate(&parse.FullToken.FullImmediate);
1123 break;
1124 case TGSI_TOKEN_TYPE_DECLARATION:
1125 scanDeclaration(&parse.FullToken.FullDeclaration);
1126 break;
1127 case TGSI_TOKEN_TYPE_INSTRUCTION:
1128 insns[insnCount++] = parse.FullToken.FullInstruction;
1129 scanInstruction(&parse.FullToken.FullInstruction);
1130 break;
1131 case TGSI_TOKEN_TYPE_PROPERTY:
1132 scanProperty(&parse.FullToken.FullProperty);
1133 break;
1134 default:
1135 INFO("unknown TGSI token type: %d\n", parse.FullToken.Token.Type);
1136 break;
1137 }
1138 }
1139 tgsi_parse_free(&parse);
1140
1141 if (indirectTempArrays.size()) {
1142 int tempBase = 0;
1143 for (std::set<int>::const_iterator it = indirectTempArrays.begin();
1144 it != indirectTempArrays.end(); ++it) {
1145 std::pair<int, int>& info = tempArrayInfo[*it];
1146 indirectTempOffsets.insert(std::make_pair(*it, tempBase - info.first));
1147 tempBase += info.second;
1148 }
1149 info_out->bin.tlsSpace += tempBase * 16;
1150 }
1151
1152 if (info_out->io.genUserClip > 0) {
1153 info_out->io.clipDistances = info_out->io.genUserClip;
1154
1155 const unsigned int nOut = (info_out->io.genUserClip + 3) / 4;
1156
1157 for (unsigned int n = 0; n < nOut; ++n) {
1158 unsigned int i = info_out->numOutputs++;
1159 info_out->out[i].id = i;
1160 info_out->out[i].sn = TGSI_SEMANTIC_CLIPDIST;
1161 info_out->out[i].si = n;
1162 info_out->out[i].mask = ((1 << info_out->io.clipDistances) - 1) >> (n * 4);
1163 }
1164 }
1165
1166 return info->assignSlots(info_out) == 0;
1167 }
1168
1169 void Source::scanProperty(const struct tgsi_full_property *prop)
1170 {
1171 switch (prop->Property.PropertyName) {
1172 case TGSI_PROPERTY_GS_OUTPUT_PRIM:
1173 info_out->prop.gp.outputPrim = prop->u[0].Data;
1174 break;
1175 case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES:
1176 info_out->prop.gp.maxVertices = prop->u[0].Data;
1177 break;
1178 case TGSI_PROPERTY_GS_INVOCATIONS:
1179 info_out->prop.gp.instanceCount = prop->u[0].Data;
1180 break;
1181 case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
1182 info_out->prop.fp.separateFragData = true;
1183 break;
1184 case TGSI_PROPERTY_FS_COORD_ORIGIN:
1185 case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
1186 case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
1187 case TGSI_PROPERTY_GS_INPUT_PRIM:
1188 case TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED:
1189 // we don't care
1190 break;
1191 case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
1192 info_out->io.genUserClip = -1;
1193 break;
1194 case TGSI_PROPERTY_TCS_VERTICES_OUT:
1195 info_out->prop.tp.outputPatchSize = prop->u[0].Data;
1196 break;
1197 case TGSI_PROPERTY_TES_PRIM_MODE:
1198 info_out->prop.tp.domain = prop->u[0].Data;
1199 break;
1200 case TGSI_PROPERTY_TES_SPACING:
1201 info_out->prop.tp.partitioning = prop->u[0].Data;
1202 break;
1203 case TGSI_PROPERTY_TES_VERTEX_ORDER_CW:
1204 info_out->prop.tp.winding = prop->u[0].Data;
1205 break;
1206 case TGSI_PROPERTY_TES_POINT_MODE:
1207 if (prop->u[0].Data)
1208 info_out->prop.tp.outputPrim = PIPE_PRIM_POINTS;
1209 else
1210 info_out->prop.tp.outputPrim = PIPE_PRIM_TRIANGLES; /* anything but points */
1211 break;
1212 case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
1213 info->prop.cp.numThreads[0] = prop->u[0].Data;
1214 break;
1215 case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
1216 info->prop.cp.numThreads[1] = prop->u[0].Data;
1217 break;
1218 case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
1219 info->prop.cp.numThreads[2] = prop->u[0].Data;
1220 break;
1221 case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED:
1222 info_out->io.clipDistances = prop->u[0].Data;
1223 break;
1224 case TGSI_PROPERTY_NUM_CULLDIST_ENABLED:
1225 info_out->io.cullDistances = prop->u[0].Data;
1226 break;
1227 case TGSI_PROPERTY_NEXT_SHADER:
1228 /* Do not need to know the next shader stage. */
1229 break;
1230 case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL:
1231 info_out->prop.fp.earlyFragTests = prop->u[0].Data;
1232 break;
1233 case TGSI_PROPERTY_FS_POST_DEPTH_COVERAGE:
1234 info_out->prop.fp.postDepthCoverage = prop->u[0].Data;
1235 break;
1236 case TGSI_PROPERTY_LEGACY_MATH_RULES:
1237 info->io.mul_zero_wins = prop->u[0].Data;
1238 break;
1239 case TGSI_PROPERTY_LAYER_VIEWPORT_RELATIVE:
1240 info_out->io.layer_viewport_relative = prop->u[0].Data;
1241 break;
1242 default:
1243 INFO("unhandled TGSI property %d\n", prop->Property.PropertyName);
1244 break;
1245 }
1246 }
1247
1248 void Source::scanImmediate(const struct tgsi_full_immediate *imm)
1249 {
1250 const unsigned n = immd.count++;
1251
1252 assert(n < scan.immediate_count);
1253
1254 for (int c = 0; c < 4; ++c)
1255 immd.data[n * 4 + c] = imm->u[c].Uint;
1256 }
1257
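// Decide whether a system value behaves like a regular input in this shader
// stage (1) or not (0); the result is stored in sv[i].input when declarations
// are scanned.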
1258 int Source::inferSysValDirection(unsigned sn) const
1259 {
1260 switch (sn) {
1261 case TGSI_SEMANTIC_INSTANCEID:
1262 case TGSI_SEMANTIC_VERTEXID:
1263 return 1;
1264 case TGSI_SEMANTIC_LAYER:
1265 #if 0
1266 case TGSI_SEMANTIC_VIEWPORTINDEX:
1267 return 0;
1268 #endif
1269 case TGSI_SEMANTIC_PRIMID:
1270 return (info->type == PIPE_SHADER_FRAGMENT) ? 1 : 0;
1271 default:
1272 return 0;
1273 }
1274 }
1275
1276 bool Source::scanDeclaration(const struct tgsi_full_declaration *decl)
1277 {
1278 unsigned i, c;
1279 unsigned sn = TGSI_SEMANTIC_GENERIC;
1280 unsigned si = 0;
1281 const unsigned first = decl->Range.First, last = decl->Range.Last;
1282 const int arrayId = decl->Array.ArrayID;
1283
1284 if (decl->Declaration.Semantic) {
1285 sn = decl->Semantic.Name;
1286 si = decl->Semantic.Index;
1287 }
1288
1289 if (decl->Declaration.Local || decl->Declaration.File == TGSI_FILE_ADDRESS) {
1290 for (i = first; i <= last; ++i) {
1291 for (c = 0; c < 4; ++c) {
1292 locals.insert(
1293 Location(decl->Declaration.File, decl->Dim.Index2D, i, c));
1294 }
1295 }
1296 }
1297
1298 switch (decl->Declaration.File) {
1299 case TGSI_FILE_INPUT:
1300 if (info->type == PIPE_SHADER_VERTEX) {
1301 // all vertex attributes are equal
1302 for (i = first; i <= last; ++i) {
1303 info_out->in[i].sn = TGSI_SEMANTIC_GENERIC;
1304 info_out->in[i].si = i;
1305 }
1306 } else {
1307 for (i = first; i <= last; ++i, ++si) {
1308 info_out->in[i].id = i;
1309 info_out->in[i].sn = sn;
1310 info_out->in[i].si = si;
1311 if (info->type == PIPE_SHADER_FRAGMENT) {
1312 // translate interpolation mode
1313 switch (decl->Interp.Interpolate) {
1314 case TGSI_INTERPOLATE_CONSTANT:
1315 info_out->in[i].flat = 1;
1316 break;
1317 case TGSI_INTERPOLATE_COLOR:
1318 info_out->in[i].sc = 1;
1319 break;
1320 case TGSI_INTERPOLATE_LINEAR:
1321 info_out->in[i].linear = 1;
1322 break;
1323 default:
1324 break;
1325 }
1326 if (decl->Interp.Location)
1327 info_out->in[i].centroid = 1;
1328 }
1329
1330 if (sn == TGSI_SEMANTIC_PATCH)
1331 info_out->in[i].patch = 1;
1332 if (sn == TGSI_SEMANTIC_PATCH)
1333 info_out->numPatchConstants = MAX2(info_out->numPatchConstants, si + 1);
1334 }
1335 }
1336 break;
1337 case TGSI_FILE_OUTPUT:
1338 for (i = first; i <= last; ++i, ++si) {
1339 switch (sn) {
1340 case TGSI_SEMANTIC_POSITION:
1341 if (info->type == PIPE_SHADER_FRAGMENT)
1342 info_out->io.fragDepth = i;
1343 else
1344 if (clipVertexOutput < 0)
1345 clipVertexOutput = i;
1346 break;
1347 case TGSI_SEMANTIC_COLOR:
1348 if (info->type == PIPE_SHADER_FRAGMENT)
1349 info_out->prop.fp.numColourResults++;
1350 break;
1351 case TGSI_SEMANTIC_EDGEFLAG:
1352 info_out->io.edgeFlagOut = i;
1353 break;
1354 case TGSI_SEMANTIC_CLIPVERTEX:
1355 clipVertexOutput = i;
1356 break;
1357 case TGSI_SEMANTIC_CLIPDIST:
1358 info_out->io.genUserClip = -1;
1359 break;
1360 case TGSI_SEMANTIC_SAMPLEMASK:
1361 info_out->io.sampleMask = i;
1362 break;
1363 case TGSI_SEMANTIC_VIEWPORT_INDEX:
1364 info->io.viewportId = i;
1365 break;
1366 case TGSI_SEMANTIC_PATCH:
1367 info_out->numPatchConstants = MAX2(info_out->numPatchConstants, si + 1);
1368 FALLTHROUGH;
1369 case TGSI_SEMANTIC_TESSOUTER:
1370 case TGSI_SEMANTIC_TESSINNER:
1371 info_out->out[i].patch = 1;
1372 break;
1373 default:
1374 break;
1375 }
1376 info_out->out[i].id = i;
1377 info_out->out[i].sn = sn;
1378 info_out->out[i].si = si;
1379 }
1380 break;
1381 case TGSI_FILE_SYSTEM_VALUE:
1382 switch (sn) {
1383 case TGSI_SEMANTIC_INSTANCEID:
1384 info_out->io.instanceId = first;
1385 break;
1386 case TGSI_SEMANTIC_VERTEXID:
1387 info_out->io.vertexId = first;
1388 break;
1389 case TGSI_SEMANTIC_BASEVERTEX:
1390 case TGSI_SEMANTIC_BASEINSTANCE:
1391 case TGSI_SEMANTIC_DRAWID:
1392 info_out->prop.vp.usesDrawParameters = true;
1393 break;
1394 case TGSI_SEMANTIC_SAMPLEID:
1395 case TGSI_SEMANTIC_SAMPLEPOS:
1396 prog->persampleInvocation = true;
1397 break;
1398 case TGSI_SEMANTIC_SAMPLEMASK:
1399 info_out->prop.fp.usesSampleMaskIn = true;
1400 break;
1401 default:
1402 break;
1403 }
1404 for (i = first; i <= last; ++i, ++si) {
1405 info_out->sv[i].sn = sn;
1406 info_out->sv[i].si = si;
1407 info_out->sv[i].input = inferSysValDirection(sn);
1408
1409 switch (sn) {
1410 case TGSI_SEMANTIC_TESSOUTER:
1411 case TGSI_SEMANTIC_TESSINNER:
1412 info_out->sv[i].patch = 1;
1413 break;
1414 }
1415 }
1416 break;
1417 /*
1418 case TGSI_FILE_RESOURCE:
1419 for (i = first; i <= last; ++i) {
1420 resources[i].target = decl->Resource.Resource;
1421 resources[i].raw = decl->Resource.Raw;
1422 resources[i].slot = i;
1423 }
1424 break;
1425 */
1426 case TGSI_FILE_SAMPLER_VIEW:
1427 for (i = first; i <= last; ++i)
1428 textureViews[i].target = decl->SamplerView.Resource;
1429 break;
1430 case TGSI_FILE_MEMORY:
1431 for (i = first; i <= last; ++i)
1432 memoryFiles[i].mem_type = decl->Declaration.MemType;
1433 break;
1434 case TGSI_FILE_NULL:
1435 case TGSI_FILE_TEMPORARY:
1436 for (i = first; i <= last; ++i)
1437 tempArrayId[i] = arrayId;
1438 if (arrayId)
1439 tempArrayInfo.insert(std::make_pair(arrayId, std::make_pair(
1440 first, last - first + 1)));
1441 break;
1442 case TGSI_FILE_BUFFER:
1443 for (i = first; i <= last; ++i)
1444 bufferAtomics[i] = decl->Declaration.Atomic;
1445 if (info->type == PIPE_SHADER_COMPUTE && info->target < NVISA_GF100_CHIPSET) {
1446 for (i = first; i <= last; i++) {
1447 bufferIds.insert(std::make_pair(i, gmemSlot));
1448 info_out->prop.cp.gmem[gmemSlot++] = {.valid = 1, .image = 0, .slot = i};
1449 assert(gmemSlot < 16);
1450 }
1451 }
1452 break;
1453 case TGSI_FILE_IMAGE:
1454 if (info->type == PIPE_SHADER_COMPUTE && info->target < NVISA_GF100_CHIPSET) {
1455 for (i = first; i <= last; i++) {
1456 imageIds.insert(std::make_pair(i, gmemSlot));
1457 info_out->prop.cp.gmem[gmemSlot++] = {.valid = 1, .image = 1, .slot = i};
1458 assert(gmemSlot < 16);
1459 }
1460 }
1461 break;
1462 case TGSI_FILE_ADDRESS:
1463 case TGSI_FILE_CONSTANT:
1464 case TGSI_FILE_IMMEDIATE:
1465 case TGSI_FILE_SAMPLER:
1466 break;
1467 default:
1468 ERROR("unhandled TGSI_FILE %d\n", decl->Declaration.File);
1469 return false;
1470 }
1471 return true;
1472 }
1473
1474 inline bool Source::isEdgeFlagPassthrough(const Instruction& insn) const
1475 {
1476 return insn.getOpcode() == TGSI_OPCODE_MOV &&
1477 insn.getDst(0).getIndex(0) == info_out->io.edgeFlagOut &&
1478 insn.getSrc(0).getFile() == TGSI_FILE_INPUT;
1479 }
1480
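// Record how an instruction source uses inputs, outputs and temporaries so
// that read masks and indirectly addressed arrays are tracked correctly.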
1481 void Source::scanInstructionSrc(const Instruction& insn,
1482 const Instruction::SrcRegister& src,
1483 unsigned mask)
1484 {
1485 if (src.getFile() == TGSI_FILE_TEMPORARY) {
1486 if (src.isIndirect(0))
1487 indirectTempArrays.insert(src.getArrayId());
1488 } else
1489 if (src.getFile() == TGSI_FILE_OUTPUT) {
1490 if (src.isIndirect(0)) {
1491 // We don't know which one is accessed, just mark everything for
1492 // reading. This is an extremely unlikely occurrence.
1493 for (unsigned i = 0; i < info_out->numOutputs; ++i)
1494 info_out->out[i].oread = 1;
1495 } else {
1496 info_out->out[src.getIndex(0)].oread = 1;
1497 }
1498 }
1499 if (src.getFile() == TGSI_FILE_SYSTEM_VALUE) {
1500 if (info_out->sv[src.getIndex(0)].sn == TGSI_SEMANTIC_SAMPLEPOS)
1501 info_out->prop.fp.readsSampleLocations = true;
1502 }
1503 if (src.getFile() != TGSI_FILE_INPUT)
1504 return;
1505
1506 if (src.isIndirect(0)) {
1507 for (unsigned i = 0; i < info_out->numInputs; ++i)
1508 info_out->in[i].mask = 0xf;
1509 } else {
1510 const int i = src.getIndex(0);
1511 for (unsigned c = 0; c < 4; ++c) {
1512 if (!(mask & (1 << c)))
1513 continue;
1514 int k = src.getSwizzle(c);
1515 if (k <= TGSI_SWIZZLE_W)
1516 info_out->in[i].mask |= 1 << k;
1517 }
1518 switch (info_out->in[i].sn) {
1519 case TGSI_SEMANTIC_PSIZE:
1520 case TGSI_SEMANTIC_PRIMID:
1521 case TGSI_SEMANTIC_FOG:
1522 info_out->in[i].mask &= 0x1;
1523 break;
1524 case TGSI_SEMANTIC_PCOORD:
1525 info_out->in[i].mask &= 0x3;
1526 break;
1527 default:
1528 break;
1529 }
1530 }
1531 }
1532
1533 bool Source::scanInstruction(const struct tgsi_full_instruction *inst)
1534 {
1535 Instruction insn(inst);
1536
1537 if (insn.getOpcode() == TGSI_OPCODE_BARRIER)
1538 info_out->numBarriers = 1;
1539
1540 if (insn.getOpcode() == TGSI_OPCODE_FBFETCH)
1541 info_out->prop.fp.readsFramebuffer = true;
1542
1543 if (insn.getOpcode() == TGSI_OPCODE_INTERP_SAMPLE)
1544 info_out->prop.fp.readsSampleLocations = true;
1545
1546 if (insn.getOpcode() == TGSI_OPCODE_DEMOTE)
1547 info_out->prop.fp.usesDiscard = true;
1548
1549 if (insn.dstCount()) {
1550 Instruction::DstRegister dst = insn.getDst(0);
1551
1552 if (insn.getOpcode() == TGSI_OPCODE_STORE &&
1553 dst.getFile() != TGSI_FILE_MEMORY) {
1554 info_out->io.globalAccess |= 0x2;
1555
1556 if (dst.getFile() == TGSI_FILE_INPUT) {
1557 // TODO: Handle indirect somehow?
1558 const int i = dst.getIndex(0);
1559 info_out->in[i].mask |= 1;
1560 }
1561 }
1562
1563 if (dst.getFile() == TGSI_FILE_OUTPUT) {
1564 if (dst.isIndirect(0))
1565 for (unsigned i = 0; i < info_out->numOutputs; ++i)
1566 info_out->out[i].mask = 0xf;
1567 else
1568 info_out->out[dst.getIndex(0)].mask |= dst.getMask();
1569
1570 if (info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PSIZE ||
1571 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_PRIMID ||
1572 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_LAYER ||
1573 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_VIEWPORT_INDEX ||
1574 info_out->out[dst.getIndex(0)].sn == TGSI_SEMANTIC_FOG)
1575 info_out->out[dst.getIndex(0)].mask &= 1;
1576
1577 if (isEdgeFlagPassthrough(insn))
1578 info_out->io.edgeFlagIn = insn.getSrc(0).getIndex(0);
1579 } else
1580 if (dst.getFile() == TGSI_FILE_TEMPORARY) {
1581 if (dst.isIndirect(0))
1582 indirectTempArrays.insert(dst.getArrayId());
1583 } else
1584 if (dst.getFile() == TGSI_FILE_BUFFER ||
1585 dst.getFile() == TGSI_FILE_IMAGE ||
1586 (dst.getFile() == TGSI_FILE_MEMORY &&
1587 memoryFiles[dst.getIndex(0)].mem_type == TGSI_MEMORY_TYPE_GLOBAL)) {
1588 info_out->io.globalAccess |= 0x2;
1589 }
1590 }
1591
1592 if (insn.srcCount() && (
1593 insn.getSrc(0).getFile() != TGSI_FILE_MEMORY ||
1594 memoryFiles[insn.getSrc(0).getIndex(0)].mem_type ==
1595 TGSI_MEMORY_TYPE_GLOBAL)) {
1596 switch (insn.getOpcode()) {
1597 case TGSI_OPCODE_ATOMUADD:
1598 case TGSI_OPCODE_ATOMXCHG:
1599 case TGSI_OPCODE_ATOMCAS:
1600 case TGSI_OPCODE_ATOMAND:
1601 case TGSI_OPCODE_ATOMOR:
1602 case TGSI_OPCODE_ATOMXOR:
1603 case TGSI_OPCODE_ATOMUMIN:
1604 case TGSI_OPCODE_ATOMIMIN:
1605 case TGSI_OPCODE_ATOMUMAX:
1606 case TGSI_OPCODE_ATOMIMAX:
1607 case TGSI_OPCODE_ATOMFADD:
1608 case TGSI_OPCODE_ATOMDEC_WRAP:
1609 case TGSI_OPCODE_ATOMINC_WRAP:
1610 case TGSI_OPCODE_LOAD:
1611 info_out->io.globalAccess |= (insn.getOpcode() == TGSI_OPCODE_LOAD) ?
1612 0x1 : 0x2;
1613 break;
1614 }
1615 }
1616
1617
1618 for (unsigned s = 0; s < insn.srcCount(); ++s)
1619 scanInstructionSrc(insn, insn.getSrc(s), insn.srcMask(s));
1620
1621 for (unsigned s = 0; s < insn.getNumTexOffsets(); ++s)
1622 scanInstructionSrc(insn, insn.getTexOffset(s), insn.texOffsetMask());
1623
1624 return true;
1625 }
1626
1627 nv50_ir::TexInstruction::Target
1628 Instruction::getTexture(const tgsi::Source *code, int s) const
1629 {
1630 // XXX: indirect access
1631 unsigned int r;
1632
1633 switch (getSrc(s).getFile()) {
1634 /*
1635 case TGSI_FILE_RESOURCE:
1636 r = getSrc(s).getIndex(0);
1637 return translateTexture(code->resources.at(r).target);
1638 */
1639 case TGSI_FILE_SAMPLER_VIEW:
1640 r = getSrc(s).getIndex(0);
1641 return translateTexture(code->textureViews.at(r).target);
1642 default:
1643 return translateTexture(insn->Texture.Texture);
1644 }
1645 }
1646
1647 } // namespace tgsi
1648
1649 namespace {
1650
1651 using namespace nv50_ir;
1652
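// Second pass: walks the instructions gathered by tgsi::Source and emits
// nv50 IR for them, one TGSI instruction at a time.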
1653 class Converter : public ConverterCommon
1654 {
1655 public:
1656 Converter(Program *, const tgsi::Source *, nv50_ir_prog_info_out *);
1657 ~Converter();
1658
1659 bool run();
1660
1661 private:
1662 Value *shiftAddress(Value *);
1663 Value *getVertexBase(int s);
1664 Value *getOutputBase(int s);
1665 DataArray *getArrayForFile(unsigned file, int idx);
1666 Value *fetchSrc(int s, int c);
1667 Value *fetchDst(int d, int c);
1668 Value *acquireDst(int d, int c);
1669 void storeDst(int d, int c, Value *);
1670
1671 Value *fetchSrc(const tgsi::Instruction::SrcRegister src, int c, Value *ptr);
1672 void storeDst(const tgsi::Instruction::DstRegister dst, int c,
1673 Value *val, Value *ptr);
1674
1675 void adjustTempIndex(int arrayId, int &idx, int &idx2d) const;
1676 Value *applySrcMod(Value *, int s, int c);
1677
1678 Symbol *makeSym(uint file, int fileIndex, int idx, int c, uint32_t addr);
1679 Symbol *srcToSym(tgsi::Instruction::SrcRegister, int c);
1680 Symbol *dstToSym(tgsi::Instruction::DstRegister, int c);
1681
1682 bool isSubGroupMask(uint8_t semantic);
1683
1684 bool handleInstruction(const struct tgsi_full_instruction *);
1685 void exportOutputs();
1686 inline bool isEndOfSubroutine(uint ip);
1687
1688 void loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask);
1689
1690 // R,S,L,C,Dx,Dy encode TGSI sources for respective values (0xSf for auto)
1691 void setTexRS(TexInstruction *, unsigned int& s, int R, int S);
1692 void handleTEX(Value *dst0[4], int R, int S, int L, int C, int Dx, int Dy);
1693 void handleTXF(Value *dst0[4], int R, int L_M);
1694 void handleTXQ(Value *dst0[4], enum TexQuery, int R);
1695 void handleFBFETCH(Value *dst0[4]);
1696 void handleLIT(Value *dst0[4]);
1697
1698 // Symbol *getResourceBase(int r);
1699 void getImageCoords(std::vector<Value *>&, int s);
1700 int remapImageId(int);
1701 int remapBufferId(int);
1702
1703 void handleLOAD(Value *dst0[4]);
1704 void handleSTORE();
1705 void handleATOM(Value *dst0[4], DataType, uint16_t subOp);
1706
1707 void handleINTERP(Value *dst0[4]);
1708
1709 Value *interpolate(tgsi::Instruction::SrcRegister, int c, Value *ptr);
1710
1711 void insertConvergenceOps(BasicBlock *conv, BasicBlock *fork);
1712
1713 Value *buildDot(int dim);
1714
1715 class BindArgumentsPass : public Pass {
1716 public:
1717 BindArgumentsPass(Converter &conv) : conv(conv), sub(NULL) { }
1718
1719 private:
1720 Converter &conv;
1721 Subroutine *sub;
1722
1723 inline const Location *getValueLocation(Subroutine *, Value *);
1724
1725 template<typename T> inline void
1726 updateCallArgs(Instruction *i, void (Instruction::*setArg)(int, Value *),
1727 T (Function::*proto));
1728
1729 template<typename T> inline void
1730 updatePrototype(BitSet *set, void (Function::*updateSet)(),
1731 T (Function::*proto));
1732
1733 protected:
1734 bool visit(Function *);
1735 bool visit(BasicBlock *bb) { return false; }
1736 };
1737
1738 private:
1739 const tgsi::Source *code;
1740
1741 uint ip; // instruction pointer
1742
1743 tgsi::Instruction tgsi;
1744
1745 DataType dstTy;
1746 DataType srcTy;
1747
1748 DataArray tData; // TGSI_FILE_TEMPORARY
1749 DataArray lData; // TGSI_FILE_TEMPORARY, for indirect arrays
1750 DataArray aData; // TGSI_FILE_ADDRESS
1751 DataArray oData; // TGSI_FILE_OUTPUT (if outputs in registers)
1752
1753 Value *zero;
1754
1755 Value *vtxBase[5]; // base address of vertex in primitive (for TP/GP)
1756 uint8_t vtxBaseValid;
1757
1758 Stack condBBs; // fork BB, then else clause BB
1759 Stack joinBBs; // fork BB, for inserting join ops on ENDIF
1760 Stack loopBBs; // loop headers
1761 Stack breakBBs; // end of / after loop
1762
1763 Value *viewport;
1764 };
1765
1766 Symbol *
1767 Converter::srcToSym(tgsi::Instruction::SrcRegister src, int c)
1768 {
1769 const int swz = src.getSwizzle(c);
1770
1771 /* TODO: Use Array ID when it's available for the index */
1772 return makeSym(src.getFile(),
1773 src.is2D() ? src.getIndex(1) : 0,
1774 src.getIndex(0), swz,
1775 src.getIndex(0) * 16 + swz * 4);
1776 }
1777
1778 Symbol *
1779 Converter::dstToSym(tgsi::Instruction::DstRegister dst, int c)
1780 {
1781 /* TODO: Use Array ID when it's available for the index */
1782 return makeSym(dst.getFile(),
1783 dst.is2D() ? dst.getIndex(1) : 0,
1784 dst.getIndex(0), c,
1785 dst.getIndex(0) * 16 + c * 4);
1786 }
1787
1788 Symbol *
1789 Converter::makeSym(uint tgsiFile, int fileIdx, int idx, int c, uint32_t address)
1790 {
1791 Symbol *sym = new_Symbol(prog, tgsi::translateFile(tgsiFile));
1792
1793 sym->reg.fileIndex = fileIdx;
1794
1795 if (tgsiFile == TGSI_FILE_MEMORY) {
1796 switch (code->memoryFiles[fileIdx].mem_type) {
1797 case TGSI_MEMORY_TYPE_GLOBAL:
1798 /* No-op this is the default for TGSI_FILE_MEMORY */
1799 sym->setFile(FILE_MEMORY_GLOBAL);
1800 break;
1801 case TGSI_MEMORY_TYPE_SHARED:
1802 sym->setFile(FILE_MEMORY_SHARED);
1803 address += info->prop.cp.inputOffset;
1804 break;
1805 case TGSI_MEMORY_TYPE_INPUT:
1806 assert(prog->getType() == Program::TYPE_COMPUTE);
1807 assert(idx == -1);
1808 sym->setFile(FILE_SHADER_INPUT);
1809 address += info->prop.cp.inputOffset;
1810 break;
1811 default:
1812 assert(0); /* TODO: Add support for global and private memory */
1813 }
1814 }
1815
1816 if (idx >= 0) {
1817 if (sym->reg.file == FILE_SHADER_INPUT)
1818 sym->setOffset(info_out->in[idx].slot[c] * 4);
1819 else
1820 if (sym->reg.file == FILE_SHADER_OUTPUT)
1821 sym->setOffset(info_out->out[idx].slot[c] * 4);
1822 else
1823 if (sym->reg.file == FILE_SYSTEM_VALUE)
1824 sym->setSV(tgsi::translateSysVal(info_out->sv[idx].sn), c);
1825 else
1826 sym->setOffset(address);
1827 } else {
1828 sym->setOffset(address);
1829 }
1830 return sym;
1831 }
1832
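// Emit the interpolation instruction for a fragment shader input; OP_PINTERP
// additionally takes the value cached in fragCoord[3] as its second source.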
1833 Value *
1834 Converter::interpolate(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
1835 {
1836 operation op;
1837
1838 // XXX: no way to know interpolation mode if we don't know what's accessed
1839 const uint8_t mode = translateInterpMode(&info_out->in[ptr ? 0 :
1840 src.getIndex(0)], op);
1841
1842 Instruction *insn = new_Instruction(func, op, TYPE_F32);
1843
1844 insn->setDef(0, getScratch());
1845 insn->setSrc(0, srcToSym(src, c));
1846 if (op == OP_PINTERP)
1847 insn->setSrc(1, fragCoord[3]);
1848 if (ptr)
1849 insn->setIndirect(0, 0, ptr);
1850
1851 insn->setInterpolate(mode);
1852
1853 bb->insertTail(insn);
1854 return insn->getDef(0);
1855 }
1856
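// Apply the TGSI source modifiers (absolute value and/or negation) to a
// fetched value by emitting OP_ABS/OP_NEG.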
1857 Value *
1858 Converter::applySrcMod(Value *val, int s, int c)
1859 {
1860 Modifier m = tgsi.getSrc(s).getMod(c);
1861 DataType ty = tgsi.inferSrcType();
1862
1863 if (m & Modifier(NV50_IR_MOD_ABS))
1864 val = mkOp1v(OP_ABS, ty, getScratch(), val);
1865
1866 if (m & Modifier(NV50_IR_MOD_NEG))
1867 val = mkOp1v(OP_NEG, ty, getScratch(), val);
1868
1869 return val;
1870 }
1871
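// Compute (and cache per source slot in vtxBase/vtxBaseValid) the base address
// of a vertex referenced by a 2D-indexed input in geometry/tessellation
// shaders, using OP_PFETCH.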
1872 Value *
1873 Converter::getVertexBase(int s)
1874 {
1875 assert(s < 5);
1876 if (!(vtxBaseValid & (1 << s))) {
1877 const int index = tgsi.getSrc(s).getIndex(1);
1878 Value *rel = NULL;
1879 if (tgsi.getSrc(s).isIndirect(1))
1880 rel = fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL);
1881 vtxBaseValid |= 1 << s;
1882 vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),
1883 mkImm(index), rel);
1884 }
1885 return vtxBase[s];
1886 }
1887
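// Like getVertexBase(), but for 2D-indexed outputs (tess control): the vertex
// index is added to outBase instead of going through OP_PFETCH.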
1888 Value *
1889 Converter::getOutputBase(int s)
1890 {
1891 assert(s < 5);
1892 if (!(vtxBaseValid & (1 << s))) {
1893 Value *offset = loadImm(NULL, tgsi.getSrc(s).getIndex(1));
1894 if (tgsi.getSrc(s).isIndirect(1))
1895 offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),
1896 fetchSrc(tgsi.getSrc(s).getIndirect(1), 0, NULL),
1897 offset);
1898 vtxBaseValid |= 1 << s;
1899 vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);
1900 }
1901 return vtxBase[s];
1902 }
1903
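// Fetch source s, channel c of the current instruction: resolve indirect and
// 2D addressing first, then apply the TGSI source modifiers.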
1904 Value *
1905 Converter::fetchSrc(int s, int c)
1906 {
1907 Value *res;
1908 Value *ptr = NULL, *dimRel = NULL;
1909
1910 tgsi::Instruction::SrcRegister src = tgsi.getSrc(s);
1911
1912 if (src.isIndirect(0))
1913 ptr = fetchSrc(src.getIndirect(0), 0, NULL);
1914
1915 if (src.is2D()) {
1916 switch (src.getFile()) {
1917 case TGSI_FILE_OUTPUT:
1918 dimRel = getOutputBase(s);
1919 break;
1920 case TGSI_FILE_INPUT:
1921 dimRel = getVertexBase(s);
1922 break;
1923 case TGSI_FILE_CONSTANT:
1924 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1925 if (src.isIndirect(1))
1926 dimRel = fetchSrc(src.getIndirect(1), 0, 0);
1927 break;
1928 default:
1929 break;
1930 }
1931 }
1932
1933 res = fetchSrc(src, c, ptr);
1934
1935 if (dimRel)
1936 res->getInsn()->setIndirect(0, 1, dimRel);
1937
1938 return applySrcMod(res, s, c);
1939 }
1940
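// Read the current contents of a destination operand by re-interpreting it as
// a source (used e.g. to fetch bindless image handles supplied in a dst slot).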
1941 Value *
1942 Converter::fetchDst(int d, int c)
1943 {
1944 Value *res;
1945 Value *ptr = NULL, *dimRel = NULL;
1946
1947 tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
1948
1949 if (dst.isIndirect(0))
1950 ptr = fetchSrc(dst.getIndirect(0), 0, NULL);
1951
1952 if (dst.is2D()) {
1953 switch (dst.getFile()) {
1954 case TGSI_FILE_OUTPUT:
1955 assert(0); // TODO
1956 dimRel = NULL;
1957 break;
1958 case TGSI_FILE_INPUT:
1959 assert(0); // TODO
1960 dimRel = NULL;
1961 break;
1962 case TGSI_FILE_CONSTANT:
1963 // on NVC0, this is valid and c{I+J}[k] == cI[(J << 16) + k]
1964 if (dst.isIndirect(1))
1965 dimRel = fetchSrc(dst.getIndirect(1), 0, 0);
1966 break;
1967 default:
1968 break;
1969 }
1970 }
1971
1972 struct tgsi_full_src_register fsr = dst.asSrc();
1973 tgsi::Instruction::SrcRegister src(&fsr);
1974 res = fetchSrc(src, c, ptr);
1975
1976 if (dimRel)
1977 res->getInsn()->setIndirect(0, 1, dimRel);
1978
1979 return res;
1980 }
1981
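// Map a TGSI register file to its backing DataArray; indirectly addressed
// temporaries use lData, and fragment shader outputs are kept in registers
// (oData).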
1982 Converter::DataArray *
1983 Converter::getArrayForFile(unsigned file, int idx)
1984 {
1985 switch (file) {
1986 case TGSI_FILE_TEMPORARY:
1987 return idx == 0 ? &tData : &lData;
1988 case TGSI_FILE_ADDRESS:
1989 return &aData;
1990 case TGSI_FILE_OUTPUT:
1991 assert(prog->getType() == Program::TYPE_FRAGMENT);
1992 return &oData;
1993 default:
1994 assert(!"invalid/unhandled TGSI source file");
1995 return NULL;
1996 }
1997 }
1998
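// Relative register indices count vec4 slots, so shift left by 4 to convert
// them into byte offsets.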
1999 Value *
2000 Converter::shiftAddress(Value *index)
2001 {
2002 if (!index)
2003 return NULL;
2004 return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));
2005 }
2006
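// Indirectly addressed TEMP arrays live in lData (idx2d = 1); rebase the index
// by the offset recorded for this array during scanning.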
2007 void
2008 Converter::adjustTempIndex(int arrayId, int &idx, int &idx2d) const
2009 {
2010 std::map<int, int>::const_iterator it =
2011 code->indirectTempOffsets.find(arrayId);
2012 if (it == code->indirectTempOffsets.end())
2013 return;
2014
2015 idx2d = 1;
2016 idx += it->second;
2017 }
2018
2019 bool
2020 Converter::isSubGroupMask(uint8_t semantic)
2021 {
2022 switch (semantic) {
2023 case TGSI_SEMANTIC_SUBGROUP_EQ_MASK:
2024 case TGSI_SEMANTIC_SUBGROUP_LT_MASK:
2025 case TGSI_SEMANTIC_SUBGROUP_LE_MASK:
2026 case TGSI_SEMANTIC_SUBGROUP_GT_MASK:
2027 case TGSI_SEMANTIC_SUBGROUP_GE_MASK:
2028 return true;
2029 default:
2030 return false;
2031 }
2032 }
2033
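// Core source fetch: load one channel from the given TGSI file (immediate,
// constant, input, output, system value, temporary, ...) as a 32-bit value,
// honouring an optional indirect address in ptr.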
2034 Value *
2035 Converter::fetchSrc(tgsi::Instruction::SrcRegister src, int c, Value *ptr)
2036 {
2037 int idx2d = src.is2D() ? src.getIndex(1) : 0;
2038 int idx = src.getIndex(0);
2039 const int swz = src.getSwizzle(c);
2040 Instruction *ld;
2041
2042 switch (src.getFile()) {
2043 case TGSI_FILE_IMMEDIATE:
2044 assert(!ptr);
2045 return loadImm(NULL, code->immd.data[idx * 4 + swz]);
2046 case TGSI_FILE_CONSTANT:
2047 return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));
2048 case TGSI_FILE_INPUT:
2049 if (prog->getType() == Program::TYPE_FRAGMENT) {
2050 // don't load masked inputs, won't be assigned a slot
2051 if (!ptr && !(info_out->in[idx].mask & (1 << swz)))
2052 return loadImm(NULL, swz == TGSI_SWIZZLE_W ? 1.0f : 0.0f);
2053 return interpolate(src, c, shiftAddress(ptr));
2054 } else
2055 if (prog->getType() == Program::TYPE_GEOMETRY) {
2056 if (!ptr && info_out->in[idx].sn == TGSI_SEMANTIC_PRIMID)
2057 return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));
2058 // XXX: This is going to be a problem with scalar arrays, i.e. when
2059 // we cannot assume that the address is given in units of vec4.
2060 //
2061 // nv50 and nvc0 need different things here, so let the lowering
2062 // passes decide what to do with the address
2063 if (ptr)
2064 return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);
2065 }
2066 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2067 ld->perPatch = info_out->in[idx].patch;
2068 return ld->getDef(0);
2069 case TGSI_FILE_OUTPUT:
2070 assert(prog->getType() == Program::TYPE_TESSELLATION_CONTROL);
2071 ld = mkLoad(TYPE_U32, getSSA(), srcToSym(src, c), shiftAddress(ptr));
2072 ld->perPatch = info_out->out[idx].patch;
2073 return ld->getDef(0);
2074 case TGSI_FILE_SYSTEM_VALUE:
2075 assert(!ptr);
2076 if (info_out->sv[idx].sn == TGSI_SEMANTIC_THREAD_ID &&
2077 info->prop.cp.numThreads[swz] == 1)
2078 return loadImm(NULL, 0u);
2079 if (isSubGroupMask(info_out->sv[idx].sn) && swz > 0)
2080 return loadImm(NULL, 0u);
2081 if (info_out->sv[idx].sn == TGSI_SEMANTIC_SUBGROUP_SIZE)
2082 return loadImm(NULL, 32u);
2083 ld = mkOp1(OP_RDSV, TYPE_U32, getSSA(), srcToSym(src, c));
2084 ld->perPatch = info_out->sv[idx].patch;
2085 return ld->getDef(0);
2086 case TGSI_FILE_TEMPORARY: {
2087 int arrayid = src.getArrayId();
2088 if (!arrayid)
2089 arrayid = code->tempArrayId[idx];
2090 adjustTempIndex(arrayid, idx, idx2d);
2091 }
2092 FALLTHROUGH;
2093 default:
2094 return getArrayForFile(src.getFile(), idx2d)->load(
2095 sub.cur->values, idx, swz, shiftAddress(ptr));
2096 }
2097 }
2098
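// Return the Value that dst d, channel c should be computed into, or NULL for
// masked channels and non-register files. Indirect, system-value and non-FS
// output destinations get a scratch value that is stored back explicitly later.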
2099 Value *
2100 Converter::acquireDst(int d, int c)
2101 {
2102 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2103 const unsigned f = dst.getFile();
2104 int idx = dst.getIndex(0);
2105 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2106
2107 if (dst.isMasked(c) || f == TGSI_FILE_BUFFER || f == TGSI_FILE_MEMORY ||
2108 f == TGSI_FILE_IMAGE)
2109 return NULL;
2110
2111 if (dst.isIndirect(0) ||
2112 f == TGSI_FILE_SYSTEM_VALUE ||
2113 (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT))
2114 return getScratch();
2115
2116 if (f == TGSI_FILE_TEMPORARY) {
2117 int arrayid = dst.getArrayId();
2118 if (!arrayid)
2119 arrayid = code->tempArrayId[idx];
2120 adjustTempIndex(arrayid, idx, idx2d);
2121 }
2122
2123    return getArrayForFile(f, idx2d)->acquire(sub.cur->values, idx, c);
2124 }
2125
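// Store val to dst d, channel c: applies saturation if requested, resolves an
// indirect address, and routes clip-vertex writes through clipVtx[] when user
// clip planes are being generated.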
2126 void
2127 Converter::storeDst(int d, int c, Value *val)
2128 {
2129 const tgsi::Instruction::DstRegister dst = tgsi.getDst(d);
2130
2131 if (tgsi.getSaturate()) {
2132 mkOp1(OP_SAT, dstTy, val, val);
2133 }
2134
2135 Value *ptr = NULL;
2136 if (dst.isIndirect(0))
2137 ptr = shiftAddress(fetchSrc(dst.getIndirect(0), 0, NULL));
2138
2139 if (info_out->io.genUserClip > 0 &&
2140 dst.getFile() == TGSI_FILE_OUTPUT &&
2141 !dst.isIndirect(0) && dst.getIndex(0) == code->clipVertexOutput) {
2142 mkMov(clipVtx[c], val);
2143 val = clipVtx[c];
2144 }
2145
2146 storeDst(dst, c, val, ptr);
2147 }
2148
2149 void
2150 Converter::storeDst(const tgsi::Instruction::DstRegister dst, int c,
2151 Value *val, Value *ptr)
2152 {
2153 const unsigned f = dst.getFile();
2154 int idx = dst.getIndex(0);
2155 int idx2d = dst.is2D() ? dst.getIndex(1) : 0;
2156
2157 if (f == TGSI_FILE_SYSTEM_VALUE) {
2158 assert(!ptr);
2159 mkOp2(OP_WRSV, TYPE_U32, NULL, dstToSym(dst, c), val);
2160 } else
2161 if (f == TGSI_FILE_OUTPUT && prog->getType() != Program::TYPE_FRAGMENT) {
2162
2163 if (ptr || (info_out->out[idx].mask & (1 << c))) {
2164 /* Save the viewport index into a scratch register so that it can be
2165 exported at EMIT time */
2166 if (info_out->out[idx].sn == TGSI_SEMANTIC_VIEWPORT_INDEX &&
2167 prog->getType() == Program::TYPE_GEOMETRY &&
2168 viewport != NULL)
2169 mkOp1(OP_MOV, TYPE_U32, viewport, val);
2170 else
2171 mkStore(OP_EXPORT, TYPE_U32, dstToSym(dst, c), ptr, val)->perPatch =
2172 info_out->out[idx].patch;
2173 }
2174 } else
2175 if (f == TGSI_FILE_TEMPORARY ||
2176 f == TGSI_FILE_ADDRESS ||
2177 f == TGSI_FILE_OUTPUT) {
2178 if (f == TGSI_FILE_TEMPORARY) {
2179 int arrayid = dst.getArrayId();
2180 if (!arrayid)
2181 arrayid = code->tempArrayId[idx];
2182 adjustTempIndex(arrayid, idx, idx2d);
2183 }
2184
2185 getArrayForFile(f, idx2d)->store(sub.cur->values, idx, c, ptr, val);
2186 } else {
2187 assert(!"invalid dst file");
2188 }
2189 }
2190
2191 #define FOR_EACH_DST_ENABLED_CHANNEL(d, chan, inst) \
2192 for (chan = 0; chan < 4; ++chan) \
2193 if (!inst.getDst(d).isMasked(chan))
2194
2195 Value *
2196 Converter::buildDot(int dim)
2197 {
2198 assert(dim > 0);
2199
2200 Value *src0 = fetchSrc(0, 0), *src1 = fetchSrc(1, 0);
2201 Value *dotp = getScratch();
2202
2203 mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1)
2204 ->dnz = info->io.mul_zero_wins;
2205
2206 for (int c = 1; c < dim; ++c) {
2207 src0 = fetchSrc(0, c);
2208 src1 = fetchSrc(1, c);
2209 mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp)
2210 ->dnz = info->io.mul_zero_wins;
2211 }
2212 return dotp;
2213 }
2214
2215 void
2216 Converter::insertConvergenceOps(BasicBlock *conv, BasicBlock *fork)
2217 {
2218 FlowInstruction *join = new_FlowInstruction(func, OP_JOIN, NULL);
2219 join->fixed = 1;
2220 conv->insertHead(join);
2221
2222 assert(!fork->joinAt);
2223 fork->joinAt = new_FlowInstruction(func, OP_JOINAT, conv);
2224 fork->insertBefore(fork->getExit(), fork->joinAt);
2225 }
2226
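// Bind the texture (R) and sampler (S) operands of a TexInstruction. Bindless
// handles and indirect indices are appended as extra sources; s is advanced
// past whatever gets added.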
2227 void
2228 Converter::setTexRS(TexInstruction *tex, unsigned int& s, int R, int S)
2229 {
2230 unsigned rIdx = 0, sIdx = 0;
2231
2232 if (R >= 0 && tgsi.getSrc(R).getFile() != TGSI_FILE_SAMPLER) {
2233 // This is the bindless case. We have to get the actual value and pass
2234 // it in. This will be the complete handle.
2235 tex->tex.rIndirectSrc = s;
2236 tex->setSrc(s++, fetchSrc(R, 0));
2237 tex->setTexture(tgsi.getTexture(code, R), 0xff, 0x1f);
2238 tex->tex.bindless = true;
2239 return;
2240 }
2241
2242 if (R >= 0)
2243 rIdx = tgsi.getSrc(R).getIndex(0);
2244 if (S >= 0)
2245 sIdx = tgsi.getSrc(S).getIndex(0);
2246
2247 tex->setTexture(tgsi.getTexture(code, R), rIdx, sIdx);
2248
2249 if (tgsi.getSrc(R).isIndirect(0)) {
2250 tex->tex.rIndirectSrc = s;
2251 tex->setSrc(s++, fetchSrc(tgsi.getSrc(R).getIndirect(0), 0, NULL));
2252 }
2253 if (S >= 0 && tgsi.getSrc(S).isIndirect(0)) {
2254 tex->tex.sIndirectSrc = s;
2255 tex->setSrc(s++, fetchSrc(tgsi.getSrc(S).getIndirect(0), 0, NULL));
2256 }
2257 }
2258
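// TXQ: enabled destination channels form the query mask; for TXQ_DIMS the
// level to query comes from src0.x, otherwise a zero source is used.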
2259 void
2260 Converter::handleTXQ(Value *dst0[4], enum TexQuery query, int R)
2261 {
2262 TexInstruction *tex = new_TexInstruction(func, OP_TXQ);
2263 tex->tex.query = query;
2264 unsigned int c, d;
2265
2266 for (d = 0, c = 0; c < 4; ++c) {
2267 if (!dst0[c])
2268 continue;
2269 tex->tex.mask |= 1 << c;
2270 tex->setDef(d++, dst0[c]);
2271 }
2272 if (query == TXQ_DIMS)
2273 tex->setSrc((c = 0), fetchSrc(0, 0)); // mip level
2274 else
2275 tex->setSrc((c = 0), zero);
2276
2277 setTexRS(tex, ++c, R, -1);
2278
2279 bb->insertTail(tex);
2280 }
2281
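// Helper for TXP: divide the coordinates by the projection value in src0.w.
// Where a coordinate is itself a fresh PINTERP result, the division is
// effectively folded into the interpolation by re-emitting it with 1/q as the
// perspective source (q re-read with linear interpolation); any channels left
// over are simply multiplied by RCP(q).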
2282 void
2283 Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
2284 {
2285 Value *proj = fetchSrc(0, 3);
2286 Instruction *insn = proj->getUniqueInsn();
2287 int c;
2288
2289 if (insn->op == OP_PINTERP) {
2290 bb->insertTail(insn = cloneForward(func, insn));
2291 insn->op = OP_LINTERP;
2292 insn->setInterpolate(NV50_IR_INTERP_LINEAR | insn->getSampleMode());
2293 insn->setSrc(1, NULL);
2294 proj = insn->getDef(0);
2295 }
2296 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);
2297
2298 for (c = 0; c < 4; ++c) {
2299 if (!(mask & (1 << c)))
2300 continue;
2301 if ((insn = src[c]->getUniqueInsn())->op != OP_PINTERP)
2302 continue;
2303 mask &= ~(1 << c);
2304
2305 bb->insertTail(insn = cloneForward(func, insn));
2306 insn->setInterpolate(NV50_IR_INTERP_PERSPECTIVE | insn->getSampleMode());
2307 insn->setSrc(1, proj);
2308 dst[c] = insn->getDef(0);
2309 }
2310 if (!mask)
2311 return;
2312
2313 proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));
2314
2315 for (c = 0; c < 4; ++c)
2316 if (mask & (1 << c))
2317 dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);
2318 }
2319
2320 // order of nv50 ir sources: x y z layer lod/bias shadow
2321 // order of TGSI TEX sources: x y z layer shadow lod/bias
2322 // lowering will eventually set the hw-specific order (like array first on nvc0)
2323 void
2324 Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
2325 {
2326 Value *arg[4], *src[8];
2327 Value *lod = NULL, *shd = NULL;
2328 unsigned int s, c, d;
2329 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2330
2331 TexInstruction::Target tgt = tgsi.getTexture(code, R);
2332
2333 for (s = 0; s < tgt.getArgCount(); ++s)
2334 arg[s] = src[s] = fetchSrc(0, s);
2335
2336 if (tgsi.getOpcode() == TGSI_OPCODE_TEX_LZ)
2337 lod = loadImm(NULL, 0);
2338 else if (texi->op == OP_TXL || texi->op == OP_TXB)
2339 lod = fetchSrc(L >> 4, L & 3);
2340
2341 if (C == 0x0f)
2342 C = 0x00 | MAX2(tgt.getArgCount(), 2); // guess DC src
2343
2344 if (tgt == TEX_TARGET_CUBE_ARRAY_SHADOW) {
2345 switch (tgsi.getOpcode()) {
2346 case TGSI_OPCODE_TG4: shd = fetchSrc(1, 0); break;
2347 case TGSI_OPCODE_TEX2: shd = fetchSrc(1, 0); break;
2348 case TGSI_OPCODE_TXB2: shd = fetchSrc(1, 1); break;
2349 case TGSI_OPCODE_TXL2: shd = fetchSrc(1, 1); break;
2350 default: assert(!"unexpected opcode with cube array shadow"); break;
2351 }
2352 }
2353 else if (tgt.isShadow())
2354 shd = fetchSrc(C >> 4, C & 3);
2355
2356 if (texi->op == OP_TXD) {
2357 for (c = 0; c < tgt.getDim() + tgt.isCube(); ++c) {
2358 texi->dPdx[c].set(fetchSrc(Dx >> 4, (Dx & 3) + c));
2359 texi->dPdy[c].set(fetchSrc(Dy >> 4, (Dy & 3) + c));
2360 }
2361 }
2362
2363 // cube textures don't care about projection value, it's divided out
2364 if (tgsi.getOpcode() == TGSI_OPCODE_TXP && !tgt.isCube() && !tgt.isArray()) {
2365 unsigned int n = tgt.getDim();
2366 if (shd) {
2367 arg[n] = shd;
2368 ++n;
2369 assert(tgt.getDim() == tgt.getArgCount());
2370 }
2371 loadProjTexCoords(src, arg, (1 << n) - 1);
2372 if (shd)
2373 shd = src[n - 1];
2374 }
2375
2376 for (c = 0, d = 0; c < 4; ++c) {
2377 if (dst[c]) {
2378 texi->setDef(d++, dst[c]);
2379 texi->tex.mask |= 1 << c;
2380 } else {
2381 // NOTE: maybe hook up def too, for CSE
2382 }
2383 }
2384 for (s = 0; s < tgt.getArgCount(); ++s)
2385 texi->setSrc(s, src[s]);
2386 if (lod)
2387 texi->setSrc(s++, lod);
2388 if (shd)
2389 texi->setSrc(s++, shd);
2390
2391 setTexRS(texi, s, R, S);
2392
2393 if (tgsi.getOpcode() == TGSI_OPCODE_SAMPLE_C_LZ)
2394 texi->tex.levelZero = true;
2395 if (prog->getType() != Program::TYPE_FRAGMENT &&
2396 (tgsi.getOpcode() == TGSI_OPCODE_TEX ||
2397 tgsi.getOpcode() == TGSI_OPCODE_TEX2 ||
2398 tgsi.getOpcode() == TGSI_OPCODE_TXP))
2399 texi->tex.levelZero = true;
2400 if (tgsi.getOpcode() == TGSI_OPCODE_TG4 && !tgt.isShadow())
2401 texi->tex.gatherComp = tgsi.getSrc(1).getValueU32(0, code->immd.data);
2402
2403 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2404 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2405 for (c = 0; c < 3; ++c) {
2406 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2407 texi->offset[s][c].setInsn(texi);
2408 }
2409 }
2410
2411 bb->insertTail(texi);
2412 }
2413
2414 // 1st source: xyz = coordinates, w = lod/sample
2415 // 2nd source: offset
2416 void
2417 Converter::handleTXF(Value *dst[4], int R, int L_M)
2418 {
2419 TexInstruction *texi = new_TexInstruction(func, tgsi.getOP());
2420 int ms;
2421 unsigned int c, d, s;
2422
2423 texi->tex.target = tgsi.getTexture(code, R);
2424
2425 ms = texi->tex.target.isMS() ? 1 : 0;
2426 texi->tex.levelZero = ms; /* MS textures don't have mip-maps */
2427
2428 for (c = 0, d = 0; c < 4; ++c) {
2429 if (dst[c]) {
2430 texi->setDef(d++, dst[c]);
2431 texi->tex.mask |= 1 << c;
2432 }
2433 }
2434 for (c = 0; c < (texi->tex.target.getArgCount() - ms); ++c)
2435 texi->setSrc(c, fetchSrc(0, c));
2436 if (!ms && tgsi.getOpcode() == TGSI_OPCODE_TXF_LZ)
2437 texi->setSrc(c++, loadImm(NULL, 0));
2438 else
2439 texi->setSrc(c++, fetchSrc(L_M >> 4, L_M & 3)); // lod or ms
2440
2441 setTexRS(texi, c, R, -1);
2442
2443 texi->tex.useOffsets = tgsi.getNumTexOffsets();
2444 for (s = 0; s < tgsi.getNumTexOffsets(); ++s) {
2445 for (c = 0; c < 3; ++c) {
2446 texi->offset[s][c].set(fetchSrc(tgsi.getTexOffset(s), c, NULL));
2447 texi->offset[s][c].setInsn(texi);
2448 }
2449 }
2450
2451 bb->insertTail(texi);
2452 }
2453
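// Framebuffer fetch is lowered to a TXF from a 2D MS array target, addressed
// by the current fragment position, layer and sample index (read via RDSV).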
2454 void
2455 Converter::handleFBFETCH(Value *dst[4])
2456 {
2457 TexInstruction *texi = new_TexInstruction(func, OP_TXF);
2458 unsigned int c, d;
2459
2460 texi->tex.target = TEX_TARGET_2D_MS_ARRAY;
2461 texi->tex.levelZero = true;
2462 texi->tex.useOffsets = 0;
2463
2464 for (c = 0, d = 0; c < 4; ++c) {
2465 if (dst[c]) {
2466 texi->setDef(d++, dst[c]);
2467 texi->tex.mask |= 1 << c;
2468 }
2469 }
2470
2471 Value *x = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 0));
2472 Value *y = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 1));
2473 Value *z = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_LAYER, 0));
2474 Value *ms = mkOp1v(OP_RDSV, TYPE_U32, getScratch(), mkSysVal(SV_SAMPLE_INDEX, 0));
2475
2476 mkCvt(OP_CVT, TYPE_U32, x, TYPE_F32, x)->rnd = ROUND_Z;
2477 mkCvt(OP_CVT, TYPE_U32, y, TYPE_F32, y)->rnd = ROUND_Z;
2478 texi->setSrc(0, x);
2479 texi->setSrc(1, y);
2480 texi->setSrc(2, z);
2481 texi->setSrc(3, ms);
2482
2483 texi->tex.r = texi->tex.s = -1;
2484
2485 bb->insertTail(texi);
2486 }
2487
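// LIT produces the classic lighting coefficients:
//   dst.x = 1, dst.y = max(src.x, 0), dst.w = 1,
//   dst.z = (src.x > 0) ? pow(max(src.y, 0), src.w clamped to roughly +/-128) : 0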
2488 void
2489 Converter::handleLIT(Value *dst0[4])
2490 {
2491 Value *val0 = NULL;
2492 unsigned int mask = tgsi.getDst(0).getMask();
2493
2494 if (mask & (1 << 0))
2495 loadImm(dst0[0], 1.0f);
2496
2497 if (mask & (1 << 3))
2498 loadImm(dst0[3], 1.0f);
2499
2500 if (mask & (3 << 1)) {
2501 val0 = getScratch();
2502 mkOp2(OP_MAX, TYPE_F32, val0, fetchSrc(0, 0), zero);
2503 if (mask & (1 << 1))
2504 mkMov(dst0[1], val0);
2505 }
2506
2507 if (mask & (1 << 2)) {
2508 Value *src1 = fetchSrc(0, 1), *src3 = fetchSrc(0, 3);
2509 Value *val1 = getScratch(), *val3 = getScratch();
2510
2511 Value *pos128 = loadImm(NULL, +127.999999f);
2512 Value *neg128 = loadImm(NULL, -127.999999f);
2513
2514 mkOp2(OP_MAX, TYPE_F32, val1, src1, zero);
2515 mkOp2(OP_MAX, TYPE_F32, val3, src3, neg128);
2516 mkOp2(OP_MIN, TYPE_F32, val3, val3, pos128);
2517 mkOp2(OP_POW, TYPE_F32, val3, val1, val3);
2518
2519 mkCmp(OP_SLCT, CC_GT, TYPE_F32, dst0[2], TYPE_F32, val3, zero, val0);
2520 }
2521 }
2522
2523 /* Keep this around for now as reference when adding img support
2524 static inline bool
2525 isResourceSpecial(const int r)
2526 {
2527 return (r == TGSI_RESOURCE_GLOBAL ||
2528 r == TGSI_RESOURCE_LOCAL ||
2529 r == TGSI_RESOURCE_PRIVATE ||
2530 r == TGSI_RESOURCE_INPUT);
2531 }
2532
2533 static inline bool
2534 isResourceRaw(const tgsi::Source *code, const int r)
2535 {
2536 return isResourceSpecial(r) || code->resources[r].raw;
2537 }
2538
2539 static inline nv50_ir::TexTarget
2540 getResourceTarget(const tgsi::Source *code, int r)
2541 {
2542 if (isResourceSpecial(r))
2543 return nv50_ir::TEX_TARGET_BUFFER;
2544 return tgsi::translateTexture(code->resources.at(r).target);
2545 }
2546
2547 Symbol *
2548 Converter::getResourceBase(const int r)
2549 {
2550 Symbol *sym = NULL;
2551
2552 switch (r) {
2553 case TGSI_RESOURCE_GLOBAL:
2554 sym = new_Symbol(prog, nv50_ir::FILE_MEMORY_GLOBAL,
2555 info->io.auxCBSlot);
2556 break;
2557 case TGSI_RESOURCE_LOCAL:
2558 assert(prog->getType() == Program::TYPE_COMPUTE);
2559 sym = mkSymbol(nv50_ir::FILE_MEMORY_SHARED, 0, TYPE_U32,
2560 info->prop.cp.sharedOffset);
2561 break;
2562 case TGSI_RESOURCE_PRIVATE:
2563 sym = mkSymbol(nv50_ir::FILE_MEMORY_LOCAL, 0, TYPE_U32,
2564 info->bin.tlsSpace);
2565 break;
2566 case TGSI_RESOURCE_INPUT:
2567 assert(prog->getType() == Program::TYPE_COMPUTE);
2568 sym = mkSymbol(nv50_ir::FILE_SHADER_INPUT, 0, TYPE_U32,
2569 info->prop.cp.inputOffset);
2570 break;
2571 default:
2572 sym = new_Symbol(prog,
2573 nv50_ir::FILE_MEMORY_GLOBAL, code->resources.at(r).slot);
2574 break;
2575 }
2576 return sym;
2577 }
2578
2579 void
2580 Converter::getResourceCoords(std::vector<Value *> &coords, int r, int s)
2581 {
2582 const int arg =
2583 TexInstruction::Target(getResourceTarget(code, r)).getArgCount();
2584
2585 for (int c = 0; c < arg; ++c)
2586 coords.push_back(fetchSrc(s, c));
2587
2588 // NOTE: TGSI_RESOURCE_GLOBAL needs FILE_GPR; this is an nv50 quirk
2589 if (r == TGSI_RESOURCE_LOCAL ||
2590 r == TGSI_RESOURCE_PRIVATE ||
2591 r == TGSI_RESOURCE_INPUT)
2592 coords[0] = mkOp1v(OP_MOV, TYPE_U32, getScratch(4, FILE_ADDRESS),
2593 coords[0]);
2594 }
2595
2596 static inline int
2597 partitionLoadStore(uint8_t comp[2], uint8_t size[2], uint8_t mask)
2598 {
2599 int n = 0;
2600
2601 while (mask) {
2602 if (mask & 1) {
2603 size[n]++;
2604 } else {
2605 if (size[n])
2606 comp[n = 1] = size[0] + 1;
2607 else
2608 comp[n]++;
2609 }
2610 mask >>= 1;
2611 }
2612 if (size[0] == 3) {
2613 n = 1;
2614 size[0] = (comp[0] == 1) ? 1 : 2;
2615 size[1] = 3 - size[0];
2616 comp[1] = comp[0] + size[0];
2617 }
2618 return n + 1;
2619 }
2620 */
2621 void
2622 Converter::getImageCoords(std::vector<Value *> &coords, int s)
2623 {
2624 TexInstruction::Target t =
2625 TexInstruction::Target(tgsi.getImageTarget());
2626 const int arg = t.getDim() + (t.isArray() || t.isCube());
2627
2628 for (int c = 0; c < arg; ++c)
2629 coords.push_back(fetchSrc(s, c));
2630
2631 if (t.isMS())
2632 coords.push_back(fetchSrc(s, 3));
2633 }
2634
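// Buffer/image slots may have been renumbered while scanning the shader;
// translate through the maps collected in tgsi::Source and fall back to the
// original id when there is no entry.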
2635 int
2636 Converter::remapBufferId(int id)
2637 {
2638 std::map<int, int>::const_iterator it = code->bufferIds.find(id);
2639 if (it != code->bufferIds.end())
2640 return it->second;
2641 return id;
2642 }
2643
2644 int
2645 Converter::remapImageId(int id)
2646 {
2647 std::map<int, int>::const_iterator it = code->imageIds.find(id);
2648 if (it != code->imageIds.end())
2649 return it->second;
2650 return id;
2651 }
2652
2653 // For raw loads, the granularity is 4 bytes.
2654 // Usage of the texture read mask on OP_SULDP is not allowed.
2655 void
2656 Converter::handleLOAD(Value *dst0[4])
2657 {
2658 int r = tgsi.getSrc(0).getIndex(0);
2659 int c;
2660 std::vector<Value *> off, src, ldv, def;
2661 Value *ind = NULL;
2662
2663 if (tgsi.getSrc(0).isIndirect(0))
2664 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2665
2666 switch (tgsi.getSrc(0).getFile()) {
2667 case TGSI_FILE_BUFFER:
2668 r = remapBufferId(r);
2669 /* fallthrough */
2670 case TGSI_FILE_MEMORY:
2671 for (c = 0; c < 4; ++c) {
2672 if (!dst0[c])
2673 continue;
2674
2675 Value *off;
2676 Symbol *sym;
2677 uint32_t src0_component_offset = tgsi.getSrc(0).getSwizzle(c) * 4;
2678
2679 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE) {
2680 off = NULL;
2681 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2682 tgsi.getSrc(1).getValueU32(0, code->immd.data) +
2683 src0_component_offset);
2684 } else {
2685 // yzw are ignored for buffers
2686 off = fetchSrc(1, 0);
2687 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2688 src0_component_offset);
2689 }
2690
2691 Instruction *ld = mkLoad(TYPE_U32, dst0[c], sym, off);
2692 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER &&
2693 code->bufferAtomics[tgsi.getSrc(0).getIndex(0)])
2694 ld->cache = nv50_ir::CACHE_CG;
2695 else
2696 ld->cache = tgsi.getCacheMode();
2697 if (ind)
2698 ld->setIndirect(0, 1, ind);
2699 }
2700 break;
2701 default: {
2702 r = remapImageId(r);
2703 getImageCoords(off, 1);
2704 def.resize(4);
2705
2706 for (c = 0; c < 4; ++c) {
2707 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2708 def[c] = getScratch();
2709 else
2710 def[c] = dst0[c];
2711 }
2712
2713 bool bindless = tgsi.getSrc(0).getFile() != TGSI_FILE_IMAGE;
2714 if (bindless)
2715 ind = fetchSrc(0, 0);
2716
2717 TexInstruction *ld =
2718 mkTex(OP_SULDP, tgsi.getImageTarget(), 0, 0, def, off);
2719 ld->tex.mask = tgsi.getDst(0).getMask();
2720 ld->tex.format = tgsi.getImageFormat();
2721 ld->cache = tgsi.getCacheMode();
2722 ld->tex.bindless = bindless;
2723 if (!bindless)
2724 ld->tex.r = r;
2725 if (ind)
2726 ld->setIndirectR(ind);
2727
2728 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2729 if (dst0[c] != def[c])
2730 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2731 break;
2732 }
2733 }
2734
2735
2736 /* Keep this around for now as reference when adding img support
2737 getResourceCoords(off, r, 1);
2738
2739 if (isResourceRaw(code, r)) {
2740 uint8_t mask = 0;
2741 uint8_t comp[2] = { 0, 0 };
2742 uint8_t size[2] = { 0, 0 };
2743
2744 Symbol *base = getResourceBase(r);
2745
2746 // determine the base and size of the at most 2 load ops
2747 for (c = 0; c < 4; ++c)
2748 if (!tgsi.getDst(0).isMasked(c))
2749 mask |= 1 << (tgsi.getSrc(0).getSwizzle(c) - TGSI_SWIZZLE_X);
2750
2751 int n = partitionLoadStore(comp, size, mask);
2752
2753 src = off;
2754
2755 def.resize(4); // index by component, the ones we need will be non-NULL
2756 for (c = 0; c < 4; ++c) {
2757 if (dst0[c] && tgsi.getSrc(0).getSwizzle(c) == (TGSI_SWIZZLE_X + c))
2758 def[c] = dst0[c];
2759 else
2760 if (mask & (1 << c))
2761 def[c] = getScratch();
2762 }
2763
2764 const bool useLd = isResourceSpecial(r) ||
2765 (info->io.nv50styleSurfaces &&
2766 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2767
2768 for (int i = 0; i < n; ++i) {
2769 ldv.assign(def.begin() + comp[i], def.begin() + comp[i] + size[i]);
2770
2771 if (comp[i]) // adjust x component of source address if necessary
2772 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2773 off[0], mkImm(comp[i] * 4));
2774 else
2775 src[0] = off[0];
2776
2777 if (useLd) {
2778 Instruction *ld =
2779 mkLoad(typeOfSize(size[i] * 4), ldv[0], base, src[0]);
2780 for (size_t c = 1; c < ldv.size(); ++c)
2781 ld->setDef(c, ldv[c]);
2782 } else {
2783 mkTex(OP_SULDB, getResourceTarget(code, r), code->resources[r].slot,
2784 0, ldv, src)->dType = typeOfSize(size[i] * 4);
2785 }
2786 }
2787 } else {
2788 def.resize(4);
2789 for (c = 0; c < 4; ++c) {
2790 if (!dst0[c] || tgsi.getSrc(0).getSwizzle(c) != (TGSI_SWIZZLE_X + c))
2791 def[c] = getScratch();
2792 else
2793 def[c] = dst0[c];
2794 }
2795
2796 mkTex(OP_SULDP, getResourceTarget(code, r), code->resources[r].slot, 0,
2797 def, off);
2798 }
2799 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2800 if (dst0[c] != def[c])
2801 mkMov(dst0[c], def[tgsi.getSrc(0).getSwizzle(c)]);
2802 */
2803 }
2804
2805 // For formatted stores, the write mask on OP_SUSTP can be used.
2806 // Raw stores have to be split.
2807 void
2808 Converter::handleSTORE()
2809 {
2810 int r = tgsi.getDst(0).getIndex(0);
2811 int c;
2812 std::vector<Value *> off, src, dummy;
2813 Value *ind = NULL;
2814
2815 if (tgsi.getDst(0).isIndirect(0))
2816 ind = fetchSrc(tgsi.getDst(0).getIndirect(0), 0, 0);
2817
2818 switch (tgsi.getDst(0).getFile()) {
2819 case TGSI_FILE_BUFFER:
2820 r = remapBufferId(r);
2821 /* fallthrough */
2822 case TGSI_FILE_MEMORY:
2823 for (c = 0; c < 4; ++c) {
2824 if (!(tgsi.getDst(0).getMask() & (1 << c)))
2825 continue;
2826
2827 Symbol *sym;
2828 Value *off;
2829 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMMEDIATE) {
2830 off = NULL;
2831 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c,
2832 tgsi.getSrc(0).getValueU32(0, code->immd.data) + 4 * c);
2833 } else {
2834 // yzw are ignored for buffers
2835 off = fetchSrc(0, 0);
2836 sym = makeSym(tgsi.getDst(0).getFile(), r, -1, c, 4 * c);
2837 }
2838
2839 Instruction *st = mkStore(OP_STORE, TYPE_U32, sym, off, fetchSrc(1, c));
2840 st->cache = tgsi.getCacheMode();
2841 if (ind)
2842 st->setIndirect(0, 1, ind);
2843 }
2844 break;
2845 default: {
2846 r = remapImageId(r);
2847 getImageCoords(off, 0);
2848 src = off;
2849
2850 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2851 src.push_back(fetchSrc(1, c));
2852
2853 bool bindless = tgsi.getDst(0).getFile() != TGSI_FILE_IMAGE;
2854 if (bindless)
2855 ind = fetchDst(0, 0);
2856
2857 TexInstruction *st =
2858 mkTex(OP_SUSTP, tgsi.getImageTarget(), 0, 0, dummy, src);
2859 st->tex.mask = tgsi.getDst(0).getMask();
2860 st->tex.format = tgsi.getImageFormat();
2861 st->cache = tgsi.getCacheMode();
2862 st->tex.bindless = bindless;
2863 if (!bindless)
2864 st->tex.r = r;
2865 if (ind)
2866 st->setIndirectR(ind);
2867
2868 break;
2869 }
2870 }
2871
2872 /* Keep this around for now as reference when adding img support
2873 getResourceCoords(off, r, 0);
2874 src = off;
2875 const int s = src.size();
2876
2877 if (isResourceRaw(code, r)) {
2878 uint8_t comp[2] = { 0, 0 };
2879 uint8_t size[2] = { 0, 0 };
2880
2881 int n = partitionLoadStore(comp, size, tgsi.getDst(0).getMask());
2882
2883 Symbol *base = getResourceBase(r);
2884
2885 const bool useSt = isResourceSpecial(r) ||
2886 (info->io.nv50styleSurfaces &&
2887 code->resources[r].target == TGSI_TEXTURE_BUFFER);
2888
2889 for (int i = 0; i < n; ++i) {
2890 if (comp[i]) // adjust x component of source address if necessary
2891 src[0] = mkOp2v(OP_ADD, TYPE_U32, getSSA(4, off[0]->reg.file),
2892 off[0], mkImm(comp[i] * 4));
2893 else
2894 src[0] = off[0];
2895
2896 const DataType stTy = typeOfSize(size[i] * 4);
2897
2898 if (useSt) {
2899 Instruction *st =
2900 mkStore(OP_STORE, stTy, base, NULL, fetchSrc(1, comp[i]));
2901 for (c = 1; c < size[i]; ++c)
2902 st->setSrc(1 + c, fetchSrc(1, comp[i] + c));
2903 st->setIndirect(0, 0, src[0]);
2904 } else {
2905 // attach values to be stored
2906 src.resize(s + size[i]);
2907 for (c = 0; c < size[i]; ++c)
2908 src[s + c] = fetchSrc(1, comp[i] + c);
2909 mkTex(OP_SUSTB, getResourceTarget(code, r), code->resources[r].slot,
2910 0, dummy, src)->setType(stTy);
2911 }
2912 }
2913 } else {
2914 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
2915 src.push_back(fetchSrc(1, c));
2916
2917 mkTex(OP_SUSTP, getResourceTarget(code, r), code->resources[r].slot, 0,
2918 dummy, src)->tex.mask = tgsi.getDst(0).getMask();
2919 }
2920 */
2921 }
2922
2923 // XXX: These only work on resources with the single-component u32/s32 formats.
2924 // Therefore the result is replicated. This might not be intended by TGSI, but
2925 // operating on more than one component would produce undefined results because
2926 // those components do not exist.
2927 void
2928 Converter::handleATOM(Value *dst0[4], DataType ty, uint16_t subOp)
2929 {
2930 int r = tgsi.getSrc(0).getIndex(0);
2931 std::vector<Value *> srcv;
2932 std::vector<Value *> defv;
2933 LValue *dst = getScratch();
2934 Value *ind = NULL;
2935
2936 if (tgsi.getSrc(0).isIndirect(0))
2937 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
2938
2939 switch (tgsi.getSrc(0).getFile()) {
2940 case TGSI_FILE_BUFFER:
2941 r = remapBufferId(r);
2942 /* fallthrough */
2943 case TGSI_FILE_MEMORY:
2944 for (int c = 0; c < 4; ++c) {
2945 if (!dst0[c])
2946 continue;
2947
2948 Instruction *insn;
2949 Value *off = fetchSrc(1, c);
2950 Value *sym;
2951 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE)
2952 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c,
2953 tgsi.getSrc(1).getValueU32(c, code->immd.data));
2954 else
2955 sym = makeSym(tgsi.getSrc(0).getFile(), r, -1, c, 0);
2956 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2957 insn = mkOp3(OP_ATOM, ty, dst, sym, fetchSrc(2, c), fetchSrc(3, c));
2958 else
2959 insn = mkOp2(OP_ATOM, ty, dst, sym, fetchSrc(2, c));
2960 if (tgsi.getSrc(1).getFile() != TGSI_FILE_IMMEDIATE)
2961 insn->setIndirect(0, 0, off);
2962 if (ind)
2963 insn->setIndirect(0, 1, ind);
2964 insn->subOp = subOp;
2965
2966 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER &&
2967 code->bufferAtomics[tgsi.getSrc(0).getIndex(0)])
2968 insn->cache = nv50_ir::CACHE_CG;
2969 }
2970 for (int c = 0; c < 4; ++c)
2971 if (dst0[c])
2972 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
2973 break;
2974 default: {
2975 r = remapImageId(r);
2976 getImageCoords(srcv, 1);
2977 defv.push_back(dst);
2978 srcv.push_back(fetchSrc(2, 0));
2979
2980 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
2981 srcv.push_back(fetchSrc(3, 0));
2982
2983 bool bindless = tgsi.getSrc(0).getFile() != TGSI_FILE_IMAGE;
2984 if (bindless)
2985 ind = fetchSrc(0, 0);
2986
2987 TexInstruction *tex = mkTex(OP_SUREDP, tgsi.getImageTarget(),
2988 0, 0, defv, srcv);
2989 tex->subOp = subOp;
2990 tex->tex.mask = 1;
2991 tex->tex.format = tgsi.getImageFormat();
2992 tex->setType(ty);
2993 tex->tex.bindless = bindless;
2994 if (!bindless)
2995 tex->tex.r = r;
2996 if (ind)
2997 tex->setIndirectR(ind);
2998
2999 for (int c = 0; c < 4; ++c)
3000 if (dst0[c])
3001 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
3002 break;
3003 }
3004 }
3005
3006 /* Keep this around for now as reference when adding img support
3007 getResourceCoords(srcv, r, 1);
3008
3009 if (isResourceSpecial(r)) {
3010 assert(r != TGSI_RESOURCE_INPUT);
3011 Instruction *insn;
3012 insn = mkOp2(OP_ATOM, ty, dst, getResourceBase(r), fetchSrc(2, 0));
3013 insn->subOp = subOp;
3014 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
3015 insn->setSrc(2, fetchSrc(3, 0));
3016 insn->setIndirect(0, 0, srcv.at(0));
3017 } else {
3018 operation op = isResourceRaw(code, r) ? OP_SUREDB : OP_SUREDP;
3019 TexTarget targ = getResourceTarget(code, r);
3020 int idx = code->resources[r].slot;
3021 defv.push_back(dst);
3022 srcv.push_back(fetchSrc(2, 0));
3023 if (subOp == NV50_IR_SUBOP_ATOM_CAS)
3024 srcv.push_back(fetchSrc(3, 0));
3025 TexInstruction *tex = mkTex(op, targ, idx, 0, defv, srcv);
3026 tex->subOp = subOp;
3027 tex->tex.mask = 1;
3028 tex->setType(ty);
3029 }
3030
3031 for (int c = 0; c < 4; ++c)
3032 if (dst0[c])
3033 dst0[c] = dst; // not equal to rDst so handleInstruction will do mkMov
3034 */
3035 }
3036
3037 void
3038 Converter::handleINTERP(Value *dst[4])
3039 {
3040    // Check whether the input is linear. All other attributes are ignored.
3041 Instruction *insn;
3042 Value *offset = NULL, *ptr = NULL, *w = NULL;
3043 Symbol *sym[4] = { NULL };
3044 bool linear;
3045 operation op = OP_NOP;
3046 int c, mode = 0;
3047
3048 tgsi::Instruction::SrcRegister src = tgsi.getSrc(0);
3049
3050 // In some odd cases, in large part due to varying packing, the source
3051 // might not actually be an input. This is illegal TGSI, but it's easier to
3052 // account for it here than it is to fix it where the TGSI is being
3053 // generated. In that case, it's going to be a straight up mov (or sequence
3054 // of mov's) from the input in question. We follow the mov chain to see
3055 // which input we need to use.
3056 if (src.getFile() != TGSI_FILE_INPUT) {
3057 if (src.isIndirect(0)) {
3058 ERROR("Ignoring indirect input interpolation\n");
3059 return;
3060 }
3061 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3062 Value *val = fetchSrc(0, c);
3063 assert(val->defs.size() == 1);
3064 insn = val->getInsn();
3065 while (insn->op == OP_MOV) {
3066 assert(insn->getSrc(0)->defs.size() == 1);
3067 insn = insn->getSrc(0)->getInsn();
3068 if (!insn) {
3069 ERROR("Miscompiling shader due to unhandled INTERP\n");
3070 return;
3071 }
3072 }
3073 if (insn->op != OP_LINTERP && insn->op != OP_PINTERP) {
3074 ERROR("Trying to interpolate non-input, this is not allowed.\n");
3075 return;
3076 }
3077 sym[c] = insn->getSrc(0)->asSym();
3078 assert(sym[c]);
3079 op = insn->op;
3080 mode = insn->ipa;
3081 ptr = insn->getIndirect(0, 0);
3082 }
3083 } else {
3084 if (src.isIndirect(0))
3085 ptr = shiftAddress(fetchSrc(src.getIndirect(0), 0, NULL));
3086
3087       // In the indirect case we can assume that the fixed index points to an
3088       // input of the same interpolation type.
3089 // TODO: Make use of ArrayID.
3090 linear = info_out->in[src.getIndex(0)].linear;
3091 if (linear) {
3092 op = OP_LINTERP;
3093 mode = NV50_IR_INTERP_LINEAR;
3094 } else {
3095 op = OP_PINTERP;
3096 mode = NV50_IR_INTERP_PERSPECTIVE;
3097 }
3098 }
3099
3100 switch (tgsi.getOpcode()) {
3101 case TGSI_OPCODE_INTERP_CENTROID:
3102 mode |= NV50_IR_INTERP_CENTROID;
3103 break;
3104 case TGSI_OPCODE_INTERP_SAMPLE: {
3105 // When using a non-MS buffer, we're supposed to always use the center
3106       // (i.e. sample 0). This adds a SELP which will always be true or false
3107 // based on a data fixup.
3108 Value *sample = getScratch();
3109 mkOp3(OP_SELP, TYPE_U32, sample, mkImm(0), fetchSrc(1, 0), mkImm(0))
3110 ->subOp = 2;
3111
3112 insn = mkOp1(OP_PIXLD, TYPE_U32, (offset = getScratch()), sample);
3113 insn->subOp = NV50_IR_SUBOP_PIXLD_OFFSET;
3114 mode |= NV50_IR_INTERP_OFFSET;
3115 break;
3116 }
3117 case TGSI_OPCODE_INTERP_OFFSET: {
3118 // The input in src1.xy is float, but we need a single 32-bit value
3119 // where the upper and lower 16 bits are encoded in S0.12 format. We need
3120 // to clamp the input coordinates to (-0.5, 0.4375), multiply by 4096,
3121 // and then convert to s32.
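      // For example, a requested offset of 0.25 ends up as 0.25 * 4096 = 1024 in
      // the packed fixed-point word built below.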
3122 Value *offs[2];
3123 for (c = 0; c < 2; c++) {
3124 offs[c] = getScratch();
3125 mkOp2(OP_MIN, TYPE_F32, offs[c], fetchSrc(1, c), loadImm(NULL, 0.4375f));
3126 mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));
3127 mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));
3128 mkCvt(OP_CVT, TYPE_S32, offs[c], TYPE_F32, offs[c]);
3129 }
3130 offset = mkOp3v(OP_INSBF, TYPE_U32, getScratch(),
3131 offs[1], mkImm(0x1010), offs[0]);
3132 mode |= NV50_IR_INTERP_OFFSET;
3133 break;
3134 }
3135 }
3136
3137 if (op == OP_PINTERP) {
3138 if (offset) {
3139 w = mkOp2v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 3), offset);
3140 mkOp1(OP_RCP, TYPE_F32, w, w);
3141 } else {
3142 w = fragCoord[3];
3143 }
3144 }
3145
3146
3147 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3148 insn = mkOp1(op, TYPE_F32, dst[c], sym[c] ? sym[c] : srcToSym(src, c));
3149 if (op == OP_PINTERP)
3150 insn->setSrc(1, w);
3151 if (offset)
3152 insn->setSrc(op == OP_PINTERP ? 2 : 1, offset);
3153 if (ptr)
3154 insn->setIndirect(0, 0, ptr);
3155
3156 insn->setInterpolate(mode);
3157 }
3158 }
3159
3160 bool
3161 Converter::isEndOfSubroutine(uint ip)
3162 {
3163 assert(ip < code->scan.num_instructions);
3164 tgsi::Instruction insn(&code->insns[ip]);
3165 return (insn.getOpcode() == TGSI_OPCODE_END ||
3166 insn.getOpcode() == TGSI_OPCODE_ENDSUB ||
3167           // does END occur at the end of main or at the very end?
3168 insn.getOpcode() == TGSI_OPCODE_BGNSUB);
3169 }
3170
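// Translate one TGSI instruction. Destinations are acquired up front; when
// checkDstSrcAliasing() reports overlap, results are computed into scratch
// values (dst0 != rDst0) and copied to the real destinations afterwards.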
3171 bool
3172 Converter::handleInstruction(const struct tgsi_full_instruction *insn)
3173 {
3174 Instruction *geni;
3175
3176 Value *dst0[4], *rDst0[4];
3177 Value *src0, *src1, *src2, *src3;
3178 Value *val0 = NULL, *val1 = NULL;
3179 int c;
3180
3181 tgsi = tgsi::Instruction(insn);
3182
3183 bool useScratchDst = tgsi.checkDstSrcAliasing();
3184
3185 operation op = tgsi.getOP();
3186 dstTy = tgsi.inferDstType();
3187 srcTy = tgsi.inferSrcType();
3188
3189 unsigned int mask = tgsi.dstCount() ? tgsi.getDst(0).getMask() : 0;
3190
3191 if (tgsi.dstCount() && tgsi.getOpcode() != TGSI_OPCODE_STORE) {
3192 for (c = 0; c < 4; ++c) {
3193 rDst0[c] = acquireDst(0, c);
3194 dst0[c] = (useScratchDst && rDst0[c]) ? getScratch() : rDst0[c];
3195 }
3196 }
3197
3198 switch (tgsi.getOpcode()) {
3199 case TGSI_OPCODE_ADD:
3200 case TGSI_OPCODE_UADD:
3201 case TGSI_OPCODE_AND:
3202 case TGSI_OPCODE_DIV:
3203 case TGSI_OPCODE_IDIV:
3204 case TGSI_OPCODE_UDIV:
3205 case TGSI_OPCODE_MAX:
3206 case TGSI_OPCODE_MIN:
3207 case TGSI_OPCODE_IMAX:
3208 case TGSI_OPCODE_IMIN:
3209 case TGSI_OPCODE_UMAX:
3210 case TGSI_OPCODE_UMIN:
3211 case TGSI_OPCODE_MOD:
3212 case TGSI_OPCODE_UMOD:
3213 case TGSI_OPCODE_MUL:
3214 case TGSI_OPCODE_UMUL:
3215 case TGSI_OPCODE_IMUL_HI:
3216 case TGSI_OPCODE_UMUL_HI:
3217 case TGSI_OPCODE_OR:
3218 case TGSI_OPCODE_SHL:
3219 case TGSI_OPCODE_ISHR:
3220 case TGSI_OPCODE_USHR:
3221 case TGSI_OPCODE_XOR:
3222 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3223 src0 = fetchSrc(0, c);
3224 src1 = fetchSrc(1, c);
3225 geni = mkOp2(op, dstTy, dst0[c], src0, src1);
3226 geni->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3227 if (op == OP_MUL && dstTy == TYPE_F32)
3228 geni->dnz = info->io.mul_zero_wins;
3229 geni->precise = insn->Instruction.Precise;
3230 }
3231 break;
3232 case TGSI_OPCODE_MAD:
3233 case TGSI_OPCODE_UMAD:
3234 case TGSI_OPCODE_FMA:
3235 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3236 src0 = fetchSrc(0, c);
3237 src1 = fetchSrc(1, c);
3238 src2 = fetchSrc(2, c);
3239 geni = mkOp3(op, dstTy, dst0[c], src0, src1, src2);
3240 if (dstTy == TYPE_F32)
3241 geni->dnz = info->io.mul_zero_wins;
3242 geni->precise = insn->Instruction.Precise;
3243 }
3244 break;
3245 case TGSI_OPCODE_MOV:
3246 case TGSI_OPCODE_CEIL:
3247 case TGSI_OPCODE_FLR:
3248 case TGSI_OPCODE_TRUNC:
3249 case TGSI_OPCODE_RCP:
3250 case TGSI_OPCODE_SQRT:
3251 case TGSI_OPCODE_IABS:
3252 case TGSI_OPCODE_INEG:
3253 case TGSI_OPCODE_NOT:
3254 case TGSI_OPCODE_DDX:
3255 case TGSI_OPCODE_DDY:
3256 case TGSI_OPCODE_DDX_FINE:
3257 case TGSI_OPCODE_DDY_FINE:
3258 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3259 mkOp1(op, dstTy, dst0[c], fetchSrc(0, c));
3260 break;
3261 case TGSI_OPCODE_RSQ:
3262 src0 = fetchSrc(0, 0);
3263 val0 = getScratch();
3264 mkOp1(OP_ABS, TYPE_F32, val0, src0);
3265 mkOp1(OP_RSQ, TYPE_F32, val0, val0);
3266 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3267 mkMov(dst0[c], val0);
3268 break;
3269 case TGSI_OPCODE_ARL:
3270 case TGSI_OPCODE_ARR:
3271 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3272 const RoundMode rnd =
3273 tgsi.getOpcode() == TGSI_OPCODE_ARR ? ROUND_N : ROUND_M;
3274 src0 = fetchSrc(0, c);
3275 mkCvt(OP_CVT, TYPE_S32, dst0[c], TYPE_F32, src0)->rnd = rnd;
3276 }
3277 break;
3278 case TGSI_OPCODE_UARL:
3279 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3280 mkOp1(OP_MOV, TYPE_U32, dst0[c], fetchSrc(0, c));
3281 break;
3282 case TGSI_OPCODE_POW:
3283 val0 = mkOp2v(op, TYPE_F32, getScratch(), fetchSrc(0, 0), fetchSrc(1, 0));
3284 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3285 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3286 break;
3287 case TGSI_OPCODE_EX2:
3288 case TGSI_OPCODE_LG2:
3289 val0 = mkOp1(op, TYPE_F32, getScratch(), fetchSrc(0, 0))->getDef(0);
3290 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3291 mkOp1(OP_MOV, TYPE_F32, dst0[c], val0);
3292 break;
3293 case TGSI_OPCODE_COS:
3294 case TGSI_OPCODE_SIN:
3295 val0 = getScratch();
3296 if (mask & 7) {
3297 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 0));
3298 mkOp1(op, TYPE_F32, val0, val0);
3299 for (c = 0; c < 3; ++c)
3300 if (dst0[c])
3301 mkMov(dst0[c], val0);
3302 }
3303 if (dst0[3]) {
3304 mkOp1(OP_PRESIN, TYPE_F32, val0, fetchSrc(0, 3));
3305 mkOp1(op, TYPE_F32, dst0[3], val0);
3306 }
3307 break;
3308 case TGSI_OPCODE_EXP:
3309 src0 = fetchSrc(0, 0);
3310 val0 = mkOp1v(OP_FLOOR, TYPE_F32, getSSA(), src0);
3311 if (dst0[1])
3312 mkOp2(OP_SUB, TYPE_F32, dst0[1], src0, val0);
3313 if (dst0[0])
3314 mkOp1(OP_EX2, TYPE_F32, dst0[0], val0);
3315 if (dst0[2])
3316 mkOp1(OP_EX2, TYPE_F32, dst0[2], src0);
3317 if (dst0[3])
3318 loadImm(dst0[3], 1.0f);
3319 break;
3320 case TGSI_OPCODE_LOG:
3321 src0 = mkOp1v(OP_ABS, TYPE_F32, getSSA(), fetchSrc(0, 0));
3322 val0 = mkOp1v(OP_LG2, TYPE_F32, dst0[2] ? dst0[2] : getSSA(), src0);
3323 if (dst0[0] || dst0[1])
3324 val1 = mkOp1v(OP_FLOOR, TYPE_F32, dst0[0] ? dst0[0] : getSSA(), val0);
3325 if (dst0[1]) {
3326 mkOp1(OP_EX2, TYPE_F32, dst0[1], val1);
3327 mkOp1(OP_RCP, TYPE_F32, dst0[1], dst0[1]);
3328 mkOp2(OP_MUL, TYPE_F32, dst0[1], dst0[1], src0)
3329 ->dnz = info->io.mul_zero_wins;
3330 }
3331 if (dst0[3])
3332 loadImm(dst0[3], 1.0f);
3333 break;
3334 case TGSI_OPCODE_DP2:
3335 val0 = buildDot(2);
3336 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3337 mkMov(dst0[c], val0);
3338 break;
3339 case TGSI_OPCODE_DP3:
3340 val0 = buildDot(3);
3341 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3342 mkMov(dst0[c], val0);
3343 break;
3344 case TGSI_OPCODE_DP4:
3345 val0 = buildDot(4);
3346 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3347 mkMov(dst0[c], val0);
3348 break;
3349 case TGSI_OPCODE_DST:
3350 if (dst0[0])
3351 loadImm(dst0[0], 1.0f);
3352 if (dst0[1]) {
3353 src0 = fetchSrc(0, 1);
3354 src1 = fetchSrc(1, 1);
3355 mkOp2(OP_MUL, TYPE_F32, dst0[1], src0, src1)
3356 ->dnz = info->io.mul_zero_wins;
3357 }
3358 if (dst0[2])
3359 mkMov(dst0[2], fetchSrc(0, 2));
3360 if (dst0[3])
3361 mkMov(dst0[3], fetchSrc(1, 3));
3362 break;
3363 case TGSI_OPCODE_LRP:
3364 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3365 src0 = fetchSrc(0, c);
3366 src1 = fetchSrc(1, c);
3367 src2 = fetchSrc(2, c);
3368 mkOp3(OP_MAD, TYPE_F32, dst0[c],
3369 mkOp2v(OP_SUB, TYPE_F32, getSSA(), src1, src2), src0, src2)
3370 ->dnz = info->io.mul_zero_wins;
3371 }
3372 break;
3373 case TGSI_OPCODE_LIT:
3374 handleLIT(dst0);
3375 break;
3376 case TGSI_OPCODE_ISSG:
3377 case TGSI_OPCODE_SSG:
3378 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3379 src0 = fetchSrc(0, c);
3380 val0 = getScratch();
3381 val1 = getScratch();
3382 mkCmp(OP_SET, CC_GT, srcTy, val0, srcTy, src0, zero);
3383 mkCmp(OP_SET, CC_LT, srcTy, val1, srcTy, src0, zero);
3384 if (srcTy == TYPE_F32)
3385 mkOp2(OP_SUB, TYPE_F32, dst0[c], val0, val1);
3386 else
3387 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
3388 }
3389 break;
3390 case TGSI_OPCODE_UCMP:
3391 srcTy = TYPE_U32;
3392 FALLTHROUGH;
3393 case TGSI_OPCODE_CMP:
3394 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3395 src0 = fetchSrc(0, c);
3396 src1 = fetchSrc(1, c);
3397 src2 = fetchSrc(2, c);
3398 if (src1 == src2)
3399 mkMov(dst0[c], src1);
3400 else
3401 mkCmp(OP_SLCT, (srcTy == TYPE_F32) ? CC_LT : CC_NE,
3402 srcTy, dst0[c], srcTy, src1, src2, src0);
3403 }
3404 break;
3405 case TGSI_OPCODE_FRC:
3406 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3407 src0 = fetchSrc(0, c);
3408 val0 = getScratch();
3409 mkOp1(OP_FLOOR, TYPE_F32, val0, src0);
3410 mkOp2(OP_SUB, TYPE_F32, dst0[c], src0, val0);
3411 }
3412 break;
3413 case TGSI_OPCODE_ROUND:
3414 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3415 mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F32, fetchSrc(0, c))
3416 ->rnd = ROUND_NI;
3417 break;
3418 case TGSI_OPCODE_SLT:
3419 case TGSI_OPCODE_SGE:
3420 case TGSI_OPCODE_SEQ:
3421 case TGSI_OPCODE_SGT:
3422 case TGSI_OPCODE_SLE:
3423 case TGSI_OPCODE_SNE:
3424 case TGSI_OPCODE_FSEQ:
3425 case TGSI_OPCODE_FSGE:
3426 case TGSI_OPCODE_FSLT:
3427 case TGSI_OPCODE_FSNE:
3428 case TGSI_OPCODE_ISGE:
3429 case TGSI_OPCODE_ISLT:
3430 case TGSI_OPCODE_USEQ:
3431 case TGSI_OPCODE_USGE:
3432 case TGSI_OPCODE_USLT:
3433 case TGSI_OPCODE_USNE:
3434 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3435 src0 = fetchSrc(0, c);
3436 src1 = fetchSrc(1, c);
3437 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
3438 }
3439 break;
3440 case TGSI_OPCODE_VOTE_ALL:
3441 case TGSI_OPCODE_VOTE_ANY:
3442 case TGSI_OPCODE_VOTE_EQ:
3443 val0 = new_LValue(func, FILE_PREDICATE);
3444 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3445 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, c), zero);
3446 mkOp1(op, dstTy, val0, val0)
3447 ->subOp = tgsi::opcodeToSubOp(tgsi.getOpcode());
3448 mkCvt(OP_CVT, TYPE_U32, dst0[c], TYPE_U8, val0);
3449 }
3450 break;
3451 case TGSI_OPCODE_BALLOT:
3452 if (!tgsi.getDst(0).isMasked(0)) {
3453 val0 = new_LValue(func, FILE_PREDICATE);
3454 mkCmp(OP_SET, CC_NE, TYPE_U32, val0, TYPE_U32, fetchSrc(0, 0), zero);
3455 mkOp1(op, TYPE_U32, dst0[0], val0)->subOp = NV50_IR_SUBOP_VOTE_ANY;
3456 }
3457 if (!tgsi.getDst(0).isMasked(1))
3458 mkMov(dst0[1], zero, TYPE_U32);
3459 break;
3460 case TGSI_OPCODE_READ_FIRST:
3461 // ReadFirstInvocationARB(src) is implemented as
3462 // ReadInvocationARB(src, findLSB(ballot(true)))
3463 val0 = getScratch();
3464 mkOp1(OP_VOTE, TYPE_U32, val0, mkImm(1))->subOp = NV50_IR_SUBOP_VOTE_ANY;
3465 mkOp1(OP_BREV, TYPE_U32, val0, val0);
3466 mkOp1(OP_BFIND, TYPE_U32, val0, val0)->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3467 src1 = val0;
3468 FALLTHROUGH;
3469 case TGSI_OPCODE_READ_INVOC:
3470 if (tgsi.getOpcode() == TGSI_OPCODE_READ_INVOC)
3471 src1 = fetchSrc(1, 0);
3472 else
3473 src1 = val0;
3474 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3475 geni = mkOp3(op, dstTy, dst0[c], fetchSrc(0, c), src1, mkImm(0x1f));
3476 geni->subOp = NV50_IR_SUBOP_SHFL_IDX;
3477 }
3478 break;
3479 case TGSI_OPCODE_CLOCK:
3480 // Stick the 32-bit clock into the high dword of the logical result.
3481 if (!tgsi.getDst(0).isMasked(0))
3482 mkOp1(OP_MOV, TYPE_U32, dst0[0], zero);
3483 if (!tgsi.getDst(0).isMasked(1))
3484 mkOp1(OP_RDSV, TYPE_U32, dst0[1], mkSysVal(SV_CLOCK, 0))->fixed = 1;
3485 break;
3486 case TGSI_OPCODE_READ_HELPER:
3487 if (!tgsi.getDst(0).isMasked(0))
3488 mkOp1(OP_RDSV, TYPE_U32, dst0[0], mkSysVal(SV_THREAD_KILL, 0))
3489 ->fixed = 1;
3490 break;
3491 case TGSI_OPCODE_KILL_IF:
3492 val0 = new_LValue(func, FILE_PREDICATE);
3493 mask = 0;
3494 for (c = 0; c < 4; ++c) {
3495 const int s = tgsi.getSrc(0).getSwizzle(c);
3496 if (mask & (1 << s))
3497 continue;
3498 mask |= 1 << s;
3499 mkCmp(OP_SET, CC_LT, TYPE_F32, val0, TYPE_F32, fetchSrc(0, c), zero);
3500 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_P, val0);
3501 }
3502 break;
3503 case TGSI_OPCODE_KILL:
3504 case TGSI_OPCODE_DEMOTE:
3505 // TODO: Should we make KILL exit that invocation? Some old shaders
3506 // don't like that.
3507 mkOp(OP_DISCARD, TYPE_NONE, NULL);
3508 break;
3509 case TGSI_OPCODE_TEX:
3510 case TGSI_OPCODE_TEX_LZ:
3511 case TGSI_OPCODE_TXB:
3512 case TGSI_OPCODE_TXL:
3513 case TGSI_OPCODE_TXP:
3514 case TGSI_OPCODE_LODQ:
3515 // R S L C Dx Dy
3516 handleTEX(dst0, 1, 1, 0x03, 0x0f, 0x00, 0x00);
3517 break;
3518 case TGSI_OPCODE_TXD:
3519 handleTEX(dst0, 3, 3, 0x03, 0x0f, 0x10, 0x20);
3520 break;
3521 case TGSI_OPCODE_TG4:
3522 handleTEX(dst0, 2, 2, 0x03, 0x0f, 0x00, 0x00);
3523 break;
3524 case TGSI_OPCODE_TEX2:
3525 handleTEX(dst0, 2, 2, 0x03, 0x10, 0x00, 0x00);
3526 break;
3527 case TGSI_OPCODE_TXB2:
3528 case TGSI_OPCODE_TXL2:
3529 handleTEX(dst0, 2, 2, 0x10, 0x0f, 0x00, 0x00);
3530 break;
3531 case TGSI_OPCODE_SAMPLE:
3532 case TGSI_OPCODE_SAMPLE_B:
3533 case TGSI_OPCODE_SAMPLE_D:
3534 case TGSI_OPCODE_SAMPLE_L:
3535 case TGSI_OPCODE_SAMPLE_C:
3536 case TGSI_OPCODE_SAMPLE_C_LZ:
3537 handleTEX(dst0, 1, 2, 0x30, 0x30, 0x30, 0x40);
3538 break;
3539 case TGSI_OPCODE_TXF_LZ:
3540 case TGSI_OPCODE_TXF:
3541 handleTXF(dst0, 1, 0x03);
3542 break;
3543 case TGSI_OPCODE_SAMPLE_I:
3544 handleTXF(dst0, 1, 0x03);
3545 break;
3546 case TGSI_OPCODE_SAMPLE_I_MS:
3547 handleTXF(dst0, 1, 0x20);
3548 break;
3549 case TGSI_OPCODE_TXQ:
3550 case TGSI_OPCODE_SVIEWINFO:
3551 handleTXQ(dst0, TXQ_DIMS, 1);
3552 break;
3553 case TGSI_OPCODE_TXQS:
3554 // The TXQ_TYPE query returns samples in its 3rd arg, but we need it to
3555 // be in .x
3556 dst0[1] = dst0[2] = dst0[3] = NULL;
3557 std::swap(dst0[0], dst0[2]);
3558 handleTXQ(dst0, TXQ_TYPE, 0);
3559 std::swap(dst0[0], dst0[2]);
3560 break;
3561 case TGSI_OPCODE_FBFETCH:
3562 handleFBFETCH(dst0);
3563 break;
3564 case TGSI_OPCODE_F2I:
3565 case TGSI_OPCODE_F2U:
3566 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3567 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c))->rnd = ROUND_Z;
3568 break;
3569 case TGSI_OPCODE_I2F:
3570 case TGSI_OPCODE_U2F:
3571 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3572 mkCvt(OP_CVT, dstTy, dst0[c], srcTy, fetchSrc(0, c));
3573 break;
3574 case TGSI_OPCODE_PK2H:
3575 val0 = getScratch();
3576 val1 = getScratch();
3577 mkCvt(OP_CVT, TYPE_F16, val0, TYPE_F32, fetchSrc(0, 0));
3578 mkCvt(OP_CVT, TYPE_F16, val1, TYPE_F32, fetchSrc(0, 1));
3579 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi)
3580 mkOp3(OP_INSBF, TYPE_U32, dst0[c], val1, mkImm(0x1010), val0);
3581 break;
3582 case TGSI_OPCODE_UP2H:
3583 src0 = fetchSrc(0, 0);
3584 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3585 geni = mkCvt(OP_CVT, TYPE_F32, dst0[c], TYPE_F16, src0);
3586 geni->subOp = c & 1;
3587 }
3588 break;
3589 case TGSI_OPCODE_EMIT:
3590 /* export the saved viewport index */
3591 if (viewport != NULL) {
3592 Symbol *vpSym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_U32,
3593 info_out->out[info->io.viewportId].slot[0] * 4);
3594 mkStore(OP_EXPORT, TYPE_U32, vpSym, NULL, viewport);
3595 }
3596 /* handle user clip planes for each emitted vertex */
3597 if (info_out->io.genUserClip > 0)
3598 handleUserClipPlanes();
3599 FALLTHROUGH;
3600 case TGSI_OPCODE_ENDPRIM:
3601 {
3602 // get vertex stream (must be immediate)
3603 unsigned int stream = tgsi.getSrc(0).getValueU32(0, code->immd.data);
3604 if (stream && op == OP_RESTART)
3605 break;
3606 if (info_out->prop.gp.maxVertices == 0)
3607 break;
3608 src0 = mkImm(stream);
3609 mkOp1(op, TYPE_U32, NULL, src0)->fixed = 1;
3610 break;
3611 }
3612 case TGSI_OPCODE_IF:
3613 case TGSI_OPCODE_UIF:
3614 {
3615 BasicBlock *ifBB = new BasicBlock(func);
3616
3617 bb->cfg.attach(&ifBB->cfg, Graph::Edge::TREE);
3618 condBBs.push(bb);
3619 joinBBs.push(bb);
3620
3621 mkFlow(OP_BRA, NULL, CC_NOT_P, fetchSrc(0, 0))->setType(srcTy);
3622
3623 setPosition(ifBB, true);
3624 }
3625 break;
3626 case TGSI_OPCODE_ELSE:
3627 {
3628 BasicBlock *elseBB = new BasicBlock(func);
3629 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3630
3631 forkBB->cfg.attach(&elseBB->cfg, Graph::Edge::TREE);
3632 condBBs.push(bb);
3633
3634 forkBB->getExit()->asFlow()->target.bb = elseBB;
3635 if (!bb->isTerminated())
3636 mkFlow(OP_BRA, NULL, CC_ALWAYS, NULL);
3637
3638 setPosition(elseBB, true);
3639 }
3640 break;
3641 case TGSI_OPCODE_ENDIF:
3642 {
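// Create the block where both paths of the conditional reconverge and
// retarget the pending branch (from IF or ELSE) to it.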
3643 BasicBlock *convBB = new BasicBlock(func);
3644 BasicBlock *prevBB = reinterpret_cast<BasicBlock *>(condBBs.pop().u.p);
3645 BasicBlock *forkBB = reinterpret_cast<BasicBlock *>(joinBBs.pop().u.p);
3646
3647 if (!bb->isTerminated()) {
3648 // we only want join if none of the clauses ended with CONT/BREAK/RET
3649 if (prevBB->getExit()->op == OP_BRA && joinBBs.getSize() < 6)
3650 insertConvergenceOps(convBB, forkBB);
3651 mkFlow(OP_BRA, convBB, CC_ALWAYS, NULL);
3652 bb->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3653 }
3654
3655 if (prevBB->getExit()->op == OP_BRA) {
3656 prevBB->cfg.attach(&convBB->cfg, Graph::Edge::FORWARD);
3657 prevBB->getExit()->asFlow()->target.bb = convBB;
3658 }
3659 setPosition(convBB, true);
3660 }
3661 break;
3662 case TGSI_OPCODE_BGNLOOP:
3663 {
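// lbgnBB is the loop header (continue target), lbrkBB the block after the
// loop (break target); PREBREAK/PRECONT record these targets before the
// loop body so that BRK/CONT know where to go.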
3664 BasicBlock *lbgnBB = new BasicBlock(func);
3665 BasicBlock *lbrkBB = new BasicBlock(func);
3666
3667 loopBBs.push(lbgnBB);
3668 breakBBs.push(lbrkBB);
3669 if (loopBBs.getSize() > func->loopNestingBound)
3670 func->loopNestingBound++;
3671
3672 mkFlow(OP_PREBREAK, lbrkBB, CC_ALWAYS, NULL);
3673
3674 bb->cfg.attach(&lbgnBB->cfg, Graph::Edge::TREE);
3675 setPosition(lbgnBB, true);
3676 mkFlow(OP_PRECONT, lbgnBB, CC_ALWAYS, NULL);
3677
3678 info_out->loops++;
3679 }
3680 break;
3681 case TGSI_OPCODE_ENDLOOP:
3682 {
3683 BasicBlock *loopBB = reinterpret_cast<BasicBlock *>(loopBBs.pop().u.p);
3684
3685 if (!bb->isTerminated()) {
3686 mkFlow(OP_CONT, loopBB, CC_ALWAYS, NULL);
3687 bb->cfg.attach(&loopBB->cfg, Graph::Edge::BACK);
3688 }
3689 setPosition(reinterpret_cast<BasicBlock *>(breakBBs.pop().u.p), true);
3690
3691 // If the loop never breaks (e.g. it only contains RETs), there is no way
3692 // to reach the break bb. However, BGNLOOP has already emitted a PREBREAK
3693 // to it, so it must be kept in the CFG.
3694 if (getBB()->cfg.incidentCount() == 0)
3695 loopBB->cfg.attach(&getBB()->cfg, Graph::Edge::TREE);
3696 }
3697 break;
3698 case TGSI_OPCODE_BRK:
3699 {
3700 if (bb->isTerminated())
3701 break;
3702 BasicBlock *brkBB = reinterpret_cast<BasicBlock *>(breakBBs.peek().u.p);
3703 mkFlow(OP_BREAK, brkBB, CC_ALWAYS, NULL);
3704 bb->cfg.attach(&brkBB->cfg, Graph::Edge::CROSS);
3705 }
3706 break;
3707 case TGSI_OPCODE_CONT:
3708 {
3709 if (bb->isTerminated())
3710 break;
3711 BasicBlock *contBB = reinterpret_cast<BasicBlock *>(loopBBs.peek().u.p);
3712 mkFlow(OP_CONT, contBB, CC_ALWAYS, NULL);
3713 contBB->explicitCont = true;
3714 bb->cfg.attach(&contBB->cfg, Graph::Edge::BACK);
3715 }
3716 break;
3717 case TGSI_OPCODE_BGNSUB:
3718 {
3719 Subroutine *s = getSubroutine(ip);
3720 BasicBlock *entry = new BasicBlock(s->f);
3721 BasicBlock *leave = new BasicBlock(s->f);
3722
3723 // multiple entrypoints possible, keep the graph connected
3724 if (prog->getType() == Program::TYPE_COMPUTE)
3725 prog->main->call.attach(&s->f->call, Graph::Edge::TREE);
3726
3727 sub.cur = s;
3728 s->f->setEntry(entry);
3729 s->f->setExit(leave);
3730 setPosition(entry, true);
3731 return true;
3732 }
3733 case TGSI_OPCODE_ENDSUB:
3734 {
3735 sub.cur = getSubroutine(prog->main);
3736 setPosition(BasicBlock::get(sub.cur->f->cfg.getRoot()), true);
3737 return true;
3738 }
3739 case TGSI_OPCODE_CAL:
3740 {
3741 Subroutine *s = getSubroutine(tgsi.getLabel());
3742 mkFlow(OP_CALL, s->f, CC_ALWAYS, NULL);
3743 func->call.attach(&s->f->call, Graph::Edge::TREE);
3744 return true;
3745 }
3746 case TGSI_OPCODE_RET:
3747 {
3748 if (bb->isTerminated())
3749 return true;
3750 BasicBlock *leave = BasicBlock::get(func->cfgExit);
3751
3752 if (!isEndOfSubroutine(ip + 1)) {
3753 // insert a PRERET at the entry if this is an early return
3754 // (only needed for sharing code in the epilogue)
3755 BasicBlock *root = BasicBlock::get(func->cfg.getRoot());
3756 if (root->getEntry() == NULL || root->getEntry()->op != OP_PRERET) {
3757 BasicBlock *pos = getBB();
3758 setPosition(root, false);
3759 mkFlow(OP_PRERET, leave, CC_ALWAYS, NULL)->fixed = 1;
3760 setPosition(pos, true);
3761 }
3762 }
3763 mkFlow(OP_RET, NULL, CC_ALWAYS, NULL)->fixed = 1;
3764 bb->cfg.attach(&leave->cfg, Graph::Edge::CROSS);
3765 }
3766 break;
3767 case TGSI_OPCODE_END:
3768 {
3769 // attach and generate epilogue code
3770 BasicBlock *epilogue = BasicBlock::get(func->cfgExit);
3771 bb->cfg.attach(&epilogue->cfg, Graph::Edge::TREE);
3772 setPosition(epilogue, true);
3773 if (prog->getType() == Program::TYPE_FRAGMENT)
3774 exportOutputs();
3775 if ((prog->getType() == Program::TYPE_VERTEX ||
3776 prog->getType() == Program::TYPE_TESSELLATION_EVAL
3777 ) && info_out->io.genUserClip > 0)
3778 handleUserClipPlanes();
3779 mkOp(OP_EXIT, TYPE_NONE, NULL)->terminator = 1;
3780 }
3781 break;
3782 case TGSI_OPCODE_SWITCH:
3783 case TGSI_OPCODE_CASE:
3784 ERROR("switch/case opcode encountered, should have been lowered\n");
3785 abort();
3786 break;
3787 case TGSI_OPCODE_LOAD:
3788 handleLOAD(dst0);
3789 break;
3790 case TGSI_OPCODE_STORE:
3791 handleSTORE();
3792 break;
3793 case TGSI_OPCODE_BARRIER:
3794 geni = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
3795 geni->fixed = 1;
3796 geni->subOp = NV50_IR_SUBOP_BAR_SYNC;
3797 break;
3798 case TGSI_OPCODE_MEMBAR:
3799 {
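// A CTA-scope membar is enough when only thread-group/shared ordering was
// requested; anything wider needs a global-scope membar.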
3800 uint32_t level = tgsi.getSrc(0).getValueU32(0, code->immd.data);
3801 geni = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
3802 geni->fixed = 1;
3803 if (!(level & ~(TGSI_MEMBAR_THREAD_GROUP | TGSI_MEMBAR_SHARED)))
3804 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
3805 else
3806 geni->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
3807 }
3808 break;
3809 case TGSI_OPCODE_ATOMUADD:
3810 case TGSI_OPCODE_ATOMXCHG:
3811 case TGSI_OPCODE_ATOMCAS:
3812 case TGSI_OPCODE_ATOMAND:
3813 case TGSI_OPCODE_ATOMOR:
3814 case TGSI_OPCODE_ATOMXOR:
3815 case TGSI_OPCODE_ATOMUMIN:
3816 case TGSI_OPCODE_ATOMIMIN:
3817 case TGSI_OPCODE_ATOMUMAX:
3818 case TGSI_OPCODE_ATOMIMAX:
3819 case TGSI_OPCODE_ATOMFADD:
3820 case TGSI_OPCODE_ATOMDEC_WRAP:
3821 case TGSI_OPCODE_ATOMINC_WRAP:
3822 handleATOM(dst0, dstTy, tgsi::opcodeToSubOp(tgsi.getOpcode()));
3823 break;
3824 case TGSI_OPCODE_RESQ:
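// RESQ on a buffer becomes a BUFQ of its size; on an image it becomes a
// SUQ texture query with one def per enabled destination channel.
// Bindless images pass their handle through the indirect source.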
3825 if (tgsi.getSrc(0).getFile() == TGSI_FILE_BUFFER) {
3826 Value *ind = NULL;
3827 if (tgsi.getSrc(0).isIndirect(0))
3828 ind = fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, 0);
3829 geni = mkOp1(OP_BUFQ, TYPE_U32, dst0[0],
3830 makeSym(tgsi.getSrc(0).getFile(),
3831 tgsi.getSrc(0).getIndex(0), -1, 0, 0));
3832 if (ind)
3833 geni->setIndirect(0, 1, ind);
3834 } else {
3835 TexInstruction *texi = new_TexInstruction(func, OP_SUQ);
3836 for (int c = 0, d = 0; c < 4; ++c) {
3837 if (dst0[c]) {
3838 texi->setDef(d++, dst0[c]);
3839 texi->tex.mask |= 1 << c;
3840 }
3841 }
3842 if (tgsi.getSrc(0).getFile() == TGSI_FILE_IMAGE) {
3843 texi->tex.r = tgsi.getSrc(0).getIndex(0);
3844 if (tgsi.getSrc(0).isIndirect(0))
3845 texi->setIndirectR(fetchSrc(tgsi.getSrc(0).getIndirect(0), 0, NULL));
3846 } else {
3847 texi->tex.bindless = true;
3848 texi->setIndirectR(fetchSrc(0, 0));
3849 }
3850 texi->tex.target = tgsi.getImageTarget();
3851
3852 bb->insertTail(texi);
3853 }
3854 break;
3855 case TGSI_OPCODE_IBFE:
3856 case TGSI_OPCODE_UBFE:
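// EXTBF takes the bitfield as (width << 8) | offset: fold it into an
// immediate if both operands are immediates, otherwise assemble it at run
// time with INSBF (0x808 = insert 8 bits of width at bit offset 8).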
3857 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3858 src0 = fetchSrc(0, c);
3859 val0 = getScratch();
3860 if (tgsi.getSrc(1).getFile() == TGSI_FILE_IMMEDIATE &&
3861 tgsi.getSrc(2).getFile() == TGSI_FILE_IMMEDIATE) {
3862 loadImm(val0, (tgsi.getSrc(2).getValueU32(c, code->immd.data) << 8) |
3863 tgsi.getSrc(1).getValueU32(c, code->immd.data));
3864 } else {
3865 src1 = fetchSrc(1, c);
3866 src2 = fetchSrc(2, c);
3867 mkOp3(OP_INSBF, TYPE_U32, val0, src2, mkImm(0x808), src1);
3868 }
3869 mkOp2(OP_EXTBF, dstTy, dst0[c], src0, val0);
3870 }
3871 break;
3872 case TGSI_OPCODE_BFI:
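// Pack offset (src2) and width (src3) into (width << 8) | offset first,
// then use that descriptor to insert src1 into src0.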
3873 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3874 src0 = fetchSrc(0, c);
3875 src1 = fetchSrc(1, c);
3876 src2 = fetchSrc(2, c);
3877 src3 = fetchSrc(3, c);
3878 val0 = getScratch();
3879 mkOp3(OP_INSBF, TYPE_U32, val0, src3, mkImm(0x808), src2);
3880 mkOp3(OP_INSBF, TYPE_U32, dst0[c], src1, val0, src0);
3881 }
3882 break;
3883 case TGSI_OPCODE_LSB:
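// Find the lowest set bit: reverse the bits and locate the highest set
// bit; BFIND's SAMT subop returns 31 minus that position, which is the
// index of the lowest set bit of the original value.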
3884 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3885 src0 = fetchSrc(0, c);
3886 val0 = getScratch();
3887 mkOp1(OP_BREV, TYPE_U32, val0, src0);
3888 geni = mkOp1(OP_BFIND, TYPE_U32, dst0[c], val0);
3889 geni->subOp = NV50_IR_SUBOP_BFIND_SAMT;
3890 }
3891 break;
3892 case TGSI_OPCODE_IMSB:
3893 case TGSI_OPCODE_UMSB:
3894 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3895 src0 = fetchSrc(0, c);
3896 mkOp1(OP_BFIND, srcTy, dst0[c], src0);
3897 }
3898 break;
3899 case TGSI_OPCODE_BREV:
3900 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3901 src0 = fetchSrc(0, c);
3902 mkOp1(OP_BREV, TYPE_U32, dst0[c], src0);
3903 }
3904 break;
3905 case TGSI_OPCODE_POPC:
3906 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3907 src0 = fetchSrc(0, c);
3908 mkOp2(OP_POPCNT, TYPE_U32, dst0[c], src0, src0);
3909 }
3910 break;
3911 case TGSI_OPCODE_INTERP_CENTROID:
3912 case TGSI_OPCODE_INTERP_SAMPLE:
3913 case TGSI_OPCODE_INTERP_OFFSET:
3914 handleINTERP(dst0);
3915 break;
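// TGSI stores 64-bit operands as pairs of 32-bit channels. The cases below
// MERGE each pair into a single 64-bit value, apply the operation, and
// SPLIT the result back into two 32-bit halves where needed.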
3916 case TGSI_OPCODE_I642F:
3917 case TGSI_OPCODE_U642F:
3918 case TGSI_OPCODE_D2I:
3919 case TGSI_OPCODE_D2U:
3920 case TGSI_OPCODE_D2F: {
3921 int pos = 0;
3922 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3923 Value *dreg = getSSA(8);
3924 src0 = fetchSrc(0, pos);
3925 src1 = fetchSrc(0, pos + 1);
3926 mkOp2(OP_MERGE, TYPE_U64, dreg, src0, src1);
3927 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst0[c], srcTy, dreg);
3928 if (!isFloatType(dstTy))
3929 cvt->rnd = ROUND_Z;
3930 pos += 2;
3931 }
3932 break;
3933 }
3934 case TGSI_OPCODE_I2I64:
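// Sign-extend to 64 bits: the low word is the source, the high word is its
// sign, obtained by an arithmetic shift right of 31 (U2I64 below
// zero-fills the high word instead).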
3935 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3936 dst0[c] = fetchSrc(0, c / 2);
3937 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(NULL, 31));
3938 c++;
3939 }
3940 break;
3941 case TGSI_OPCODE_U2I64:
3942 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3943 dst0[c] = fetchSrc(0, c / 2);
3944 dst0[c + 1] = zero;
3945 c++;
3946 }
3947 break;
3948 case TGSI_OPCODE_F2I64:
3949 case TGSI_OPCODE_F2U64:
3950 case TGSI_OPCODE_I2D:
3951 case TGSI_OPCODE_U2D:
3952 case TGSI_OPCODE_F2D:
3953 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3954 Value *dreg = getSSA(8);
3955 Instruction *cvt = mkCvt(OP_CVT, dstTy, dreg, srcTy, fetchSrc(0, c / 2));
3956 if (!isFloatType(dstTy))
3957 cvt->rnd = ROUND_Z;
3958 mkSplit(&dst0[c], 4, dreg);
3959 c++;
3960 }
3961 break;
3962 case TGSI_OPCODE_D2I64:
3963 case TGSI_OPCODE_D2U64:
3964 case TGSI_OPCODE_I642D:
3965 case TGSI_OPCODE_U642D:
3966 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3967 src0 = getSSA(8);
3968 Value *dst = getSSA(8), *tmp[2];
3969 tmp[0] = fetchSrc(0, c);
3970 tmp[1] = fetchSrc(0, c + 1);
3971 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3972 Instruction *cvt = mkCvt(OP_CVT, dstTy, dst, srcTy, src0);
3973 if (!isFloatType(dstTy))
3974 cvt->rnd = ROUND_Z;
3975 mkSplit(&dst0[c], 4, dst);
3976 c++;
3977 }
3978 break;
3979 case TGSI_OPCODE_I64NEG:
3980 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3981 src0 = getSSA(8);
3982 Value *dst = getSSA(8), *tmp[2];
3983 tmp[0] = fetchSrc(0, c);
3984 tmp[1] = fetchSrc(0, c + 1);
3985 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
3986 mkOp2(OP_SUB, dstTy, dst, zero, src0);
3987 mkSplit(&dst0[c], 4, dst);
3988 c++;
3989 }
3990 break;
3991 case TGSI_OPCODE_I64ABS:
3992 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
3993 src0 = getSSA(8);
3994 Value *neg = getSSA(8), *srcComp[2], *negComp[2];
3995 srcComp[0] = fetchSrc(0, c);
3996 srcComp[1] = fetchSrc(0, c + 1);
3997 mkOp2(OP_MERGE, TYPE_U64, src0, srcComp[0], srcComp[1]);
3998 mkOp2(OP_SUB, dstTy, neg, zero, src0);
3999 mkSplit(negComp, 4, neg);
4000 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c], TYPE_S32,
4001 negComp[0], srcComp[0], srcComp[1]);
4002 mkCmp(OP_SLCT, CC_LT, TYPE_S32, dst0[c + 1], TYPE_S32,
4003 negComp[1], srcComp[1], srcComp[1]);
4004 c++;
4005 }
4006 break;
4007 case TGSI_OPCODE_DABS:
4008 case TGSI_OPCODE_DNEG:
4009 case TGSI_OPCODE_DRCP:
4010 case TGSI_OPCODE_DSQRT:
4011 case TGSI_OPCODE_DRSQ:
4012 case TGSI_OPCODE_DTRUNC:
4013 case TGSI_OPCODE_DCEIL:
4014 case TGSI_OPCODE_DFLR:
4015 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4016 src0 = getSSA(8);
4017 Value *dst = getSSA(8), *tmp[2];
4018 tmp[0] = fetchSrc(0, c);
4019 tmp[1] = fetchSrc(0, c + 1);
4020 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4021 mkOp1(op, dstTy, dst, src0);
4022 mkSplit(&dst0[c], 4, dst);
4023 c++;
4024 }
4025 break;
4026 case TGSI_OPCODE_DFRAC:
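// fract(x) = x - floor(x), computed on the merged 64-bit value.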
4027 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4028 src0 = getSSA(8);
4029 Value *dst = getSSA(8), *tmp[2];
4030 tmp[0] = fetchSrc(0, c);
4031 tmp[1] = fetchSrc(0, c + 1);
4032 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4033 mkOp1(OP_FLOOR, TYPE_F64, dst, src0);
4034 mkOp2(OP_SUB, TYPE_F64, dst, src0, dst);
4035 mkSplit(&dst0[c], 4, dst);
4036 c++;
4037 }
4038 break;
4039 case TGSI_OPCODE_U64SEQ:
4040 case TGSI_OPCODE_U64SNE:
4041 case TGSI_OPCODE_U64SLT:
4042 case TGSI_OPCODE_U64SGE:
4043 case TGSI_OPCODE_I64SLT:
4044 case TGSI_OPCODE_I64SGE:
4045 case TGSI_OPCODE_DSLT:
4046 case TGSI_OPCODE_DSGE:
4047 case TGSI_OPCODE_DSEQ:
4048 case TGSI_OPCODE_DSNE: {
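// 64-bit/double comparisons consume full 64-bit operands but produce a
// 32-bit boolean per channel, so the source position advances by two for
// every destination channel written.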
4049 int pos = 0;
4050 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4051 Value *tmp[2];
4052
4053 src0 = getSSA(8);
4054 src1 = getSSA(8);
4055 tmp[0] = fetchSrc(0, pos);
4056 tmp[1] = fetchSrc(0, pos + 1);
4057 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4058 tmp[0] = fetchSrc(1, pos);
4059 tmp[1] = fetchSrc(1, pos + 1);
4060 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4061 mkCmp(op, tgsi.getSetCond(), dstTy, dst0[c], srcTy, src0, src1);
4062 pos += 2;
4063 }
4064 break;
4065 }
4066 case TGSI_OPCODE_U64MIN:
4067 case TGSI_OPCODE_U64MAX:
4068 case TGSI_OPCODE_I64MIN:
4069 case TGSI_OPCODE_I64MAX: {
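// 64-bit min/max is done in 32-bit halves: the high-word MINMAX writes a
// flag that the low-word MINMAX consumes, so the low words resolve the
// case where the high words compare equal.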
4070 dstTy = isSignedIntType(dstTy) ? TYPE_S32 : TYPE_U32;
4071 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4072 Value *flag = getSSA(1, FILE_FLAGS);
4073 src0 = fetchSrc(0, c + 1);
4074 src1 = fetchSrc(1, c + 1);
4075 geni = mkOp2(op, dstTy, dst0[c + 1], src0, src1);
4076 geni->subOp = NV50_IR_SUBOP_MINMAX_HIGH;
4077 geni->setFlagsDef(1, flag);
4078
4079 src0 = fetchSrc(0, c);
4080 src1 = fetchSrc(1, c);
4081 geni = mkOp2(op, TYPE_U32, dst0[c], src0, src1);
4082 geni->subOp = NV50_IR_SUBOP_MINMAX_LOW;
4083 geni->setFlagsSrc(2, flag);
4084
4085 c++;
4086 }
4087 break;
4088 }
4089 case TGSI_OPCODE_U64SHL:
4090 case TGSI_OPCODE_I64SHR:
4091 case TGSI_OPCODE_U64SHR:
4092 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4093 src0 = getSSA(8);
4094 Value *dst = getSSA(8), *tmp[2];
4095 tmp[0] = fetchSrc(0, c);
4096 tmp[1] = fetchSrc(0, c + 1);
4097 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4098 // Theoretically src1 is a 64-bit value but in practice only the low
4099 // bits matter. The IR expects this to be a 32-bit value.
4100 src1 = fetchSrc(1, c);
4101 mkOp2(op, dstTy, dst, src0, src1);
4102 mkSplit(&dst0[c], 4, dst);
4103 c++;
4104 }
4105 break;
4106 case TGSI_OPCODE_U64ADD:
4107 case TGSI_OPCODE_U64MUL:
4108 case TGSI_OPCODE_DADD:
4109 case TGSI_OPCODE_DMUL:
4110 case TGSI_OPCODE_DDIV:
4111 case TGSI_OPCODE_DMAX:
4112 case TGSI_OPCODE_DMIN:
4113 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4114 src0 = getSSA(8);
4115 src1 = getSSA(8);
4116 Value *dst = getSSA(8), *tmp[2];
4117 tmp[0] = fetchSrc(0, c);
4118 tmp[1] = fetchSrc(0, c + 1);
4119 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4120 tmp[0] = fetchSrc(1, c);
4121 tmp[1] = fetchSrc(1, c + 1);
4122 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4123 mkOp2(op, dstTy, dst, src0, src1);
4124 mkSplit(&dst0[c], 4, dst);
4125 c++;
4126 }
4127 break;
4128 case TGSI_OPCODE_DMAD:
4129 case TGSI_OPCODE_DFMA:
4130 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4131 src0 = getSSA(8);
4132 src1 = getSSA(8);
4133 src2 = getSSA(8);
4134 Value *dst = getSSA(8), *tmp[2];
4135 tmp[0] = fetchSrc(0, c);
4136 tmp[1] = fetchSrc(0, c + 1);
4137 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4138 tmp[0] = fetchSrc(1, c);
4139 tmp[1] = fetchSrc(1, c + 1);
4140 mkOp2(OP_MERGE, TYPE_U64, src1, tmp[0], tmp[1]);
4141 tmp[0] = fetchSrc(2, c);
4142 tmp[1] = fetchSrc(2, c + 1);
4143 mkOp2(OP_MERGE, TYPE_U64, src2, tmp[0], tmp[1]);
4144 mkOp3(op, dstTy, dst, src0, src1, src2);
4145 mkSplit(&dst0[c], 4, dst);
4146 c++;
4147 }
4148 break;
4149 case TGSI_OPCODE_DROUND:
4150 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4151 src0 = getSSA(8);
4152 Value *dst = getSSA(8), *tmp[2];
4153 tmp[0] = fetchSrc(0, c);
4154 tmp[1] = fetchSrc(0, c + 1);
4155 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4156 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F64, src0)
4157 ->rnd = ROUND_NI;
4158 mkSplit(&dst0[c], 4, dst);
4159 c++;
4160 }
4161 break;
4162 case TGSI_OPCODE_DSSG:
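// sign(x) = (x > 0) - (x < 0): the two SETs yield 1.0f or 0.0f, the
// subtraction gives -1/0/+1 in F32, which is then converted back to F64.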
4163 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4164 src0 = getSSA(8);
4165 Value *dst = getSSA(8), *dstF32 = getSSA(), *tmp[2];
4166 tmp[0] = fetchSrc(0, c);
4167 tmp[1] = fetchSrc(0, c + 1);
4168 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4169
4170 val0 = getScratch();
4171 val1 = getScratch();
4172 // The zero is wrong here since it's only 32-bit, but it works out in
4173 // the end since it gets replaced with $r63.
4174 mkCmp(OP_SET, CC_GT, TYPE_F32, val0, TYPE_F64, src0, zero);
4175 mkCmp(OP_SET, CC_LT, TYPE_F32, val1, TYPE_F64, src0, zero);
4176 mkOp2(OP_SUB, TYPE_F32, dstF32, val0, val1);
4177 mkCvt(OP_CVT, TYPE_F64, dst, TYPE_F32, dstF32);
4178 mkSplit(&dst0[c], 4, dst);
4179 c++;
4180 }
4181 break;
4182 case TGSI_OPCODE_I64SSG:
4183 FOR_EACH_DST_ENABLED_CHANNEL(0, c, tgsi) {
4184 src0 = getSSA(8);
4185 Value *tmp[2];
4186 tmp[0] = fetchSrc(0, c);
4187 tmp[1] = fetchSrc(0, c + 1);
4188 mkOp2(OP_MERGE, TYPE_U64, src0, tmp[0], tmp[1]);
4189
4190 val0 = getScratch();
4191 val1 = getScratch();
4192 mkCmp(OP_SET, CC_GT, TYPE_U32, val0, TYPE_S64, src0, zero);
4193 mkCmp(OP_SET, CC_LT, TYPE_U32, val1, TYPE_S64, src0, zero);
4194 mkOp2(OP_SUB, TYPE_S32, dst0[c], val1, val0);
4195 mkOp2(OP_SHR, TYPE_S32, dst0[c + 1], dst0[c], loadImm(0, 31));
4196 c++;
4197 }
4198 break;
4199 default:
4200 ERROR("unhandled TGSI opcode: %u\n", tgsi.getOpcode());
4201 assert(0);
4202 break;
4203 }
4204
4205 if (tgsi.dstCount() && tgsi.getOpcode() != TGSI_OPCODE_STORE) {
4206 for (c = 0; c < 4; ++c) {
4207 if (!dst0[c])
4208 continue;
4209 if (dst0[c] != rDst0[c])
4210 mkMov(rDst0[c], dst0[c]);
4211 storeDst(0, c, rDst0[c]);
4212 }
4213 }
4214 vtxBaseValid = 0;
4215
4216 return true;
4217 }
4218
4219 void
4220 Converter::exportOutputs()
4221 {
4222 if (info->io.alphaRefBase) {
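// Emulated alpha test: COLOR0's alpha is compared against the reference
// value loaded from the driver constant buffer, and fragments for which
// the comparison predicate ends up false are discarded.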
4223 for (unsigned int i = 0; i < info_out->numOutputs; ++i) {
4224 if (info_out->out[i].sn != TGSI_SEMANTIC_COLOR ||
4225 info_out->out[i].si != 0)
4226 continue;
4227 const unsigned int c = 3;
4228 if (!oData.exists(sub.cur->values, i, c))
4229 continue;
4230 Value *val = oData.load(sub.cur->values, i, c, NULL);
4231 if (!val)
4232 continue;
4233
4234 Symbol *ref = mkSymbol(FILE_MEMORY_CONST, info->io.auxCBSlot,
4235 TYPE_U32, info->io.alphaRefBase);
4236 Value *pred = new_LValue(func, FILE_PREDICATE);
4237 mkCmp(OP_SET, CC_TR, TYPE_U32, pred, TYPE_F32, val,
4238 mkLoadv(TYPE_U32, ref, NULL))
4239 ->subOp = 1;
4240 mkOp(OP_DISCARD, TYPE_NONE, NULL)->setPredicate(CC_NOT_P, pred);
4241 }
4242 }
4243
4244 for (unsigned int i = 0; i < info_out->numOutputs; ++i) {
4245 for (unsigned int c = 0; c < 4; ++c) {
4246 if (!oData.exists(sub.cur->values, i, c))
4247 continue;
4248 Symbol *sym = mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32,
4249 info_out->out[i].slot[c] * 4);
4250 Value *val = oData.load(sub.cur->values, i, c, NULL);
4251 if (val) {
4252 if (info_out->out[i].sn == TGSI_SEMANTIC_POSITION)
4253 mkOp1(OP_SAT, TYPE_F32, val, val);
4254 mkStore(OP_EXPORT, TYPE_F32, sym, NULL, val);
4255 }
4256 }
4257 }
4258 }
4259
4260 Converter::Converter(Program *ir, const tgsi::Source *code, nv50_ir_prog_info_out *info_out)
4261 : ConverterCommon(ir, code->info, info_out),
4262 code(code),
4263 tgsi(NULL),
4264 tData(this), lData(this), aData(this), oData(this)
4265 {
4266 const unsigned tSize = code->fileSize(TGSI_FILE_TEMPORARY);
4267 const unsigned aSize = code->fileSize(TGSI_FILE_ADDRESS);
4268 const unsigned oSize = code->fileSize(TGSI_FILE_OUTPUT);
4269
4270 tData.setup(TGSI_FILE_TEMPORARY, 0, 0, tSize, 4, 4, FILE_GPR, 0);
4271 lData.setup(TGSI_FILE_TEMPORARY, 1, 0, tSize, 4, 4, FILE_MEMORY_LOCAL, 0);
4272 aData.setup(TGSI_FILE_ADDRESS, 0, 0, aSize, 4, 4, FILE_GPR, 0);
4273 oData.setup(TGSI_FILE_OUTPUT, 0, 0, oSize, 4, 4, FILE_GPR, 0);
4274
4275 zero = mkImm((uint32_t)0);
4276
4277 vtxBaseValid = 0;
4278 }
4279
4280 Converter::~Converter()
4281 {
4282 }
4283
4284 inline const Converter::Location *
4285 Converter::BindArgumentsPass::getValueLocation(Subroutine *s, Value *v)
4286 {
4287 ValueMap::l_iterator it = s->values.l.find(v);
4288 return it == s->values.l.end() ? NULL : &it->second;
4289 }
4290
4291 template<typename T> inline void
4292 Converter::BindArgumentsPass::updateCallArgs(
4293 Instruction *i, void (Instruction::*setArg)(int, Value *),
4294 T (Function::*proto))
4295 {
4296 Function *g = i->asFlow()->target.fn;
4297 Subroutine *subg = conv.getSubroutine(g);
4298
4299 for (unsigned a = 0; a < (g->*proto).size(); ++a) {
4300 Value *v = (g->*proto)[a].get();
4301 const Converter::Location &l = *getValueLocation(subg, v);
4302 Converter::DataArray *array = conv.getArrayForFile(l.array, l.arrayIdx);
4303
4304 (i->*setArg)(a, array->acquire(sub->values, l.i, l.c));
4305 }
4306 }
4307
4308 template<typename T> inline void
4309 Converter::BindArgumentsPass::updatePrototype(
4310 BitSet *set, void (Function::*updateSet)(), T (Function::*proto))
4311 {
4312 (func->*updateSet)();
4313
4314 for (unsigned i = 0; i < set->getSize(); ++i) {
4315 Value *v = func->getLValue(i);
4316 const Converter::Location *l = getValueLocation(sub, v);
4317
4318 // only include values with a matching TGSI register
4319 if (set->test(i) && l && !conv.code->locals.count(*l))
4320 (func->*proto).push_back(v);
4321 }
4322 }
4323
4324 bool
4325 Converter::BindArgumentsPass::visit(Function *f)
4326 {
4327 sub = conv.getSubroutine(f);
4328
4329 for (ArrayList::Iterator bi = f->allBBlocks.iterator();
4330 !bi.end(); bi.next()) {
4331 for (Instruction *i = BasicBlock::get(bi)->getFirst();
4332 i; i = i->next) {
4333 if (i->op == OP_CALL && !i->asFlow()->builtin) {
4334 updateCallArgs(i, &Instruction::setSrc, &Function::ins);
4335 updateCallArgs(i, &Instruction::setDef, &Function::outs);
4336 }
4337 }
4338 }
4339
4340 if (func == prog->main /* && prog->getType() != Program::TYPE_COMPUTE */)
4341 return true;
4342 updatePrototype(&BasicBlock::get(f->cfg.getRoot())->liveSet,
4343 &Function::buildLiveSets, &Function::ins);
4344 updatePrototype(&BasicBlock::get(f->cfgExit)->defSet,
4345 &Function::buildDefSets, &Function::outs);
4346
4347 return true;
4348 }
4349
4350 bool
4351 Converter::run()
4352 {
4353 BasicBlock *entry = new BasicBlock(prog->main);
4354 BasicBlock *leave = new BasicBlock(prog->main);
4355
4356 prog->main->setEntry(entry);
4357 prog->main->setExit(leave);
4358
4359 setPosition(entry, true);
4360 sub.cur = getSubroutine(prog->main);
4361
4362 if (info_out->io.genUserClip > 0) {
4363 for (int c = 0; c < 4; ++c)
4364 clipVtx[c] = getScratch();
4365 }
4366
4367 switch (prog->getType()) {
4368 case Program::TYPE_TESSELLATION_CONTROL:
4369 outBase = mkOp2v(
4370 OP_SUB, TYPE_U32, getSSA(),
4371 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),
4372 mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));
4373 break;
4374 case Program::TYPE_FRAGMENT: {
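// gl_FragCoord.w is defined as 1/w, so read position.w once and take its
// reciprocal up front; the value is also used for perspective-correct
// interpolation.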
4375 Symbol *sv = mkSysVal(SV_POSITION, 3);
4376 fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);
4377 mkOp1(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);
4378 break;
4379 }
4380 default:
4381 break;
4382 }
4383
4384 if (info->io.viewportId >= 0)
4385 viewport = getScratch();
4386 else
4387 viewport = NULL;
4388
4389 for (ip = 0; ip < code->scan.num_instructions; ++ip) {
4390 if (!handleInstruction(&code->insns[ip]))
4391 return false;
4392 }
4393
4394 if (!BindArgumentsPass(*this).run(prog))
4395 return false;
4396
4397 return true;
4398 }
4399
4400 } // unnamed namespace
4401
4402 namespace nv50_ir {
4403
4404 bool
4405 Program::makeFromTGSI(struct nv50_ir_prog_info *info,
4406 struct nv50_ir_prog_info_out *info_out)
4407 {
4408 tgsi::Source src(info, info_out, this);
4409 if (!src.scanSource())
4410 return false;
4411 tlsSize = info_out->bin.tlsSpace;
4412
4413 Converter builder(this, &src, info_out);
4414 return builder.run();
4415 }
4416
4417 } // namespace nv50_ir
4418