/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
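//
// Illustrative example (made-up IR, not actual compiler output): given
//   mov u32 %r1 %r0
//   add u32 %r2 %r1 %r3
// the use of %r1 in the add is rewritten to %r0 and the mov gets deleted.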
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
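//
// Illustrative example (made-up IR): for the pair
//   split u32 %lo %hi (u64 %x)
//   merge u64 %y (%lo %hi)
// every use of %y is replaced by %x and the merge gets deleted.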
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative) {
      if (insn->op != OP_SET && insn->op != OP_SLCT &&
          insn->op != OP_SUB && insn->op != OP_XMAD)
         return;
      // XMAD is only commutative if neither the CBCC nor the MRG flag is set.
      if (insn->op == OP_XMAD &&
          (insn->subOp & NV50_IR_SUBOP_XMAD_CMODE_MASK) == NV50_IR_SUBOP_XMAD_CBCC)
         return;
      if (insn->op == OP_XMAD && (insn->subOp & NV50_IR_SUBOP_XMAD_MRG))
         return;
   }
   if (insn->src(1).getFile() != FILE_GPR)
      return;
   // This is the special OP_SET used for alpha testing; we can't reverse its
   // arguments, as that would confuse the fixup code.
   if (insn->op == OP_SET && insn->subOp)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources to inline the less frequently used source. That way,
   // optimistically, the instruction producing it can eventually be removed.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

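   // After a swap the instruction has to be fixed up: a comparison flips its
   // condition code (a < b is equivalent to b > a), an SLCT inverts its test,
   // and a SUB negates both operands, since (-b) - (-a) == a - b.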
   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SUB) {
      insn->src(0).mod = insn->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      insn->src(1).mod = insn->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
   } else
   if (insn->op == OP_XMAD) {
      // swap h1 flags
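      // (each H1 bit selects the high 16-bit half of the corresponding source,
      // so when the sources trade places the two bits trade places as well)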
      uint16_t h1 = (insn->subOp >> 1 & NV50_IR_SUBOP_XMAD_H1(0)) |
                    (insn->subOp << 1 & NV50_IR_SUBOP_XMAD_H1(1));
      insn->subOp = (insn->subOp & ~NV50_IR_SUBOP_XMAD_H1_MASK) | h1;
   }
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (ld->op == OP_LOAD && ld->subOp == NV50_IR_SUBOP_LOAD_LOCKED)
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   BuildUtil bld;
};

bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      bld.setPosition(i, false);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SHLADD) {
            if (!insn->src(2).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, bld.mkOp2v(
               OP_SHL, TYPE_U32, bld.getSSA(), insn->getSrc(0), insn->getSrc(1)));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   ConstantFolding() : foldCount(0) {}
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   /* true if i was deleted */
   bool opnd(Instruction *i, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   bool createMul(DataType ty, Value *def, Value *a, int64_t b, Value *c);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2)) {
         expr(i, src0, src1, src2);
      } else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1)) {
         expr(i, src0, src1);
      } else
      if (i->srcExists(0) && i->src(0).getImmediate(src0)) {
         if (opnd(i, src0, 0))
            continue;
      } else
      if (i->srcExists(1) && i->src(1).getImmediate(src1)) {
         if (opnd(i, src1, 1))
            continue;
      }
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn)
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
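   // SGXT sign-extends the low b bits of a, i.e. a signed bitfield extract
   // starting at bit 0: e.g. SGXT(0x80, 8) yields 0xffffff80. A width of 0
   // leaves the zero-initialized result.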
   case OP_SGXT: {
      int bits = b->data.u32;
      if (bits) {
         uint32_t data = a->data.u32 & (0xffffffff >> (32 - bits));
         if (bits < 32 && (data & (1 << (bits - 1))))
            data = data - (1 << bits);
         res.data.u32 = data;
      }
      break;
   }
   case OP_BMSK:
      res.data.u32 = ((1 << b->data.u32) - 1) << a->data.u32;
      break;
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         FALLTHROUGH;
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SUB:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 - b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
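   // EXTBF extracts a bitfield: (a >> offset) & ((1 << width) - 1), with sign
   // extension when the result type is signed. The shift pair below performs
   // the masking and the (sign or zero) extension in one go.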
   case OP_EXTBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. However it might not be anyway, as there may be other
      // restrictions, so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->dnz = 0;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV;
      if (i->saturate)
         unary(i, *i->getSrc(0)->asImm());
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
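   // LOP3_LUT computes an arbitrary three-input logic op per bit: subOp is an
   // 8-entry truth table indexed by the bit triple (a, b, c), so e.g. a LUT
   // value of 0xf0 simply selects a, and 0x96 is a ^ b ^ c.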
   case OP_LOP3_LUT:
      for (int n = 0; n < 32; n++) {
         uint8_t lut = ((a->data.u32 >> n) & 1) << 2 |
                       ((b->data.u32 >> n) & 1) << 1 |
                       ((c->data.u32 >> n) & 1);
         res.data.u32 |= !!(i->subOp & (1 << lut)) << n;
      }
      break;
   case OP_PERMT:
      if (!i->subOp) {
         uint64_t input = (uint64_t)c->data.u32 << 32 | a->data.u32;
         uint16_t permt = b->data.u32;
         for (int n = 0; n < 4; n++, permt >>= 4)
            res.data.u32 |= ((input >> ((permt & 0xf) * 8)) & 0xff) << n * 8;
      } else
         return;
      break;
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         FALLTHROUGH;
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   case OP_SHLADD:
      res.data.u32 = (a->data.u32 << b->data.u32) + c->data.u32;
      break;
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = SATURATE(imm.reg.data.f32); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   case OP_SHLADD:
      if (imm2.isInteger(0)) {
         i->op = OP_SHL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

bool
ConstantFolding::createMul(DataType ty, Value *def, Value *a, int64_t b, Value *c)
{
   const Target *target = prog->getTarget();
   int64_t absB = llabs(b);

   //a * (2^shl) -> a << shl
   if (b >= 0 && util_is_power_of_two_or_zero64(b)) {
      int shl = util_logbase2_64(b);

      Value *res = c ? bld.getSSA(typeSizeof(ty)) : def;
      bld.mkOp2(OP_SHL, ty, res, a, bld.mkImm(shl));
      if (c)
         bld.mkOp2(OP_ADD, ty, def, res, c);

      return true;
   }

   //a * (2^shl + 1) -> a << shl + a
   //a * -(2^shl + 1) -> -a << shl + a
   //a * (2^shl - 1) -> a << shl - a
   //a * -(2^shl - 1) -> -a << shl - a
   if (typeSizeof(ty) == 4 &&
       (util_is_power_of_two_or_zero64(absB - 1) ||
        util_is_power_of_two_or_zero64(absB + 1)) &&
       target->isOpSupported(OP_SHLADD, TYPE_U32)) {
      bool subA = util_is_power_of_two_or_zero64(absB + 1);
      int shl = subA ? util_logbase2_64(absB + 1) : util_logbase2_64(absB - 1);

      Value *res = c ? bld.getSSA() : def;
      Instruction *insn = bld.mkOp3(OP_SHLADD, TYPE_U32, res, a, bld.mkImm(shl), a);
      if (b < 0)
         insn->src(0).mod = Modifier(NV50_IR_MOD_NEG);
      if (subA)
         insn->src(2).mod = Modifier(NV50_IR_MOD_NEG);

      if (c)
         bld.mkOp2(OP_ADD, TYPE_U32, def, res, c);

      return true;
   }

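   // For 32-bit types and a small non-negative b, build the multiply out of
   // two 16x16 XMADs: tmp = lo16(a) * b + c, then def = (hi16(a) * b << 16) +
   // tmp (PSL shifts the partial product left by 16, H1(0) selects the high
   // half of a).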
   if (typeSizeof(ty) == 4 && b >= 0 && b <= 0xffff &&
       target->isOpSupported(OP_XMAD, TYPE_U32)) {
      Value *tmp = bld.mkOp3v(OP_XMAD, TYPE_U32, bld.getSSA(),
                              a, bld.mkImm((uint32_t)b), c ? c : bld.mkImm(0));
      bld.mkOp3(OP_XMAD, TYPE_U32, def, a, bld.mkImm((uint32_t)b), tmp)->subOp =
         NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_H1(0);

      return true;
   }

   return false;
}

bool
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;
   bool deleted = false;

   switch (i->op) {
   case OP_SPLIT: {
      bld.setPosition(i, false);

      uint8_t size = i->getDef(0)->reg.size;
      uint8_t bitsize = size * 8;
      uint32_t mask = (1ULL << bitsize) - 1;
      assert(bitsize <= 32);

      uint64_t val = imm0.reg.data.u64;
      for (int8_t d = 0; i->defExists(d); ++d) {
         Value *def = i->getDef(d);
         assert(def->reg.size == size);

         newi = bld.mkMov(def, bld.mkImm((uint32_t)(val & mask)), TYPE_U32);
         val >>= bitsize;
      }
      delete_Instruction(prog, i);
      deleted = true;
      break;
   }
   case OP_MUL:
      if (i->dType == TYPE_F32 && !i->precise)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // Need to set to the sign value, which is a compare.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
            deleted = true;
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->dnz = 0;
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->dnz = 0;
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->dnz = 0;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->dType) && !i->src(t).mod) {
         bld.setPosition(i, false);
         int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
         if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, NULL)) {
            delete_Instruction(prog, i);
            deleted = true;
         }
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_FMA:
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->dnz = 0;
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->dnz = 0;
         i->op = OP_ADD;
      } else
      if (!isFloatType(i->dType) && !i->subOp && !i->src(t).mod && !i->src(2).mod) {
         bld.setPosition(i, false);
         int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
         if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, i->getSrc(2))) {
            delete_Instruction(prog, i);
            deleted = true;
         }
      }
      break;
   case OP_SUB:
      if (imm0.isInteger(0) && s == 0 && typeSizeof(i->dType) == 8 &&
          !isFloatType(i->dType))
         break;
      FALLTHROUGH;
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            if (i->op == OP_SUB)
               i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
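         // Replace the division with a multiply by an approximate reciprocal
         // (the round-up variant of division by invariant multiplication, cf.
         // Granlund & Montgomery): with l = ceil(log2(d)) and
         // m = 2^32 * (2^l - d) / d + 1, the quotient is
         // (mulhi(n, m) + ((n - mulhi(n, m)) >> r)) >> s.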
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
         deleted = true;
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
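         // Signed division by a constant, same idea: take the high half of
         // n * m, add n, shift right by l - 1, then subtract the result of
         // the compare n < 0 (0 or -1) so the quotient is rounded towards
         // zero; finally negate if the divisor is negative.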
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tD);

         delete_Instruction(prog, i);
         deleted = true;
      }
      break;

   case OP_MOD:
      if (s == 1 && imm0.isPow2()) {
         bld.setPosition(i, false);
         if (i->sType == TYPE_U32) {
            i->op = OP_AND;
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
         } else if (i->sType == TYPE_S32) {
            // Do it on the absolute value of the input, and then restore the
            // sign. The only odd case is MIN_INT, but that should work out
            // as well, since MIN_INT mod any power of 2 is 0.
            //
            // Technically we don't have to do any of this since MOD is
            // undefined with negative arguments in GLSL, but this seems like
            // the nice thing to do.
            Value *abs = bld.mkOp1v(OP_ABS, TYPE_S32, bld.getSSA(), i->getSrc(0));
            Value *neg, *v1, *v2;
            bld.mkCmp(OP_SET, CC_LT, TYPE_S32,
                      (neg = bld.getSSA(1, prog->getTarget()->nativeFile(FILE_PREDICATE))),
                      TYPE_S32, i->getSrc(0), bld.loadImm(NULL, 0));
            Value *mod = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), abs,
                                    bld.loadImm(NULL, imm0.reg.data.u32 - 1));
            bld.mkOp1(OP_NEG, TYPE_S32, (v1 = bld.getSSA()), mod)
               ->setPredicate(CC_P, neg);
            bld.mkOp1(OP_MOV, TYPE_S32, (v2 = bld.getSSA()), mod)
               ->setPredicate(CC_NOT_P, neg);
            newi = bld.mkOp2(OP_UNION, TYPE_S32, i->getDef(0), v1, v2);

            delete_Instruction(prog, i);
            deleted = true;
         }
      } else if (s == 1) {
         // In this case, we still want the optimized lowering that we get
         // from having division by an immediate.
         //
         // a % b == a - (a/b) * b
         bld.setPosition(i, false);
         Value *div = bld.mkOp2v(OP_DIV, i->sType, bld.getSSA(),
                                 i->getSrc(0), i->getSrc(1));
         newi = bld.mkOp2(OP_ADD, i->sType, i->getDef(0), i->getSrc(0),
                          bld.mkOp2v(OP_MUL, i->sType, bld.getSSA(), div, i->getSrc(1)));
         // TODO: Check that target supports this. In this case, we know that
         // all backends do.
         newi->src(1).mod = Modifier(NV50_IR_MOD_NEG);

         delete_Instruction(prog, i);
         deleted = true;
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return false;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return false;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return false;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return false;
         if (imm0.reg.data.f32 != 1.0)
            return false;
         if (cmp->dType != TYPE_U32)
            return false;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(imm0.reg.data.u32 + 1)) {
         // low byte = offset, high byte = width
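         // e.g. (x >> 8) & 0xff becomes EXTBF(x, 0x0808)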
1442 uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
1443 i->op = OP_EXTBF;
1444 i->setSrc(0, src->getSrc(0));
1445 i->setSrc(1, new_ImmediateValue(prog, ext));
1446 } else if (src->op == OP_SHL &&
1447 src->src(1).getImmediate(imm1) &&
1448 i->src(t).mod == Modifier(0) &&
1449 util_is_power_of_two_or_zero(~imm0.reg.data.u32 + 1) &&
1450 util_last_bit(~imm0.reg.data.u32) <= imm1.reg.data.u32) {
1451 i->op = OP_MOV;
1452 i->setSrc(s, NULL);
1453 if (t) {
1454 i->setSrc(0, i->getSrc(t));
1455 i->setSrc(t, NULL);
1456 }
1457 }
1458 }
1459 break;
1460
1461 case OP_SHL:
1462 {
1463 if (s != 1 || i->src(0).mod != Modifier(0))
1464 break;
1465
1466 if (imm0.reg.data.u32 == 0) {
1467 i->op = OP_MOV;
1468 i->setSrc(1, NULL);
1469 break;
1470 }
1471 // try to concatenate shifts
1472 Instruction *si = i->getSrc(0)->getInsn();
1473 if (!si)
1474 break;
1475 ImmediateValue imm1;
1476 switch (si->op) {
1477 case OP_SHL:
1478 if (si->src(1).getImmediate(imm1)) {
1479 bld.setPosition(i, false);
1480 i->setSrc(0, si->getSrc(0));
1481 i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
1482 }
1483 break;
1484 case OP_SHR:
1485 if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
1486 bld.setPosition(i, false);
1487 i->op = OP_AND;
1488 i->setSrc(0, si->getSrc(0));
1489 i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
1490 }
1491 break;
1492 case OP_MUL:
1493 int muls;
1494 if (isFloatType(si->dType))
1495 return false;
1496 if (si->subOp)
1497 return false;
1498 if (si->src(1).getImmediate(imm1))
1499 muls = 1;
1500 else if (si->src(0).getImmediate(imm1))
1501 muls = 0;
1502 else
1503 return false;
1504
1505 bld.setPosition(i, false);
1506 i->op = OP_MUL;
1507 i->subOp = 0;
1508 i->dType = si->dType;
1509 i->sType = si->sType;
1510 i->setSrc(0, si->getSrc(!muls));
1511 i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
1512 break;
1513 case OP_SUB:
1514 case OP_ADD:
1515 int adds;
1516 if (isFloatType(si->dType))
1517 return false;
1518 if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
1519 adds = 0;
1520 else if (si->src(1).getImmediate(imm1))
1521 adds = 1;
1522 else
1523 return false;
1524 if (si->src(!adds).mod != Modifier(0))
1525 return false;
1526 // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))
1527
1528 // This is more operations, but if one of x, y is an immediate, then
1529 // we can get a situation where (a) we can use ISCADD, or (b)
1530 // propagate the add bit into an indirect load.
1531 bld.setPosition(i, false);
1532 i->op = si->op;
1533 i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
1534 i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
1535 bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
1536 si->getSrc(!adds),
1537 bld.mkImm(imm0.reg.data.u32)));
1538 break;
1539 default:
1540 return false;
1541 }
1542 }
1543 break;
1544
1545 case OP_ABS:
1546 case OP_NEG:
1547 case OP_SAT:
1548 case OP_LG2:
1549 case OP_RCP:
1550 case OP_SQRT:
1551 case OP_RSQ:
1552 case OP_PRESIN:
1553 case OP_SIN:
1554 case OP_COS:
1555 case OP_PREEX2:
1556 case OP_EX2:
1557 unary(i, imm0);
1558 break;
1559 case OP_BFIND: {
1560 int32_t res;
1561 switch (i->dType) {
1562 case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
1563 case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
1564 default:
1565 return false;
1566 }
1567 if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
1568 res = 31 - res;
1569 bld.setPosition(i, false); /* make sure bld is init'ed */
1570 i->setSrc(0, bld.mkImm(res));
1571 i->setSrc(1, NULL);
1572 i->op = OP_MOV;
1573 i->subOp = 0;
1574 break;
1575 }
1576 case OP_BREV: {
1577 uint32_t res = util_bitreverse(imm0.reg.data.u32);
1578 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
1579 i->op = OP_MOV;
1580 break;
1581 }
1582 case OP_POPCNT: {
1583 // Only deal with 1-arg POPCNT here
1584 if (i->srcExists(1))
1585 break;
1586 uint32_t res = util_bitcount(imm0.reg.data.u32);
1587 i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
1588 i->setSrc(1, NULL);
1589 i->op = OP_MOV;
1590 break;
1591 }
1592 case OP_CVT: {
1593 Storage res;
1594
1595 // TODO: handle 64-bit values properly
1596 if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
1597 return false;
1598
1599 // TODO: handle single byte/word extractions
1600 if (i->subOp)
1601 return false;
1602
1603 bld.setPosition(i, true); /* make sure bld is init'ed */
1604
1605 #define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
1606 case type: \
1607 switch (i->sType) { \
1608 case TYPE_F64: \
1609 res.data.dst = util_iround(i->saturate ? \
1610 CLAMP(imm0.reg.data.f64, fmin, fmax) : \
1611 imm0.reg.data.f64); \
1612 break; \
1613 case TYPE_F32: \
1614 res.data.dst = util_iround(i->saturate ? \
1615 CLAMP(imm0.reg.data.f32, fmin, fmax) : \
1616 imm0.reg.data.f32); \
1617 break; \
1618 case TYPE_S32: \
1619 res.data.dst = i->saturate ? \
1620 CLAMP(imm0.reg.data.s32, imin, imax) : \
1621 imm0.reg.data.s32; \
1622 break; \
1623 case TYPE_U32: \
1624 res.data.dst = i->saturate ? \
1625 CLAMP(imm0.reg.data.u32, umin, umax) : \
1626 imm0.reg.data.u32; \
1627 break; \
1628 case TYPE_S16: \
1629 res.data.dst = i->saturate ? \
1630 CLAMP(imm0.reg.data.s16, imin, imax) : \
1631 imm0.reg.data.s16; \
1632 break; \
1633 case TYPE_U16: \
1634 res.data.dst = i->saturate ? \
1635 CLAMP(imm0.reg.data.u16, umin, umax) : \
1636 imm0.reg.data.u16; \
1637 break; \
1638 default: return false; \
1639 } \
1640 i->setSrc(0, bld.mkImm(res.data.dst)); \
1641 break
1642
1643 switch(i->dType) {
1644 CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
1645 CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
1646 CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
1647 CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
1648 case TYPE_F32:
1649 switch (i->sType) {
1650 case TYPE_F64:
1651 res.data.f32 = i->saturate ?
1652 SATURATE(imm0.reg.data.f64) :
1653 imm0.reg.data.f64;
1654 break;
1655 case TYPE_F32:
1656 res.data.f32 = i->saturate ?
1657 SATURATE(imm0.reg.data.f32) :
1658 imm0.reg.data.f32;
1659 break;
1660 case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
1661 case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
1662 case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
1663 case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
1664 default:
1665 return false;
1666 }
1667 i->setSrc(0, bld.mkImm(res.data.f32));
1668 break;
1669 case TYPE_F64:
1670 switch (i->sType) {
1671 case TYPE_F64:
1672 res.data.f64 = i->saturate ?
1673 SATURATE(imm0.reg.data.f64) :
1674 imm0.reg.data.f64;
1675 break;
1676 case TYPE_F32:
1677 res.data.f64 = i->saturate ?
1678 SATURATE(imm0.reg.data.f32) :
1679 imm0.reg.data.f32;
1680 break;
1681 case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
1682 case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
1683 case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
1684 case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
1685 default:
1686 return false;
1687 }
1688 i->setSrc(0, bld.mkImm(res.data.f64));
1689 break;
1690 default:
1691 return false;
1692 }
1693 #undef CASE
1694
1695 i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
1696 i->op = OP_MOV;
1697 i->saturate = 0;
1698 i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
1699 break;
1700 }
1701 default:
1702 return false;
1703 }
1704
1705 // This can get left behind some of the optimizations which simplify
1706 // saturatable values.
1707 if (newi->op == OP_MOV && newi->saturate) {
1708 ImmediateValue tmp;
1709 newi->saturate = 0;
1710 newi->op = OP_SAT;
1711 if (newi->src(0).getImmediate(tmp))
1712 unary(newi, tmp);
1713 }
1714
1715 if (newi->op != op)
1716 foldCount++;
1717 return deleted;
1718 }
1719
1720 // =============================================================================
1721
1722 // Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
1723 class ModifierFolding : public Pass
1724 {
1725 private:
1726 virtual bool visit(BasicBlock *);
1727 };
1728
1729 bool
visit(BasicBlock * bb)1730 ModifierFolding::visit(BasicBlock *bb)
1731 {
1732 const Target *target = prog->getTarget();
1733
1734 Instruction *i, *next, *mi;
1735 Modifier mod;
1736
1737 for (i = bb->getEntry(); i; i = next) {
1738 next = i->next;
1739
1740 if (false && i->op == OP_SUB) {
1741 // turn "sub" into "add neg" (do we really want this ?)
1742 i->op = OP_ADD;
1743 i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
1744 }
1745
1746 for (int s = 0; s < 3 && i->srcExists(s); ++s) {
1747 mi = i->getSrc(s)->getInsn();
1748 if (!mi ||
1749 mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
1750 continue;
1751 if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
1752 if ((i->op != OP_ADD &&
1753 i->op != OP_MUL) ||
1754 (mi->op != OP_ABS &&
1755 mi->op != OP_NEG))
1756 continue;
1757 } else
1758 if (i->sType != mi->dType) {
1759 continue;
1760 }
1761 if ((mod = Modifier(mi->op)) == Modifier(0))
1762 continue;
1763 mod *= mi->src(0).mod;
1764
1765 if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
1766 // abs neg [abs] = abs
1767 mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
1768 } else
1769 if ((i->op == OP_NEG) && mod.neg()) {
1770 assert(s == 0);
1771 // neg as both opcode and modifier on same insn is prohibited
1772 // neg neg abs = abs, neg neg = identity
1773 mod = mod & Modifier(~NV50_IR_MOD_NEG);
1774 i->op = mod.getOp();
1775 mod = mod & Modifier(~NV50_IR_MOD_ABS);
1776 if (mod == Modifier(0))
1777 i->op = OP_MOV;
1778 }
1779
1780 if (target->isModSupported(i, s, mod)) {
1781 i->setSrc(s, mi->getSrc(0));
1782 i->src(s).mod *= mod;
1783 }
1784 }
1785
1786 if (i->op == OP_SAT) {
1787 mi = i->getSrc(0)->getInsn();
1788 if (mi &&
1789 mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
1790 mi->saturate = 1;
1791 mi->setDef(0, i->getDef(0));
1792 delete_Instruction(prog, i);
1793 }
1794 }
1795 }
1796
1797 return true;
1798 }
1799
1800 // =============================================================================
1801
1802 // MUL + ADD -> MAD/FMA
1803 // MIN/MAX(a, a) -> a, etc.
1804 // SLCT(a, b, const) -> cc(const) ? a : b
1805 // RCP(RCP(a)) -> a
1806 // MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
1807 // EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
1808 class AlgebraicOpt : public Pass
1809 {
1810 private:
1811 virtual bool visit(BasicBlock *);
1812
1813 void handleABS(Instruction *);
1814 bool handleADD(Instruction *);
1815 bool tryADDToMADOrSAD(Instruction *, operation toOp);
1816 void handleMINMAX(Instruction *);
1817 void handleRCP(Instruction *);
1818 void handleSLCT(Instruction *);
1819 void handleLOGOP(Instruction *);
1820 void handleCVT_NEG(Instruction *);
1821 void handleCVT_CVT(Instruction *);
1822 void handleCVT_EXTBF(Instruction *);
1823 void handleSUCLAMP(Instruction *);
1824 void handleNEG(Instruction *);
1825 void handleEXTBF_RDSV(Instruction *);
1826
1827 BuildUtil bld;
1828 };
1829
1830 void
handleABS(Instruction * abs)1831 AlgebraicOpt::handleABS(Instruction *abs)
1832 {
1833 Instruction *sub = abs->getSrc(0)->getInsn();
1834 DataType ty;
1835 if (!sub ||
1836 !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
1837 return;
1838 // expect not to have mods yet, if we do, bail
1839 if (sub->src(0).mod || sub->src(1).mod)
1840 return;
1841 // hidden conversion ?
1842 ty = intTypeToSigned(sub->dType);
1843 if (abs->dType != abs->sType || ty != abs->sType)
1844 return;
1845
1846 if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
1847 sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
1848 sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
1849 return;
1850
1851 Value *src0 = sub->getSrc(0);
1852 Value *src1 = sub->getSrc(1);
1853
1854 if (sub->op == OP_ADD) {
1855 Instruction *neg = sub->getSrc(1)->getInsn();
1856 if (neg && neg->op != OP_NEG) {
1857 neg = sub->getSrc(0)->getInsn();
1858 src0 = sub->getSrc(1);
1859 }
1860 if (!neg || neg->op != OP_NEG ||
1861 neg->dType != neg->sType || neg->sType != ty)
1862 return;
1863 src1 = neg->getSrc(0);
1864 }
1865
1866 // found ABS(SUB))
1867 abs->moveSources(1, 2); // move sources >=1 up by 2
1868 abs->op = OP_SAD;
1869 abs->setType(sub->dType);
1870 abs->setSrc(0, src0);
1871 abs->setSrc(1, src1);
1872 bld.setPosition(abs, false);
1873 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1874 }
1875
1876 bool
handleADD(Instruction * add)1877 AlgebraicOpt::handleADD(Instruction *add)
1878 {
1879 Value *src0 = add->getSrc(0);
1880 Value *src1 = add->getSrc(1);
1881
1882 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1883 return false;
1884
1885 bool changed = false;
1886 // we can't optimize to MAD if the add is precise
1887 if (!add->precise && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1888 changed = tryADDToMADOrSAD(add, OP_MAD);
1889 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1890 changed = tryADDToMADOrSAD(add, OP_SAD);
1891 return changed;
1892 }
1893
1894 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1895 // ADD(MUL(a,b), c) -> MAD(a,b,c)
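// Source modifiers fold as -(a * b) == (-a) * b, which is why the ADD
// operand's neg is xor'd into the first MUL source below (mod[2] ^ mod[s]).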
1896 bool
1897 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1898 {
1899 Value *src0 = add->getSrc(0);
1900 Value *src1 = add->getSrc(1);
1901 Value *src;
1902 int s;
1903 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1904 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1905 Modifier mod[4];
1906
1907 if (src0->refCount() == 1 &&
1908 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1909 s = 0;
1910 else
1911 if (src1->refCount() == 1 &&
1912 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1913 s = 1;
1914 else
1915 return false;
1916
1917 src = add->getSrc(s);
1918
1919 if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
1920 return false;
1921
1922 if (src->getInsn()->saturate || src->getInsn()->postFactor ||
1923 src->getInsn()->dnz || src->getInsn()->precise)
1924 return false;
1925
1926 if (toOp == OP_SAD) {
1927 ImmediateValue imm;
1928 if (!src->getInsn()->src(2).getImmediate(imm))
1929 return false;
1930 if (!imm.isInteger(0))
1931 return false;
1932 }
1933
1934 if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
1935 isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
1936 return false;
1937
1938 mod[0] = add->src(0).mod;
1939 mod[1] = add->src(1).mod;
1940 mod[2] = src->getUniqueInsn()->src(0).mod;
1941 mod[3] = src->getUniqueInsn()->src(1).mod;
1942
1943 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1944 return false;
1945
1946 add->op = toOp;
1947 add->subOp = src->getInsn()->subOp; // potentially mul-high
1948 add->dnz = src->getInsn()->dnz;
1949 add->dType = src->getInsn()->dType; // sign matters for imad hi
1950 add->sType = src->getInsn()->sType;
1951
1952 add->setSrc(2, add->src(s ? 0 : 1));
1953
1954 add->setSrc(0, src->getInsn()->getSrc(0));
1955 add->src(0).mod = mod[2] ^ mod[s];
1956 add->setSrc(1, src->getInsn()->getSrc(1));
1957 add->src(1).mod = mod[3];
1958
1959 return true;
1960 }
1961
1962 void
1963 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1964 {
1965 Value *src0 = minmax->getSrc(0);
1966 Value *src1 = minmax->getSrc(1);
1967
1968 if (src0 != src1 || src0->reg.file != FILE_GPR)
1969 return;
1970 if (minmax->src(0).mod == minmax->src(1).mod) {
1971 if (minmax->def(0).mayReplace(minmax->src(0))) {
1972 minmax->def(0).replace(minmax->src(0), false);
1973 delete_Instruction(prog, minmax);
1974 } else {
1975 minmax->op = OP_CVT;
1976 minmax->setSrc(1, NULL);
1977 }
1978 } else {
1979 // TODO:
1980 // min(x, -x) = -abs(x)
1981 // min(x, -abs(x)) = -abs(x)
1982 // min(x, abs(x)) = x
1983 // max(x, -abs(x)) = x
1984 // max(x, abs(x)) = abs(x)
1985 // max(x, -x) = abs(x)
1986 }
1987 }
1988
1989 // rcp(rcp(a)) = a
1990 // rcp(sqrt(a)) = rsq(a)
1991 void
1992 AlgebraicOpt::handleRCP(Instruction *rcp)
1993 {
1994 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1995
1996 if (!si)
1997 return;
1998
1999 if (si->op == OP_RCP) {
2000 Modifier mod = rcp->src(0).mod * si->src(0).mod;
2001 rcp->op = mod.getOp();
2002 rcp->setSrc(0, si->getSrc(0));
2003 } else if (si->op == OP_SQRT) {
2004 rcp->op = OP_RSQ;
2005 rcp->setSrc(0, si->getSrc(0));
2006 rcp->src(0).mod = rcp->src(0).mod * si->src(0).mod;
2007 }
2008 }
2009
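// SLCT dst, a, b, c == cc(c, 0) ? a : b. With an immediate c the compare
// folds at compile time, leaving the selected operand in src(0); with an
// unknown c we can only fold when a == b. Either way the SLCT then
// degenerates to a MOV of src(0).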
2010 void
2011 AlgebraicOpt::handleSLCT(Instruction *slct)
2012 {
2013 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
2014 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
2015 slct->setSrc(0, slct->getSrc(1));
2016 } else
2017 if (slct->getSrc(0) != slct->getSrc(1)) {
2018 return;
2019 }
2020 slct->op = OP_MOV;
2021 slct->setSrc(1, NULL);
2022 slct->setSrc(2, NULL);
2023 }
2024
2025 void
2026 AlgebraicOpt::handleLOGOP(Instruction *logop)
2027 {
2028 Value *src0 = logop->getSrc(0);
2029 Value *src1 = logop->getSrc(1);
2030
2031 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2032 return;
2033
2034 if (src0 == src1) {
2035 if ((logop->op == OP_AND || logop->op == OP_OR) &&
2036 logop->def(0).mayReplace(logop->src(0))) {
2037 logop->def(0).replace(logop->src(0), false);
2038 delete_Instruction(prog, logop);
2039 }
2040 } else {
2041 // try AND(SET, SET) -> SET_AND(SET)
2042 Instruction *set0 = src0->getInsn();
2043 Instruction *set1 = src1->getInsn();
2044
2045 if (!set0 || set0->fixed || !set1 || set1->fixed)
2046 return;
2047 if (set1->op != OP_SET) {
2048 Instruction *xchg = set0;
2049 set0 = set1;
2050 set1 = xchg;
2051 if (set1->op != OP_SET)
2052 return;
2053 }
2054 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
2055 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
2056 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
2057 return;
2058 if (set0->op != OP_SET &&
2059 set0->op != OP_SET_AND &&
2060 set0->op != OP_SET_OR &&
2061 set0->op != OP_SET_XOR)
2062 return;
2063 if (set0->getDef(0)->refCount() > 1 &&
2064 set1->getDef(0)->refCount() > 1)
2065 return;
2066 if (set0->getPredicate() || set1->getPredicate())
2067 return;
2068 // check that they don't source each other
2069 for (int s = 0; s < 2; ++s)
2070 if (set0->getSrc(s) == set1->getDef(0) ||
2071 set1->getSrc(s) == set0->getDef(0))
2072 return;
2073
2074 set0 = cloneForward(func, set0);
2075 set1 = cloneShallow(func, set1);
2076 logop->bb->insertAfter(logop, set1);
2077 logop->bb->insertAfter(logop, set0);
2078
2079 set0->dType = TYPE_U8;
2080 set0->getDef(0)->reg.file = FILE_PREDICATE;
2081 set0->getDef(0)->reg.size = 1;
2082 set1->setSrc(2, set0->getDef(0));
2083 set1->op = redOp;
2084 set1->setDef(0, logop->getDef(0));
2085 delete_Instruction(prog, logop);
2086 }
2087 }
2088
2089 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
2090 // nv50:
2091 // F2I(NEG(I2F(ABS(SET))))
2092 void
2093 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
2094 {
2095 Instruction *insn = cvt->getSrc(0)->getInsn();
2096 if (cvt->sType != TYPE_F32 ||
2097 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
2098 return;
2099 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
2100 return;
2101 if (insn->src(0).mod != Modifier(0))
2102 return;
2103 insn = insn->getSrc(0)->getInsn();
2104
2105 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
2106 if (insn && insn->op == OP_CVT &&
2107 insn->dType == TYPE_F32 &&
2108 insn->sType == TYPE_S32) {
2109 insn = insn->getSrc(0)->getInsn();
2110 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
2111 insn->src(0).mod)
2112 return;
2113 insn = insn->getSrc(0)->getInsn();
2114 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
2115 return;
2116 } else
2117 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
2118 return;
2119 }
2120
2121 Instruction *bset = cloneShallow(func, insn);
2122 bset->dType = TYPE_U32;
2123 bset->setDef(0, cvt->getDef(0));
2124 cvt->bb->insertAfter(cvt, bset);
2125 delete_Instruction(prog, cvt);
2126 }
2127
2128 // F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
2129 // does a type conversion, this becomes trickier as there might be range
2130 // changes/etc. We could handle those in theory as long as the range was being
2131 // reduced or kept the same.
2132 void
2133 AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
2134 {
2135 Instruction *insn = cvt->getSrc(0)->getInsn();
2136
2137 if (!insn ||
2138 insn->saturate ||
2139 insn->subOp ||
2140 insn->dType != insn->sType ||
2141 insn->dType != cvt->sType)
2142 return;
2143
2144 RoundMode rnd = insn->rnd;
2145 switch (insn->op) {
2146 case OP_CEIL:
2147 rnd = ROUND_PI;
2148 break;
2149 case OP_FLOOR:
2150 rnd = ROUND_MI;
2151 break;
2152 case OP_TRUNC:
2153 rnd = ROUND_ZI;
2154 break;
2155 case OP_CVT:
2156 break;
2157 default:
2158 return;
2159 }
2160
2161 if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
2162 rnd = (RoundMode)(rnd & 3);
2163
2164 cvt->rnd = rnd;
2165 cvt->setSrc(0, insn->getSrc(0));
2166 cvt->src(0).mod *= insn->src(0).mod;
2167 cvt->sType = insn->sType;
2168 }
2169
2170 // Some shaders extract packed bytes out of words and convert them to
2171 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
2172 // nv50 for word sizes.
2173 //
2174 // CVT(EXTBF(x, byte/word))
2175 // CVT(AND(bytemask, x))
2176 // CVT(AND(bytemask, SHR(x, 8/16/24)))
2177 // CVT(SHR(x, 16/24))
2178 void
2179 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
2180 {
2181 Instruction *insn = cvt->getSrc(0)->getInsn();
2182 ImmediateValue imm;
2183 Value *arg = NULL;
2184 unsigned width, offset = 0;
2185 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
2186 return;
2187 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
2188 width = (imm.reg.data.u32 >> 8) & 0xff;
2189 offset = imm.reg.data.u32 & 0xff;
2190 arg = insn->getSrc(0);
2191
2192 if (width != 8 && width != 16)
2193 return;
2194 if (width == 8 && offset & 0x7)
2195 return;
2196 if (width == 16 && offset & 0xf)
2197 return;
2198 } else if (insn->op == OP_AND) {
2199 int s;
2200 if (insn->src(0).getImmediate(imm))
2201 s = 0;
2202 else if (insn->src(1).getImmediate(imm))
2203 s = 1;
2204 else
2205 return;
2206
2207 if (imm.reg.data.u32 == 0xff)
2208 width = 8;
2209 else if (imm.reg.data.u32 == 0xffff)
2210 width = 16;
2211 else
2212 return;
2213
2214 arg = insn->getSrc(!s);
2215 Instruction *shift = arg->getInsn();
2216
2217 if (shift && shift->op == OP_SHR &&
2218 shift->sType == cvt->sType &&
2219 shift->src(1).getImmediate(imm) &&
2220 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2221 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
2222 arg = shift->getSrc(0);
2223 offset = imm.reg.data.u32;
2224 }
2225 // We just AND'd the high bits away, which means this is effectively an
2226 // unsigned value.
2227 cvt->sType = TYPE_U32;
2228 } else if (insn->op == OP_SHR &&
2229 insn->sType == cvt->sType &&
2230 insn->src(1).getImmediate(imm)) {
2231 arg = insn->getSrc(0);
2232 if (imm.reg.data.u32 == 24) {
2233 width = 8;
2234 offset = 24;
2235 } else if (imm.reg.data.u32 == 16) {
2236 width = 16;
2237 offset = 16;
2238 } else {
2239 return;
2240 }
2241 }
2242
2243 if (!arg)
2244 return;
2245
2246 // Irrespective of what came earlier, we can undo a shift on the argument
2247 // by adjusting the offset.
2248 Instruction *shift = arg->getInsn();
2249 if (shift && shift->op == OP_SHL &&
2250 shift->src(1).getImmediate(imm) &&
2251 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2252 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
2253 imm.reg.data.u32 <= offset) {
2254 arg = shift->getSrc(0);
2255 offset -= imm.reg.data.u32;
2256 }
2257
2258 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
2259 // annoying to detect them.
2260
2261 if (width == 8) {
2262 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
2263 } else {
2264 assert(width == 16);
2265 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
2266 }
2267 cvt->setSrc(0, arg);
2268 cvt->subOp = offset >> 3;
2269 }
2270
2271 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
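// ("fits s6" == fits a signed 6-bit immediate, i.e. the [-32, 31] range
// checked below.)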
2272 void
2273 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
2274 {
2275 ImmediateValue imm;
2276 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
2277 int s;
2278 Instruction *add;
2279
2280 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
2281
2282 // look for ADD (TODO: only count references by non-SUCLAMP)
2283 if (insn->getSrc(0)->refCount() > 1)
2284 return;
2285 add = insn->getSrc(0)->getInsn();
2286 if (!add || add->op != OP_ADD ||
2287 (add->dType != TYPE_U32 &&
2288 add->dType != TYPE_S32))
2289 return;
2290
2291 // look for immediate
2292 for (s = 0; s < 2; ++s)
2293 if (add->src(s).getImmediate(imm))
2294 break;
2295 if (s >= 2)
2296 return;
2297 s = s ? 0 : 1;
2298 // determine if immediate fits
2299 val += imm.reg.data.s32;
2300 if (val > 31 || val < -32)
2301 return;
2302 // determine if other addend fits
2303 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2304 return;
2305
2306 bld.setPosition(insn, false); // make sure bld is init'ed
2307 // replace sources
2308 insn->setSrc(2, bld.mkImm(val));
2309 insn->setSrc(0, add->getSrc(s));
2310 }
2311
2312 // NEG(AND(SET, 1)) -> SET
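// Works because an integer SET writes 0 or -1 (all ones): AND with 1
// maps that to 0/1, and NEG maps 0/1 back to 0/-1 -- the original value.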
2313 void
2314 AlgebraicOpt::handleNEG(Instruction *i) {
2315 Instruction *src = i->getSrc(0)->getInsn();
2316 ImmediateValue imm;
2317 int b;
2318
2319 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2320 return;
2321
2322 if (src->src(0).getImmediate(imm))
2323 b = 1;
2324 else if (src->src(1).getImmediate(imm))
2325 b = 0;
2326 else
2327 return;
2328
2329 if (!imm.isInteger(1))
2330 return;
2331
2332 Instruction *set = src->getSrc(b)->getInsn();
2333 if ((set->op == OP_SET || set->op == OP_SET_AND ||
2334 set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
2335 !isFloatType(set->dType)) {
2336 i->def(0).replace(set->getDef(0), false);
2337 }
2338 }
2339
2340 // EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
2341 void
2342 AlgebraicOpt::handleEXTBF_RDSV(Instruction *i)
2343 {
2344 Instruction *rdsv = i->getSrc(0)->getUniqueInsn();
2345 if (rdsv->op != OP_RDSV ||
2346 rdsv->getSrc(0)->asSym()->reg.data.sv.sv != SV_COMBINED_TID)
2347 return;
2348 // Avoid creating more RDSV instructions
2349 if (rdsv->getDef(0)->refCount() > 1)
2350 return;
2351
2352 ImmediateValue imm;
2353 if (!i->src(1).getImmediate(imm))
2354 return;
2355
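// EXTBF's src(1) packs (width << 8) | offset (cf. handleCVT_EXTBF), so
// the immediates below select the presumed COMBINED_TID fields:
// 0x1000 -> 16 bits at 0 (TID.X), 0x0a10 -> 10 bits at 16 (TID.Y),
// 0x061a -> 6 bits at 26 (TID.Z).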
2356 int index;
2357 if (imm.isInteger(0x1000))
2358 index = 0;
2359 else
2360 if (imm.isInteger(0x0a10))
2361 index = 1;
2362 else
2363 if (imm.isInteger(0x061a))
2364 index = 2;
2365 else
2366 return;
2367
2368 bld.setPosition(i, false);
2369
2370 i->op = OP_RDSV;
2371 i->setSrc(0, bld.mkSysVal(SV_TID, index));
2372 i->setSrc(1, NULL);
2373 }
2374
2375 bool
2376 AlgebraicOpt::visit(BasicBlock *bb)
2377 {
2378 Instruction *next;
2379 for (Instruction *i = bb->getEntry(); i; i = next) {
2380 next = i->next;
2381 switch (i->op) {
2382 case OP_ABS:
2383 handleABS(i);
2384 break;
2385 case OP_ADD:
2386 handleADD(i);
2387 break;
2388 case OP_RCP:
2389 handleRCP(i);
2390 break;
2391 case OP_MIN:
2392 case OP_MAX:
2393 handleMINMAX(i);
2394 break;
2395 case OP_SLCT:
2396 handleSLCT(i);
2397 break;
2398 case OP_AND:
2399 case OP_OR:
2400 case OP_XOR:
2401 handleLOGOP(i);
2402 break;
2403 case OP_CVT:
2404 handleCVT_NEG(i);
2405 handleCVT_CVT(i);
2406 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2407 handleCVT_EXTBF(i);
2408 break;
2409 case OP_SUCLAMP:
2410 handleSUCLAMP(i);
2411 break;
2412 case OP_NEG:
2413 handleNEG(i);
2414 break;
2415 case OP_EXTBF:
2416 handleEXTBF_RDSV(i);
2417 break;
2418 default:
2419 break;
2420 }
2421 }
2422
2423 return true;
2424 }
2425
2426 // =============================================================================
2427
2428 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2429 // MUL(a, b) -> a few XMADs
2430 // MAD/FMA(a, b, c) -> a few XMADs
2431 class LateAlgebraicOpt : public Pass
2432 {
2433 private:
2434 virtual bool visit(Instruction *);
2435
2436 void handleADD(Instruction *);
2437 void handleMULMAD(Instruction *);
2438 bool tryADDToSHLADD(Instruction *);
2439
2440 BuildUtil bld;
2441 };
2442
2443 void
2444 LateAlgebraicOpt::handleADD(Instruction *add)
2445 {
2446 Value *src0 = add->getSrc(0);
2447 Value *src1 = add->getSrc(1);
2448
2449 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2450 return;
2451
2452 if (prog->getTarget()->isOpSupported(OP_SHLADD, add->dType))
2453 tryADDToSHLADD(add);
2454 }
2455
2456 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2457 bool
2458 LateAlgebraicOpt::tryADDToSHLADD(Instruction *add)
2459 {
2460 Value *src0 = add->getSrc(0);
2461 Value *src1 = add->getSrc(1);
2462 ImmediateValue imm;
2463 Instruction *shl;
2464 Value *src;
2465 int s;
2466
2467 if (add->saturate || add->usesFlags() || typeSizeof(add->dType) == 8
2468 || isFloatType(add->dType))
2469 return false;
2470
2471 if (src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_SHL)
2472 s = 0;
2473 else
2474 if (src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_SHL)
2475 s = 1;
2476 else
2477 return false;
2478
2479 src = add->getSrc(s);
2480 shl = src->getUniqueInsn();
2481
2482 if (shl->bb != add->bb || shl->usesFlags() || shl->subOp || shl->src(0).mod)
2483 return false;
2484
2485 if (!shl->src(1).getImmediate(imm))
2486 return false;
2487
2488 add->op = OP_SHLADD;
2489 add->setSrc(2, add->src(!s));
2490 // SHL can't have any modifiers, but the ADD source may have had
2491 // one. Preserve it.
2492 add->setSrc(0, shl->getSrc(0));
2493 if (s == 1)
2494 add->src(0).mod = add->src(1).mod;
2495 add->setSrc(1, new_ImmediateValue(shl->bb->getProgram(), imm.reg.data.u32));
2496 add->src(1).mod = Modifier(0);
2497
2498 return true;
2499 }
2500
2501 // MUL(a, b) -> a few XMADs
2502 // MAD/FMA(a, b, c) -> a few XMADs
2503 void
2504 LateAlgebraicOpt::handleMULMAD(Instruction *i)
2505 {
2506 // TODO: handle NV50_IR_SUBOP_MUL_HIGH
2507 if (!prog->getTarget()->isOpSupported(OP_XMAD, TYPE_U32))
2508 return;
2509 if (isFloatType(i->dType) || typeSizeof(i->dType) != 4)
2510 return;
2511 if (i->subOp || i->usesFlags() || i->flagsDef >= 0)
2512 return;
2513
2514 assert(!i->src(0).mod);
2515 assert(!i->src(1).mod);
2516 assert(i->op == OP_MUL ? 1 : !i->src(2).mod);
2517
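// Expand to the usual three-XMAD 32-bit multiply. The identity being
// implemented (a sketch; the exact per-subOp semantics live in the
// target backends):
//   a * b = lo16(a)*lo16(b) + ((hi16(a)*lo16(b) + lo16(a)*hi16(b)) << 16)
// tmp0 folds c into the low partial product, tmp1 (MRG | H1) forms a
// cross product merged into the high halfword, and the final PSL.CBCC
// XMAD shifts and accumulates everything into the destination.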
2518 bld.setPosition(i, false);
2519
2520 Value *a = i->getSrc(0);
2521 Value *b = i->getSrc(1);
2522 Value *c = i->op == OP_MUL ? bld.mkImm(0) : i->getSrc(2);
2523
2524 Value *tmp0 = bld.getSSA();
2525 Value *tmp1 = bld.getSSA();
2526
2527 Instruction *insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp0, b, a, c);
2528 insn->setPredicate(i->cc, i->getPredicate());
2529
2530 insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp1, b, a, bld.mkImm(0));
2531 insn->setPredicate(i->cc, i->getPredicate());
2532 insn->subOp = NV50_IR_SUBOP_XMAD_MRG | NV50_IR_SUBOP_XMAD_H1(1);
2533
2534 Value *pred = i->getPredicate();
2535 i->setPredicate(i->cc, NULL);
2536
2537 i->op = OP_XMAD;
2538 i->setSrc(0, b);
2539 i->setSrc(1, tmp1);
2540 i->setSrc(2, tmp0);
2541 i->subOp = NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_CBCC;
2542 i->subOp |= NV50_IR_SUBOP_XMAD_H1(0) | NV50_IR_SUBOP_XMAD_H1(1);
2543
2544 i->setPredicate(i->cc, pred);
2545 }
2546
2547 bool
2548 LateAlgebraicOpt::visit(Instruction *i)
2549 {
2550 switch (i->op) {
2551 case OP_ADD:
2552 handleADD(i);
2553 break;
2554 case OP_MUL:
2555 case OP_MAD:
2556 case OP_FMA:
2557 handleMULMAD(i);
2558 break;
2559 default:
2560 break;
2561 }
2562
2563 return true;
2564 }
2565
2566 // =============================================================================
2567
2568 // Split 64-bit MUL and MAD
2569 class Split64BitOpPreRA : public Pass
2570 {
2571 private:
2572 virtual bool visit(BasicBlock *);
2573 void split64MulMad(Function *, Instruction *, DataType);
2574
2575 BuildUtil bld;
2576 };
2577
2578 bool
2579 Split64BitOpPreRA::visit(BasicBlock *bb)
2580 {
2581 Instruction *i, *next;
2582 Modifier mod;
2583
2584 for (i = bb->getEntry(); i; i = next) {
2585 next = i->next;
2586
2587 DataType hTy;
2588 switch (i->dType) {
2589 case TYPE_U64: hTy = TYPE_U32; break;
2590 case TYPE_S64: hTy = TYPE_S32; break;
2591 default:
2592 continue;
2593 }
2594
2595 if (i->op == OP_MAD || i->op == OP_MUL)
2596 split64MulMad(func, i, hTy);
2597 }
2598
2599 return true;
2600 }
2601
2602 void
2603 Split64BitOpPreRA::split64MulMad(Function *fn, Instruction *i, DataType hTy)
2604 {
2605 assert(i->op == OP_MAD || i->op == OP_MUL);
2606 assert(!isFloatType(i->dType) && !isFloatType(i->sType));
2607 assert(typeSizeof(hTy) == 4);
2608
2609 bld.setPosition(i, true);
2610
2611 Value *zero = bld.mkImm(0u);
2612 Value *carry = bld.getSSA(1, FILE_FLAGS);
2613
2614 // We want to compute `d = a * b (+ c)?`, where a, b, c and d are 64-bit
2615 // values (a, b and c might be 32-bit values), using 32-bit operations. This
2616 // gives the following operations:
2617 // * `d.low = low(a.low * b.low) (+ c.low)?`
2618 // * `d.high = low(a.high * b.low) + low(a.low * b.high)
2619 // + high(a.low * b.low) (+ c.high)?`
2620 //
2621 // To compute the high bits, we can split in the following operations:
2622 // * `tmp1 = low(a.high * b.low) (+ c.high)?`
2623 // * `tmp2 = low(a.low * b.high) + tmp1`
2624 // * `d.high = high(a.low * b.low) + tmp2`
2625 //
2626 // mkSplit puts lower bits at index 0 and higher bits at index 1
2627
2628 Value *op1[2];
2629 if (i->getSrc(0)->reg.size == 8)
2630 bld.mkSplit(op1, 4, i->getSrc(0));
2631 else {
2632 op1[0] = i->getSrc(0);
2633 op1[1] = zero;
2634 }
2635 Value *op2[2];
2636 if (i->getSrc(1)->reg.size == 8)
2637 bld.mkSplit(op2, 4, i->getSrc(1));
2638 else {
2639 op2[0] = i->getSrc(1);
2640 op2[1] = zero;
2641 }
2642
2643 Value *op3[2] = { NULL, NULL };
2644 if (i->op == OP_MAD) {
2645 if (i->getSrc(2)->reg.size == 8)
2646 bld.mkSplit(op3, 4, i->getSrc(2));
2647 else {
2648 op3[0] = i->getSrc(2);
2649 op3[1] = zero;
2650 }
2651 }
2652
2653 Value *tmpRes1Hi = bld.getSSA();
2654 if (i->op == OP_MAD)
2655 bld.mkOp3(OP_MAD, hTy, tmpRes1Hi, op1[1], op2[0], op3[1]);
2656 else
2657 bld.mkOp2(OP_MUL, hTy, tmpRes1Hi, op1[1], op2[0]);
2658
2659 Value *tmpRes2Hi = bld.mkOp3v(OP_MAD, hTy, bld.getSSA(), op1[0], op2[1], tmpRes1Hi);
2660
2661 Value *def[2] = { bld.getSSA(), bld.getSSA() };
2662
2663 // If it was a MAD, add the carry from the low bits
2664 // It is not needed if it was a MUL, since we added high(a.low * b.low) to
2665 // d.high
2666 if (i->op == OP_MAD)
2667 bld.mkOp3(OP_MAD, hTy, def[0], op1[0], op2[0], op3[0])->setFlagsDef(1, carry);
2668 else
2669 bld.mkOp2(OP_MUL, hTy, def[0], op1[0], op2[0]);
2670
2671 Instruction *hiPart3 = bld.mkOp3(OP_MAD, hTy, def[1], op1[0], op2[0], tmpRes2Hi);
2672 hiPart3->subOp = NV50_IR_SUBOP_MUL_HIGH;
2673 if (i->op == OP_MAD)
2674 hiPart3->setFlagsSrc(3, carry);
2675
2676 bld.mkOp2(OP_MERGE, i->dType, i->getDef(0), def[0], def[1]);
2677
2678 delete_Instruction(fn->getProgram(), i);
2679 }
2680
2681 // =============================================================================
2682
2683 static inline void
2684 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2685 {
2686 if (offset != ldst->getSrc(0)->reg.data.offset) {
2687 if (ldst->getSrc(0)->refCount() > 1)
2688 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2689 ldst->getSrc(0)->reg.data.offset = offset;
2690 }
2691 }
2692
2693 // Combine loads and stores, forward stores to loads where possible.
2694 class MemoryOpt : public Pass
2695 {
2696 private:
2697 class Record
2698 {
2699 public:
2700 Record *next;
2701 Instruction *insn;
2702 const Value *rel[2];
2703 const Value *base;
2704 int32_t offset;
2705 int8_t fileIndex;
2706 uint8_t size;
2707 bool locked;
2708 Record *prev;
2709
2710 bool overlaps(const Instruction *ldst) const;
2711
2712 inline void link(Record **);
2713 inline void unlink(Record **);
2714 inline void set(const Instruction *ldst);
2715 };
2716
2717 public:
2718 MemoryOpt();
2719
2720 Record *loads[DATA_FILE_COUNT];
2721 Record *stores[DATA_FILE_COUNT];
2722
2723 MemoryPool recordPool;
2724
2725 private:
2726 virtual bool visit(BasicBlock *);
2727 bool runOpt(BasicBlock *);
2728
2729 Record **getList(const Instruction *);
2730
2731 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2732
2733 // merge @insn into load/store instruction from @rec
2734 bool combineLd(Record *rec, Instruction *ld);
2735 bool combineSt(Record *rec, Instruction *st);
2736
2737 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2738 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2739 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2740
2741 void addRecord(Instruction *ldst);
2742 void purgeRecords(Instruction *const st, DataFile);
2743 void lockStores(Instruction *const ld);
2744 void reset();
2745
2746 private:
2747 Record *prevRecord;
2748 };
2749
2750 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2751 {
2752 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2753 loads[i] = NULL;
2754 stores[i] = NULL;
2755 }
2756 prevRecord = NULL;
2757 }
2758
2759 void
2760 MemoryOpt::reset()
2761 {
2762 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2763 Record *it, *next;
2764 for (it = loads[i]; it; it = next) {
2765 next = it->next;
2766 recordPool.release(it);
2767 }
2768 loads[i] = NULL;
2769 for (it = stores[i]; it; it = next) {
2770 next = it->next;
2771 recordPool.release(it);
2772 }
2773 stores[i] = NULL;
2774 }
2775 }
2776
2777 bool
2778 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2779 {
2780 int32_t offRc = rec->offset;
2781 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2782 int sizeRc = rec->size;
2783 int sizeLd = typeSizeof(ld->dType);
2784 int size = sizeRc + sizeLd;
2785 int d, j;
2786
2787 if (!prog->getTarget()->
2788 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2789 return false;
2790 // no unaligned loads
2791 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2792 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2793 return false;
2794 // for compute indirect loads are not guaranteed to be aligned
2795 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2796 return false;
2797
2798 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2799
2800 // lock any stores that overlap with the load being merged into the
2801 // existing record.
2802 lockStores(ld);
2803
2804 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2805
2806 if (offLd < offRc) {
2807 int sz;
2808 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2809 // d: nr of definitions in ld
2810 // j: nr of definitions in rec->insn, move:
2811 for (d = d + j - 1; j > 0; --j, --d)
2812 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2813
2814 if (rec->insn->getSrc(0)->refCount() > 1)
2815 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2816 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2817
2818 d = 0;
2819 } else {
2820 d = j;
2821 }
2822 // move definitions of @ld to @rec->insn
2823 for (j = 0; sizeLd; ++j, ++d) {
2824 sizeLd -= ld->getDef(j)->reg.size;
2825 rec->insn->setDef(d, ld->getDef(j));
2826 }
2827
2828 rec->size = size;
2829 rec->insn->getSrc(0)->reg.size = size;
2830 rec->insn->setType(typeOfSize(size));
2831
2832 delete_Instruction(prog, ld);
2833
2834 return true;
2835 }
2836
2837 bool
2838 MemoryOpt::combineSt(Record *rec, Instruction *st)
2839 {
2840 int32_t offRc = rec->offset;
2841 int32_t offSt = st->getSrc(0)->reg.data.offset;
2842 int sizeRc = rec->size;
2843 int sizeSt = typeSizeof(st->dType);
2844 int s = sizeSt / 4;
2845 int size = sizeRc + sizeSt;
2846 int j, k;
2847 Value *src[4]; // no modifiers in ValueRef allowed for st
2848 Value *extra[3];
2849
2850 if (!prog->getTarget()->
2851 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2852 return false;
2853 // no unaligned stores
2854 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2855 return false;
2856 // for compute indirect stores are not guaranteed to be aligned
2857 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2858 return false;
2859
2860 // There's really no great place to put this in a generic manner. Seemingly
2861 // wide stores at 0x60 don't work in GS shaders on SM50+. Don't combine
2862 // those.
2863 if (prog->getTarget()->getChipset() >= NVISA_GM107_CHIPSET &&
2864 prog->getType() == Program::TYPE_GEOMETRY &&
2865 st->getSrc(0)->reg.file == FILE_SHADER_OUTPUT &&
2866 rec->rel[0] == NULL &&
2867 MIN2(offRc, offSt) == 0x60)
2868 return false;
2869
2870 // remove any existing load/store records for the store being merged into
2871 // the existing record.
2872 purgeRecords(st, DATA_FILE_COUNT);
2873
2874 st->takeExtraSources(0, extra); // save predicate and indirect address
2875
2876 if (offRc < offSt) {
2877 // save values from @st
2878 for (s = 0; sizeSt; ++s) {
2879 sizeSt -= st->getSrc(s + 1)->reg.size;
2880 src[s] = st->getSrc(s + 1);
2881 }
2882 // set record's values as low sources of @st
2883 for (j = 1; sizeRc; ++j) {
2884 sizeRc -= rec->insn->getSrc(j)->reg.size;
2885 st->setSrc(j, rec->insn->getSrc(j));
2886 }
2887 // set saved values as high sources of @st
2888 for (k = j, j = 0; j < s; ++j)
2889 st->setSrc(k++, src[j]);
2890
2891 updateLdStOffset(st, offRc, func);
2892 } else {
2893 for (j = 1; sizeSt; ++j)
2894 sizeSt -= st->getSrc(j)->reg.size;
2895 for (s = 1; sizeRc; ++j, ++s) {
2896 sizeRc -= rec->insn->getSrc(s)->reg.size;
2897 st->setSrc(j, rec->insn->getSrc(s));
2898 }
2899 rec->offset = offSt;
2900 }
2901 st->putExtraSources(0, extra); // restore pointer and predicate
2902
2903 delete_Instruction(prog, rec->insn);
2904 rec->insn = st;
2905 rec->size = size;
2906 rec->insn->getSrc(0)->reg.size = size;
2907 rec->insn->setType(typeOfSize(size));
2908 return true;
2909 }
2910
2911 void
2912 MemoryOpt::Record::set(const Instruction *ldst)
2913 {
2914 const Symbol *mem = ldst->getSrc(0)->asSym();
2915 fileIndex = mem->reg.fileIndex;
2916 rel[0] = ldst->getIndirect(0, 0);
2917 rel[1] = ldst->getIndirect(0, 1);
2918 offset = mem->reg.data.offset;
2919 base = mem->getBase();
2920 size = typeSizeof(ldst->sType);
2921 }
2922
2923 void
2924 MemoryOpt::Record::link(Record **list)
2925 {
2926 next = *list;
2927 if (next)
2928 next->prev = this;
2929 prev = NULL;
2930 *list = this;
2931 }
2932
2933 void
2934 MemoryOpt::Record::unlink(Record **list)
2935 {
2936 if (next)
2937 next->prev = prev;
2938 if (prev)
2939 prev->next = next;
2940 else
2941 *list = next;
2942 }
2943
2944 MemoryOpt::Record **
2945 MemoryOpt::getList(const Instruction *insn)
2946 {
2947 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2948 return &loads[insn->src(0).getFile()];
2949 return &stores[insn->src(0).getFile()];
2950 }
2951
2952 void
2953 MemoryOpt::addRecord(Instruction *i)
2954 {
2955 Record **list = getList(i);
2956 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2957
2958 it->link(list);
2959 it->set(i);
2960 it->insn = i;
2961 it->locked = false;
2962 }
2963
2964 MemoryOpt::Record *
2965 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2966 {
2967 const Symbol *sym = insn->getSrc(0)->asSym();
2968 const int size = typeSizeof(insn->sType);
2969 Record *rec = NULL;
2970 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2971
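// Candidates must sit in the same aligned 16-byte chunk (offset >> 4)
// and agree on indirect sources and file index; only then can a merged
// wide access (at most 16 bytes) legally cover both.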
2972 for (; it; it = it->next) {
2973 if (it->locked && insn->op != OP_LOAD && insn->op != OP_VFETCH)
2974 continue;
2975 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2976 it->rel[0] != insn->getIndirect(0, 0) ||
2977 it->fileIndex != sym->reg.fileIndex ||
2978 it->rel[1] != insn->getIndirect(0, 1))
2979 continue;
2980
2981 if (it->offset < sym->reg.data.offset) {
2982 if (it->offset + it->size >= sym->reg.data.offset) {
2983 isAdj = (it->offset + it->size == sym->reg.data.offset);
2984 if (!isAdj)
2985 return it;
2986 if (!(it->offset & 0x7))
2987 rec = it;
2988 }
2989 } else {
2990 isAdj = it->offset != sym->reg.data.offset;
2991 if (size <= it->size && !isAdj)
2992 return it;
2993 else
2994 if (!(sym->reg.data.offset & 0x7))
2995 if (it->offset - size <= sym->reg.data.offset)
2996 rec = it;
2997 }
2998 }
2999 return rec;
3000 }
3001
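// Store-to-load forwarding: line the load's offset up with one of the
// store's source operands and, if sizes and files match exactly, replace
// the loaded defs with the stored values outright.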
3002 bool
3003 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
3004 {
3005 Instruction *st = rec->insn;
3006 int32_t offSt = rec->offset;
3007 int32_t offLd = ld->getSrc(0)->reg.data.offset;
3008 int d, s;
3009
3010 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
3011 offSt += st->getSrc(s)->reg.size;
3012 if (offSt != offLd)
3013 return false;
3014
3015 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
3016 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
3017 return false;
3018 if (st->getSrc(s)->reg.file != FILE_GPR)
3019 return false;
3020 ld->def(d).replace(st->src(s), false);
3021 }
3022 ld->bb->remove(ld);
3023 return true;
3024 }
3025
3026 bool
3027 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
3028 {
3029 Instruction *ldR = rec->insn;
3030 int32_t offR = rec->offset;
3031 int32_t offE = ldE->getSrc(0)->reg.data.offset;
3032 int dR, dE;
3033
3034 assert(offR <= offE);
3035 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
3036 offR += ldR->getDef(dR)->reg.size;
3037 if (offR != offE)
3038 return false;
3039
3040 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
3041 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
3042 return false;
3043 ldE->def(dE).replace(ldR->getDef(dR), false);
3044 }
3045
3046 delete_Instruction(prog, ldE);
3047 return true;
3048 }
3049
3050 bool
3051 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
3052 {
3053 const Instruction *const ri = rec->insn;
3054 Value *extra[3];
3055
3056 int32_t offS = st->getSrc(0)->reg.data.offset;
3057 int32_t offR = rec->offset;
3058 int32_t endS = offS + typeSizeof(st->dType);
3059 int32_t endR = offR + typeSizeof(ri->dType);
3060
3061 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
3062
3063 st->takeExtraSources(0, extra);
3064
3065 if (offR < offS) {
3066 Value *vals[10];
3067 int s, n;
3068 int k = 0;
3069 // get non-replaced sources of ri
3070 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
3071 vals[k++] = ri->getSrc(s);
3072 n = s;
3073 // get replaced sources of st
3074 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
3075 vals[k++] = st->getSrc(s);
3076 // skip replaced sources of ri
3077 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
3078 // get non-replaced sources after values covered by st
3079 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
3080 vals[k++] = ri->getSrc(s);
3081 assert((unsigned int)k <= ARRAY_SIZE(vals));
3082 for (s = 0; s < k; ++s)
3083 st->setSrc(s + 1, vals[s]);
3084 st->setSrc(0, ri->getSrc(0));
3085 } else
3086 if (endR > endS) {
3087 int j, s;
3088 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
3089 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
3090 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
3091 st->setSrc(s++, ri->getSrc(j));
3092 }
3093 st->putExtraSources(0, extra);
3094
3095 delete_Instruction(prog, rec->insn);
3096
3097 rec->insn = st;
3098 rec->offset = st->getSrc(0)->reg.data.offset;
3099
3100 st->setType(typeOfSize(rec->size));
3101
3102 return true;
3103 }
3104
3105 bool
3106 MemoryOpt::Record::overlaps(const Instruction *ldst) const
3107 {
3108 Record that;
3109 that.set(ldst);
3110
3111 // This assumes that images/buffers can't overlap. They can.
3112 // TODO: Plumb the restrict logic through, and only skip when it's a
3113 // restrict situation, or there can implicitly be no writes.
3114 if (this->fileIndex != that.fileIndex && this->rel[1] == that.rel[1])
3115 return false;
3116
3117 if (this->rel[0] || that.rel[0])
3118 return this->base == that.base;
3119
3120 return
3121 (this->offset < that.offset + that.size) &&
3122 (this->offset + this->size > that.offset);
3123 }
3124
3125 // We must not eliminate stores that affect the result of @ld if
3126 // we find later stores to the same location, and we may no longer
3127 // merge them with later stores.
3128 // The stored value can, however, still be used to determine the value
3129 // returned by future loads.
3130 void
3131 MemoryOpt::lockStores(Instruction *const ld)
3132 {
3133 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
3134 if (!r->locked && r->overlaps(ld))
3135 r->locked = true;
3136 }
3137
3138 // Prior loads from the location of @st are no longer valid.
3139 // Stores to the location of @st may no longer be used to derive
3140 // the value at it nor be coalesced into later stores.
3141 void
3142 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
3143 {
3144 if (st)
3145 f = st->src(0).getFile();
3146
3147 for (Record *r = loads[f]; r; r = r->next)
3148 if (!st || r->overlaps(st))
3149 r->unlink(&loads[f]);
3150
3151 for (Record *r = stores[f]; r; r = r->next)
3152 if (!st || r->overlaps(st))
3153 r->unlink(&stores[f]);
3154 }
3155
3156 bool
3157 MemoryOpt::visit(BasicBlock *bb)
3158 {
3159 bool ret = runOpt(bb);
3160 // Run again; one pass won't combine four 32-bit ld/st into a single
3161 // 128-bit ld/st where 96-bit memory operations are forbidden.
3162 if (ret)
3163 ret = runOpt(bb);
3164 return ret;
3165 }
3166
3167 bool
3168 MemoryOpt::runOpt(BasicBlock *bb)
3169 {
3170 Instruction *ldst, *next;
3171 Record *rec;
3172 bool isAdjacent = true;
3173
3174 for (ldst = bb->getEntry(); ldst; ldst = next) {
3175 bool keep = true;
3176 bool isLoad = true;
3177 next = ldst->next;
3178
3179 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
3180 if (ldst->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
3181 purgeRecords(ldst, ldst->src(0).getFile());
3182 continue;
3183 }
3184 if (ldst->isDead()) {
3185 // might have been produced by earlier optimization
3186 delete_Instruction(prog, ldst);
3187 continue;
3188 }
3189 } else
3190 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
3191 if (ldst->subOp == NV50_IR_SUBOP_STORE_UNLOCKED) {
3192 purgeRecords(ldst, ldst->src(0).getFile());
3193 continue;
3194 }
3195 if (typeSizeof(ldst->dType) == 4 &&
3196 ldst->src(1).getFile() == FILE_GPR &&
3197 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
3198 delete_Instruction(prog, ldst);
3199 continue;
3200 }
3201 isLoad = false;
3202 } else {
3203 // TODO: maybe have all fixed ops act as barrier ?
3204 if (ldst->op == OP_CALL ||
3205 ldst->op == OP_BAR ||
3206 ldst->op == OP_MEMBAR) {
3207 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3208 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3209 purgeRecords(NULL, FILE_MEMORY_SHARED);
3210 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3211 } else
3212 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
3213 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
3214 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3215 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3216 purgeRecords(NULL, FILE_MEMORY_SHARED);
3217 } else {
3218 purgeRecords(NULL, ldst->src(0).getFile());
3219 }
3220 } else
3221 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
3222 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3223 }
3224 continue;
3225 }
3226 if (ldst->getPredicate()) // TODO: handle predicated ld/st
3227 continue;
3228 if (ldst->perPatch) // TODO: create separate per-patch lists
3229 continue;
3230
3231 if (isLoad) {
3232 DataFile file = ldst->src(0).getFile();
3233
3234 // if ld l[]/g[] look for previous store to eliminate the reload
3235 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
3236 // TODO: shared memory ?
3237 rec = findRecord(ldst, false, isAdjacent);
3238 if (rec && !isAdjacent)
3239 keep = !replaceLdFromSt(ldst, rec);
3240 }
3241
3242 // or look for ld from the same location and replace this one
3243 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
3244 if (rec) {
3245 if (!isAdjacent)
3246 keep = !replaceLdFromLd(ldst, rec);
3247 else
3248 // or combine a previous load with this one
3249 keep = !combineLd(rec, ldst);
3250 }
3251 if (keep)
3252 lockStores(ldst);
3253 } else {
3254 rec = findRecord(ldst, false, isAdjacent);
3255 if (rec) {
3256 if (!isAdjacent)
3257 keep = !replaceStFromSt(ldst, rec);
3258 else
3259 keep = !combineSt(rec, ldst);
3260 }
3261 if (keep)
3262 purgeRecords(ldst, DATA_FILE_COUNT);
3263 }
3264 if (keep)
3265 addRecord(ldst);
3266 }
3267 reset();
3268
3269 return true;
3270 }
3271
3272 // =============================================================================
3273
3274 // Turn control flow into predicated instructions (after register allocation !).
3275 // TODO:
3276 // Could move this to before register allocation on NVC0 and also handle nested
3277 // constructs.
3278 class FlatteningPass : public Pass
3279 {
3280 public:
3281 FlatteningPass() : gpr_unit(0) {}
3282
3283 private:
3284 virtual bool visit(Function *);
3285 virtual bool visit(BasicBlock *);
3286
3287 bool tryPredicateConditional(BasicBlock *);
3288 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
3289 void tryPropagateBranch(BasicBlock *);
3290 inline bool isConstantCondition(Value *pred);
3291 inline bool mayPredicate(const Instruction *, const Value *pred) const;
3292 inline void removeFlow(Instruction *);
3293
3294 uint8_t gpr_unit;
3295 };
3296
3297 bool
3298 FlatteningPass::isConstantCondition(Value *pred)
3299 {
3300 Instruction *insn = pred->getUniqueInsn();
3301 assert(insn);
3302 if (insn->op != OP_SET || insn->srcExists(2))
3303 return false;
3304
3305 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
3306 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
3307 DataFile file;
3308 if (ld) {
3309 if (ld->op != OP_MOV && ld->op != OP_LOAD)
3310 return false;
3311 if (ld->src(0).isIndirect(0))
3312 return false;
3313 file = ld->src(0).getFile();
3314 } else {
3315 file = insn->src(s).getFile();
3316 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
3317 // in register "units", which can vary between targets.
3318 if (file == FILE_GPR) {
3319 Value *v = insn->getSrc(s);
3320 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
3321 int units = bytes >> gpr_unit;
3322 if (units > prog->maxGPR)
3323 file = FILE_IMMEDIATE;
3324 }
3325 }
3326 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
3327 return false;
3328 }
3329 return true;
3330 }
3331
3332 void
3333 FlatteningPass::removeFlow(Instruction *insn)
3334 {
3335 FlowInstruction *term = insn ? insn->asFlow() : NULL;
3336 if (!term)
3337 return;
3338 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
3339
3340 if (term->op == OP_BRA) {
3341 // TODO: this might get more difficult when we get arbitrary BRAs
3342 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
3343 return;
3344 } else
3345 if (term->op != OP_JOIN)
3346 return;
3347
3348 Value *pred = term->getPredicate();
3349
3350 delete_Instruction(prog, term);
3351
3352 if (pred && pred->refCount() == 0) {
3353 Instruction *pSet = pred->getUniqueInsn();
3354 pred->join->reg.data.id = -1; // deallocate
3355 if (pSet->isDead())
3356 delete_Instruction(prog, pSet);
3357 }
3358 }
3359
3360 void
3361 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
3362 {
3363 for (Instruction *i = bb->getEntry(); i; i = i->next) {
3364 if (i->isNop())
3365 continue;
3366 assert(!i->getPredicate());
3367 i->setPredicate(cc, pred);
3368 }
3369 removeFlow(bb->getExit());
3370 }
3371
3372 bool
3373 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
3374 {
3375 if (insn->isPseudo())
3376 return true;
3377 // TODO: calls where we don't know which registers are modified
3378
3379 if (!prog->getTarget()->mayPredicate(insn, pred))
3380 return false;
3381 for (int d = 0; insn->defExists(d); ++d)
3382 if (insn->getDef(d)->equals(pred))
3383 return false;
3384 return true;
3385 }
3386
3387 // If we jump to BRA/RET/EXIT, replace the jump with it.
3388 // NOTE: We do not update the CFG anymore here !
3389 //
3390 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
3391 // BB:0
3392 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
3393 // BB1:
3394 // bra BB:3
3395 // BB2:
3396 // ...
3397 // BB3:
3398 // ...
3399 void
3400 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
3401 {
3402 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
3403 BasicBlock *bf = i->asFlow()->target.bb;
3404
3405 if (bf->getInsnCount() != 1)
3406 continue;
3407
3408 FlowInstruction *bra = i->asFlow();
3409 FlowInstruction *rep = bf->getExit()->asFlow();
3410
3411 if (!rep || rep->getPredicate())
3412 continue;
3413 if (rep->op != OP_BRA &&
3414 rep->op != OP_JOIN &&
3415 rep->op != OP_EXIT)
3416 continue;
3417
3418 // TODO: If there are multiple branches to @rep, only the first would
3419 // be replaced, so only remove them after this pass is done ?
3420 // Also, need to check all incident blocks for fall-through exits and
3421 // add the branch there.
3422 bra->op = rep->op;
3423 bra->target.bb = rep->target.bb;
3424 if (bf->cfg.incidentCount() == 1)
3425 bf->remove(rep);
3426 }
3427 }
3428
3429 bool
3430 FlatteningPass::visit(Function *fn)
3431 {
3432 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
3433
3434 return true;
3435 }
3436
3437 bool
3438 FlatteningPass::visit(BasicBlock *bb)
3439 {
3440 if (tryPredicateConditional(bb))
3441 return true;
3442
3443 // try to attach join to previous instruction
3444 if (prog->getTarget()->hasJoin) {
3445 Instruction *insn = bb->getExit();
3446 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
3447 insn = insn->prev;
3448 if (insn && !insn->getPredicate() &&
3449 !insn->asFlow() &&
3450 insn->op != OP_DISCARD &&
3451 insn->op != OP_TEXBAR &&
3452 !isTextureOp(insn->op) && // probably just nve4
3453 !isSurfaceOp(insn->op) && // not confirmed
3454 insn->op != OP_LINTERP && // probably just nve4
3455 insn->op != OP_PINTERP && // probably just nve4
3456 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
3457 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
3458 !insn->isNop()) {
3459 insn->join = 1;
3460 bb->remove(bb->getExit());
3461 return true;
3462 }
3463 }
3464 }
3465
3466 tryPropagateBranch(bb);
3467
3468 return true;
3469 }
3470
3471 bool
3472 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
3473 {
3474 BasicBlock *bL = NULL, *bR = NULL;
3475 unsigned int nL = 0, nR = 0, limit = 12;
3476 Instruction *insn;
3477 unsigned int mask;
3478
3479 mask = bb->initiatesSimpleConditional();
3480 if (!mask)
3481 return false;
3482
3483 assert(bb->getExit());
3484 Value *pred = bb->getExit()->getPredicate();
3485 assert(pred);
3486
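// (A condition built purely from immediates / constant loads is uniform
// across the warp, so a real branch stays cheap and predication only
// pays off for very short blocks -- presumably the reason for the
// smaller limit here.)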
3487 if (isConstantCondition(pred))
3488 limit = 4;
3489
3490 Graph::EdgeIterator ei = bb->cfg.outgoing();
3491
3492 if (mask & 1) {
3493 bL = BasicBlock::get(ei.getNode());
3494 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
3495 if (!mayPredicate(insn, pred))
3496 return false;
3497 if (nL > limit)
3498 return false; // too long, do a real branch
3499 }
3500 ei.next();
3501
3502 if (mask & 2) {
3503 bR = BasicBlock::get(ei.getNode());
3504 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
3505 if (!mayPredicate(insn, pred))
3506 return false;
3507 if (nR > limit)
3508 return false; // too long, do a real branch
3509 }
3510
3511 if (bL)
3512 predicateInstructions(bL, pred, bb->getExit()->cc);
3513 if (bR)
3514 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
3515
3516 if (bb->joinAt) {
3517 bb->remove(bb->joinAt);
3518 bb->joinAt = NULL;
3519 }
3520 removeFlow(bb->getExit()); // delete the branch/join at the fork point
3521
3522 // remove potential join operations at the end of the conditional
3523 if (prog->getTarget()->joinAnterior) {
3524 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
3525 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
3526 removeFlow(bb->getEntry());
3527 }
3528
3529 return true;
3530 }
3531
3532 // =============================================================================
3533
3534 // Fold Immediate into MAD; must be done after register allocation due to
3535 // constraint SDST == SSRC2
3536 // TODO:
3537 // Does NVC0+ have other situations where this pass makes sense?
3538 class PostRaLoadPropagation : public Pass
3539 {
3540 private:
3541 virtual bool visit(Instruction *);
3542
3543 void handleMADforNV50(Instruction *);
3544 void handleMADforNVC0(Instruction *);
3545 };
3546
3547 static bool
3548 post_ra_dead(Instruction *i)
3549 {
3550 for (int d = 0; i->defExists(d); ++d)
3551 if (i->getDef(d)->refCount())
3552 return false;
3553 return true;
3554 }
3555
3556 // Fold Immediate into MAD; must be done after register allocation due to
3557 // constraint SDST == SSRC2
3558 void
3559 PostRaLoadPropagation::handleMADforNV50(Instruction *i)
3560 {
3561 if (i->def(0).getFile() != FILE_GPR ||
3562 i->src(0).getFile() != FILE_GPR ||
3563 i->src(1).getFile() != FILE_GPR ||
3564 i->src(2).getFile() != FILE_GPR ||
3565 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3566 return;
3567
3568 if (i->getDef(0)->reg.data.id >= 64 ||
3569 i->getSrc(0)->reg.data.id >= 64)
3570 return;
3571
3572 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
3573 return;
3574
3575 if (i->getPredicate())
3576 return;
3577
3578 Value *vtmp;
3579 Instruction *def = i->getSrc(1)->getInsn();
3580
3581 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
3582 def = def->getSrc(0)->getInsn();
3583 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
3584 vtmp = i->getSrc(1);
3585 if (isFloatType(i->sType)) {
3586 i->setSrc(1, def->getSrc(0));
3587 } else {
3588 ImmediateValue val;
3589 // getImmediate() has side-effects on the argument so this *shouldn't*
3590 // be folded into the assert()
3591 ASSERTED bool ret = def->src(0).getImmediate(val);
3592 assert(ret);
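// For 16-bit operands the register id's low bit selects which half of
// the 32-bit value is meant, so odd ids take the top halfword of the
// immediate.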
3593 if (i->getSrc(1)->reg.data.id & 1)
3594 val.reg.data.u32 >>= 16;
3595 val.reg.data.u32 &= 0xffff;
3596 i->setSrc(1, new_ImmediateValue(prog, val.reg.data.u32));
3597 }
3598
3599 /* There's no post-RA dead code elimination, so do it here
3600 * XXX: if we add more code-removing post-RA passes, we might
3601 * want to create a post-RA dead-code elim pass */
3602 if (post_ra_dead(vtmp->getInsn())) {
3603 Value *src = vtmp->getInsn()->getSrc(0);
3604 // Careful -- splits will have already been removed from the
3605 // function. Don't double-delete.
3606 if (vtmp->getInsn()->bb)
3607 delete_Instruction(prog, vtmp->getInsn());
3608 if (src->getInsn() && post_ra_dead(src->getInsn()))
3609 delete_Instruction(prog, src->getInsn());
3610 }
3611 }
3612 }
3613
3614 void
3615 PostRaLoadPropagation::handleMADforNVC0(Instruction *i)
3616 {
3617 if (i->def(0).getFile() != FILE_GPR ||
3618 i->src(0).getFile() != FILE_GPR ||
3619 i->src(1).getFile() != FILE_GPR ||
3620 i->src(2).getFile() != FILE_GPR ||
3621 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3622 return;
3623
3624 // TODO: gm107 can also do this for S32, maybe other chipsets as well
3625 if (i->dType != TYPE_F32)
3626 return;
3627
3628 if ((i->src(2).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3629 return;
3630
3631 ImmediateValue val;
3632 int s;
3633
3634 if (i->src(0).getImmediate(val))
3635 s = 1;
3636 else if (i->src(1).getImmediate(val))
3637 s = 0;
3638 else
3639 return;
3640
3641 if ((i->src(s).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3642 return;
3643
3644 if (s == 1)
3645 i->swapSources(0, 1);
3646
3647 Instruction *imm = i->getSrc(1)->getInsn();
3648 i->setSrc(1, imm->getSrc(0));
3649 if (post_ra_dead(imm))
3650 delete_Instruction(prog, imm);
3651 }
3652
3653 bool
3654 PostRaLoadPropagation::visit(Instruction *i)
3655 {
3656 switch (i->op) {
3657 case OP_FMA:
3658 case OP_MAD:
3659 if (prog->getTarget()->getChipset() < 0xc0)
3660 handleMADforNV50(i);
3661 else
3662 handleMADforNVC0(i);
3663 break;
3664 default:
3665 break;
3666 }
3667
3668 return true;
3669 }
3670
3671 // =============================================================================
3672
3673 // Common subexpression elimination. Stupid O(n^2) implementation.
3674 class LocalCSE : public Pass
3675 {
3676 private:
3677 virtual bool visit(BasicBlock *);
3678
3679 inline bool tryReplace(Instruction **, Instruction *);
3680
3681 DLList ops[OP_LAST + 1];
3682 };
3683
3684 class GlobalCSE : public Pass
3685 {
3686 private:
3687 virtual bool visit(BasicBlock *);
3688 };
3689
3690 bool
3691 Instruction::isActionEqual(const Instruction *that) const
3692 {
3693 if (this->op != that->op ||
3694 this->dType != that->dType ||
3695 this->sType != that->sType)
3696 return false;
3697 if (this->cc != that->cc)
3698 return false;
3699
3700 if (this->asTex()) {
3701 if (memcmp(&this->asTex()->tex,
3702 &that->asTex()->tex,
3703 sizeof(this->asTex()->tex)))
3704 return false;
3705 } else
3706 if (this->asCmp()) {
3707 if (this->asCmp()->setCond != that->asCmp()->setCond)
3708 return false;
3709 } else
3710 if (this->asFlow()) {
3711 return false;
3712 } else
3713 if (this->op == OP_PHI && this->bb != that->bb) {
3714 /* TODO: we could probably be a bit smarter here by following the
3715 * control flow, but honestly, it is quite painful to check */
3716 return false;
3717 } else {
3718 if (this->ipa != that->ipa ||
3719 this->lanes != that->lanes ||
3720 this->perPatch != that->perPatch)
3721 return false;
3722 if (this->postFactor != that->postFactor)
3723 return false;
3724 }
3725
3726 if (this->subOp != that->subOp ||
3727 this->saturate != that->saturate ||
3728 this->rnd != that->rnd ||
3729 this->ftz != that->ftz ||
3730 this->dnz != that->dnz ||
3731 this->cache != that->cache ||
3732 this->mask != that->mask)
3733 return false;
3734
3735 return true;
3736 }
3737
3738 bool
3739 Instruction::isResultEqual(const Instruction *that) const
3740 {
3741 unsigned int d, s;
3742
3743 // NOTE: location of discard only affects tex with liveOnly and quadops
3744 if (!this->defExists(0) && this->op != OP_DISCARD)
3745 return false;
3746
3747 if (!isActionEqual(that))
3748 return false;
3749
3750 if (this->predSrc != that->predSrc)
3751 return false;
3752
3753 for (d = 0; this->defExists(d); ++d) {
3754 if (!that->defExists(d) ||
3755 !this->getDef(d)->equals(that->getDef(d), false))
3756 return false;
3757 }
3758 if (that->defExists(d))
3759 return false;
3760
3761 for (s = 0; this->srcExists(s); ++s) {
3762 if (!that->srcExists(s))
3763 return false;
3764 if (this->src(s).mod != that->src(s).mod)
3765 return false;
3766 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3767 return false;
3768 }
3769 if (that->srcExists(s))
3770 return false;
3771
3772 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3773 switch (src(0).getFile()) {
3774 case FILE_MEMORY_CONST:
3775 case FILE_SHADER_INPUT:
3776 return true;
3777 case FILE_SHADER_OUTPUT:
3778 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3779 default:
3780 return false;
3781 }
3782 }
3783
3784 return true;
3785 }
3786
3787 // pull through common expressions from different in-blocks
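// e.g. with d1 = add(a, b) in BB:1 and d2 = add(a, b) in BB:2 feeding
// phi(d1, d2): hoist one of the equal adds into this block, give it the
// phi's def and drop the phi (the other add then dies as dead code).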
3788 bool
3789 GlobalCSE::visit(BasicBlock *bb)
3790 {
3791 Instruction *phi, *next, *ik;
3792 int s;
3793
3794 // TODO: maybe do this with OP_UNION, too
3795
3796 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3797 next = phi->next;
3798 if (phi->getSrc(0)->refCount() > 1)
3799 continue;
3800 ik = phi->getSrc(0)->getInsn();
3801 if (!ik)
3802 continue; // probably a function input
3803 if (ik->defCount(0xff) > 1)
3804 continue; // too painful to check if we can really push this forward
3805 for (s = 1; phi->srcExists(s); ++s) {
3806 if (phi->getSrc(s)->refCount() > 1)
3807 break;
3808 if (!phi->getSrc(s)->getInsn() ||
3809 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3810 break;
3811 }
3812 if (!phi->srcExists(s)) {
3813 assert(ik->op != OP_PHI);
3814 Instruction *entry = bb->getEntry();
3815 ik->bb->remove(ik);
3816 if (!entry || entry->op != OP_JOIN)
3817 bb->insertHead(ik);
3818 else
3819 bb->insertAfter(entry, ik);
3820 ik->setDef(0, phi->getDef(0));
3821 delete_Instruction(prog, phi);
3822 }
3823 }
3824
3825 return true;
3826 }
3827
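// If *ptr computes the same result as the (earlier) instruction i, rewrite
// all uses of *ptr's defs to use i's defs instead, delete *ptr, and clear the
// pointer so the caller knows the instruction is gone.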
bool
LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
{
   Instruction *old = *ptr;

   // TODO: maybe relax this later (causes trouble with OP_UNION)
   if (i->isPredicated())
      return false;

   if (!old->isResultEqual(i))
      return false;

   for (int d = 0; old->defExists(d); ++d)
      old->def(d).replace(i->getDef(d), false);
   delete_Instruction(prog, old);
   *ptr = NULL;
   return true;
}

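// For each instruction, look for an earlier instruction in the same block
// computing the same value. If the instruction has an LValue source, only the
// other users of its least-referenced source are scanned (a cheap way to
// bound the search); otherwise every instruction of the same opcode seen so
// far (the ops[] buckets) is a candidate. Sweeps repeat until one of them
// replaces nothing.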
bool
LocalCSE::visit(BasicBlock *bb)
{
   unsigned int replaced;

   do {
      Instruction *ir, *next;

      replaced = 0;

      // will need to know the order of instructions
      int serial = 0;
      for (ir = bb->getFirst(); ir; ir = ir->next)
         ir->serial = serial++;

      for (ir = bb->getFirst(); ir; ir = next) {
         int s;
         Value *src = NULL;

         next = ir->next;

         if (ir->fixed) {
            ops[ir->op].insert(ir);
            continue;
         }

         for (s = 0; ir->srcExists(s); ++s)
            if (ir->getSrc(s)->asLValue())
               if (!src || ir->getSrc(s)->refCount() < src->refCount())
                  src = ir->getSrc(s);

         if (src) {
            for (Value::UseIterator it = src->uses.begin();
                 it != src->uses.end(); ++it) {
               Instruction *ik = (*it)->getInsn();
               if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
                  if (tryReplace(&ir, ik))
                     break;
            }
         } else {
            DLLIST_FOR_EACH(&ops[ir->op], iter)
            {
               Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
               if (tryReplace(&ir, ik))
                  break;
            }
         }

         if (ir)
            ops[ir->op].insert(ir);
         else
            ++replaced;
      }
      for (unsigned int i = 0; i <= OP_LAST; ++i)
         ops[i].clear();

   } while (replaced);

   return true;
}


// =============================================================================

// Remove computations of unused values.
class DeadCodeElim : public Pass
{
public:
   DeadCodeElim() : deadCount(0) {}
   bool buryAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void checkSplitLoad(Instruction *ld); // for partially dead loads

   unsigned int deadCount;
};

bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

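// Walk each block backwards, burying dead instructions. Instructions that are
// live only for their side effects get their unused definitions detached, and
// partially dead multi-register loads are shrunk via checkSplitLoad().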
bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *prev;

   for (Instruction *i = bb->getExit(); i; i = prev) {
      prev = i->prev;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) &&
          i->subOp == 0 &&
          (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      } else
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
             i->op == OP_SUREDB) {
            const Target *targ = prog->getTarget();
            if (targ->getChipset() >= NVISA_GF100_CHIPSET ||
                i->subOp != NV50_IR_SUBOP_ATOM_CAS)
               i->setDef(0, NULL);
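            // An atomic exchange whose fetched value is never read is just a
            // store (with CACHE_CV, presumably to keep the access coherent
            // with other atomics on the same location).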
            if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
               i->cache = CACHE_CV;
               i->op = OP_STORE;
               i->subOp = 0;
            }
         } else if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
            i->setDef(0, i->getDef(1));
            i->setDef(1, NULL);
         }
      }
   }
   return true;
}

// Each load can go into up to 4 destinations, any of which might potentially
// be dead (i.e. a hole). These can always be split into 2 loads, independent
// of where the holes are. We find the first contiguous region, put it into
// the first load, and then put the second contiguous region into the second
// load. There can be at most 2 contiguous regions.
//
// Note that there are some restrictions, for example it's not possible to do
// a 64-bit load that's not 64-bit aligned, so such a load has to be split
// up. Also, the hardware doesn't support 96-bit loads, so those have to be
// split into a 64-bit and a 32-bit load.
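//
// For example (schematically, with hypothetical registers and a dead second
// component of a 128-bit constant-buffer load):
//
//    ld u32 { %r0, <dead>, %r2, %r3 } c0[0x10]
//
// becomes
//
//    ld u32 %r0 c0[0x10]
//    ld u64 { %r2, %r3 } c0[0x18]
//
// The second region starts at offset 0x18 and is 8-byte aligned, so it can be
// fetched with a single 64-bit load.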
void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;

   // Compute address/width for first load
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }

   // Scale back the size of the first load until it can be loaded. This
   // typically happens for TYPE_B96 loads.
   while (n1 &&
          !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
                                                typeOfSize(size1))) {
      size1 -= def1[--n1]->reg.size;
      d--;
   }

   // Compute address/width for second load
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         assert(!size2 || !(addr2 & 0x7));
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else if (!size2) {
         assert(!n2);
         addr2 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }

   // Make sure that we've processed all the values
   for (; ld1->defExists(d); ++d)
      assert(!(mask & (1 << d)));

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                   \
   if (level >= (l)) {                      \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE) \
         INFO("PEEPHOLE: %s\n", #n);        \
      n pass;                               \
      if (!pass.f(this))                    \
         return false;                      \
   }

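// Peephole passes on the SSA-form program, in rough dependency order. The
// numeric argument to RUN_PASS is the minimum optimization level at which the
// pass runs; passes gated on level 0 effectively always run.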
bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(1, MergeSplits, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> fewer checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(0, Split64BitOpPreRA, run);
   RUN_PASS(2, LateAlgebraicOpt, run);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(1, IndirectPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

   return true;
}

bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   RUN_PASS(2, PostRaLoadPropagation, run);

   return true;
}

} // namespace nv50_ir