/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_target.h"
#include "codegen/nv50_ir_build_util.h"

extern "C" {
#include "util/u_math.h"
}

namespace nv50_ir {

bool
Instruction::isNop() const
{
   if (op == OP_PHI || op == OP_SPLIT || op == OP_MERGE || op == OP_CONSTRAINT)
      return true;
   if (terminator || join) // XXX: should terminator imply flow ?
      return false;
   if (op == OP_ATOM)
      return false;
   if (!fixed && op == OP_NOP)
      return true;

   if (defExists(0) && def(0).rep()->reg.data.id < 0) {
      for (int d = 1; defExists(d); ++d)
         if (def(d).rep()->reg.data.id >= 0)
            WARN("part of vector result is unused !\n");
      return true;
   }

   if (op == OP_MOV || op == OP_UNION) {
      if (!getDef(0)->equals(getSrc(0)))
         return false;
      if (op == OP_UNION)
         if (!def(0).rep()->equals(getSrc(1)))
            return false;
      return true;
   }

   return false;
}

bool Instruction::isDead() const
{
   if (op == OP_STORE ||
       op == OP_EXPORT ||
       op == OP_ATOM ||
       op == OP_SUSTB || op == OP_SUSTP || op == OP_SUREDP || op == OP_SUREDB ||
       op == OP_WRSV)
      return false;

   for (int d = 0; defExists(d); ++d)
      if (getDef(d)->refCount() || getDef(d)->reg.data.id >= 0)
         return false;

   if (terminator || asFlow())
      return false;
   if (fixed)
      return false;

   return true;
}

// =============================================================================

class CopyPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// Propagate all MOVs forward to make subsequent optimization easier, except if
// the sources stem from a phi, in which case we don't want to mess up potential
// swaps $rX <-> $rY, i.e. do not create live range overlaps of phi src and def.
bool
CopyPropagation::visit(BasicBlock *bb)
{
   Instruction *mov, *si, *next;

   for (mov = bb->getEntry(); mov; mov = next) {
      next = mov->next;
      if (mov->op != OP_MOV || mov->fixed || !mov->getSrc(0)->asLValue())
         continue;
      if (mov->getPredicate())
         continue;
      if (mov->def(0).getFile() != mov->src(0).getFile())
         continue;
      si = mov->getSrc(0)->getInsn();
      if (mov->getDef(0)->reg.data.id < 0 && si && si->op != OP_PHI) {
         // propagate
         mov->def(0).replace(mov->getSrc(0), false);
         delete_Instruction(prog, mov);
      }
   }
   return true;
}

// =============================================================================

class MergeSplits : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

// For SPLIT / MERGE pairs that operate on the same registers, replace the
// post-merge def with the SPLIT's source.
bool
MergeSplits::visit(BasicBlock *bb)
{
   Instruction *i, *next, *si;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op != OP_MERGE || typeSizeof(i->dType) != 8)
         continue;
      si = i->getSrc(0)->getInsn();
      if (si->op != OP_SPLIT || si != i->getSrc(1)->getInsn())
         continue;
      i->def(0).replace(si->getSrc(0), false);
      delete_Instruction(prog, i);
   }

   return true;
}

// =============================================================================

class LoadPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void checkSwapSrc01(Instruction *);

   bool isCSpaceLoad(Instruction *);
   bool isImmdLoad(Instruction *);
   bool isAttribOrSharedLoad(Instruction *);
};

bool
LoadPropagation::isCSpaceLoad(Instruction *ld)
{
   return ld && ld->op == OP_LOAD && ld->src(0).getFile() == FILE_MEMORY_CONST;
}

bool
LoadPropagation::isImmdLoad(Instruction *ld)
{
   if (!ld || (ld->op != OP_MOV) ||
       ((typeSizeof(ld->dType) != 4) && (typeSizeof(ld->dType) != 8)))
      return false;

   // A 0 can be replaced with a register, so it doesn't count as an immediate.
   ImmediateValue val;
   return ld->src(0).getImmediate(val) && !val.isInteger(0);
}

bool
LoadPropagation::isAttribOrSharedLoad(Instruction *ld)
{
   return ld &&
      (ld->op == OP_VFETCH ||
       (ld->op == OP_LOAD &&
        (ld->src(0).getFile() == FILE_SHADER_INPUT ||
         ld->src(0).getFile() == FILE_MEMORY_SHARED)));
}

void
LoadPropagation::checkSwapSrc01(Instruction *insn)
{
   const Target *targ = prog->getTarget();
   if (!targ->getOpInfo(insn).commutative) {
      if (insn->op != OP_SET && insn->op != OP_SLCT &&
          insn->op != OP_SUB && insn->op != OP_XMAD)
         return;
      // XMAD is only commutative if neither the CBCC nor the MRG flag is set.
      if (insn->op == OP_XMAD &&
          (insn->subOp & NV50_IR_SUBOP_XMAD_CMODE_MASK) == NV50_IR_SUBOP_XMAD_CBCC)
         return;
      if (insn->op == OP_XMAD && (insn->subOp & NV50_IR_SUBOP_XMAD_MRG))
         return;
   }
   if (insn->src(1).getFile() != FILE_GPR)
      return;
   // This is the special OP_SET used for alphatesting; we can't reverse its
   // arguments as that would confuse the fixup code.
   if (insn->op == OP_SET && insn->subOp)
      return;

   Instruction *i0 = insn->getSrc(0)->getInsn();
   Instruction *i1 = insn->getSrc(1)->getInsn();

   // Swap sources so that the less frequently used one ends up in position 1,
   // where it can be inlined; optimistically, its defining load can then be
   // removed entirely.
   int i0refs = insn->getSrc(0)->refCount();
   int i1refs = insn->getSrc(1)->refCount();

   if ((isCSpaceLoad(i0) || isImmdLoad(i0)) && targ->insnCanLoad(insn, 1, i0)) {
      if ((!isImmdLoad(i1) && !isCSpaceLoad(i1)) ||
          !targ->insnCanLoad(insn, 1, i1) ||
          i0refs < i1refs)
         insn->swapSources(0, 1);
      else
         return;
   } else
   if (isAttribOrSharedLoad(i1)) {
      if (!isAttribOrSharedLoad(i0))
         insn->swapSources(0, 1);
      else
         return;
   } else {
      return;
   }

   if (insn->op == OP_SET || insn->op == OP_SET_AND ||
       insn->op == OP_SET_OR || insn->op == OP_SET_XOR)
      insn->asCmp()->setCond = reverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SLCT)
      insn->asCmp()->setCond = inverseCondCode(insn->asCmp()->setCond);
   else
   if (insn->op == OP_SUB) {
      insn->src(0).mod = insn->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      insn->src(1).mod = insn->src(1).mod ^ Modifier(NV50_IR_MOD_NEG);
   } else
   if (insn->op == OP_XMAD) {
      // swap h1 flags
      uint16_t h1 = (insn->subOp >> 1 & NV50_IR_SUBOP_XMAD_H1(0)) |
                    (insn->subOp << 1 & NV50_IR_SUBOP_XMAD_H1(1));
      insn->subOp = (insn->subOp & ~NV50_IR_SUBOP_XMAD_H1_MASK) | h1;
   }
}

bool
LoadPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (i->op == OP_CALL) // calls have args as sources, they must be in regs
         continue;

      if (i->op == OP_PFETCH) // pfetch expects arg1 to be a reg
         continue;

      if (i->srcExists(1))
         checkSwapSrc01(i);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *ld = i->getSrc(s)->getInsn();

         if (!ld || ld->fixed || (ld->op != OP_LOAD && ld->op != OP_MOV))
            continue;
         if (!targ->insnCanLoad(i, s, ld))
            continue;

         // propagate !
         i->setSrc(s, ld->getSrc(0));
         if (ld->src(0).isIndirect(0))
            i->setIndirect(s, 0, ld->getIndirect(0, 0));

         if (ld->getDef(0)->refCount() == 0)
            delete_Instruction(prog, ld);
      }
   }
   return true;
}

// =============================================================================

class IndirectPropagation : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   BuildUtil bld;
};

bool
IndirectPropagation::visit(BasicBlock *bb)
{
   const Target *targ = prog->getTarget();
   Instruction *next;

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      bld.setPosition(i, false);

      for (int s = 0; i->srcExists(s); ++s) {
         Instruction *insn;
         ImmediateValue imm;
         if (!i->src(s).isIndirect(0))
            continue;
         insn = i->getIndirect(s, 0)->getInsn();
         if (!insn)
            continue;
         if (insn->op == OP_ADD && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SUB && !isFloatType(insn->dType)) {
            if (insn->src(0).getFile() != targ->nativeFile(FILE_ADDRESS) ||
                !insn->src(1).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, -imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, insn->getSrc(0));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset -= imm.reg.data.u32;
         } else if (insn->op == OP_MOV) {
            if (!insn->src(0).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, NULL);
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         } else if (insn->op == OP_SHLADD) {
            if (!insn->src(2).getImmediate(imm) ||
                !targ->insnCanLoadOffset(i, s, imm.reg.data.s32))
               continue;
            i->setIndirect(s, 0, bld.mkOp2v(
               OP_SHL, TYPE_U32, bld.getSSA(), insn->getSrc(0), insn->getSrc(1)));
            i->setSrc(s, cloneShallow(func, i->getSrc(s)));
            i->src(s).get()->reg.data.offset += imm.reg.data.u32;
         }
      }
   }
   return true;
}

// =============================================================================

// Evaluate constant expressions.
class ConstantFolding : public Pass
{
public:
   bool foldAll(Program *);

private:
   virtual bool visit(BasicBlock *);

   void expr(Instruction *, ImmediateValue&, ImmediateValue&);
   void expr(Instruction *, ImmediateValue&, ImmediateValue&, ImmediateValue&);
   /* true if i was deleted */
   bool opnd(Instruction *i, ImmediateValue&, int s);
   void opnd3(Instruction *, ImmediateValue&);

   void unary(Instruction *, const ImmediateValue&);

   void tryCollapseChainedMULs(Instruction *, const int s, ImmediateValue&);

   CmpInstruction *findOriginForTestWithZero(Value *);

   bool createMul(DataType ty, Value *def, Value *a, int64_t b, Value *c);

   unsigned int foldCount;

   BuildUtil bld;
};

// TODO: remember generated immediates and only revisit these
bool
ConstantFolding::foldAll(Program *prog)
{
   unsigned int iterCount = 0;
   do {
      foldCount = 0;
      if (!run(prog))
         return false;
   } while (foldCount && ++iterCount < 2);
   return true;
}

bool
ConstantFolding::visit(BasicBlock *bb)
{
   Instruction *i, *next;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;
      if (i->op == OP_MOV || i->op == OP_CALL)
         continue;

      ImmediateValue src0, src1, src2;

      if (i->srcExists(2) &&
          i->src(0).getImmediate(src0) &&
          i->src(1).getImmediate(src1) &&
          i->src(2).getImmediate(src2)) {
         expr(i, src0, src1, src2);
      } else
      if (i->srcExists(1) &&
          i->src(0).getImmediate(src0) && i->src(1).getImmediate(src1)) {
         expr(i, src0, src1);
      } else
      if (i->srcExists(0) && i->src(0).getImmediate(src0)) {
         if (opnd(i, src0, 0))
            continue;
      } else
      if (i->srcExists(1) && i->src(1).getImmediate(src1)) {
         if (opnd(i, src1, 1))
            continue;
      }
      if (i->srcExists(2) && i->src(2).getImmediate(src2))
         opnd3(i, src2);
   }
   return true;
}

CmpInstruction *
ConstantFolding::findOriginForTestWithZero(Value *value)
{
   if (!value)
      return NULL;
   Instruction *insn = value->getInsn();
   if (!insn)
      return NULL;

   if (insn->asCmp() && insn->op != OP_SLCT)
      return insn->asCmp();

   /* Sometimes mov's will sneak in as a result of other folding. This gets
    * cleaned up later.
    */
   if (insn->op == OP_MOV)
      return findOriginForTestWithZero(insn->getSrc(0));

   /* Deal with AND 1.0 here since nv50 can't fold into boolean float */
   if (insn->op == OP_AND) {
      int s = 0;
      ImmediateValue imm;
      if (!insn->src(s).getImmediate(imm)) {
         s = 1;
         if (!insn->src(s).getImmediate(imm))
            return NULL;
      }
      if (imm.reg.data.f32 != 1.0f)
         return NULL;
      /* TODO: Come up with a way to handle the condition being inverted */
      if (insn->src(!s).mod != Modifier(0))
         return NULL;
      return findOriginForTestWithZero(insn->getSrc(!s));
   }

   return NULL;
}

void
Modifier::applyTo(ImmediateValue& imm) const
{
   if (!bits) // avoid failure if imm.reg.type is unhandled (e.g. b128)
      return;
   switch (imm.reg.type) {
   case TYPE_F32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f32 = fabsf(imm.reg.data.f32);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f32 = -imm.reg.data.f32;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f32 < 0.0f)
            imm.reg.data.f32 = 0.0f;
         else
         if (imm.reg.data.f32 > 1.0f)
            imm.reg.data.f32 = 1.0f;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   case TYPE_S8: // NOTE: will be extended
   case TYPE_S16:
   case TYPE_S32:
   case TYPE_U8: // NOTE: treated as signed
   case TYPE_U16:
   case TYPE_U32:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.s32 = (imm.reg.data.s32 >= 0) ?
            imm.reg.data.s32 : -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.s32 = -imm.reg.data.s32;
      if (bits & NV50_IR_MOD_NOT)
         imm.reg.data.s32 = ~imm.reg.data.s32;
      break;

   case TYPE_F64:
      if (bits & NV50_IR_MOD_ABS)
         imm.reg.data.f64 = fabs(imm.reg.data.f64);
      if (bits & NV50_IR_MOD_NEG)
         imm.reg.data.f64 = -imm.reg.data.f64;
      if (bits & NV50_IR_MOD_SAT) {
         if (imm.reg.data.f64 < 0.0)
            imm.reg.data.f64 = 0.0;
         else
         if (imm.reg.data.f64 > 1.0)
            imm.reg.data.f64 = 1.0;
      }
      assert(!(bits & NV50_IR_MOD_NOT));
      break;

   default:
      assert(!"invalid/unhandled type");
      imm.reg.data.u64 = 0;
      break;
   }
}

operation
Modifier::getOp() const
{
   switch (bits) {
   case NV50_IR_MOD_ABS: return OP_ABS;
   case NV50_IR_MOD_NEG: return OP_NEG;
   case NV50_IR_MOD_SAT: return OP_SAT;
   case NV50_IR_MOD_NOT: return OP_NOT;
   case 0:
      return OP_MOV;
   default:
      return OP_CVT;
   }
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0, ImmediateValue &imm1)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg;
   struct Storage res;
   DataType type = i->dType;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_SGXT: {
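      // Sign-extend the low 'bits' bits of a: mask them off, then subtract
      // 2^bits if the field's top bit is set, e.g. bits = 8, a = 0xff -> -1.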
      int bits = b->data.u32;
      if (bits) {
         uint32_t data = a->data.u32 & (0xffffffff >> (32 - bits));
         if (bits < 32 && (data & (1 << (bits - 1))))
            data = data - (1 << bits);
         res.data.u32 = data;
      }
      break;
   }
   case OP_BMSK:
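      // Build a mask of b consecutive ones starting at bit a,
      // e.g. a = 4, b = 3 -> 0x70.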
      res.data.u32 = ((1 << b->data.u32) - 1) << a->data.u32;
      break;
   case OP_MAD:
   case OP_FMA:
   case OP_MUL:
      if (i->dnz && i->dType == TYPE_F32) {
         if (!isfinite(a->data.f32))
            a->data.f32 = 0.0f;
         if (!isfinite(b->data.f32))
            b->data.f32 = 0.0f;
      }
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor);
         break;
      case TYPE_F64: res.data.f64 = a->data.f64 * b->data.f64; break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32) >> 32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32) >> 32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_DIV:
      if (b->data.u32 == 0)
         break;
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 / b->data.f64; break;
      case TYPE_S32: res.data.s32 = a->data.s32 / b->data.s32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_ADD:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 + b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SUB:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;
      case TYPE_F64: res.data.f64 = a->data.f64 - b->data.f64; break;
      case TYPE_S32:
      case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_POW:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = pow(a->data.f64, b->data.f64); break;
      default:
         return;
      }
      break;
   case OP_MAX:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MAX2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MAX2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_MIN:
      switch (i->dType) {
      case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;
      case TYPE_F64: res.data.f64 = MIN2(a->data.f64, b->data.f64); break;
      case TYPE_S32: res.data.s32 = MIN2(a->data.s32, b->data.s32); break;
      case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;
      default:
         return;
      }
      break;
   case OP_AND:
      res.data.u64 = a->data.u64 & b->data.u64;
      break;
   case OP_OR:
      res.data.u64 = a->data.u64 | b->data.u64;
      break;
   case OP_XOR:
      res.data.u64 = a->data.u64 ^ b->data.u64;
      break;
   case OP_SHL:
      res.data.u32 = a->data.u32 << b->data.u32;
      break;
   case OP_SHR:
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = a->data.s32 >> b->data.u32; break;
      case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;
      default:
         return;
      }
      break;
   case OP_SLCT:
      if (a->data.u32 != b->data.u32)
         return;
      res.data.u32 = a->data.u32;
      break;
   case OP_EXTBF: {
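      // Extract a 'width'-bit field at 'offset'. The field is shifted to the
      // top of the word first so the final (arithmetic or logical) right
      // shift sign-/zero-extends it, e.g. offset = 8, width = 8 on unsigned
      // data is (x << 16) >> 24 == (x >> 8) & 0xff.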
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      int rshift = offset;
      int lshift = 0;
      if (width == 0) {
         res.data.u32 = 0;
         break;
      }
      if (width + offset < 32) {
         rshift = 32 - width;
         lshift = 32 - width - offset;
      }
      if (i->subOp == NV50_IR_SUBOP_EXTBF_REV)
         res.data.u32 = util_bitreverse(a->data.u32);
      else
         res.data.u32 = a->data.u32;
      switch (i->dType) {
      case TYPE_S32: res.data.s32 = (res.data.s32 << lshift) >> rshift; break;
      case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;
      default:
         return;
      }
      break;
   }
   case OP_POPCNT:
      res.data.u32 = util_bitcount(a->data.u32 & b->data.u32);
      break;
   case OP_PFETCH:
      // The two arguments to pfetch are logically added together. Normally
      // the second argument will not be constant, but that can happen.
      res.data.u32 = a->data.u32 + b->data.u32;
      type = TYPE_U32;
      break;
   case OP_MERGE:
      switch (i->dType) {
      case TYPE_U64:
      case TYPE_S64:
      case TYPE_F64:
         res.data.u64 = (((uint64_t)b->data.u32) << 32) | a->data.u32;
         break;
      default:
         return;
      }
      break;
   default:
      return;
   }
   ++foldCount;

   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->postFactor = 0;

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = type;
   i->getSrc(0)->reg.size = typeSizeof(type);

   switch (i->op) {
   case OP_MAD:
   case OP_FMA: {
      ImmediateValue src0, src1 = *i->getSrc(0)->asImm();

      // Move the immediate into position 1, where we know it might be
      // emittable. However, it might not be anyway, as there may be other
      // restrictions, so move it into a separate LValue.
      bld.setPosition(i, false);
      i->op = OP_ADD;
      i->dnz = 0;
      i->setSrc(1, bld.mkMov(bld.getSSA(type), i->getSrc(0), type)->getDef(0));
      i->setSrc(0, i->getSrc(2));
      i->src(0).mod = i->src(2).mod;
      i->setSrc(2, NULL);

      if (i->src(0).getImmediate(src0))
         expr(i, src0, src1);
      else
         opnd(i, src1, 1);
      break;
   }
   case OP_PFETCH:
      // Leave PFETCH alone... we just folded its 2 args into 1.
      break;
   default:
      i->op = i->saturate ? OP_SAT : OP_MOV;
      if (i->saturate)
         unary(i, *i->getSrc(0)->asImm());
      break;
   }
   i->subOp = 0;
}

void
ConstantFolding::expr(Instruction *i,
                      ImmediateValue &imm0,
                      ImmediateValue &imm1,
                      ImmediateValue &imm2)
{
   struct Storage *const a = &imm0.reg, *const b = &imm1.reg, *const c = &imm2.reg;
   struct Storage res;

   memset(&res.data, 0, sizeof(res.data));

   switch (i->op) {
   case OP_LOP3_LUT:
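      // subOp is an 8-entry truth table indexed by the bit triple
      // (a << 2 | b << 1 | c), applied per bit position; e.g. subOp 0xc0
      // computes a & b, and subOp 0xfe computes a | b | c.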
      for (int n = 0; n < 32; n++) {
         uint8_t lut = ((a->data.u32 >> n) & 1) << 2 |
                       ((b->data.u32 >> n) & 1) << 1 |
                       ((c->data.u32 >> n) & 1);
         res.data.u32 |= !!(i->subOp & (1 << lut)) << n;
      }
      break;
   case OP_PERMT:
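      // PRMT treats {c, a} as an 8-byte vector; each nibble of b selects one
      // of those bytes for the corresponding result byte, e.g. b = 0x3210 is
      // the identity on a, and b = 0x5410 merges the low halves of a and c.
      // Only the default mode (subOp 0) is folded here.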
      if (!i->subOp) {
         uint64_t input = (uint64_t)c->data.u32 << 32 | a->data.u32;
         uint16_t permt = b->data.u32;
         for (int n = 0 ; n < 4; n++, permt >>= 4)
            res.data.u32 |= ((input >> ((permt & 0xf) * 8)) & 0xff) << n * 8;
      } else
         return;
      break;
   case OP_INSBF: {
      int offset = b->data.u32 & 0xff;
      int width = (b->data.u32 >> 8) & 0xff;
      unsigned bitmask = ((1 << width) - 1) << offset;
      res.data.u32 = ((a->data.u32 << offset) & bitmask) | (c->data.u32 & ~bitmask);
      break;
   }
   case OP_MAD:
   case OP_FMA: {
      switch (i->dType) {
      case TYPE_F32:
         res.data.f32 = a->data.f32 * b->data.f32 * exp2f(i->postFactor) +
            c->data.f32;
         break;
      case TYPE_F64:
         res.data.f64 = a->data.f64 * b->data.f64 + c->data.f64;
         break;
      case TYPE_S32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.s32 = ((int64_t)a->data.s32 * b->data.s32 >> 32) + c->data.s32;
            break;
         }
         /* fallthrough */
      case TYPE_U32:
         if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
            res.data.u32 = ((uint64_t)a->data.u32 * b->data.u32 >> 32) + c->data.u32;
            break;
         }
         res.data.u32 = a->data.u32 * b->data.u32 + c->data.u32;
         break;
      default:
         return;
      }
      break;
   }
   case OP_SHLADD:
      res.data.u32 = (a->data.u32 << b->data.u32) + c->data.u32;
      break;
   default:
      return;
   }

   ++foldCount;
   i->src(0).mod = Modifier(0);
   i->src(1).mod = Modifier(0);
   i->src(2).mod = Modifier(0);

   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.u32));
   i->setSrc(1, NULL);
   i->setSrc(2, NULL);

   i->getSrc(0)->reg.data = res.data;
   i->getSrc(0)->reg.type = i->dType;
   i->getSrc(0)->reg.size = typeSizeof(i->dType);

   i->op = OP_MOV;
}

void
ConstantFolding::unary(Instruction *i, const ImmediateValue &imm)
{
   Storage res;

   if (i->dType != TYPE_F32)
      return;
   switch (i->op) {
   case OP_NEG: res.data.f32 = -imm.reg.data.f32; break;
   case OP_ABS: res.data.f32 = fabsf(imm.reg.data.f32); break;
   case OP_SAT: res.data.f32 = SATURATE(imm.reg.data.f32); break;
   case OP_RCP: res.data.f32 = 1.0f / imm.reg.data.f32; break;
   case OP_RSQ: res.data.f32 = 1.0f / sqrtf(imm.reg.data.f32); break;
   case OP_LG2: res.data.f32 = log2f(imm.reg.data.f32); break;
   case OP_EX2: res.data.f32 = exp2f(imm.reg.data.f32); break;
   case OP_SIN: res.data.f32 = sinf(imm.reg.data.f32); break;
   case OP_COS: res.data.f32 = cosf(imm.reg.data.f32); break;
   case OP_SQRT: res.data.f32 = sqrtf(imm.reg.data.f32); break;
   case OP_PRESIN:
   case OP_PREEX2:
      // these should be handled in subsequent OP_SIN/COS/EX2
      res.data.f32 = imm.reg.data.f32;
      break;
   default:
      return;
   }
   i->op = OP_MOV;
   i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res.data.f32));
   i->src(0).mod = Modifier(0);
}

void
ConstantFolding::tryCollapseChainedMULs(Instruction *mul2,
                                        const int s, ImmediateValue& imm2)
{
   const int t = s ? 0 : 1;
   Instruction *insn;
   Instruction *mul1 = NULL; // mul1 before mul2
   int e = 0;
   float f = imm2.reg.data.f32 * exp2f(mul2->postFactor);
   ImmediateValue imm1;

   assert(mul2->op == OP_MUL && mul2->dType == TYPE_F32);

   if (mul2->getSrc(t)->refCount() == 1) {
      insn = mul2->getSrc(t)->getInsn();
      if (!mul2->src(t).mod && insn->op == OP_MUL && insn->dType == TYPE_F32)
         mul1 = insn;
      if (mul1 && !mul1->saturate) {
         int s1;

         if (mul1->src(s1 = 0).getImmediate(imm1) ||
             mul1->src(s1 = 1).getImmediate(imm1)) {
            bld.setPosition(mul1, false);
            // a = mul r, imm1
            // d = mul a, imm2 -> d = mul r, (imm1 * imm2)
            mul1->setSrc(s1, bld.loadImm(NULL, f * imm1.reg.data.f32));
            mul1->src(s1).mod = Modifier(0);
            mul2->def(0).replace(mul1->getDef(0), false);
            mul1->saturate = mul2->saturate;
         } else
         if (prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
            // c = mul a, b
            // d = mul c, imm -> d = mul_x_imm a, b
            mul1->postFactor = e;
            mul2->def(0).replace(mul1->getDef(0), false);
            if (f < 0)
               mul1->src(0).mod *= Modifier(NV50_IR_MOD_NEG);
            mul1->saturate = mul2->saturate;
         }
         return;
      }
   }
   if (mul2->getDef(0)->refCount() == 1 && !mul2->saturate) {
      // b = mul a, imm
      // d = mul b, c -> d = mul_x_imm a, c
      int s2, t2;
      insn = (*mul2->getDef(0)->uses.begin())->getInsn();
      if (!insn)
         return;
      mul1 = mul2;
      mul2 = NULL;
      s2 = insn->getSrc(0) == mul1->getDef(0) ? 0 : 1;
      t2 = s2 ? 0 : 1;
      if (insn->op == OP_MUL && insn->dType == TYPE_F32)
         if (!insn->src(s2).mod && !insn->src(t2).getImmediate(imm1))
            mul2 = insn;
      if (mul2 && prog->getTarget()->isPostMultiplySupported(OP_MUL, f, e)) {
         mul2->postFactor = e;
         mul2->setSrc(s2, mul1->src(t));
         if (f < 0)
            mul2->src(s2).mod *= Modifier(NV50_IR_MOD_NEG);
      }
   }
}

void
ConstantFolding::opnd3(Instruction *i, ImmediateValue &imm2)
{
   switch (i->op) {
   case OP_MAD:
   case OP_FMA:
      if (imm2.isInteger(0)) {
         i->op = OP_MUL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   case OP_SHLADD:
      if (imm2.isInteger(0)) {
         i->op = OP_SHL;
         i->setSrc(2, NULL);
         foldCount++;
         return;
      }
      break;
   default:
      return;
   }
}

bool
ConstantFolding::createMul(DataType ty, Value *def, Value *a, int64_t b, Value *c)
{
   const Target *target = prog->getTarget();
   int64_t absB = llabs(b);

   //a * (2^shl) -> a << shl
   if (b >= 0 && util_is_power_of_two_or_zero64(b)) {
      int shl = util_logbase2_64(b);

      Value *res = c ? bld.getSSA(typeSizeof(ty)) : def;
      bld.mkOp2(OP_SHL, ty, res, a, bld.mkImm(shl));
      if (c)
         bld.mkOp2(OP_ADD, ty, def, res, c);

      return true;
   }

   //a * (2^shl + 1) -> a << shl + a
   //a * -(2^shl + 1) -> -a << shl + a
   //a * (2^shl - 1) -> a << shl - a
   //a * -(2^shl - 1) -> -a << shl - a
   if (typeSizeof(ty) == 4 &&
       (util_is_power_of_two_or_zero64(absB - 1) ||
        util_is_power_of_two_or_zero64(absB + 1)) &&
       target->isOpSupported(OP_SHLADD, TYPE_U32)) {
      bool subA = util_is_power_of_two_or_zero64(absB + 1);
      int shl = subA ? util_logbase2_64(absB + 1) : util_logbase2_64(absB - 1);

      Value *res = c ? bld.getSSA() : def;
      Instruction *insn = bld.mkOp3(OP_SHLADD, TYPE_U32, res, a, bld.mkImm(shl), a);
      if (b < 0)
         insn->src(0).mod = Modifier(NV50_IR_MOD_NEG);
      if (subA)
         insn->src(2).mod = Modifier(NV50_IR_MOD_NEG);

      if (c)
         bld.mkOp2(OP_ADD, TYPE_U32, def, res, c);

      return true;
   }

   if (typeSizeof(ty) == 4 && b >= 0 && b <= 0xffff &&
       target->isOpSupported(OP_XMAD, TYPE_U32)) {
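      // b fits in 16 bits, so a * b + c decomposes into two 16-bit XMADs:
      // tmp = a.lo16 * b + c, then def = ((a.hi16 * b) << 16) + tmp
      // (PSL shifts the product left by 16, H1(0) selects src0's high half).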
      Value *tmp = bld.mkOp3v(OP_XMAD, TYPE_U32, bld.getSSA(),
                              a, bld.mkImm((uint32_t)b), c ? c : bld.mkImm(0));
      bld.mkOp3(OP_XMAD, TYPE_U32, def, a, bld.mkImm((uint32_t)b), tmp)->subOp =
         NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_H1(0);

      return true;
   }

   return false;
}

bool
ConstantFolding::opnd(Instruction *i, ImmediateValue &imm0, int s)
{
   const int t = !s;
   const operation op = i->op;
   Instruction *newi = i;
   bool deleted = false;

   switch (i->op) {
   case OP_SPLIT: {
      bld.setPosition(i, false);

      uint8_t size = i->getDef(0)->reg.size;
      uint8_t bitsize = size * 8;
      uint32_t mask = (1ULL << bitsize) - 1;
      assert(bitsize <= 32);

      uint64_t val = imm0.reg.data.u64;
      for (int8_t d = 0; i->defExists(d); ++d) {
         Value *def = i->getDef(d);
         assert(def->reg.size == size);

         newi = bld.mkMov(def, bld.mkImm((uint32_t)(val & mask)), TYPE_U32);
         val >>= bitsize;
      }
      delete_Instruction(prog, i);
      deleted = true;
      break;
   }
   case OP_MUL:
      if (i->dType == TYPE_F32 && !i->precise)
         tryCollapseChainedMULs(i, s, imm0);

      if (i->subOp == NV50_IR_SUBOP_MUL_HIGH) {
         assert(!isFloatType(i->sType));
         if (imm0.isInteger(1) && i->dType == TYPE_S32) {
            bld.setPosition(i, false);
            // The high word of a signed multiply by 1 is just the sign of the
            // other source (0 or -1), i.e. a compare against 0.
            newi = bld.mkCmp(OP_SET, CC_LT, TYPE_S32, i->getDef(0),
                             TYPE_S32, i->getSrc(t), bld.mkImm(0));
            delete_Instruction(prog, i);
            deleted = true;
         } else if (imm0.isInteger(0) || imm0.isInteger(1)) {
            // The high bits can't be set in this case (either mul by 0 or
            // unsigned by 1)
            i->op = OP_MOV;
            i->subOp = 0;
            i->setSrc(0, new_ImmediateValue(prog, 0u));
            i->src(0).mod = Modifier(0);
            i->setSrc(1, NULL);
         } else if (!imm0.isNegative() && imm0.isPow2()) {
            // Translate into a shift
            imm0.applyLog2();
            i->op = OP_SHR;
            i->subOp = 0;
            imm0.reg.data.u32 = 32 - imm0.reg.data.u32;
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
            i->setSrc(1, new_ImmediateValue(prog, imm0.reg.data.u32));
            i->src(1).mod = 0;
         }
      } else
      if (imm0.isInteger(0)) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->postFactor = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = i->src(t).mod.getOp();
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            i->src(1).mod = 0;
         }
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
         i->setSrc(1, NULL);
      } else
      if (!i->postFactor && (imm0.isInteger(2) || imm0.isInteger(-2))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         i->op = OP_ADD;
         i->dnz = 0;
         i->setSrc(s, i->getSrc(t));
         i->src(s).mod = i->src(t).mod;
      } else
      if (!isFloatType(i->dType) && !i->src(t).mod) {
         bld.setPosition(i, false);
         int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
         if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, NULL)) {
            delete_Instruction(prog, i);
            deleted = true;
         }
      } else
      if (i->postFactor && i->sType == TYPE_F32) {
         /* Can't emit a postfactor with an immediate, have to fold it in */
         i->setSrc(s, new_ImmediateValue(
                      prog, imm0.reg.data.f32 * exp2f(i->postFactor)));
         i->postFactor = 0;
      }
      break;
   case OP_FMA:
   case OP_MAD:
      if (imm0.isInteger(0)) {
         i->setSrc(0, i->getSrc(2));
         i->src(0).mod = i->src(2).mod;
         i->setSrc(1, NULL);
         i->setSrc(2, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = 0;
      } else
      if (i->subOp != NV50_IR_SUBOP_MUL_HIGH &&
          (imm0.isInteger(1) || imm0.isInteger(-1))) {
         if (imm0.isNegative())
            i->src(t).mod = i->src(t).mod ^ Modifier(NV50_IR_MOD_NEG);
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
         }
         i->setSrc(1, i->getSrc(2));
         i->src(1).mod = i->src(2).mod;
         i->setSrc(2, NULL);
         i->dnz = 0;
         i->op = OP_ADD;
      } else
      if (!isFloatType(i->dType) && !i->subOp && !i->src(t).mod && !i->src(2).mod) {
         bld.setPosition(i, false);
         int64_t b = typeSizeof(i->dType) == 8 ? imm0.reg.data.s64 : imm0.reg.data.s32;
         if (createMul(i->dType, i->getDef(0), i->getSrc(t), b, i->getSrc(2))) {
            delete_Instruction(prog, i);
            deleted = true;
         }
      }
      break;
   case OP_SUB:
      if (imm0.isInteger(0) && s == 0 && typeSizeof(i->dType) == 8 &&
          !isFloatType(i->dType))
         break;
      /* fallthrough */
   case OP_ADD:
      if (i->usesFlags())
         break;
      if (imm0.isInteger(0)) {
         if (s == 0) {
            i->setSrc(0, i->getSrc(1));
            i->src(0).mod = i->src(1).mod;
            if (i->op == OP_SUB)
               i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
         }
         i->setSrc(1, NULL);
         i->op = i->src(0).mod.getOp();
         if (i->op != OP_CVT)
            i->src(0).mod = Modifier(0);
      }
      break;

   case OP_DIV:
      if (s != 1 || (i->dType != TYPE_S32 && i->dType != TYPE_U32))
         break;
      bld.setPosition(i, false);
      if (imm0.reg.data.u32 == 0) {
         break;
      } else
      if (imm0.reg.data.u32 == 1) {
         i->op = OP_MOV;
         i->setSrc(1, NULL);
      } else
      if (i->dType == TYPE_U32 && imm0.isPow2()) {
         i->op = OP_SHR;
         i->setSrc(1, bld.mkImm(util_logbase2(imm0.reg.data.u32)));
      } else
      if (i->dType == TYPE_U32) {
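         // Unsigned division by a constant via multiply-high with a magic
         // number (the round-up variant, cf. Granlund & Montgomery): with
         // l = ceil(log2(d)) and m = (2^32 * (2^l - d)) / d + 1, the quotient
         // is (mulhi(a, m) + ((a - mulhi(a, m)) >> 1)) >> (l - 1).
         // E.g. d = 7: l = 3, m = 613566757, and a = 100 yields 14.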
         Instruction *mul;
         Value *tA, *tB;
         const uint32_t d = imm0.reg.data.u32;
         uint32_t m;
         int r, s;
         uint32_t l = util_logbase2(d);
         if (((uint32_t)1 << l) < d)
            ++l;
         m = (((uint64_t)1 << 32) * (((uint64_t)1 << l) - d)) / d + 1;
         r = l ? 1 : 0;
         s = l ? (l - 1) : 0;

         tA = bld.getSSA();
         tB = bld.getSSA();
         mul = bld.mkOp2(OP_MUL, TYPE_U32, tA, i->getSrc(0),
                         bld.loadImm(NULL, m));
         mul->subOp = NV50_IR_SUBOP_MUL_HIGH;
         bld.mkOp2(OP_SUB, TYPE_U32, tB, i->getSrc(0), tA);
         tA = bld.getSSA();
         if (r)
            bld.mkOp2(OP_SHR, TYPE_U32, tA, tB, bld.mkImm(r));
         else
            tA = tB;
         tB = s ? bld.getSSA() : i->getDef(0);
         newi = bld.mkOp2(OP_ADD, TYPE_U32, tB, mul->getDef(0), tA);
         if (s)
            bld.mkOp2(OP_SHR, TYPE_U32, i->getDef(0), tB, bld.mkImm(s));

         delete_Instruction(prog, i);
         deleted = true;
      } else
      if (imm0.reg.data.s32 == -1) {
         i->op = OP_NEG;
         i->setSrc(1, NULL);
      } else {
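         // Signed division by a constant, again via a multiply-high magic
         // number: roughly q = ((mulhi(a, m) + a) >> (l - 1)) + (a < 0),
         // where the SET below produces the 0/-1 boolean that the SUB folds
         // back in, with a final negate for d < 0 (cf. Hacker's Delight).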
         LValue *tA, *tB;
         LValue *tD;
         const int32_t d = imm0.reg.data.s32;
         int32_t m;
         int32_t l = util_logbase2(static_cast<unsigned>(abs(d)));
         if ((1 << l) < abs(d))
            ++l;
         if (!l)
            l = 1;
         m = ((uint64_t)1 << (32 + l - 1)) / abs(d) + 1 - ((uint64_t)1 << 32);

         tA = bld.getSSA();
         tB = bld.getSSA();
         bld.mkOp3(OP_MAD, TYPE_S32, tA, i->getSrc(0), bld.loadImm(NULL, m),
                   i->getSrc(0))->subOp = NV50_IR_SUBOP_MUL_HIGH;
         if (l > 1)
            bld.mkOp2(OP_SHR, TYPE_S32, tB, tA, bld.mkImm(l - 1));
         else
            tB = tA;
         tA = bld.getSSA();
         bld.mkCmp(OP_SET, CC_LT, TYPE_S32, tA, TYPE_S32, i->getSrc(0), bld.mkImm(0));
         tD = (d < 0) ? bld.getSSA() : i->getDef(0)->asLValue();
         newi = bld.mkOp2(OP_SUB, TYPE_U32, tD, tB, tA);
         if (d < 0)
            bld.mkOp1(OP_NEG, TYPE_S32, i->getDef(0), tB);

         delete_Instruction(prog, i);
         deleted = true;
      }
      break;

   case OP_MOD:
      if (s == 1 && imm0.isPow2()) {
         bld.setPosition(i, false);
         if (i->sType == TYPE_U32) {
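            // unsigned x % 2^n == x & (2^n - 1)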
            i->op = OP_AND;
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 - 1));
         } else if (i->sType == TYPE_S32) {
            // Do it on the absolute value of the input, and then restore the
            // sign. The only odd case is MIN_INT, but that should work out
            // as well, since MIN_INT mod any power of 2 is 0.
            //
            // Technically we don't have to do any of this since MOD is
            // undefined with negative arguments in GLSL, but this seems like
            // the nice thing to do.
            Value *abs = bld.mkOp1v(OP_ABS, TYPE_S32, bld.getSSA(), i->getSrc(0));
            Value *neg, *v1, *v2;
            bld.mkCmp(OP_SET, CC_LT, TYPE_S32,
                      (neg = bld.getSSA(1, prog->getTarget()->nativeFile(FILE_PREDICATE))),
                      TYPE_S32, i->getSrc(0), bld.loadImm(NULL, 0));
            Value *mod = bld.mkOp2v(OP_AND, TYPE_U32, bld.getSSA(), abs,
                                    bld.loadImm(NULL, imm0.reg.data.u32 - 1));
            bld.mkOp1(OP_NEG, TYPE_S32, (v1 = bld.getSSA()), mod)
               ->setPredicate(CC_P, neg);
            bld.mkOp1(OP_MOV, TYPE_S32, (v2 = bld.getSSA()), mod)
               ->setPredicate(CC_NOT_P, neg);
            newi = bld.mkOp2(OP_UNION, TYPE_S32, i->getDef(0), v1, v2);

            delete_Instruction(prog, i);
            deleted = true;
         }
      } else if (s == 1) {
         // In this case, we still want the optimized lowering that we get
         // from having division by an immediate.
         //
         // a % b == a - (a/b) * b
         bld.setPosition(i, false);
         Value *div = bld.mkOp2v(OP_DIV, i->sType, bld.getSSA(),
                                 i->getSrc(0), i->getSrc(1));
         newi = bld.mkOp2(OP_ADD, i->sType, i->getDef(0), i->getSrc(0),
                          bld.mkOp2v(OP_MUL, i->sType, bld.getSSA(), div, i->getSrc(1)));
         // TODO: Check that target supports this. In this case, we know that
         // all backends do.
         newi->src(1).mod = Modifier(NV50_IR_MOD_NEG);

         delete_Instruction(prog, i);
         deleted = true;
      }
      break;

   case OP_SET: // TODO: SET_AND,OR,XOR
   {
      /* This optimizes the case where the output of a set is being compared
       * to zero. Since the set can only produce 0/-1 (int) or 0/1 (float), we
       * can be a lot cleverer in our comparison.
       */
      CmpInstruction *si = findOriginForTestWithZero(i->getSrc(t));
      CondCode cc, ccZ;
      if (imm0.reg.data.u32 != 0 || !si)
         return false;
      cc = si->setCond;
      ccZ = (CondCode)((unsigned int)i->asCmp()->setCond & ~CC_U);
      // We do everything assuming var (cmp) 0, reverse the condition if 0 is
      // first.
      if (s == 0)
         ccZ = reverseCondCode(ccZ);
      // If there is a negative modifier, we need to undo that, by flipping
      // the comparison to zero.
      if (i->src(t).mod.neg())
         ccZ = reverseCondCode(ccZ);
      // If this is a signed comparison, we expect the input to be a regular
      // boolean, i.e. 0/-1. However the rest of the logic assumes that true
      // is positive, so just flip the sign.
      if (i->sType == TYPE_S32) {
         assert(!isFloatType(si->dType));
         ccZ = reverseCondCode(ccZ);
      }
      switch (ccZ) {
      case CC_LT: cc = CC_FL; break; // bool < 0 -- this is never true
      case CC_GE: cc = CC_TR; break; // bool >= 0 -- this is always true
      case CC_EQ: cc = inverseCondCode(cc); break; // bool == 0 -- !bool
      case CC_LE: cc = inverseCondCode(cc); break; // bool <= 0 -- !bool
      case CC_GT: break; // bool > 0 -- bool
      case CC_NE: break; // bool != 0 -- bool
      default:
         return false;
      }

      // Update the condition of this SET to be identical to the origin set,
      // but with the updated condition code. The original SET should get
      // DCE'd, ideally.
      i->op = si->op;
      i->asCmp()->setCond = cc;
      i->setSrc(0, si->src(0));
      i->setSrc(1, si->src(1));
      if (si->srcExists(2))
         i->setSrc(2, si->src(2));
      i->sType = si->sType;
   }
   break;

   case OP_AND:
   {
      Instruction *src = i->getSrc(t)->getInsn();
      ImmediateValue imm1;
      if (imm0.reg.data.u32 == 0) {
         i->op = OP_MOV;
         i->setSrc(0, new_ImmediateValue(prog, 0u));
         i->src(0).mod = Modifier(0);
         i->setSrc(1, NULL);
      } else if (imm0.reg.data.u32 == ~0U) {
         i->op = i->src(t).mod.getOp();
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->src(0).mod = i->src(t).mod;
         }
         i->setSrc(1, NULL);
      } else if (src->asCmp()) {
         CmpInstruction *cmp = src->asCmp();
         if (!cmp || cmp->op == OP_SLCT || cmp->getDef(0)->refCount() > 1)
            return false;
         if (!prog->getTarget()->isOpSupported(cmp->op, TYPE_F32))
            return false;
         if (imm0.reg.data.f32 != 1.0)
            return false;
         if (cmp->dType != TYPE_U32)
            return false;

         cmp->dType = TYPE_F32;
         if (i->src(t).mod != Modifier(0)) {
            assert(i->src(t).mod == Modifier(NV50_IR_MOD_NOT));
            i->src(t).mod = Modifier(0);
            cmp->setCond = inverseCondCode(cmp->setCond);
         }
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      } else if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32) &&
                 src->op == OP_SHR &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(imm0.reg.data.u32 + 1)) {
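         // (x >> c) & (2^w - 1) extracts w bits at offset c,
         // e.g. (x >> 8) & 0xff -> EXTBF with offset 8, width 8.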
         // low byte = offset, high byte = width
         uint32_t ext = (util_last_bit(imm0.reg.data.u32) << 8) | imm1.reg.data.u32;
         i->op = OP_EXTBF;
         i->setSrc(0, src->getSrc(0));
         i->setSrc(1, new_ImmediateValue(prog, ext));
      } else if (src->op == OP_SHL &&
                 src->src(1).getImmediate(imm1) &&
                 i->src(t).mod == Modifier(0) &&
                 util_is_power_of_two_or_zero(~imm0.reg.data.u32 + 1) &&
                 util_last_bit(~imm0.reg.data.u32) <= imm1.reg.data.u32) {
         i->op = OP_MOV;
         i->setSrc(s, NULL);
         if (t) {
            i->setSrc(0, i->getSrc(t));
            i->setSrc(t, NULL);
         }
      }
   }
   break;

   case OP_SHL:
   {
      if (s != 1 || i->src(0).mod != Modifier(0))
         break;
      // try to concatenate shifts
      Instruction *si = i->getSrc(0)->getInsn();
      if (!si)
         break;
      ImmediateValue imm1;
      switch (si->op) {
      case OP_SHL:
         if (si->src(1).getImmediate(imm1)) {
            bld.setPosition(i, false);
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, imm0.reg.data.u32 + imm1.reg.data.u32));
         }
         break;
      case OP_SHR:
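         // shl(shr(x, n), n) just clears the low n bits:
         // x & ~((1 << n) - 1)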
         if (si->src(1).getImmediate(imm1) && imm0.reg.data.u32 == imm1.reg.data.u32) {
            bld.setPosition(i, false);
            i->op = OP_AND;
            i->setSrc(0, si->getSrc(0));
            i->setSrc(1, bld.loadImm(NULL, ~((1 << imm0.reg.data.u32) - 1)));
         }
         break;
      case OP_MUL:
         int muls;
         if (isFloatType(si->dType))
            return false;
         if (si->src(1).getImmediate(imm1))
            muls = 1;
         else if (si->src(0).getImmediate(imm1))
            muls = 0;
         else
            return false;

         bld.setPosition(i, false);
         i->op = OP_MUL;
         i->setSrc(0, si->getSrc(!muls));
         i->setSrc(1, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         break;
      case OP_SUB:
      case OP_ADD:
         int adds;
         if (isFloatType(si->dType))
            return false;
         if (si->op != OP_SUB && si->src(0).getImmediate(imm1))
            adds = 0;
         else if (si->src(1).getImmediate(imm1))
            adds = 1;
         else
            return false;
         if (si->src(!adds).mod != Modifier(0))
            return false;
         // SHL(ADD(x, y), z) = ADD(SHL(x, z), SHL(y, z))

         // This is more operations, but if one of x, y is an immediate, then
         // we can get a situation where (a) we can use ISCADD, or (b) fold
         // the added offset into an indirect load.
         bld.setPosition(i, false);
         i->op = si->op;
         i->setSrc(adds, bld.loadImm(NULL, imm1.reg.data.u32 << imm0.reg.data.u32));
         i->setSrc(!adds, bld.mkOp2v(OP_SHL, i->dType,
                                     bld.getSSA(i->def(0).getSize(), i->def(0).getFile()),
                                     si->getSrc(!adds),
                                     bld.mkImm(imm0.reg.data.u32)));
         break;
      default:
         return false;
      }
   }
   break;

   case OP_ABS:
   case OP_NEG:
   case OP_SAT:
   case OP_LG2:
   case OP_RCP:
   case OP_SQRT:
   case OP_RSQ:
   case OP_PRESIN:
   case OP_SIN:
   case OP_COS:
   case OP_PREEX2:
   case OP_EX2:
      unary(i, imm0);
      break;
   case OP_BFIND: {
      int32_t res;
      switch (i->dType) {
      case TYPE_S32: res = util_last_bit_signed(imm0.reg.data.s32) - 1; break;
      case TYPE_U32: res = util_last_bit(imm0.reg.data.u32) - 1; break;
      default:
         return false;
      }
      if (i->subOp == NV50_IR_SUBOP_BFIND_SAMT && res >= 0)
         res = 31 - res;
      bld.setPosition(i, false); /* make sure bld is init'ed */
      i->setSrc(0, bld.mkImm(res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      i->subOp = 0;
      break;
   }
   case OP_BREV: {
      uint32_t res = util_bitreverse(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->op = OP_MOV;
      break;
   }
   case OP_POPCNT: {
      // Only deal with 1-arg POPCNT here
      if (i->srcExists(1))
         break;
      uint32_t res = util_bitcount(imm0.reg.data.u32);
      i->setSrc(0, new_ImmediateValue(i->bb->getProgram(), res));
      i->setSrc(1, NULL);
      i->op = OP_MOV;
      break;
   }
   case OP_CVT: {
      Storage res;

      // TODO: handle 64-bit values properly
      if (typeSizeof(i->dType) == 8 || typeSizeof(i->sType) == 8)
         return false;

      // TODO: handle single byte/word extractions
      if (i->subOp)
         return false;

      bld.setPosition(i, true); /* make sure bld is init'ed */

#define CASE(type, dst, fmin, fmax, imin, imax, umin, umax) \
   case type: \
      switch (i->sType) { \
      case TYPE_F64: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f64, fmin, fmax) : \
                                    imm0.reg.data.f64); \
         break; \
      case TYPE_F32: \
         res.data.dst = util_iround(i->saturate ? \
                                    CLAMP(imm0.reg.data.f32, fmin, fmax) : \
                                    imm0.reg.data.f32); \
         break; \
      case TYPE_S32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s32, imin, imax) : \
                        imm0.reg.data.s32; \
         break; \
      case TYPE_U32: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u32, umin, umax) : \
                        imm0.reg.data.u32; \
         break; \
      case TYPE_S16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.s16, imin, imax) : \
                        imm0.reg.data.s16; \
         break; \
      case TYPE_U16: \
         res.data.dst = i->saturate ? \
                        CLAMP(imm0.reg.data.u16, umin, umax) : \
                        imm0.reg.data.u16; \
         break; \
      default: return false; \
      } \
      i->setSrc(0, bld.mkImm(res.data.dst)); \
      break

      switch(i->dType) {
      CASE(TYPE_U16, u16, 0, UINT16_MAX, 0, UINT16_MAX, 0, UINT16_MAX);
      CASE(TYPE_S16, s16, INT16_MIN, INT16_MAX, INT16_MIN, INT16_MAX, 0, INT16_MAX);
      CASE(TYPE_U32, u32, 0, UINT32_MAX, 0, INT32_MAX, 0, UINT32_MAX);
      CASE(TYPE_S32, s32, INT32_MIN, INT32_MAX, INT32_MIN, INT32_MAX, 0, INT32_MAX);
      case TYPE_F32:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f32 = i->saturate ?
               SATURATE(imm0.reg.data.f64) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f32 = i->saturate ?
               SATURATE(imm0.reg.data.f32) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f32 = (float) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f32 = (float) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f32 = (float) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f32 = (float) imm0.reg.data.s32; break;
         default:
            return false;
         }
         i->setSrc(0, bld.mkImm(res.data.f32));
         break;
      case TYPE_F64:
         switch (i->sType) {
         case TYPE_F64:
            res.data.f64 = i->saturate ?
               SATURATE(imm0.reg.data.f64) :
               imm0.reg.data.f64;
            break;
         case TYPE_F32:
            res.data.f64 = i->saturate ?
               SATURATE(imm0.reg.data.f32) :
               imm0.reg.data.f32;
            break;
         case TYPE_U16: res.data.f64 = (double) imm0.reg.data.u16; break;
         case TYPE_U32: res.data.f64 = (double) imm0.reg.data.u32; break;
         case TYPE_S16: res.data.f64 = (double) imm0.reg.data.s16; break;
         case TYPE_S32: res.data.f64 = (double) imm0.reg.data.s32; break;
         default:
            return false;
         }
         i->setSrc(0, bld.mkImm(res.data.f64));
         break;
      default:
         return false;
      }
#undef CASE

      i->setType(i->dType); /* Remove i->sType, which we don't need anymore */
      i->op = OP_MOV;
      i->saturate = 0;
      i->src(0).mod = Modifier(0); /* Clear the already applied modifier */
      break;
   }
   default:
      return false;
   }

   // This can get left behind by some of the optimizations which simplify
   // saturatable values.
   if (newi->op == OP_MOV && newi->saturate) {
      ImmediateValue tmp;
      newi->saturate = 0;
      newi->op = OP_SAT;
      if (newi->src(0).getImmediate(tmp))
         unary(newi, tmp);
   }

   if (newi->op != op)
      foldCount++;
   return deleted;
}

// =============================================================================

// Merge modifier operations (ABS, NEG, NOT) into ValueRefs where allowed.
class ModifierFolding : public Pass
{
private:
   virtual bool visit(BasicBlock *);
};

bool
ModifierFolding::visit(BasicBlock *bb)
{
   const Target *target = prog->getTarget();

   Instruction *i, *next, *mi;
   Modifier mod;

   for (i = bb->getEntry(); i; i = next) {
      next = i->next;

      if (0 && i->op == OP_SUB) {
         // turn "sub" into "add neg" (do we really want this ?)
         i->op = OP_ADD;
         i->src(0).mod = i->src(0).mod ^ Modifier(NV50_IR_MOD_NEG);
      }

      for (int s = 0; s < 3 && i->srcExists(s); ++s) {
         mi = i->getSrc(s)->getInsn();
         if (!mi ||
             mi->predSrc >= 0 || mi->getDef(0)->refCount() > 8)
            continue;
         if (i->sType == TYPE_U32 && mi->dType == TYPE_S32) {
            if ((i->op != OP_ADD &&
                 i->op != OP_MUL) ||
                (mi->op != OP_ABS &&
                 mi->op != OP_NEG))
               continue;
         } else
         if (i->sType != mi->dType) {
            continue;
         }
         if ((mod = Modifier(mi->op)) == Modifier(0))
            continue;
         mod *= mi->src(0).mod;

         if ((i->op == OP_ABS) || i->src(s).mod.abs()) {
            // abs neg [abs] = abs
            mod = mod & Modifier(~(NV50_IR_MOD_NEG | NV50_IR_MOD_ABS));
         } else
         if ((i->op == OP_NEG) && mod.neg()) {
            assert(s == 0);
            // neg as both opcode and modifier on same insn is prohibited
            // neg neg abs = abs, neg neg = identity
            mod = mod & Modifier(~NV50_IR_MOD_NEG);
            i->op = mod.getOp();
            mod = mod & Modifier(~NV50_IR_MOD_ABS);
            if (mod == Modifier(0))
               i->op = OP_MOV;
         }

         if (target->isModSupported(i, s, mod)) {
            i->setSrc(s, mi->getSrc(0));
            i->src(s).mod *= mod;
         }
      }

      if (i->op == OP_SAT) {
         mi = i->getSrc(0)->getInsn();
         if (mi &&
             mi->getDef(0)->refCount() <= 1 && target->isSatSupported(mi)) {
            mi->saturate = 1;
            mi->setDef(0, i->getDef(0));
            delete_Instruction(prog, i);
         }
      }
   }

   return true;
}

// =============================================================================

// MUL + ADD -> MAD/FMA
// MIN/MAX(a, a) -> a, etc.
// SLCT(a, b, const) -> cc(const) ? a : b
// RCP(RCP(a)) -> a
// MUL(MUL(a, b), const) -> MUL_Xconst(a, b)
// EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
class AlgebraicOpt : public Pass
{
private:
   virtual bool visit(BasicBlock *);

   void handleABS(Instruction *);
   bool handleADD(Instruction *);
   bool tryADDToMADOrSAD(Instruction *, operation toOp);
   void handleMINMAX(Instruction *);
   void handleRCP(Instruction *);
   void handleSLCT(Instruction *);
   void handleLOGOP(Instruction *);
   void handleCVT_NEG(Instruction *);
   void handleCVT_CVT(Instruction *);
   void handleCVT_EXTBF(Instruction *);
   void handleSUCLAMP(Instruction *);
   void handleNEG(Instruction *);
   void handleEXTBF_RDSV(Instruction *);

   BuildUtil bld;
};

void
AlgebraicOpt::handleABS(Instruction *abs)
{
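   // Recognize abs(a - b) (or abs(a + -b)) and turn it into sad(a, b, 0),
   // since SAD computes |a - b| + c in one instruction.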
   Instruction *sub = abs->getSrc(0)->getInsn();
   DataType ty;
   if (!sub ||
       !prog->getTarget()->isOpSupported(OP_SAD, abs->dType))
      return;
   // expect not to have mods yet, if we do, bail
   if (sub->src(0).mod || sub->src(1).mod)
      return;
   // hidden conversion ?
   ty = intTypeToSigned(sub->dType);
   if (abs->dType != abs->sType || ty != abs->sType)
      return;

   if ((sub->op != OP_ADD && sub->op != OP_SUB) ||
       sub->src(0).getFile() != FILE_GPR || sub->src(0).mod ||
       sub->src(1).getFile() != FILE_GPR || sub->src(1).mod)
      return;

   Value *src0 = sub->getSrc(0);
   Value *src1 = sub->getSrc(1);

   if (sub->op == OP_ADD) {
      Instruction *neg = sub->getSrc(1)->getInsn();
      if (neg && neg->op != OP_NEG) {
         neg = sub->getSrc(0)->getInsn();
         src0 = sub->getSrc(1);
      }
      if (!neg || neg->op != OP_NEG ||
          neg->dType != neg->sType || neg->sType != ty)
         return;
      src1 = neg->getSrc(0);
   }

   // found ABS(SUB)
1850 abs->moveSources(1, 2); // move sources >=1 up by 2
1851 abs->op = OP_SAD;
1852 abs->setType(sub->dType);
1853 abs->setSrc(0, src0);
1854 abs->setSrc(1, src1);
1855 bld.setPosition(abs, false);
1856 abs->setSrc(2, bld.loadImm(bld.getSSA(typeSizeof(ty)), 0));
1857 }
1858
1859 bool
handleADD(Instruction * add)1860 AlgebraicOpt::handleADD(Instruction *add)
1861 {
1862 Value *src0 = add->getSrc(0);
1863 Value *src1 = add->getSrc(1);
1864
1865 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
1866 return false;
1867
1868 bool changed = false;
1869 // we can't optimize to MAD if the add is precise
1870 if (!add->precise && prog->getTarget()->isOpSupported(OP_MAD, add->dType))
1871 changed = tryADDToMADOrSAD(add, OP_MAD);
1872 if (!changed && prog->getTarget()->isOpSupported(OP_SAD, add->dType))
1873 changed = tryADDToMADOrSAD(add, OP_SAD);
1874 return changed;
1875 }
1876
1877 // ADD(SAD(a,b,0), c) -> SAD(a,b,c)
1878 // ADD(MUL(a,b), c) -> MAD(a,b,c)
1879 bool
tryADDToMADOrSAD(Instruction * add,operation toOp)1880 AlgebraicOpt::tryADDToMADOrSAD(Instruction *add, operation toOp)
1881 {
1882 Value *src0 = add->getSrc(0);
1883 Value *src1 = add->getSrc(1);
1884 Value *src;
1885 int s;
1886 const operation srcOp = toOp == OP_SAD ? OP_SAD : OP_MUL;
1887 const Modifier modBad = Modifier(~((toOp == OP_MAD) ? NV50_IR_MOD_NEG : 0));
1888 Modifier mod[4];
1889
1890 if (src0->refCount() == 1 &&
1891 src0->getUniqueInsn() && src0->getUniqueInsn()->op == srcOp)
1892 s = 0;
1893 else
1894 if (src1->refCount() == 1 &&
1895 src1->getUniqueInsn() && src1->getUniqueInsn()->op == srcOp)
1896 s = 1;
1897 else
1898 return false;
1899
1900 src = add->getSrc(s);
1901
1902 if (src->getUniqueInsn() && src->getUniqueInsn()->bb != add->bb)
1903 return false;
1904
1905 if (src->getInsn()->saturate || src->getInsn()->postFactor ||
1906 src->getInsn()->dnz || src->getInsn()->precise)
1907 return false;
1908
1909 if (toOp == OP_SAD) {
1910 ImmediateValue imm;
1911 if (!src->getInsn()->src(2).getImmediate(imm))
1912 return false;
1913 if (!imm.isInteger(0))
1914 return false;
1915 }
1916
1917 if (typeSizeof(add->dType) != typeSizeof(src->getInsn()->dType) ||
1918 isFloatType(add->dType) != isFloatType(src->getInsn()->dType))
1919 return false;
1920
1921 mod[0] = add->src(0).mod;
1922 mod[1] = add->src(1).mod;
1923 mod[2] = src->getUniqueInsn()->src(0).mod;
1924 mod[3] = src->getUniqueInsn()->src(1).mod;
1925
1926 if (((mod[0] | mod[1]) | (mod[2] | mod[3])) & modBad)
1927 return false;
1928
1929 add->op = toOp;
1930 add->subOp = src->getInsn()->subOp; // potentially mul-high
1931 add->dnz = src->getInsn()->dnz;
1932 add->dType = src->getInsn()->dType; // sign matters for imad hi
1933 add->sType = src->getInsn()->sType;
1934
1935 add->setSrc(2, add->src(s ? 0 : 1));
1936
1937 add->setSrc(0, src->getInsn()->getSrc(0));
1938 add->src(0).mod = mod[2] ^ mod[s];
1939 add->setSrc(1, src->getInsn()->getSrc(1));
1940 add->src(1).mod = mod[3];
1941
1942 return true;
1943 }
1944
1945 void
1946 AlgebraicOpt::handleMINMAX(Instruction *minmax)
1947 {
1948 Value *src0 = minmax->getSrc(0);
1949 Value *src1 = minmax->getSrc(1);
1950
1951 if (src0 != src1 || src0->reg.file != FILE_GPR)
1952 return;
1953 if (minmax->src(0).mod == minmax->src(1).mod) {
1954 if (minmax->def(0).mayReplace(minmax->src(0))) {
1955 minmax->def(0).replace(minmax->src(0), false);
1956 delete_Instruction(prog, minmax);
1957 } else {
1958 minmax->op = OP_CVT;
1959 minmax->setSrc(1, NULL);
1960 }
1961 } else {
1962 // TODO:
1963 // min(x, -x) = -abs(x)
1964 // min(x, -abs(x)) = -abs(x)
1965 // min(x, abs(x)) = x
1966 // max(x, -abs(x)) = x
1967 // max(x, abs(x)) = abs(x)
1968 // max(x, -x) = abs(x)
1969 }
1970 }
1971
1972 // rcp(rcp(a)) = a
1973 // rcp(sqrt(a)) = rsq(a)
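// Source modifiers compose across the folded pair, e.g. (sketch)
// rcp(neg(rcp(a))) reduces to neg(a): the combined Modifier picks the
// replacement opcode via mod.getOp().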
1974 void
1975 AlgebraicOpt::handleRCP(Instruction *rcp)
1976 {
1977 Instruction *si = rcp->getSrc(0)->getUniqueInsn();
1978
1979 if (!si)
1980 return;
1981
1982 if (si->op == OP_RCP) {
1983 Modifier mod = rcp->src(0).mod * si->src(0).mod;
1984 rcp->op = mod.getOp();
1985 rcp->setSrc(0, si->getSrc(0));
1986 } else if (si->op == OP_SQRT) {
1987 rcp->op = OP_RSQ;
1988 rcp->setSrc(0, si->getSrc(0));
1989 rcp->src(0).mod = rcp->src(0).mod * si->src(0).mod;
1990 }
1991 }
1992
1993 void
1994 AlgebraicOpt::handleSLCT(Instruction *slct)
1995 {
1996 if (slct->getSrc(2)->reg.file == FILE_IMMEDIATE) {
1997 if (slct->getSrc(2)->asImm()->compare(slct->asCmp()->setCond, 0.0f))
1998 slct->setSrc(0, slct->getSrc(1));
1999 } else
2000 if (slct->getSrc(0) != slct->getSrc(1)) {
2001 return;
2002 }
2003 slct->op = OP_MOV;
2004 slct->setSrc(1, NULL);
2005 slct->setSrc(2, NULL);
2006 }
2007
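// SLCT selects between its first two sources based on how the third
// compares against zero. With an immediate third source the comparison is
// static, so the instruction collapses to a MOV of the surviving source;
// it also collapses when both selectable sources are identical.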
2008 void
2009 AlgebraicOpt::handleLOGOP(Instruction *logop)
2010 {
2011 Value *src0 = logop->getSrc(0);
2012 Value *src1 = logop->getSrc(1);
2013
2014 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2015 return;
2016
2017 if (src0 == src1) {
2018 if ((logop->op == OP_AND || logop->op == OP_OR) &&
2019 logop->def(0).mayReplace(logop->src(0))) {
2020 logop->def(0).replace(logop->src(0), false);
2021 delete_Instruction(prog, logop);
2022 }
2023 } else {
2024 // try AND(SET, SET) -> SET_AND(SET)
2025 Instruction *set0 = src0->getInsn();
2026 Instruction *set1 = src1->getInsn();
2027
2028 if (!set0 || set0->fixed || !set1 || set1->fixed)
2029 return;
2030 if (set1->op != OP_SET) {
2031 Instruction *xchg = set0;
2032 set0 = set1;
2033 set1 = xchg;
2034 if (set1->op != OP_SET)
2035 return;
2036 }
2037 operation redOp = (logop->op == OP_AND ? OP_SET_AND :
2038 logop->op == OP_XOR ? OP_SET_XOR : OP_SET_OR);
2039 if (!prog->getTarget()->isOpSupported(redOp, set1->sType))
2040 return;
2041 if (set0->op != OP_SET &&
2042 set0->op != OP_SET_AND &&
2043 set0->op != OP_SET_OR &&
2044 set0->op != OP_SET_XOR)
2045 return;
2046 if (set0->getDef(0)->refCount() > 1 &&
2047 set1->getDef(0)->refCount() > 1)
2048 return;
2049 if (set0->getPredicate() || set1->getPredicate())
2050 return;
2051 // check that they don't source each other
2052 for (int s = 0; s < 2; ++s)
2053 if (set0->getSrc(s) == set1->getDef(0) ||
2054 set1->getSrc(s) == set0->getDef(0))
2055 return;
2056
2057 set0 = cloneForward(func, set0);
2058 set1 = cloneShallow(func, set1);
2059 logop->bb->insertAfter(logop, set1);
2060 logop->bb->insertAfter(logop, set0);
2061
2062 set0->dType = TYPE_U8;
2063 set0->getDef(0)->reg.file = FILE_PREDICATE;
2064 set0->getDef(0)->reg.size = 1;
2065 set1->setSrc(2, set0->getDef(0));
2066 set1->op = redOp;
2067 set1->setDef(0, logop->getDef(0));
2068 delete_Instruction(prog, logop);
2069 }
2070 }
2071
2072 // F2I(NEG(SET with result 1.0f/0.0f)) -> SET with result -1/0
2073 // nv50:
2074 // F2I(NEG(I2F(ABS(SET))))
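// Sketch of the nvc0 path (hypothetical IR):
//   set f32 %s (lt %a %b) ; neg f32 %n, %s ; cvt s32 %r, f32 %n
// becomes set u32 %r (lt %a %b), because an integer SET writes -1/0
// directly, matching F2I(-(1.0f/0.0f)).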
2075 void
2076 AlgebraicOpt::handleCVT_NEG(Instruction *cvt)
2077 {
2078 Instruction *insn = cvt->getSrc(0)->getInsn();
2079 if (cvt->sType != TYPE_F32 ||
2080 cvt->dType != TYPE_S32 || cvt->src(0).mod != Modifier(0))
2081 return;
2082 if (!insn || insn->op != OP_NEG || insn->dType != TYPE_F32)
2083 return;
2084 if (insn->src(0).mod != Modifier(0))
2085 return;
2086 insn = insn->getSrc(0)->getInsn();
2087
2088 // check for nv50 SET(-1,0) -> SET(1.0f/0.0f) chain and nvc0's f32 SET
2089 if (insn && insn->op == OP_CVT &&
2090 insn->dType == TYPE_F32 &&
2091 insn->sType == TYPE_S32) {
2092 insn = insn->getSrc(0)->getInsn();
2093 if (!insn || insn->op != OP_ABS || insn->sType != TYPE_S32 ||
2094 insn->src(0).mod)
2095 return;
2096 insn = insn->getSrc(0)->getInsn();
2097 if (!insn || insn->op != OP_SET || insn->dType != TYPE_U32)
2098 return;
2099 } else
2100 if (!insn || insn->op != OP_SET || insn->dType != TYPE_F32) {
2101 return;
2102 }
2103
2104 Instruction *bset = cloneShallow(func, insn);
2105 bset->dType = TYPE_U32;
2106 bset->setDef(0, cvt->getDef(0));
2107 cvt->bb->insertAfter(cvt, bset);
2108 delete_Instruction(prog, cvt);
2109 }
2110
2111 // F2I(TRUNC()) and so on can be expressed as a single CVT. If the earlier CVT
2112 // does a type conversion, this becomes trickier as there might be range
2113 // changes/etc. We could handle those in theory as long as the range was being
2114 // reduced or kept the same.
2115 void
2116 AlgebraicOpt::handleCVT_CVT(Instruction *cvt)
2117 {
2118 Instruction *insn = cvt->getSrc(0)->getInsn();
2119
2120 if (!insn ||
2121 insn->saturate ||
2122 insn->subOp ||
2123 insn->dType != insn->sType ||
2124 insn->dType != cvt->sType)
2125 return;
2126
2127 RoundMode rnd = insn->rnd;
2128 switch (insn->op) {
2129 case OP_CEIL:
2130 rnd = ROUND_PI;
2131 break;
2132 case OP_FLOOR:
2133 rnd = ROUND_MI;
2134 break;
2135 case OP_TRUNC:
2136 rnd = ROUND_ZI;
2137 break;
2138 case OP_CVT:
2139 break;
2140 default:
2141 return;
2142 }
2143
2144 if (!isFloatType(cvt->dType) || !isFloatType(insn->sType))
2145 rnd = (RoundMode)(rnd & 3);
2146
2147 cvt->rnd = rnd;
2148 cvt->setSrc(0, insn->getSrc(0));
2149 cvt->src(0).mod *= insn->src(0).mod;
2150 cvt->sType = insn->sType;
2151 }
2152
2153 // Some shaders extract packed bytes out of words and convert them to
2154 // e.g. float. The Fermi+ CVT instruction can extract those directly, as can
2155 // nv50 for word sizes.
2156 //
2157 // CVT(EXTBF(x, byte/word))
2158 // CVT(AND(bytemask, x))
2159 // CVT(AND(bytemask, SHR(x, 8/16/24)))
2160 // CVT(SHR(x, 16/24))
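// Sketch of the AND form (hypothetical IR):
//   and u32 %t, %x, 0xff ; cvt f32 %r, u32 %t
// becomes cvt f32 %r, u8 %x with subOp selecting byte 0, i.e. the
// conversion performs the extraction itself and the mask goes away.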
2161 void
2162 AlgebraicOpt::handleCVT_EXTBF(Instruction *cvt)
2163 {
2164 Instruction *insn = cvt->getSrc(0)->getInsn();
2165 ImmediateValue imm;
2166 Value *arg = NULL;
2167 unsigned width, offset;
2168 if ((cvt->sType != TYPE_U32 && cvt->sType != TYPE_S32) || !insn)
2169 return;
2170 if (insn->op == OP_EXTBF && insn->src(1).getImmediate(imm)) {
2171 width = (imm.reg.data.u32 >> 8) & 0xff;
2172 offset = imm.reg.data.u32 & 0xff;
2173 arg = insn->getSrc(0);
2174
2175 if (width != 8 && width != 16)
2176 return;
2177 if (width == 8 && offset & 0x7)
2178 return;
2179 if (width == 16 && offset & 0xf)
2180 return;
2181 } else if (insn->op == OP_AND) {
2182 int s;
2183 if (insn->src(0).getImmediate(imm))
2184 s = 0;
2185 else if (insn->src(1).getImmediate(imm))
2186 s = 1;
2187 else
2188 return;
2189
2190 if (imm.reg.data.u32 == 0xff)
2191 width = 8;
2192 else if (imm.reg.data.u32 == 0xffff)
2193 width = 16;
2194 else
2195 return;
2196
2197 arg = insn->getSrc(!s);
2198 Instruction *shift = arg->getInsn();
2199 offset = 0;
2200 if (shift && shift->op == OP_SHR &&
2201 shift->sType == cvt->sType &&
2202 shift->src(1).getImmediate(imm) &&
2203 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2204 (width == 16 && (imm.reg.data.u32 & 0xf) == 0))) {
2205 arg = shift->getSrc(0);
2206 offset = imm.reg.data.u32;
2207 }
2208 // We just AND'd the high bits away, which means this is effectively an
2209 // unsigned value.
2210 cvt->sType = TYPE_U32;
2211 } else if (insn->op == OP_SHR &&
2212 insn->sType == cvt->sType &&
2213 insn->src(1).getImmediate(imm)) {
2214 arg = insn->getSrc(0);
2215 if (imm.reg.data.u32 == 24) {
2216 width = 8;
2217 offset = 24;
2218 } else if (imm.reg.data.u32 == 16) {
2219 width = 16;
2220 offset = 16;
2221 } else {
2222 return;
2223 }
2224 }
2225
2226 if (!arg)
2227 return;
2228
2229 // Irrespective of what came earlier, we can undo a shift on the argument
2230 // by adjusting the offset.
2231 Instruction *shift = arg->getInsn();
2232 if (shift && shift->op == OP_SHL &&
2233 shift->src(1).getImmediate(imm) &&
2234 ((width == 8 && (imm.reg.data.u32 & 0x7) == 0) ||
2235 (width == 16 && (imm.reg.data.u32 & 0xf) == 0)) &&
2236 imm.reg.data.u32 <= offset) {
2237 arg = shift->getSrc(0);
2238 offset -= imm.reg.data.u32;
2239 }
2240
2241 // The unpackSnorm lowering still leaves a few shifts behind, but it's too
2242 // annoying to detect them.
2243
2244 if (width == 8) {
2245 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U8 : TYPE_S8;
2246 } else {
2247 assert(width == 16);
2248 cvt->sType = cvt->sType == TYPE_U32 ? TYPE_U16 : TYPE_S16;
2249 }
2250 cvt->setSrc(0, arg);
2251 cvt->subOp = offset >> 3;
2252 }
2253
2254 // SUCLAMP dst, (ADD b imm), k, 0 -> SUCLAMP dst, b, k, imm (if imm fits s6)
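// Sketch: if %t = add u32 %b, 4 has a single use, SUCLAMP(%t, k, 0) becomes
// SUCLAMP(%b, k, 4). The combined offset must fit the signed 6-bit
// immediate field, i.e. -32..31, which is checked below.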
2255 void
2256 AlgebraicOpt::handleSUCLAMP(Instruction *insn)
2257 {
2258 ImmediateValue imm;
2259 int32_t val = insn->getSrc(2)->asImm()->reg.data.s32;
2260 int s;
2261 Instruction *add;
2262
2263 assert(insn->srcExists(0) && insn->src(0).getFile() == FILE_GPR);
2264
2265 // look for ADD (TODO: only count references by non-SUCLAMP)
2266 if (insn->getSrc(0)->refCount() > 1)
2267 return;
2268 add = insn->getSrc(0)->getInsn();
2269 if (!add || add->op != OP_ADD ||
2270 (add->dType != TYPE_U32 &&
2271 add->dType != TYPE_S32))
2272 return;
2273
2274 // look for immediate
2275 for (s = 0; s < 2; ++s)
2276 if (add->src(s).getImmediate(imm))
2277 break;
2278 if (s >= 2)
2279 return;
2280 s = s ? 0 : 1;
2281 // determine if immediate fits
2282 val += imm.reg.data.s32;
2283 if (val > 31 || val < -32)
2284 return;
2285 // determine if other addend fits
2286 if (add->src(s).getFile() != FILE_GPR || add->src(s).mod != Modifier(0))
2287 return;
2288
2289 bld.setPosition(insn, false); // make sure bld is init'ed
2290 // replace sources
2291 insn->setSrc(2, bld.mkImm(val));
2292 insn->setSrc(0, add->getSrc(s));
2293 }
2294
2295 // NEG(AND(SET, 1)) -> SET
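// This holds because an integer SET writes -1/0: AND with 1 yields 1/0,
// and negating that restores -1/0, i.e. the original SET result.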
2296 void
2297 AlgebraicOpt::handleNEG(Instruction *i) {
2298 Instruction *src = i->getSrc(0)->getInsn();
2299 ImmediateValue imm;
2300 int b;
2301
2302 if (isFloatType(i->sType) || !src || src->op != OP_AND)
2303 return;
2304
2305 if (src->src(0).getImmediate(imm))
2306 b = 1;
2307 else if (src->src(1).getImmediate(imm))
2308 b = 0;
2309 else
2310 return;
2311
2312 if (!imm.isInteger(1))
2313 return;
2314
2315 Instruction *set = src->getSrc(b)->getInsn();
2316 if ((set->op == OP_SET || set->op == OP_SET_AND ||
2317 set->op == OP_SET_OR || set->op == OP_SET_XOR) &&
2318 !isFloatType(set->dType)) {
2319 i->def(0).replace(set->getDef(0), false);
2320 }
2321 }
2322
2323 // EXTBF(RDSV(COMBINED_TID)) -> RDSV(TID)
2324 void
2325 AlgebraicOpt::handleEXTBF_RDSV(Instruction *i)
2326 {
2327 Instruction *rdsv = i->getSrc(0)->getUniqueInsn();
2328 if (rdsv->op != OP_RDSV ||
2329 rdsv->getSrc(0)->asSym()->reg.data.sv.sv != SV_COMBINED_TID)
2330 return;
2331 // Avoid creating more RDSV instructions
2332 if (rdsv->getDef(0)->refCount() > 1)
2333 return;
2334
2335 ImmediateValue imm;
2336 if (!i->src(1).getImmediate(imm))
2337 return;
2338
2339 int index;
2340 if (imm.isInteger(0x1000))
2341 index = 0;
2342 else
2343 if (imm.isInteger(0x0a10))
2344 index = 1;
2345 else
2346 if (imm.isInteger(0x061a))
2347 index = 2;
2348 else
2349 return;
2350
2351 bld.setPosition(i, false);
2352
2353 i->op = OP_RDSV;
2354 i->setSrc(0, bld.mkSysVal(SV_TID, index));
2355 i->setSrc(1, NULL);
2356 }
2357
2358 bool
2359 AlgebraicOpt::visit(BasicBlock *bb)
2360 {
2361 Instruction *next;
2362 for (Instruction *i = bb->getEntry(); i; i = next) {
2363 next = i->next;
2364 switch (i->op) {
2365 case OP_ABS:
2366 handleABS(i);
2367 break;
2368 case OP_ADD:
2369 handleADD(i);
2370 break;
2371 case OP_RCP:
2372 handleRCP(i);
2373 break;
2374 case OP_MIN:
2375 case OP_MAX:
2376 handleMINMAX(i);
2377 break;
2378 case OP_SLCT:
2379 handleSLCT(i);
2380 break;
2381 case OP_AND:
2382 case OP_OR:
2383 case OP_XOR:
2384 handleLOGOP(i);
2385 break;
2386 case OP_CVT:
2387 handleCVT_NEG(i);
2388 handleCVT_CVT(i);
2389 if (prog->getTarget()->isOpSupported(OP_EXTBF, TYPE_U32))
2390 handleCVT_EXTBF(i);
2391 break;
2392 case OP_SUCLAMP:
2393 handleSUCLAMP(i);
2394 break;
2395 case OP_NEG:
2396 handleNEG(i);
2397 break;
2398 case OP_EXTBF:
2399 handleEXTBF_RDSV(i);
2400 break;
2401 default:
2402 break;
2403 }
2404 }
2405
2406 return true;
2407 }
2408
2409 // =============================================================================
2410
2411 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
2412 // MUL(a, b) -> a few XMADs
2413 // MAD/FMA(a, b, c) -> a few XMADs
2414 class LateAlgebraicOpt : public Pass
2415 {
2416 private:
2417 virtual bool visit(Instruction *);
2418
2419 void handleADD(Instruction *);
2420 void handleMULMAD(Instruction *);
2421 bool tryADDToSHLADD(Instruction *);
2422
2423 BuildUtil bld;
2424 };
2425
2426 void
2427 LateAlgebraicOpt::handleADD(Instruction *add)
2428 {
2429 Value *src0 = add->getSrc(0);
2430 Value *src1 = add->getSrc(1);
2431
2432 if (src0->reg.file != FILE_GPR || src1->reg.file != FILE_GPR)
2433 return;
2434
2435 if (prog->getTarget()->isOpSupported(OP_SHLADD, add->dType))
2436 tryADDToSHLADD(add);
2437 }
2438
2439 // ADD(SHL(a, b), c) -> SHLADD(a, b, c)
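// Sketch (hypothetical IR), assuming the SHL result has no other uses:
//   shl u32 %t, %a, 4 ; add u32 %r, %t, %c   ->   shladd u32 %r, %a, 4, %c
// The shift amount must be an immediate (checked below); the ADD's other
// source may keep its modifier.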
2440 bool
2441 LateAlgebraicOpt::tryADDToSHLADD(Instruction *add)
2442 {
2443 Value *src0 = add->getSrc(0);
2444 Value *src1 = add->getSrc(1);
2445 ImmediateValue imm;
2446 Instruction *shl;
2447 Value *src;
2448 int s;
2449
2450 if (add->saturate || add->usesFlags() || typeSizeof(add->dType) == 8
2451 || isFloatType(add->dType))
2452 return false;
2453
2454 if (src0->getUniqueInsn() && src0->getUniqueInsn()->op == OP_SHL)
2455 s = 0;
2456 else
2457 if (src1->getUniqueInsn() && src1->getUniqueInsn()->op == OP_SHL)
2458 s = 1;
2459 else
2460 return false;
2461
2462 src = add->getSrc(s);
2463 shl = src->getUniqueInsn();
2464
2465 if (shl->bb != add->bb || shl->usesFlags() || shl->subOp || shl->src(0).mod)
2466 return false;
2467
2468 if (!shl->src(1).getImmediate(imm))
2469 return false;
2470
2471 add->op = OP_SHLADD;
2472 add->setSrc(2, add->src(!s));
2473 // SHL can't have any modifiers, but the ADD source may have had
2474 // one. Preserve it.
2475 add->setSrc(0, shl->getSrc(0));
2476 if (s == 1)
2477 add->src(0).mod = add->src(1).mod;
2478 add->setSrc(1, new_ImmediateValue(shl->bb->getProgram(), imm.reg.data.u32));
2479 add->src(1).mod = Modifier(0);
2480
2481 return true;
2482 }
2483
2484 // MUL(a, b) -> a few XMADs
2485 // MAD/FMA(a, b, c) -> a few XMADs
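// XMAD d, a, b, c computes, roughly, lo16(a) * lo16(b) + c, with subOp
// flags selecting high halves and post-operations (PSL/MRG/CBCC) shifting
// or merging the 16-bit partial products. With a = aH:aL and b = bH:bL,
//   a * b + c = aL*bL + ((aL*bH + aH*bL) << 16) + c  (mod 2^32),
// which the three XMADs below assemble. This is a sketch of the intended
// decomposition, not a specification of the hardware op.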
2486 void
2487 LateAlgebraicOpt::handleMULMAD(Instruction *i)
2488 {
2489 // TODO: handle NV50_IR_SUBOP_MUL_HIGH
2490 if (!prog->getTarget()->isOpSupported(OP_XMAD, TYPE_U32))
2491 return;
2492 if (isFloatType(i->dType) || typeSizeof(i->dType) != 4)
2493 return;
2494 if (i->subOp || i->usesFlags() || i->flagsDef >= 0)
2495 return;
2496
2497 assert(!i->src(0).mod);
2498 assert(!i->src(1).mod);
2499 assert(i->op == OP_MUL ? 1 : !i->src(2).mod);
2500
2501 bld.setPosition(i, false);
2502
2503 Value *a = i->getSrc(0);
2504 Value *b = i->getSrc(1);
2505 Value *c = i->op == OP_MUL ? bld.mkImm(0) : i->getSrc(2);
2506
2507 Value *tmp0 = bld.getSSA();
2508 Value *tmp1 = bld.getSSA();
2509
2510 Instruction *insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp0, b, a, c);
2511 insn->setPredicate(i->cc, i->getPredicate());
2512
2513 insn = bld.mkOp3(OP_XMAD, TYPE_U32, tmp1, b, a, bld.mkImm(0));
2514 insn->setPredicate(i->cc, i->getPredicate());
2515 insn->subOp = NV50_IR_SUBOP_XMAD_MRG | NV50_IR_SUBOP_XMAD_H1(1);
2516
2517 Value *pred = i->getPredicate();
2518 i->setPredicate(i->cc, NULL);
2519
2520 i->op = OP_XMAD;
2521 i->setSrc(0, b);
2522 i->setSrc(1, tmp1);
2523 i->setSrc(2, tmp0);
2524 i->subOp = NV50_IR_SUBOP_XMAD_PSL | NV50_IR_SUBOP_XMAD_CBCC;
2525 i->subOp |= NV50_IR_SUBOP_XMAD_H1(0) | NV50_IR_SUBOP_XMAD_H1(1);
2526
2527 i->setPredicate(i->cc, pred);
2528 }
2529
2530 bool
2531 LateAlgebraicOpt::visit(Instruction *i)
2532 {
2533 switch (i->op) {
2534 case OP_ADD:
2535 handleADD(i);
2536 break;
2537 case OP_MUL:
2538 case OP_MAD:
2539 case OP_FMA:
2540 handleMULMAD(i);
2541 break;
2542 default:
2543 break;
2544 }
2545
2546 return true;
2547 }
2548
2549 // =============================================================================
2550
2551 // Split 64-bit MUL and MAD
2552 class Split64BitOpPreRA : public Pass
2553 {
2554 private:
2555 virtual bool visit(BasicBlock *);
2556 void split64MulMad(Function *, Instruction *, DataType);
2557
2558 BuildUtil bld;
2559 };
2560
2561 bool
2562 Split64BitOpPreRA::visit(BasicBlock *bb)
2563 {
2564 Instruction *i, *next;
2565 Modifier mod;
2566
2567 for (i = bb->getEntry(); i; i = next) {
2568 next = i->next;
2569
2570 DataType hTy;
2571 switch (i->dType) {
2572 case TYPE_U64: hTy = TYPE_U32; break;
2573 case TYPE_S64: hTy = TYPE_S32; break;
2574 default:
2575 continue;
2576 }
2577
2578 if (i->op == OP_MAD || i->op == OP_MUL)
2579 split64MulMad(func, i, hTy);
2580 }
2581
2582 return true;
2583 }
2584
2585 void
2586 Split64BitOpPreRA::split64MulMad(Function *fn, Instruction *i, DataType hTy)
2587 {
2588 assert(i->op == OP_MAD || i->op == OP_MUL);
2589 assert(!isFloatType(i->dType) && !isFloatType(i->sType));
2590 assert(typeSizeof(hTy) == 4);
2591
2592 bld.setPosition(i, true);
2593
2594 Value *zero = bld.mkImm(0u);
2595 Value *carry = bld.getSSA(1, FILE_FLAGS);
2596
2597 // We want to compute `d = a * b (+ c)?`, where a, b, c and d are 64-bit
2598 // values (a, b and c might be 32-bit values), using 32-bit operations. This
2599 // gives the following operations:
2600 // * `d.low = low(a.low * b.low) (+ c.low)?`
2601 // * `d.high = low(a.high * b.low) + low(a.low * b.high)
2602 // + high(a.low * b.low) (+ c.high)?`
2603 //
2604 // To compute the high bits, we can split in the following operations:
2605 // * `tmp1 = low(a.high * b.low) (+ c.high)?`
2606 // * `tmp2 = low(a.low * b.high) + tmp1`
2607 // * `d.high = high(a.low * b.low) + tmp2`
2608 //
2609 // mkSplit put lower bits at index 0 and higher bits at index 1
2610
2611 Value *op1[2];
2612 if (i->getSrc(0)->reg.size == 8)
2613 bld.mkSplit(op1, 4, i->getSrc(0));
2614 else {
2615 op1[0] = i->getSrc(0);
2616 op1[1] = zero;
2617 }
2618 Value *op2[2];
2619 if (i->getSrc(1)->reg.size == 8)
2620 bld.mkSplit(op2, 4, i->getSrc(1));
2621 else {
2622 op2[0] = i->getSrc(1);
2623 op2[1] = zero;
2624 }
2625
2626 Value *op3[2] = { NULL, NULL };
2627 if (i->op == OP_MAD) {
2628 if (i->getSrc(2)->reg.size == 8)
2629 bld.mkSplit(op3, 4, i->getSrc(2));
2630 else {
2631 op3[0] = i->getSrc(2);
2632 op3[1] = zero;
2633 }
2634 }
2635
2636 Value *tmpRes1Hi = bld.getSSA();
2637 if (i->op == OP_MAD)
2638 bld.mkOp3(OP_MAD, hTy, tmpRes1Hi, op1[1], op2[0], op3[1]);
2639 else
2640 bld.mkOp2(OP_MUL, hTy, tmpRes1Hi, op1[1], op2[0]);
2641
2642 Value *tmpRes2Hi = bld.mkOp3v(OP_MAD, hTy, bld.getSSA(), op1[0], op2[1], tmpRes1Hi);
2643
2644 Value *def[2] = { bld.getSSA(), bld.getSSA() };
2645
2646 // If it was a MAD, add the carry from the low bits
2647 // It is not needed if it was a MUL, since we added high(a.low * b.low) to
2648 // d.high
2649 if (i->op == OP_MAD)
2650 bld.mkOp3(OP_MAD, hTy, def[0], op1[0], op2[0], op3[0])->setFlagsDef(1, carry);
2651 else
2652 bld.mkOp2(OP_MUL, hTy, def[0], op1[0], op2[0]);
2653
2654 Instruction *hiPart3 = bld.mkOp3(OP_MAD, hTy, def[1], op1[0], op2[0], tmpRes2Hi);
2655 hiPart3->subOp = NV50_IR_SUBOP_MUL_HIGH;
2656 if (i->op == OP_MAD)
2657 hiPart3->setFlagsSrc(3, carry);
2658
2659 bld.mkOp2(OP_MERGE, i->dType, i->getDef(0), def[0], def[1]);
2660
2661 delete_Instruction(fn->getProgram(), i);
2662 }
2663
2664 // =============================================================================
2665
2666 static inline void
2667 updateLdStOffset(Instruction *ldst, int32_t offset, Function *fn)
2668 {
2669 if (offset != ldst->getSrc(0)->reg.data.offset) {
2670 if (ldst->getSrc(0)->refCount() > 1)
2671 ldst->setSrc(0, cloneShallow(fn, ldst->getSrc(0)));
2672 ldst->getSrc(0)->reg.data.offset = offset;
2673 }
2674 }
2675
2676 // Combine loads and stores, forward stores to loads where possible.
2677 class MemoryOpt : public Pass
2678 {
2679 private:
2680 class Record
2681 {
2682 public:
2683 Record *next;
2684 Instruction *insn;
2685 const Value *rel[2];
2686 const Value *base;
2687 int32_t offset;
2688 int8_t fileIndex;
2689 uint8_t size;
2690 bool locked;
2691 Record *prev;
2692
2693 bool overlaps(const Instruction *ldst) const;
2694
2695 inline void link(Record **);
2696 inline void unlink(Record **);
2697 inline void set(const Instruction *ldst);
2698 };
2699
2700 public:
2701 MemoryOpt();
2702
2703 Record *loads[DATA_FILE_COUNT];
2704 Record *stores[DATA_FILE_COUNT];
2705
2706 MemoryPool recordPool;
2707
2708 private:
2709 virtual bool visit(BasicBlock *);
2710 bool runOpt(BasicBlock *);
2711
2712 Record **getList(const Instruction *);
2713
2714 Record *findRecord(const Instruction *, bool load, bool& isAdjacent) const;
2715
2716 // merge @insn into load/store instruction from @rec
2717 bool combineLd(Record *rec, Instruction *ld);
2718 bool combineSt(Record *rec, Instruction *st);
2719
2720 bool replaceLdFromLd(Instruction *ld, Record *ldRec);
2721 bool replaceLdFromSt(Instruction *ld, Record *stRec);
2722 bool replaceStFromSt(Instruction *restrict st, Record *stRec);
2723
2724 void addRecord(Instruction *ldst);
2725 void purgeRecords(Instruction *const st, DataFile);
2726 void lockStores(Instruction *const ld);
2727 void reset();
2728
2729 private:
2730 Record *prevRecord;
2731 };
2732
2733 MemoryOpt::MemoryOpt() : recordPool(sizeof(MemoryOpt::Record), 6)
2734 {
2735 for (int i = 0; i < DATA_FILE_COUNT; ++i) {
2736 loads[i] = NULL;
2737 stores[i] = NULL;
2738 }
2739 prevRecord = NULL;
2740 }
2741
2742 void
2743 MemoryOpt::reset()
2744 {
2745 for (unsigned int i = 0; i < DATA_FILE_COUNT; ++i) {
2746 Record *it, *next;
2747 for (it = loads[i]; it; it = next) {
2748 next = it->next;
2749 recordPool.release(it);
2750 }
2751 loads[i] = NULL;
2752 for (it = stores[i]; it; it = next) {
2753 next = it->next;
2754 recordPool.release(it);
2755 }
2756 stores[i] = NULL;
2757 }
2758 }
2759
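// Try to widen the load recorded in @rec by merging in the adjacent load
// @ld, e.g. (sketch) two 4-byte loads at offsets 0x0 and 0x4 become one
// 8-byte load, subject to the target-support and alignment checks below.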
2760 bool
2761 MemoryOpt::combineLd(Record *rec, Instruction *ld)
2762 {
2763 int32_t offRc = rec->offset;
2764 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2765 int sizeRc = rec->size;
2766 int sizeLd = typeSizeof(ld->dType);
2767 int size = sizeRc + sizeLd;
2768 int d, j;
2769
2770 if (!prog->getTarget()->
2771 isAccessSupported(ld->getSrc(0)->reg.file, typeOfSize(size)))
2772 return false;
2773 // no unaligned loads
2774 if (((size == 0x8) && (MIN2(offLd, offRc) & 0x7)) ||
2775 ((size == 0xc) && (MIN2(offLd, offRc) & 0xf)))
2776 return false;
2777    // for compute, indirect loads are not guaranteed to be aligned
2778 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2779 return false;
2780
2781 assert(sizeRc + sizeLd <= 16 && offRc != offLd);
2782
2783 // lock any stores that overlap with the load being merged into the
2784 // existing record.
2785 lockStores(ld);
2786
2787 for (j = 0; sizeRc; sizeRc -= rec->insn->getDef(j)->reg.size, ++j);
2788
2789 if (offLd < offRc) {
2790 int sz;
2791 for (sz = 0, d = 0; sz < sizeLd; sz += ld->getDef(d)->reg.size, ++d);
2792 // d: nr of definitions in ld
2793 // j: nr of definitions in rec->insn, move:
2794 for (d = d + j - 1; j > 0; --j, --d)
2795 rec->insn->setDef(d, rec->insn->getDef(j - 1));
2796
2797 if (rec->insn->getSrc(0)->refCount() > 1)
2798 rec->insn->setSrc(0, cloneShallow(func, rec->insn->getSrc(0)));
2799 rec->offset = rec->insn->getSrc(0)->reg.data.offset = offLd;
2800
2801 d = 0;
2802 } else {
2803 d = j;
2804 }
2805 // move definitions of @ld to @rec->insn
2806 for (j = 0; sizeLd; ++j, ++d) {
2807 sizeLd -= ld->getDef(j)->reg.size;
2808 rec->insn->setDef(d, ld->getDef(j));
2809 }
2810
2811 rec->size = size;
2812 rec->insn->getSrc(0)->reg.size = size;
2813 rec->insn->setType(typeOfSize(size));
2814
2815 delete_Instruction(prog, ld);
2816
2817 return true;
2818 }
2819
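// Same idea for stores: merge the adjacent store @st into @rec's store,
// splicing the stored sources together in offset order (sketch: two 4-byte
// stores at 0x0/0x4 -> one 8-byte store).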
2820 bool
2821 MemoryOpt::combineSt(Record *rec, Instruction *st)
2822 {
2823 int32_t offRc = rec->offset;
2824 int32_t offSt = st->getSrc(0)->reg.data.offset;
2825 int sizeRc = rec->size;
2826 int sizeSt = typeSizeof(st->dType);
2827 int s = sizeSt / 4;
2828 int size = sizeRc + sizeSt;
2829 int j, k;
2830 Value *src[4]; // no modifiers in ValueRef allowed for st
2831 Value *extra[3];
2832
2833 if (!prog->getTarget()->
2834 isAccessSupported(st->getSrc(0)->reg.file, typeOfSize(size)))
2835 return false;
2836 // no unaligned stores
2837 if (size == 8 && MIN2(offRc, offSt) & 0x7)
2838 return false;
2839    // for compute, indirect stores are not guaranteed to be aligned
2840 if (prog->getType() == Program::TYPE_COMPUTE && rec->rel[0])
2841 return false;
2842
2843 // There's really no great place to put this in a generic manner. Seemingly
2844 // wide stores at 0x60 don't work in GS shaders on SM50+. Don't combine
2845 // those.
2846 if (prog->getTarget()->getChipset() >= NVISA_GM107_CHIPSET &&
2847 prog->getType() == Program::TYPE_GEOMETRY &&
2848 st->getSrc(0)->reg.file == FILE_SHADER_OUTPUT &&
2849 rec->rel[0] == NULL &&
2850 MIN2(offRc, offSt) == 0x60)
2851 return false;
2852
2853 // remove any existing load/store records for the store being merged into
2854 // the existing record.
2855 purgeRecords(st, DATA_FILE_COUNT);
2856
2857 st->takeExtraSources(0, extra); // save predicate and indirect address
2858
2859 if (offRc < offSt) {
2860 // save values from @st
2861 for (s = 0; sizeSt; ++s) {
2862 sizeSt -= st->getSrc(s + 1)->reg.size;
2863 src[s] = st->getSrc(s + 1);
2864 }
2865 // set record's values as low sources of @st
2866 for (j = 1; sizeRc; ++j) {
2867 sizeRc -= rec->insn->getSrc(j)->reg.size;
2868 st->setSrc(j, rec->insn->getSrc(j));
2869 }
2870 // set saved values as high sources of @st
2871 for (k = j, j = 0; j < s; ++j)
2872 st->setSrc(k++, src[j]);
2873
2874 updateLdStOffset(st, offRc, func);
2875 } else {
2876 for (j = 1; sizeSt; ++j)
2877 sizeSt -= st->getSrc(j)->reg.size;
2878 for (s = 1; sizeRc; ++j, ++s) {
2879 sizeRc -= rec->insn->getSrc(s)->reg.size;
2880 st->setSrc(j, rec->insn->getSrc(s));
2881 }
2882 rec->offset = offSt;
2883 }
2884 st->putExtraSources(0, extra); // restore pointer and predicate
2885
2886 delete_Instruction(prog, rec->insn);
2887 rec->insn = st;
2888 rec->size = size;
2889 rec->insn->getSrc(0)->reg.size = size;
2890 rec->insn->setType(typeOfSize(size));
2891 return true;
2892 }
2893
2894 void
2895 MemoryOpt::Record::set(const Instruction *ldst)
2896 {
2897 const Symbol *mem = ldst->getSrc(0)->asSym();
2898 fileIndex = mem->reg.fileIndex;
2899 rel[0] = ldst->getIndirect(0, 0);
2900 rel[1] = ldst->getIndirect(0, 1);
2901 offset = mem->reg.data.offset;
2902 base = mem->getBase();
2903 size = typeSizeof(ldst->sType);
2904 }
2905
2906 void
2907 MemoryOpt::Record::link(Record **list)
2908 {
2909 next = *list;
2910 if (next)
2911 next->prev = this;
2912 prev = NULL;
2913 *list = this;
2914 }
2915
2916 void
2917 MemoryOpt::Record::unlink(Record **list)
2918 {
2919 if (next)
2920 next->prev = prev;
2921 if (prev)
2922 prev->next = next;
2923 else
2924 *list = next;
2925 }
2926
2927 MemoryOpt::Record **
2928 MemoryOpt::getList(const Instruction *insn)
2929 {
2930 if (insn->op == OP_LOAD || insn->op == OP_VFETCH)
2931 return &loads[insn->src(0).getFile()];
2932 return &stores[insn->src(0).getFile()];
2933 }
2934
2935 void
2936 MemoryOpt::addRecord(Instruction *i)
2937 {
2938 Record **list = getList(i);
2939 Record *it = reinterpret_cast<Record *>(recordPool.allocate());
2940
2941 it->link(list);
2942 it->set(i);
2943 it->insn = i;
2944 it->locked = false;
2945 }
2946
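// Look for a previous load/store record whose range overlaps or directly
// adjoins @insn's; @isAdj tells the caller whether to combine (adjacent)
// or to replace/forward (overlapping).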
2947 MemoryOpt::Record *
2948 MemoryOpt::findRecord(const Instruction *insn, bool load, bool& isAdj) const
2949 {
2950 const Symbol *sym = insn->getSrc(0)->asSym();
2951 const int size = typeSizeof(insn->sType);
2952 Record *rec = NULL;
2953 Record *it = load ? loads[sym->reg.file] : stores[sym->reg.file];
2954
2955 for (; it; it = it->next) {
2956 if (it->locked && insn->op != OP_LOAD && insn->op != OP_VFETCH)
2957 continue;
2958 if ((it->offset >> 4) != (sym->reg.data.offset >> 4) ||
2959 it->rel[0] != insn->getIndirect(0, 0) ||
2960 it->fileIndex != sym->reg.fileIndex ||
2961 it->rel[1] != insn->getIndirect(0, 1))
2962 continue;
2963
2964 if (it->offset < sym->reg.data.offset) {
2965 if (it->offset + it->size >= sym->reg.data.offset) {
2966 isAdj = (it->offset + it->size == sym->reg.data.offset);
2967 if (!isAdj)
2968 return it;
2969 if (!(it->offset & 0x7))
2970 rec = it;
2971 }
2972 } else {
2973 isAdj = it->offset != sym->reg.data.offset;
2974 if (size <= it->size && !isAdj)
2975 return it;
2976 else
2977 if (!(sym->reg.data.offset & 0x7))
2978 if (it->offset - size <= sym->reg.data.offset)
2979 rec = it;
2980 }
2981 }
2982 return rec;
2983 }
2984
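// Store-to-load forwarding: if @ld's range lines up with whole sources of
// the recorded store and the sizes match, replace the load's defs with
// those stored values and drop the load.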
2985 bool
2986 MemoryOpt::replaceLdFromSt(Instruction *ld, Record *rec)
2987 {
2988 Instruction *st = rec->insn;
2989 int32_t offSt = rec->offset;
2990 int32_t offLd = ld->getSrc(0)->reg.data.offset;
2991 int d, s;
2992
2993 for (s = 1; offSt != offLd && st->srcExists(s); ++s)
2994 offSt += st->getSrc(s)->reg.size;
2995 if (offSt != offLd)
2996 return false;
2997
2998 for (d = 0; ld->defExists(d) && st->srcExists(s); ++d, ++s) {
2999 if (ld->getDef(d)->reg.size != st->getSrc(s)->reg.size)
3000 return false;
3001 if (st->getSrc(s)->reg.file != FILE_GPR)
3002 return false;
3003 ld->def(d).replace(st->src(s), false);
3004 }
3005 ld->bb->remove(ld);
3006 return true;
3007 }
3008
3009 bool
3010 MemoryOpt::replaceLdFromLd(Instruction *ldE, Record *rec)
3011 {
3012 Instruction *ldR = rec->insn;
3013 int32_t offR = rec->offset;
3014 int32_t offE = ldE->getSrc(0)->reg.data.offset;
3015 int dR, dE;
3016
3017 assert(offR <= offE);
3018 for (dR = 0; offR < offE && ldR->defExists(dR); ++dR)
3019 offR += ldR->getDef(dR)->reg.size;
3020 if (offR != offE)
3021 return false;
3022
3023 for (dE = 0; ldE->defExists(dE) && ldR->defExists(dR); ++dE, ++dR) {
3024 if (ldE->getDef(dE)->reg.size != ldR->getDef(dR)->reg.size)
3025 return false;
3026 ldE->def(dE).replace(ldR->getDef(dR), false);
3027 }
3028
3029 delete_Instruction(prog, ldE);
3030 return true;
3031 }
3032
3033 bool
3034 MemoryOpt::replaceStFromSt(Instruction *restrict st, Record *rec)
3035 {
3036 const Instruction *const ri = rec->insn;
3037 Value *extra[3];
3038
3039 int32_t offS = st->getSrc(0)->reg.data.offset;
3040 int32_t offR = rec->offset;
3041 int32_t endS = offS + typeSizeof(st->dType);
3042 int32_t endR = offR + typeSizeof(ri->dType);
3043
3044 rec->size = MAX2(endS, endR) - MIN2(offS, offR);
3045
3046 st->takeExtraSources(0, extra);
3047
3048 if (offR < offS) {
3049 Value *vals[10];
3050 int s, n;
3051 int k = 0;
3052 // get non-replaced sources of ri
3053 for (s = 1; offR < offS; offR += ri->getSrc(s)->reg.size, ++s)
3054 vals[k++] = ri->getSrc(s);
3055 n = s;
3056 // get replaced sources of st
3057 for (s = 1; st->srcExists(s); offS += st->getSrc(s)->reg.size, ++s)
3058 vals[k++] = st->getSrc(s);
3059 // skip replaced sources of ri
3060 for (s = n; offR < endS; offR += ri->getSrc(s)->reg.size, ++s);
3061 // get non-replaced sources after values covered by st
3062 for (; offR < endR; offR += ri->getSrc(s)->reg.size, ++s)
3063 vals[k++] = ri->getSrc(s);
3064 assert((unsigned int)k <= ARRAY_SIZE(vals));
3065 for (s = 0; s < k; ++s)
3066 st->setSrc(s + 1, vals[s]);
3067 st->setSrc(0, ri->getSrc(0));
3068 } else
3069 if (endR > endS) {
3070 int j, s;
3071 for (j = 1; offR < endS; offR += ri->getSrc(j++)->reg.size);
3072 for (s = 1; offS < endS; offS += st->getSrc(s++)->reg.size);
3073 for (; offR < endR; offR += ri->getSrc(j++)->reg.size)
3074 st->setSrc(s++, ri->getSrc(j));
3075 }
3076 st->putExtraSources(0, extra);
3077
3078 delete_Instruction(prog, rec->insn);
3079
3080 rec->insn = st;
3081 rec->offset = st->getSrc(0)->reg.data.offset;
3082
3083 st->setType(typeOfSize(rec->size));
3084
3085 return true;
3086 }
3087
3088 bool
3089 MemoryOpt::Record::overlaps(const Instruction *ldst) const
3090 {
3091 Record that;
3092 that.set(ldst);
3093
3094 // This assumes that images/buffers can't overlap. They can.
3095 // TODO: Plumb the restrict logic through, and only skip when it's a
3096 // restrict situation, or there can implicitly be no writes.
3097 if (this->fileIndex != that.fileIndex && this->rel[1] == that.rel[1])
3098 return false;
3099
3100 if (this->rel[0] || that.rel[0])
3101 return this->base == that.base;
3102
3103 return
3104 (this->offset < that.offset + that.size) &&
3105 (this->offset + this->size > that.offset);
3106 }
3107
3108 // Once a load @ld has read a location, we must not eliminate stores that
3109 // affect its result, and we may no longer merge them with later stores;
3110 // such records are locked below.
3111 // The stored value can, however, still be used to determine the value
3112 // returned by future loads.
3113 void
3114 MemoryOpt::lockStores(Instruction *const ld)
3115 {
3116 for (Record *r = stores[ld->src(0).getFile()]; r; r = r->next)
3117 if (!r->locked && r->overlaps(ld))
3118 r->locked = true;
3119 }
3120
3121 // Prior loads from the location of @st are no longer valid.
3122 // Stores to the location of @st may no longer be used to derive
3123 // the value at it nor be coalesced into later stores.
3124 void
3125 MemoryOpt::purgeRecords(Instruction *const st, DataFile f)
3126 {
3127 if (st)
3128 f = st->src(0).getFile();
3129
3130 for (Record *r = loads[f]; r; r = r->next)
3131 if (!st || r->overlaps(st))
3132 r->unlink(&loads[f]);
3133
3134 for (Record *r = stores[f]; r; r = r->next)
3135 if (!st || r->overlaps(st))
3136 r->unlink(&stores[f]);
3137 }
3138
3139 bool
3140 MemoryOpt::visit(BasicBlock *bb)
3141 {
3142 bool ret = runOpt(bb);
3143    // Run again: one pass won't combine four 32-bit ld/st into a single
3144    // 128-bit ld/st where 96-bit memory operations are forbidden.
3145 if (ret)
3146 ret = runOpt(bb);
3147 return ret;
3148 }
3149
3150 bool
3151 MemoryOpt::runOpt(BasicBlock *bb)
3152 {
3153 Instruction *ldst, *next;
3154 Record *rec;
3155 bool isAdjacent = true;
3156
3157 for (ldst = bb->getEntry(); ldst; ldst = next) {
3158 bool keep = true;
3159 bool isLoad = true;
3160 next = ldst->next;
3161
3162 if (ldst->op == OP_LOAD || ldst->op == OP_VFETCH) {
3163 if (ldst->isDead()) {
3164 // might have been produced by earlier optimization
3165 delete_Instruction(prog, ldst);
3166 continue;
3167 }
3168 } else
3169 if (ldst->op == OP_STORE || ldst->op == OP_EXPORT) {
3170 if (typeSizeof(ldst->dType) == 4 &&
3171 ldst->src(1).getFile() == FILE_GPR &&
3172 ldst->getSrc(1)->getInsn()->op == OP_NOP) {
3173 delete_Instruction(prog, ldst);
3174 continue;
3175 }
3176 isLoad = false;
3177 } else {
3178 // TODO: maybe have all fixed ops act as barrier ?
3179 if (ldst->op == OP_CALL ||
3180 ldst->op == OP_BAR ||
3181 ldst->op == OP_MEMBAR) {
3182 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3183 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3184 purgeRecords(NULL, FILE_MEMORY_SHARED);
3185 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3186 } else
3187 if (ldst->op == OP_ATOM || ldst->op == OP_CCTL) {
3188 if (ldst->src(0).getFile() == FILE_MEMORY_GLOBAL) {
3189 purgeRecords(NULL, FILE_MEMORY_LOCAL);
3190 purgeRecords(NULL, FILE_MEMORY_GLOBAL);
3191 purgeRecords(NULL, FILE_MEMORY_SHARED);
3192 } else {
3193 purgeRecords(NULL, ldst->src(0).getFile());
3194 }
3195 } else
3196 if (ldst->op == OP_EMIT || ldst->op == OP_RESTART) {
3197 purgeRecords(NULL, FILE_SHADER_OUTPUT);
3198 }
3199 continue;
3200 }
3201 if (ldst->getPredicate()) // TODO: handle predicated ld/st
3202 continue;
3203 if (ldst->perPatch) // TODO: create separate per-patch lists
3204 continue;
3205
3206 if (isLoad) {
3207 DataFile file = ldst->src(0).getFile();
3208
3209 // if ld l[]/g[] look for previous store to eliminate the reload
3210 if (file == FILE_MEMORY_GLOBAL || file == FILE_MEMORY_LOCAL) {
3211 // TODO: shared memory ?
3212 rec = findRecord(ldst, false, isAdjacent);
3213 if (rec && !isAdjacent)
3214 keep = !replaceLdFromSt(ldst, rec);
3215 }
3216
3217 // or look for ld from the same location and replace this one
3218 rec = keep ? findRecord(ldst, true, isAdjacent) : NULL;
3219 if (rec) {
3220 if (!isAdjacent)
3221 keep = !replaceLdFromLd(ldst, rec);
3222 else
3223 // or combine a previous load with this one
3224 keep = !combineLd(rec, ldst);
3225 }
3226 if (keep)
3227 lockStores(ldst);
3228 } else {
3229 rec = findRecord(ldst, false, isAdjacent);
3230 if (rec) {
3231 if (!isAdjacent)
3232 keep = !replaceStFromSt(ldst, rec);
3233 else
3234 keep = !combineSt(rec, ldst);
3235 }
3236 if (keep)
3237 purgeRecords(ldst, DATA_FILE_COUNT);
3238 }
3239 if (keep)
3240 addRecord(ldst);
3241 }
3242 reset();
3243
3244 return true;
3245 }
3246
3247 // =============================================================================
3248
3249 // Turn control flow into predicated instructions (after register allocation !).
3250 // TODO:
3251 // Could move this to before register allocation on NVC0 and also handle nested
3252 // constructs.
3253 class FlatteningPass : public Pass
3254 {
3255 private:
3256 virtual bool visit(Function *);
3257 virtual bool visit(BasicBlock *);
3258
3259 bool tryPredicateConditional(BasicBlock *);
3260 void predicateInstructions(BasicBlock *, Value *pred, CondCode cc);
3261 void tryPropagateBranch(BasicBlock *);
3262 inline bool isConstantCondition(Value *pred);
3263 inline bool mayPredicate(const Instruction *, const Value *pred) const;
3264 inline void removeFlow(Instruction *);
3265
3266 uint8_t gpr_unit;
3267 };
3268
3269 bool
3270 FlatteningPass::isConstantCondition(Value *pred)
3271 {
3272 Instruction *insn = pred->getUniqueInsn();
3273 assert(insn);
3274 if (insn->op != OP_SET || insn->srcExists(2))
3275 return false;
3276
3277 for (int s = 0; s < 2 && insn->srcExists(s); ++s) {
3278 Instruction *ld = insn->getSrc(s)->getUniqueInsn();
3279 DataFile file;
3280 if (ld) {
3281 if (ld->op != OP_MOV && ld->op != OP_LOAD)
3282 return false;
3283 if (ld->src(0).isIndirect(0))
3284 return false;
3285 file = ld->src(0).getFile();
3286 } else {
3287 file = insn->src(s).getFile();
3288 // catch $r63 on NVC0 and $r63/$r127 on NV50. Unfortunately maxGPR is
3289 // in register "units", which can vary between targets.
3290 if (file == FILE_GPR) {
3291 Value *v = insn->getSrc(s);
3292 int bytes = v->reg.data.id * MIN2(v->reg.size, 4);
3293 int units = bytes >> gpr_unit;
3294 if (units > prog->maxGPR)
3295 file = FILE_IMMEDIATE;
3296 }
3297 }
3298 if (file != FILE_IMMEDIATE && file != FILE_MEMORY_CONST)
3299 return false;
3300 }
3301 return true;
3302 }
3303
3304 void
3305 FlatteningPass::removeFlow(Instruction *insn)
3306 {
3307 FlowInstruction *term = insn ? insn->asFlow() : NULL;
3308 if (!term)
3309 return;
3310 Graph::Edge::Type ty = term->bb->cfg.outgoing().getType();
3311
3312 if (term->op == OP_BRA) {
3313 // TODO: this might get more difficult when we get arbitrary BRAs
3314 if (ty == Graph::Edge::CROSS || ty == Graph::Edge::BACK)
3315 return;
3316 } else
3317 if (term->op != OP_JOIN)
3318 return;
3319
3320 Value *pred = term->getPredicate();
3321
3322 delete_Instruction(prog, term);
3323
3324 if (pred && pred->refCount() == 0) {
3325 Instruction *pSet = pred->getUniqueInsn();
3326 pred->join->reg.data.id = -1; // deallocate
3327 if (pSet->isDead())
3328 delete_Instruction(prog, pSet);
3329 }
3330 }
3331
3332 void
3333 FlatteningPass::predicateInstructions(BasicBlock *bb, Value *pred, CondCode cc)
3334 {
3335 for (Instruction *i = bb->getEntry(); i; i = i->next) {
3336 if (i->isNop())
3337 continue;
3338 assert(!i->getPredicate());
3339 i->setPredicate(cc, pred);
3340 }
3341 removeFlow(bb->getExit());
3342 }
3343
3344 bool
3345 FlatteningPass::mayPredicate(const Instruction *insn, const Value *pred) const
3346 {
3347 if (insn->isPseudo())
3348 return true;
3349 // TODO: calls where we don't know which registers are modified
3350
3351 if (!prog->getTarget()->mayPredicate(insn, pred))
3352 return false;
3353 for (int d = 0; insn->defExists(d); ++d)
3354 if (insn->getDef(d)->equals(pred))
3355 return false;
3356 return true;
3357 }
3358
3359 // If we jump to BRA/RET/EXIT, replace the jump with it.
3360 // NOTE: We do not update the CFG anymore here !
3361 //
3362 // TODO: Handle cases where we skip over a branch (maybe do that elsewhere ?):
3363 // BB:0
3364 // @p0 bra BB:2 -> @!p0 bra BB:3 iff (!) BB:2 immediately adjoins BB:1
3365 // BB1:
3366 // bra BB:3
3367 // BB2:
3368 // ...
3369 // BB3:
3370 // ...
3371 void
3372 FlatteningPass::tryPropagateBranch(BasicBlock *bb)
3373 {
3374 for (Instruction *i = bb->getExit(); i && i->op == OP_BRA; i = i->prev) {
3375 BasicBlock *bf = i->asFlow()->target.bb;
3376
3377 if (bf->getInsnCount() != 1)
3378 continue;
3379
3380 FlowInstruction *bra = i->asFlow();
3381 FlowInstruction *rep = bf->getExit()->asFlow();
3382
3383 if (!rep || rep->getPredicate())
3384 continue;
3385 if (rep->op != OP_BRA &&
3386 rep->op != OP_JOIN &&
3387 rep->op != OP_EXIT)
3388 continue;
3389
3390 // TODO: If there are multiple branches to @rep, only the first would
3391 // be replaced, so only remove them after this pass is done ?
3392 // Also, need to check all incident blocks for fall-through exits and
3393 // add the branch there.
3394 bra->op = rep->op;
3395 bra->target.bb = rep->target.bb;
3396 if (bf->cfg.incidentCount() == 1)
3397 bf->remove(rep);
3398 }
3399 }
3400
3401 bool
3402 FlatteningPass::visit(Function *fn)
3403 {
3404 gpr_unit = prog->getTarget()->getFileUnit(FILE_GPR);
3405
3406 return true;
3407 }
3408
3409 bool
3410 FlatteningPass::visit(BasicBlock *bb)
3411 {
3412 if (tryPredicateConditional(bb))
3413 return true;
3414
3415 // try to attach join to previous instruction
3416 if (prog->getTarget()->hasJoin) {
3417 Instruction *insn = bb->getExit();
3418 if (insn && insn->op == OP_JOIN && !insn->getPredicate()) {
3419 insn = insn->prev;
3420 if (insn && !insn->getPredicate() &&
3421 !insn->asFlow() &&
3422 insn->op != OP_DISCARD &&
3423 insn->op != OP_TEXBAR &&
3424 !isTextureOp(insn->op) && // probably just nve4
3425 !isSurfaceOp(insn->op) && // not confirmed
3426 insn->op != OP_LINTERP && // probably just nve4
3427 insn->op != OP_PINTERP && // probably just nve4
3428 ((insn->op != OP_LOAD && insn->op != OP_STORE && insn->op != OP_ATOM) ||
3429 (typeSizeof(insn->dType) <= 4 && !insn->src(0).isIndirect(0))) &&
3430 !insn->isNop()) {
3431 insn->join = 1;
3432 bb->remove(bb->getExit());
3433 return true;
3434 }
3435 }
3436 }
3437
3438 tryPropagateBranch(bb);
3439
3440 return true;
3441 }
3442
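// If-conversion sketch (hypothetical IR): a short diamond
//   @p bra ELSE ; THEN: A ; ELSE: B
// becomes straight-line code  @p A ; @!p B  with no branches; `limit'
// bounds how many instructions we are willing to predicate per side.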
3443 bool
3444 FlatteningPass::tryPredicateConditional(BasicBlock *bb)
3445 {
3446 BasicBlock *bL = NULL, *bR = NULL;
3447 unsigned int nL = 0, nR = 0, limit = 12;
3448 Instruction *insn;
3449 unsigned int mask;
3450
3451 mask = bb->initiatesSimpleConditional();
3452 if (!mask)
3453 return false;
3454
3455 assert(bb->getExit());
3456 Value *pred = bb->getExit()->getPredicate();
3457 assert(pred);
3458
3459 if (isConstantCondition(pred))
3460 limit = 4;
3461
3462 Graph::EdgeIterator ei = bb->cfg.outgoing();
3463
3464 if (mask & 1) {
3465 bL = BasicBlock::get(ei.getNode());
3466 for (insn = bL->getEntry(); insn; insn = insn->next, ++nL)
3467 if (!mayPredicate(insn, pred))
3468 return false;
3469 if (nL > limit)
3470 return false; // too long, do a real branch
3471 }
3472 ei.next();
3473
3474 if (mask & 2) {
3475 bR = BasicBlock::get(ei.getNode());
3476 for (insn = bR->getEntry(); insn; insn = insn->next, ++nR)
3477 if (!mayPredicate(insn, pred))
3478 return false;
3479 if (nR > limit)
3480 return false; // too long, do a real branch
3481 }
3482
3483 if (bL)
3484 predicateInstructions(bL, pred, bb->getExit()->cc);
3485 if (bR)
3486 predicateInstructions(bR, pred, inverseCondCode(bb->getExit()->cc));
3487
3488 if (bb->joinAt) {
3489 bb->remove(bb->joinAt);
3490 bb->joinAt = NULL;
3491 }
3492 removeFlow(bb->getExit()); // delete the branch/join at the fork point
3493
3494 // remove potential join operations at the end of the conditional
3495 if (prog->getTarget()->joinAnterior) {
3496 bb = BasicBlock::get((bL ? bL : bR)->cfg.outgoing().getNode());
3497 if (bb->getEntry() && bb->getEntry()->op == OP_JOIN)
3498 removeFlow(bb->getEntry());
3499 }
3500
3501 return true;
3502 }
3503
3504 // =============================================================================
3505
3506 // Fold Immediate into MAD; must be done after register allocation due to
3507 // constraint SDST == SSRC2
3508 // TODO:
3509 // Does NVC0+ have other situations where this pass makes sense?
3510 class PostRaLoadPropagation : public Pass
3511 {
3512 private:
3513 virtual bool visit(Instruction *);
3514
3515 void handleMADforNV50(Instruction *);
3516 void handleMADforNVC0(Instruction *);
3517 };
3518
3519 static bool
3520 post_ra_dead(Instruction *i)
3521 {
3522 for (int d = 0; i->defExists(d); ++d)
3523 if (i->getDef(d)->refCount())
3524 return false;
3525 return true;
3526 }
3527
3528 // Fold Immediate into MAD; must be done after register allocation due to
3529 // constraint SDST == SSRC2
3530 void
3531 PostRaLoadPropagation::handleMADforNV50(Instruction *i)
3532 {
3533 if (i->def(0).getFile() != FILE_GPR ||
3534 i->src(0).getFile() != FILE_GPR ||
3535 i->src(1).getFile() != FILE_GPR ||
3536 i->src(2).getFile() != FILE_GPR ||
3537 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3538 return;
3539
3540 if (i->getDef(0)->reg.data.id >= 64 ||
3541 i->getSrc(0)->reg.data.id >= 64)
3542 return;
3543
3544 if (i->flagsSrc >= 0 && i->getSrc(i->flagsSrc)->reg.data.id != 0)
3545 return;
3546
3547 if (i->getPredicate())
3548 return;
3549
3550 Value *vtmp;
3551 Instruction *def = i->getSrc(1)->getInsn();
3552
3553 if (def && def->op == OP_SPLIT && typeSizeof(def->sType) == 4)
3554 def = def->getSrc(0)->getInsn();
3555 if (def && def->op == OP_MOV && def->src(0).getFile() == FILE_IMMEDIATE) {
3556 vtmp = i->getSrc(1);
3557 if (isFloatType(i->sType)) {
3558 i->setSrc(1, def->getSrc(0));
3559 } else {
3560 ImmediateValue val;
3561 // getImmediate() has side-effects on the argument so this *shouldn't*
3562 // be folded into the assert()
3563 ASSERTED bool ret = def->src(0).getImmediate(val);
3564 assert(ret);
3565 if (i->getSrc(1)->reg.data.id & 1)
3566 val.reg.data.u32 >>= 16;
3567 val.reg.data.u32 &= 0xffff;
3568 i->setSrc(1, new_ImmediateValue(prog, val.reg.data.u32));
3569 }
3570
3571 /* There's no post-RA dead code elimination, so do it here
3572 * XXX: if we add more code-removing post-RA passes, we might
3573 * want to create a post-RA dead-code elim pass */
3574 if (post_ra_dead(vtmp->getInsn())) {
3575 Value *src = vtmp->getInsn()->getSrc(0);
3576 // Careful -- splits will have already been removed from the
3577 // functions. Don't double-delete.
3578 if (vtmp->getInsn()->bb)
3579 delete_Instruction(prog, vtmp->getInsn());
3580 if (src->getInsn() && post_ra_dead(src->getInsn()))
3581 delete_Instruction(prog, src->getInsn());
3582 }
3583 }
3584 }
3585
3586 void
3587 PostRaLoadPropagation::handleMADforNVC0(Instruction *i)
3588 {
3589 if (i->def(0).getFile() != FILE_GPR ||
3590 i->src(0).getFile() != FILE_GPR ||
3591 i->src(1).getFile() != FILE_GPR ||
3592 i->src(2).getFile() != FILE_GPR ||
3593 i->getDef(0)->reg.data.id != i->getSrc(2)->reg.data.id)
3594 return;
3595
3596 // TODO: gm107 can also do this for S32, maybe other chipsets as well
3597 if (i->dType != TYPE_F32)
3598 return;
3599
3600 if ((i->src(2).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3601 return;
3602
3603 ImmediateValue val;
3604 int s;
3605
3606 if (i->src(0).getImmediate(val))
3607 s = 1;
3608 else if (i->src(1).getImmediate(val))
3609 s = 0;
3610 else
3611 return;
3612
3613 if ((i->src(s).mod | Modifier(NV50_IR_MOD_NEG)) != Modifier(NV50_IR_MOD_NEG))
3614 return;
3615
3616 if (s == 1)
3617 i->swapSources(0, 1);
3618
3619 Instruction *imm = i->getSrc(1)->getInsn();
3620 i->setSrc(1, imm->getSrc(0));
3621 if (post_ra_dead(imm))
3622 delete_Instruction(prog, imm);
3623 }
3624
3625 bool
3626 PostRaLoadPropagation::visit(Instruction *i)
3627 {
3628 switch (i->op) {
3629 case OP_FMA:
3630 case OP_MAD:
3631 if (prog->getTarget()->getChipset() < 0xc0)
3632 handleMADforNV50(i);
3633 else
3634 handleMADforNVC0(i);
3635 break;
3636 default:
3637 break;
3638 }
3639
3640 return true;
3641 }
3642
3643 // =============================================================================
3644
3645 // Common subexpression elimination. Stupid O(n^2) implementation.
3646 class LocalCSE : public Pass
3647 {
3648 private:
3649 virtual bool visit(BasicBlock *);
3650
3651 inline bool tryReplace(Instruction **, Instruction *);
3652
3653 DLList ops[OP_LAST + 1];
3654 };
3655
3656 class GlobalCSE : public Pass
3657 {
3658 private:
3659 virtual bool visit(BasicBlock *);
3660 };
3661
3662 bool
3663 Instruction::isActionEqual(const Instruction *that) const
3664 {
3665 if (this->op != that->op ||
3666 this->dType != that->dType ||
3667 this->sType != that->sType)
3668 return false;
3669 if (this->cc != that->cc)
3670 return false;
3671
3672 if (this->asTex()) {
3673 if (memcmp(&this->asTex()->tex,
3674 &that->asTex()->tex,
3675 sizeof(this->asTex()->tex)))
3676 return false;
3677 } else
3678 if (this->asCmp()) {
3679 if (this->asCmp()->setCond != that->asCmp()->setCond)
3680 return false;
3681 } else
3682 if (this->asFlow()) {
3683 return false;
3684 } else
3685 if (this->op == OP_PHI && this->bb != that->bb) {
3686 /* TODO: we could probably be a bit smarter here by following the
3687 * control flow, but honestly, it is quite painful to check */
3688 return false;
3689 } else {
3690 if (this->ipa != that->ipa ||
3691 this->lanes != that->lanes ||
3692 this->perPatch != that->perPatch)
3693 return false;
3694 if (this->postFactor != that->postFactor)
3695 return false;
3696 }
3697
3698 if (this->subOp != that->subOp ||
3699 this->saturate != that->saturate ||
3700 this->rnd != that->rnd ||
3701 this->ftz != that->ftz ||
3702 this->dnz != that->dnz ||
3703 this->cache != that->cache ||
3704 this->mask != that->mask)
3705 return false;
3706
3707 return true;
3708 }
3709
3710 bool
3711 Instruction::isResultEqual(const Instruction *that) const
3712 {
3713 unsigned int d, s;
3714
3715 // NOTE: location of discard only affects tex with liveOnly and quadops
3716 if (!this->defExists(0) && this->op != OP_DISCARD)
3717 return false;
3718
3719 if (!isActionEqual(that))
3720 return false;
3721
3722 if (this->predSrc != that->predSrc)
3723 return false;
3724
3725 for (d = 0; this->defExists(d); ++d) {
3726 if (!that->defExists(d) ||
3727 !this->getDef(d)->equals(that->getDef(d), false))
3728 return false;
3729 }
3730 if (that->defExists(d))
3731 return false;
3732
3733 for (s = 0; this->srcExists(s); ++s) {
3734 if (!that->srcExists(s))
3735 return false;
3736 if (this->src(s).mod != that->src(s).mod)
3737 return false;
3738 if (!this->getSrc(s)->equals(that->getSrc(s), true))
3739 return false;
3740 }
3741 if (that->srcExists(s))
3742 return false;
3743
3744 if (op == OP_LOAD || op == OP_VFETCH || op == OP_ATOM) {
3745 switch (src(0).getFile()) {
3746 case FILE_MEMORY_CONST:
3747 case FILE_SHADER_INPUT:
3748 return true;
3749 case FILE_SHADER_OUTPUT:
3750 return bb->getProgram()->getType() == Program::TYPE_TESSELLATION_EVAL;
3751 default:
3752 return false;
3753 }
3754 }
3755
3756 return true;
3757 }
3758
3759 // pull through common expressions from different in-blocks
3760 bool
3761 GlobalCSE::visit(BasicBlock *bb)
3762 {
3763 Instruction *phi, *next, *ik;
3764 int s;
3765
3766 // TODO: maybe do this with OP_UNION, too
3767
3768 for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = next) {
3769 next = phi->next;
3770 if (phi->getSrc(0)->refCount() > 1)
3771 continue;
3772 ik = phi->getSrc(0)->getInsn();
3773 if (!ik)
3774 continue; // probably a function input
3775 if (ik->defCount(0xff) > 1)
3776 continue; // too painful to check if we can really push this forward
3777 for (s = 1; phi->srcExists(s); ++s) {
3778 if (phi->getSrc(s)->refCount() > 1)
3779 break;
3780 if (!phi->getSrc(s)->getInsn() ||
3781 !phi->getSrc(s)->getInsn()->isResultEqual(ik))
3782 break;
3783 }
3784 if (!phi->srcExists(s)) {
3785 assert(ik->op != OP_PHI);
3786 Instruction *entry = bb->getEntry();
3787 ik->bb->remove(ik);
3788 if (!entry || entry->op != OP_JOIN)
3789 bb->insertHead(ik);
3790 else
3791 bb->insertAfter(entry, ik);
3792 ik->setDef(0, phi->getDef(0));
3793 delete_Instruction(prog, phi);
3794 }
3795 }
3796
3797 return true;
3798 }
3799
3800 bool
3801 LocalCSE::tryReplace(Instruction **ptr, Instruction *i)
3802 {
3803 Instruction *old = *ptr;
3804
3805 // TODO: maybe relax this later (causes trouble with OP_UNION)
3806 if (i->isPredicated())
3807 return false;
3808
3809 if (!old->isResultEqual(i))
3810 return false;
3811
3812 for (int d = 0; old->defExists(d); ++d)
3813 old->def(d).replace(i->getDef(d), false);
3814 delete_Instruction(prog, old);
3815 *ptr = NULL;
3816 return true;
3817 }
3818
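// Strategy: if an instruction reads an LValue, walk the use list of its
// least-referenced source looking for an earlier identical instruction in
// this block; otherwise scan the per-opcode list. Iterate until a full
// pass makes no replacement.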
3819 bool
3820 LocalCSE::visit(BasicBlock *bb)
3821 {
3822 unsigned int replaced;
3823
3824 do {
3825 Instruction *ir, *next;
3826
3827 replaced = 0;
3828
3829 // will need to know the order of instructions
3830 int serial = 0;
3831 for (ir = bb->getFirst(); ir; ir = ir->next)
3832 ir->serial = serial++;
3833
3834 for (ir = bb->getFirst(); ir; ir = next) {
3835 int s;
3836 Value *src = NULL;
3837
3838 next = ir->next;
3839
3840 if (ir->fixed) {
3841 ops[ir->op].insert(ir);
3842 continue;
3843 }
3844
3845 for (s = 0; ir->srcExists(s); ++s)
3846 if (ir->getSrc(s)->asLValue())
3847 if (!src || ir->getSrc(s)->refCount() < src->refCount())
3848 src = ir->getSrc(s);
3849
3850 if (src) {
3851 for (Value::UseIterator it = src->uses.begin();
3852 it != src->uses.end(); ++it) {
3853 Instruction *ik = (*it)->getInsn();
3854 if (ik && ik->bb == ir->bb && ik->serial < ir->serial)
3855 if (tryReplace(&ir, ik))
3856 break;
3857 }
3858 } else {
3859 DLLIST_FOR_EACH(&ops[ir->op], iter)
3860 {
3861 Instruction *ik = reinterpret_cast<Instruction *>(iter.get());
3862 if (tryReplace(&ir, ik))
3863 break;
3864 }
3865 }
3866
3867 if (ir)
3868 ops[ir->op].insert(ir);
3869 else
3870 ++replaced;
3871 }
3872 for (unsigned int i = 0; i <= OP_LAST; ++i)
3873 ops[i].clear();
3874
3875 } while (replaced);
3876
3877 return true;
3878 }
3879
3880 // =============================================================================
3881
3882 // Remove computations of unused values.
3883 class DeadCodeElim : public Pass
3884 {
3885 public:
3886 bool buryAll(Program *);
3887
3888 private:
3889 virtual bool visit(BasicBlock *);
3890
3891 void checkSplitLoad(Instruction *ld); // for partially dead loads
3892
3893 unsigned int deadCount;
3894 };
3895
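// Run dead code elimination to a fixed point: removing an instruction can
// drop the last use of another instruction's result, so keep sweeping until
// a pass finds nothing dead.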
bool
DeadCodeElim::buryAll(Program *prog)
{
   do {
      deadCount = 0;
      if (!this->run(prog, false, false))
         return false;
   } while (deadCount);

   return true;
}

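// Walk each block backwards so that an instruction's users are visited (and
// possibly deleted) before the instruction itself, letting a single sweep
// remove whole dependency chains. Ops with side effects are never deleted,
// but their unused definitions are dropped where the hardware allows it,
// e.g. an atomic exchange whose result is never read becomes a plain store.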
bool
DeadCodeElim::visit(BasicBlock *bb)
{
   Instruction *prev;

   for (Instruction *i = bb->getExit(); i; i = prev) {
      prev = i->prev;
      if (i->isDead()) {
         ++deadCount;
         delete_Instruction(prog, i);
      } else
      if (i->defExists(1) &&
          i->subOp == 0 &&
          (i->op == OP_VFETCH || i->op == OP_LOAD)) {
         checkSplitLoad(i);
      } else
      if (i->defExists(0) && !i->getDef(0)->refCount()) {
         if (i->op == OP_ATOM ||
             i->op == OP_SUREDP ||
             i->op == OP_SUREDB) {
            i->setDef(0, NULL);
            if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_EXCH) {
               i->cache = CACHE_CV;
               i->op = OP_STORE;
               i->subOp = 0;
            }
         } else if (i->op == OP_LOAD && i->subOp == NV50_IR_SUBOP_LOAD_LOCKED) {
            i->setDef(0, i->getDef(1));
            i->setDef(1, NULL);
         }
      }
   }
   return true;
}

// Each load can go into up to 4 destinations, any of which might potentially
// be dead (i.e. a hole). These can always be split into 2 loads, independent
// of where the holes are. We find the first contiguous region, put it into
// the first load, and then put the second contiguous region into the second
// load. There can be at most 2 contiguous regions.
//
// Note that there are some restrictions, for example it's not possible to do
// a 64-bit load that's not 64-bit aligned, so such a load has to be split
// up. Also hardware doesn't support 96-bit loads, so those also have to be
// split into a 64-bit and 32-bit load.
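//
// Worked example (illustrative): a 128-bit load with live-def mask 1101
// (component 1 dead) becomes a 32-bit load of component 0 plus a 64-bit
// load of components 2-3 at offset +8. A load whose first live region
// spans 3 components (96 bits) is scaled back to 64 bits and the remaining
// 32 bits go into the second load, since 96-bit accesses are unsupported.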
void
DeadCodeElim::checkSplitLoad(Instruction *ld1)
{
   Instruction *ld2 = NULL; // can get at most 2 loads
   Value *def1[4];
   Value *def2[4];
   int32_t addr1, addr2;
   int32_t size1, size2;
   int d, n1, n2;
   uint32_t mask = 0xffffffff;

   for (d = 0; ld1->defExists(d); ++d)
      if (!ld1->getDef(d)->refCount() && ld1->getDef(d)->reg.data.id < 0)
         mask &= ~(1 << d);
   if (mask == 0xffffffff)
      return;

   addr1 = ld1->getSrc(0)->reg.data.offset;
   n1 = n2 = 0;
   size1 = size2 = 0;

   // Compute address/width for first load
   for (d = 0; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         if (size1 && (addr1 & 0x7))
            break;
         def1[n1] = ld1->getDef(d);
         size1 += def1[n1++]->reg.size;
      } else
      if (!n1) {
         addr1 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }

   // Scale back the size of the first load until it can be loaded. This
   // typically happens for TYPE_B96 loads.
   while (n1 &&
          !prog->getTarget()->isAccessSupported(ld1->getSrc(0)->reg.file,
                                                typeOfSize(size1))) {
      size1 -= def1[--n1]->reg.size;
      d--;
   }

   // Compute address/width for second load
   for (addr2 = addr1 + size1; ld1->defExists(d); ++d) {
      if (mask & (1 << d)) {
         assert(!size2 || !(addr2 & 0x7));
         def2[n2] = ld1->getDef(d);
         size2 += def2[n2++]->reg.size;
      } else if (!n2) {
         addr2 += ld1->getDef(d)->reg.size;
      } else {
         break;
      }
   }

   // Make sure that we've processed all the values
   for (; ld1->defExists(d); ++d)
      assert(!(mask & (1 << d)));

   updateLdStOffset(ld1, addr1, func);
   ld1->setType(typeOfSize(size1));
   for (d = 0; d < 4; ++d)
      ld1->setDef(d, (d < n1) ? def1[d] : NULL);

   if (!n2)
      return;

   ld2 = cloneShallow(func, ld1);
   updateLdStOffset(ld2, addr2, func);
   ld2->setType(typeOfSize(size2));
   for (d = 0; d < 4; ++d)
      ld2->setDef(d, (d < n2) ? def2[d] : NULL);

   ld1->bb->insertAfter(ld1, ld2);
}

// =============================================================================

#define RUN_PASS(l, n, f)                    \
   if (level >= (l)) {                       \
      if (dbgFlags & NV50_IR_DEBUG_VERBOSE)  \
         INFO("PEEPHOLE: %s\n", #n);         \
      n pass;                                \
      if (!pass.f(this))                     \
         return false;                       \
   }

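// RUN_PASS instantiates a pass class on the spot and runs it only at or
// above the given optimization level. The SSA-level passes below are
// ordered so that earlier passes expose opportunities for later ones;
// DeadCodeElim runs first to cut down the work for the rest and last to
// bury values the other passes orphaned.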
bool
Program::optimizeSSA(int level)
{
   RUN_PASS(1, DeadCodeElim, buryAll);
   RUN_PASS(1, CopyPropagation, run);
   RUN_PASS(1, MergeSplits, run);
   RUN_PASS(2, GlobalCSE, run);
   RUN_PASS(1, LocalCSE, run);
   RUN_PASS(2, AlgebraicOpt, run);
   RUN_PASS(2, ModifierFolding, run); // before load propagation -> less checks
   RUN_PASS(1, ConstantFolding, foldAll);
   RUN_PASS(0, Split64BitOpPreRA, run);
   RUN_PASS(2, LateAlgebraicOpt, run);
   RUN_PASS(1, LoadPropagation, run);
   RUN_PASS(1, IndirectPropagation, run);
   RUN_PASS(2, MemoryOpt, run);
   RUN_PASS(2, LocalCSE, run);
   RUN_PASS(0, DeadCodeElim, buryAll);

   return true;
}

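// After register allocation, only transformations that respect the fixed
// register assignment may run: FlatteningPass replaces short conditionally
// executed blocks with predicated instructions, and PostRaLoadPropagation
// folds loads and immediates directly into consumers where the instruction
// encoding allows it.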
bool
Program::optimizePostRA(int level)
{
   RUN_PASS(2, FlatteningPass, run);
   RUN_PASS(2, PostRaLoadPropagation, run);

   return true;
}

} // namespace nv50_ir