/external/mesa3d/src/gallium/drivers/nouveau/codegen/ |
D | nv50_ir_from_common.cpp |
    89  TYPE_F32, info->io.ucpBase + i * 16 + c * 4);  in handleUserClipPlanes()
    90  Value *ucp = mkLoadv(TYPE_F32, sym, NULL);  in handleUserClipPlanes()
    92  res[i] = mkOp2v(OP_MUL, TYPE_F32, getScratch(), clipVtx[c], ucp);  in handleUserClipPlanes()
    94  mkOp3(OP_MAD, TYPE_F32, res[i], clipVtx[c], ucp, res[i]);  in handleUserClipPlanes()
   104  mkSymbol(FILE_SHADER_OUTPUT, 0, TYPE_F32, info_out->out[n].slot[c] * 4);  in handleUserClipPlanes()
   105  mkStore(OP_EXPORT, TYPE_F32, sym, NULL, res[i]);  in handleUserClipPlanes()
|
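Note: the handleUserClipPlanes() hits above follow the usual clip-distance pattern: one OP_MUL for the first component, then OP_MAD accumulation for the remaining ones, i.e. a per-plane dot product of the clip-space vertex with the user clip plane coefficients, exported to an output slot. A minimal standalone sketch of that arithmetic in plain C (illustrative names only, not the actual builder API):

    #include <stdio.h>

    /* Illustrative only: dot(clip_vertex, plane) accumulated the way the
     * builder emits it -- one MUL for component 0, then MADs for 1..3. */
    static float clip_distance(const float clipVtx[4], const float ucp[4])
    {
        float res = clipVtx[0] * ucp[0];      /* OP_MUL */
        for (int c = 1; c < 4; c++)
            res = clipVtx[c] * ucp[c] + res;  /* OP_MAD */
        return res;                           /* exported as a clip distance */
    }

    int main(void)
    {
        const float v[4]     = { 1.0f, 2.0f, 3.0f, 1.0f };
        const float plane[4] = { 0.0f, 0.0f, 1.0f, -2.5f };
        printf("clip distance = %f\n", clip_distance(v, plane)); /* 0.5 */
        return 0;
    }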
D | nv50_ir_lowering_gm107.cpp |
   127  bld.mkOp3(OP_SHFL, TYPE_F32, arr, i->getSrc(0), lane, quad);  in handleManualTXD()
   129  bld.mkOp3(OP_SHFL, TYPE_F32, shadow, i->getSrc(array + dim + indirect), lane, quad);  in handleManualTXD()
   134  bld.mkOp3(OP_SHFL, TYPE_F32, crd[c], i->getSrc(c + array), lane, quad);  in handleManualTXD()
   139  bld.mkOp3(OP_SHFL, TYPE_F32, tmp, i->dPdx[c].get(), lane, quad);  in handleManualTXD()
   140  add = bld.mkOp2(OP_QUADOP, TYPE_F32, crd[c], tmp, crd[c]);  in handleManualTXD()
   147  bld.mkOp3(OP_SHFL, TYPE_F32, tmp, i->dPdy[c].get(), lane, quad);  in handleManualTXD()
   148  add = bld.mkOp2(OP_QUADOP, TYPE_F32, crd[c], tmp, crd[c]);  in handleManualTXD()
   156  src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), crd[c]);  in handleManualTXD()
   158  bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);  in handleManualTXD()
   159  bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);  in handleManualTXD()
    [all …]
|
D | nv50_ir_from_tgsi.cpp |
   632  return nv50_ir::TYPE_F32;  in inferSrcType()
   665  return nv50_ir::TYPE_F32;  in inferDstType()
  1818  Instruction *insn = new_Instruction(func, op, TYPE_F32);  in interpolate()
  2179  mkOp2(OP_MUL, TYPE_F32, dotp, src0, src1)  in buildDot()
  2185  mkOp3(OP_MAD, TYPE_F32, dotp, src0, src1, dotp)  in buildDot()
  2272  proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), proj);  in loadProjTexCoords()
  2289  proj = mkOp1v(OP_RCP, TYPE_F32, getSSA(), fetchSrc(0, 3));  in loadProjTexCoords()
  2293  dst[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), src[c], proj);  in loadProjTexCoords()
  2447  Value *x = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 0));  in handleFBFETCH()
  2448  Value *y = mkOp1v(OP_RDSV, TYPE_F32, getScratch(), mkSysVal(SV_POSITION, 1));  in handleFBFETCH()
    [all …]
|
D | nv50_ir_target_nvc0.cpp |
   229  opInfo[i].srcTypes = 1 << (int)TYPE_F32;  in initOpInfo()
   230  opInfo[i].dstTypes = 1 << (int)TYPE_F32;  in initOpInfo()
   406  case TYPE_F32:  in insnCanLoad()
   429  if (i->op == OP_ADD && i->sType == TYPE_F32) {  in insnCanLoad()
   498  if (insn->sType != TYPE_F32)  in isModSupported()
   546  if (insn->op == OP_ADD && insn->sType == TYPE_F32) {  in isSatSupported()
   552  return insn->dType == TYPE_F32;  in isSatSupported()
   588  if (i->op == OP_MUL && i->dType != TYPE_F32)  in getLatency()
   615  if (i->dType == TYPE_F32) {  in getThroughput()
   705  return (a->dType == TYPE_F32 || a->op == OP_ADD ||  in canDualIssue()
    [all …]
|
D | nv50_ir_lowering_nv50.cpp |
   499  bld.mkCvt(OP_CVT, TYPE_F32, af, ty, div->getSrc(0));  in handleDIV()
   500  bld.mkCvt(OP_CVT, TYPE_F32, bf, ty, div->getSrc(1));  in handleDIV()
   514  bf = bld.mkOp1v(OP_RCP, TYPE_F32, bld.getSSA(), bf);  in handleDIV()
   517  bld.mkOp2(OP_MUL, TYPE_F32, (qf = bld.getSSA()), af, bf)->rnd = ROUND_Z;  in handleDIV()
   518  bld.mkCvt(OP_CVT, ty, (q0 = bld.getSSA()), TYPE_F32, qf)->rnd = ROUND_Z;  in handleDIV()
   525  bld.mkCvt(OP_CVT, TYPE_F32, (aR = bld.getSSA()), TYPE_U32, aRf);  in handleDIV()
   527  bld.mkOp2(OP_MUL, TYPE_F32, (qRf = bld.getSSA()), aR, bf)->rnd = ROUND_Z;  in handleDIV()
   528  bld.mkCvt(OP_CVT, TYPE_U32, (qR = bld.getSSA()), TYPE_F32, qRf)  in handleDIV()
   734  src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), i->getSrc(c));  in handleTEX()
   736  bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);  in handleTEX()
    [all …]
|
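Note: the handleDIV() hits above lower integer division through single-precision math: both operands are converted to F32, the quotient is formed as trunc(a * rcp(b)) with round-to-zero, and a correction pass recomputes the remainder and runs a second truncated multiply by the reciprocal to adjust the quotient. A rough standalone sketch of the idea follows; helper names are hypothetical, the correction here is a simple adjust loop rather than the pass's single float correction step, and sign handling and edge cases are ignored.

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of unsigned division via a float reciprocal:
     * q0 = trunc(a * (1/b)), then correct q0 from the exact remainder.
     * Only meant for values small enough to be exact in binary32. */
    static uint32_t div_via_f32(uint32_t a, uint32_t b)
    {
        float af = (float)a, bf = (float)b;
        float rf = 1.0f / bf;                    /* OP_RCP (approximate on HW) */
        uint32_t q0 = (uint32_t)truncf(af * rf); /* OP_MUL + CVT, ROUND_Z      */

        /* Correction: recompute the remainder and nudge the quotient. */
        int64_t rem = (int64_t)a - (int64_t)q0 * b;
        while (rem >= (int64_t)b) { rem -= b; q0++; }
        while (rem < 0)           { rem += b; q0--; }
        return q0;
    }

    int main(void)
    {
        printf("%u\n", div_via_f32(100000u, 7u)); /* 14285 */
        return 0;
    }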
D | nv50_ir_target_gv100.cpp |
    56  opInfo[i].srcTypes = 1 << (int)TYPE_F32;  in initOpInfo()
    57  opInfo[i].dstTypes = 1 << (int)TYPE_F32;  in initOpInfo()
   214  if (i->dType == TYPE_F32)  in getOpInfo()
   282  if (i->dType == TYPE_F32)  in getOpInfo()
   309  if (i->dType == TYPE_F32)  in getOpInfo()
   319  if (i->dType == TYPE_F32)  in getOpInfo()
   349  if (i->dType == TYPE_F32)  in getOpInfo()
   413  case TYPE_F32:  in isSatSupported()
   444  if (ty == TYPE_F32) {  in isOpSupported()
|
D | nv50_ir_lowering_nvc0.cpp |
   145  i->setType(TYPE_F32);  in handleRCPRSQ()
   157  assert(i->sType == TYPE_F32);  in handleFTZ()
   335  if (i->sType == TYPE_F32 && prog->getType() != Program::TYPE_COMPUTE)  in visit()
   341  if (i->sType != TYPE_F32)  in visit()
   944  src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), i->getSrc(c));  in handleTEX()
   946  bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);  in handleTEX()
   947  bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);  in handleTEX()
   948  bld.mkOp1(OP_RCP, TYPE_F32, val, val);  in handleTEX()
   950  i->setSrc(c, bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(),  in handleTEX()
  1027  DataType sTy = (i->op == OP_TXF) ? TYPE_U32 : TYPE_F32;  in handleTEX()
    [all …]
|
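Note: the handleTEX() hits here (and the matching ABS/MAX/MAX sequences in the NV50 and GM107 lowering passes above) show the cube-map coordinate preparation pattern: take the absolute value of each coordinate, MAX-reduce to the major-axis magnitude, take its reciprocal, and scale all three coordinates by it. A standalone sketch of that math, with illustrative names rather than the builder code:

    #include <math.h>
    #include <stdio.h>

    /* Normalize cube-map coordinates so the major axis has magnitude 1,
     * mirroring the OP_ABS / OP_MAX / OP_MAX / OP_RCP / OP_MUL sequence. */
    static void normalize_cube_coords(float crd[3])
    {
        float ax = fabsf(crd[0]);            /* OP_ABS per component */
        float ay = fabsf(crd[1]);
        float az = fabsf(crd[2]);
        float m  = fmaxf(fmaxf(ax, ay), az); /* OP_MAX, OP_MAX       */
        float r  = 1.0f / m;                 /* OP_RCP               */
        for (int c = 0; c < 3; c++)
            crd[c] *= r;                     /* OP_MUL per component */
    }

    int main(void)
    {
        float crd[3] = { 0.5f, -2.0f, 1.0f };
        normalize_cube_coords(crd);
        printf("%f %f %f\n", crd[0], crd[1], crd[2]); /* 0.25 -1.0 0.5 */
        return 0;
    }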
D | nv50_ir_lowering_gv100.cpp |
   179  if (i->sType == TYPE_F32)  in handleSET()
   248  if (i->sType == TYPE_F32 && i->dType != TYPE_F16 &&  in visit()
   370  bld.mkCvt(OP_CVT, TYPE_F32, i->getDef(0), i->sType, i->getSrc(0))->  in handleI2I()
   372  bld.mkCvt(OP_CVT, i->dType, i->getDef(0), TYPE_F32, i->getDef(0));  in handleI2I()
   405  ipa = bld.mkOp2(OP_LINTERP, TYPE_F32, i->getDef(0), i->getSrc(0), src2);  in handlePINTERP()
   407  mul = bld.mkOp2(OP_MUL, TYPE_F32, i->getDef(0), i->getDef(0), i->getSrc(1));  in handlePINTERP()
|
D | nv50_ir_target_nv50.cpp |
   145  opInfo[i].srcTypes = 1 << (int)TYPE_F32;  in initOpInfo()
   146  opInfo[i].dstTypes = 1 << (int)TYPE_F32;  in initOpInfo()
   481  if (insn->sType != TYPE_F32)  in isModSupported()
   509  if (insn->dType != TYPE_F32)  in isSatSupported()
   542  if (i->dType == TYPE_F32) {  in getThroughput()
|
D | nv50_ir_inlines.h |
    64  case TYPE_F32:  in typeSizeof()
    88  case TYPE_F32:  in typeSizeofLog2()
   116  return flt ? TYPE_F32 : (sgn ? TYPE_S32 : TYPE_U32);
|
D | nv50_ir_peephole.cpp |
   483  case TYPE_F32:  in applyTo()
   577  if (i->dnz && i->dType == TYPE_F32) {  in expr()
   584  case TYPE_F32:  in expr()
   608  case TYPE_F32: res.data.f32 = a->data.f32 / b->data.f32; break;  in expr()
   618  case TYPE_F32: res.data.f32 = a->data.f32 + b->data.f32; break;  in expr()
   628  case TYPE_F32: res.data.f32 = a->data.f32 - b->data.f32; break;  in expr()
   638  case TYPE_F32: res.data.f32 = pow(a->data.f32, b->data.f32); break;  in expr()
   646  case TYPE_F32: res.data.f32 = MAX2(a->data.f32, b->data.f32); break;  in expr()
   656  case TYPE_F32: res.data.f32 = MIN2(a->data.f32, b->data.f32); break;  in expr()
   823  case TYPE_F32:  in expr()
    [all …]
|
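Note: the expr() hits above are constant folding switching on the data type and operating on the matching union member (f32 divide, add, subtract, pow, MAX2/MIN2). A tiny sketch of type-dispatched folding for two of these operations; the union layout, enum members, and op set are illustrative, not the pass's real data structures.

    #include <math.h>
    #include <stdio.h>

    typedef enum { TYPE_F32, TYPE_U32 } DataType;
    typedef enum { OP_ADD, OP_MAX } opcode;

    typedef union {
        float    f32;
        unsigned u32;
    } value_t;

    /* Fold a binary op on two immediates, dispatching on the data type
     * the way the peephole pass does; only a couple of ops shown. */
    static value_t fold(opcode op, DataType ty, value_t a, value_t b)
    {
        value_t res = {0};
        switch (op) {
        case OP_ADD:
            if (ty == TYPE_F32) res.f32 = a.f32 + b.f32;
            else                res.u32 = a.u32 + b.u32;
            break;
        case OP_MAX:
            if (ty == TYPE_F32) res.f32 = fmaxf(a.f32, b.f32);
            else                res.u32 = a.u32 > b.u32 ? a.u32 : b.u32;
            break;
        }
        return res;
    }

    int main(void)
    {
        value_t a = { .f32 = 1.5f }, b = { .f32 = 2.25f };
        printf("%f\n", fold(OP_ADD, TYPE_F32, a, b).f32); /* 3.75 */
        return 0;
    }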
D | nv50_ir.cpp |
   340  reg.type = TYPE_F32;  in ImmediateValue()
   401  case TYPE_F32:  in isInteger()
   418  case TYPE_F32: return reg.data.u32 & (1 << 31);  in isNegative()
   454  case TYPE_F32:  in applyLog2()
   469  if (reg.type != TYPE_F32)  in compare()
   599  dType = sType = TYPE_F32;  in Instruction()
   908  : Instruction(fn, op, TYPE_F32), tex()  in TexInstruction()
  1110  : Instruction(fn, op, TYPE_F32)  in CmpInstruction()
|
D | nv50_ir_emit_nv50.cpp |
   585  case TYPE_F32: // fall through  in emitLoadStoreSizeLG()
   611  case TYPE_F32:  in emitLoadStoreSizeCS()
   953  case TYPE_F32: code[0] |= 0x80000000; break;  in emitMINMAX()
  1301  case TYPE_F32: code[0] |= 0x80000000; break;  in emitSET()
  1371  case TYPE_F32: code[1] = 0xc4400000; break;  in emitCVT()
  1382  case TYPE_F32: code[1] = 0x8c400000; break;  in emitCVT()
  1391  case TYPE_F32: code[1] = 0x84400000; break;  in emitCVT()
  1397  case TYPE_F32:  in emitCVT()
  1402  case TYPE_F32: code[1] = 0xc4004000; break;  in emitCVT()
  1415  case TYPE_F32: code[1] = 0x8c004000; break;  in emitCVT()
    [all …]
|
D | nv50_ir_emit_gk110.cpp |
   210  if (ty == TYPE_F32)  in isLIMM()
   337  if (i->sType == TYPE_F32) {  in setShortImmediate()
   498  if (isLIMM(i->src(1), TYPE_F32)) {  in emitFMAD()
   581  if (isLIMM(i->src(1), TYPE_F32)) {  in emitFMUL()
   660  if (isLIMM(i->src(1), TYPE_F32)) {  in emitFADD()
  1018  case TYPE_F32:  in emitMINMAX()
  1117  case TYPE_F32: op2 = 0x1d8; op1 = 0xb58; break;  in emitSET()
  1144  case TYPE_F32: op2 = 0x000; op1 = 0x800; break;  in emitSET()
  1163  if (i->dType == TYPE_F32) {  in emitSET()
  1200  if (i->dType == TYPE_F32) {  in emitSLCT()
    [all …]
|
D | nv50_ir_build_util.cpp |
   154  DataType ty = TYPE_F32;  in mkInterp()
   266  Instruction *quadop = mkOp2(OP_QUADOP, TYPE_F32, def, src0, src1);  in mkQuadop()
   404  return mkOp1v(OP_MOV, TYPE_F32, dst ? dst : getScratch(), mkImm(f));  in loadImm()
   455  sym->reg.type = TYPE_F32;  in mkSysVal()
|
D | nv50_ir_emit_gv100.cpp |
   840  case TYPE_F32 : dType = 3; break;  in emitATOM()
  1093  case TYPE_F32: dType = 3; break;  in emitRED()
  1413  case TYPE_F32: type = 3; break;  in emitSUATOM()
  1692  if (insn->dType == TYPE_F32)  in emitInstruction()
  1792  if (insn->dType == TYPE_F32)  in emitInstruction()
  1827  if (insn->dType == TYPE_F32) {  in emitInstruction()
  1846  if (insn->dType == TYPE_F32)  in emitInstruction()
  1885  if (insn->dType == TYPE_F32) {  in emitInstruction()
|
D | nv50_ir_from_nir.cpp |
  1367  fragCoord[3] = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), sv);  in visit()
  1368  fp.position = mkOp1v(OP_RCP, TYPE_F32, fragCoord[3], fragCoord[3]);  in visit()
  1653  src = mkOp1v(OP_SAT, TYPE_F32, getScratch(), src);  in visit()
  1687  Value *x = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 0));  in visit()
  1688  Value *y = mkOp1v(OP_RDSV, TYPE_F32, getSSA(), mkSysVal(SV_POSITION, 1));  in visit()
  1689  mkCvt(OP_CVT, TYPE_U32, srcs[0], TYPE_F32, x)->rnd = ROUND_Z;  in visit()
  1690  mkCvt(OP_CVT, TYPE_U32, srcs[1], TYPE_F32, y)->rnd = ROUND_Z;  in visit()
  1798  mkOp2(OP_MIN, TYPE_F32, offs[c], getSrc(&insn->src[0], c), loadImm(NULL, 0.4375f));  in visit()
  1799  mkOp2(OP_MAX, TYPE_F32, offs[c], offs[c], loadImm(NULL, -0.5f));  in visit()
  1800  mkOp2(OP_MUL, TYPE_F32, offs[c], offs[c], loadImm(NULL, 4096.0f));  in visit()
    [all …]
|
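Note: the last three hits above (1798-1800) clamp a per-component offset to [-0.5, 0.4375] and scale it by 4096 (2^12). Reading that as a conversion to a signed fixed-point field with 12 fractional bits is an assumption, not something the hits state; the clamp/scale arithmetic itself is exactly what the OP_MIN / OP_MAX / OP_MUL sequence does. A quick sketch:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Clamp an offset to [-0.5, 0.4375] and scale by 4096, mirroring the
     * OP_MIN / OP_MAX / OP_MUL sequence; the fixed-point reading of the
     * result is an assumption for illustration. */
    static int32_t offset_to_fixed(float offs)
    {
        offs = fminf(offs, 0.4375f);      /* OP_MIN with 0.4375 */
        offs = fmaxf(offs, -0.5f);        /* OP_MAX with -0.5   */
        return (int32_t)(offs * 4096.0f); /* OP_MUL with 4096.0 */
    }

    int main(void)
    {
        printf("%d %d\n", offset_to_fixed(0.25f), offset_to_fixed(-1.0f));
        /* expected: 1024 -2048 */
        return 0;
    }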
D | nv50_ir_emit_nvc0.cpp |
   216  if (ty == TYPE_F32)  in isLIMM()
   544  if (isLIMM(i->src(1), TYPE_F32)) {  in emitFMAD()
   601  if (isLIMM(i->src(1), TYPE_F32)) {  in emitFMUL()
   671  if (isLIMM(i->src(1), TYPE_F32)) {  in emitFADD()
  1203  if (i->sType == TYPE_F32)  in emitSET()
  1237  case TYPE_F32:  in emitSLCT()
  1807  case TYPE_F32:  in emitLoadStoreType()
  2147  if (i->dType == TYPE_F32) {  in emitATOM()
  2988  if (i->dType == TYPE_F32) {  in getMinEncodingSize()
|
D | nv50_ir_emit_gm107.cpp |
   347  if (insn->sType == TYPE_F32 || insn->sType == TYPE_F16) {  in emitIMMD()
  1217  emitField(0x34, 1, insn->dType == TYPE_F32);  in emitDSET()
  1605  emitField(0x34, 1, insn->dType == TYPE_F32);  in emitFSET()
  2104  emitField(0x2c, 1, insn->dType == TYPE_F32);  in emitISET()
  2651  case TYPE_F32: dType = 3; break;  in emitATOM()
  2720  case TYPE_F32: dType = 3; break;  in emitRED()
  3362  case TYPE_F32: type = 3; break;  in emitSUREDx()
|
/external/mesa3d/src/freedreno/ir3/ |
D | ir3_cf.c |
    48  if (instr->cat1.src_type == TYPE_F32 &&  in is_fp16_conv()
    53  instr->cat1.dst_type == TYPE_F32)  in is_fp16_conv()
|
D | ir3_image.c |
   167  return bit_size == 16 ? TYPE_F16 : TYPE_F32;  in ir3_get_type_for_image_intrinsic()
|
D | instr-a3xx.h |
   272  TYPE_F32 = 1,  enumerator
   284  case TYPE_F32:  in type_size()
   303  return (type == TYPE_F32) || (type == TYPE_F16);  in type_float()
|
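Note: the instr-a3xx.h hits show TYPE_F32 as an enumerator plus the type_size()/type_float() helpers built on it. A compressed sketch of how such helpers fit together, based only on the fragments visible above; the enum members, their values beyond TYPE_F32 = 1, and the helper bodies are illustrative, not copied from the real header.

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of an ir3-style register type enum with size/class helpers.
     * Members and widths are illustrative, not the real instr-a3xx.h. */
    typedef enum {
        TYPE_F16,
        TYPE_F32,   /* shown as "= 1" in the hit above */
        TYPE_U16,
        TYPE_U32,
        TYPE_S16,
        TYPE_S32,
    } type_t;

    static unsigned type_size(type_t type)
    {
        switch (type) {
        case TYPE_F32:
        case TYPE_U32:
        case TYPE_S32:
            return 32;
        default:
            return 16;
        }
    }

    static bool type_float(type_t type)
    {
        return (type == TYPE_F32) || (type == TYPE_F16);
    }

    int main(void)
    {
        printf("f32: %u bits, float=%d\n", type_size(TYPE_F32), type_float(TYPE_F32));
        return 0;
    }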
D | ir3_compiler_nir.c |
   131  src_type = TYPE_F32;  in create_cov()
   198  dst_type = TYPE_F32;  in create_cov()
   374  create_immed_typed(b, 0, bs[0] == 16 ? TYPE_F16 : TYPE_F32), 0);  in emit_alu()
   459  dst[0]->cat5.type = TYPE_F32;  in emit_alu()
   463  dst[0]->cat5.type = TYPE_F32;  in emit_alu()
   468  dst[0]->cat5.type = TYPE_F32;  in emit_alu()
   473  dst[0]->cat5.type = TYPE_F32;  in emit_alu()
  1462  xyzw[i] = ir3_COV(b, xyzw[i], TYPE_U32, TYPE_F32);  in get_frag_coord()
  1505  nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32);  in emit_intrinsic()
  1511  nir_dest_bit_size(intr->dest) == 16 ? TYPE_F16 : TYPE_F32,  in emit_intrinsic()
    [all …]
|
D | ir3.h |
   978  case TYPE_F32: return TYPE_F16;  in half_type()
   994  case TYPE_F16: return TYPE_F32;  in full_type()
   997  case TYPE_F32:  in full_type()
  1411  return create_uniform_typed(block, n, TYPE_F32);  in create_uniform()
|
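Note: the ir3.h hits show half_type()/full_type() mapping between the 32-bit and 16-bit variants of each type class (TYPE_F32 <-> TYPE_F16, and likewise for the integer types), with create_uniform() defaulting untyped uniforms to TYPE_F32. A minimal sketch of that round trip, using its own illustrative type_t enum rather than the real header; the real helpers reject unexpected types instead of passing them through.

    #include <stdio.h>

    /* Illustrative subset of an ir3-style type enum, just enough to show
     * the half_type()/full_type() mapping. */
    typedef enum { TYPE_F16, TYPE_F32, TYPE_U16, TYPE_U32, TYPE_S16, TYPE_S32 } type_t;

    static type_t half_type(type_t t)
    {
        switch (t) {
        case TYPE_F32: return TYPE_F16;
        case TYPE_U32: return TYPE_U16;
        case TYPE_S32: return TYPE_S16;
        default:       return t;       /* already a 16-bit type */
        }
    }

    static type_t full_type(type_t t)
    {
        switch (t) {
        case TYPE_F16: return TYPE_F32;
        case TYPE_U16: return TYPE_U32;
        case TYPE_S16: return TYPE_S32;
        default:       return t;       /* already a 32-bit type */
        }
    }

    int main(void)
    {
        printf("%d\n", full_type(half_type(TYPE_F32)) == TYPE_F32); /* 1 */
        return 0;
    }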
D | ir3_print.c |
    55  [TYPE_F32] = "f32",  in type_name()
|