/external/mesa3d/src/gallium/drivers/nouveau/codegen/ |
D | nv50_ir_lowering_nvc0.cpp |
     70   case TYPE_U32: builtin = NVC0_BUILTIN_DIV_U32; break;   in handleDIV()
    241   DataType type = isSignedIntType(lo->dType) ? TYPE_S32 : TYPE_U32;   in handleShift()
    245   bld.mkOp2(OP_ADD, TYPE_U32, (x32_minus_shift = bld.getSSA()), shift, bld.mkImm(0x20))   in handleShift()
    248   TYPE_U32, shift, bld.mkImm(32));   in handleShift()
    250   bld.mkOp2(OP_OR, TYPE_U32, (hi1 = bld.getSSA()),   in handleShift()
    251   bld.mkOp2v(op, TYPE_U32, bld.getSSA(), src[1], shift),   in handleShift()
    252   bld.mkOp2v(antiop, TYPE_U32, bld.getSSA(), src[0], x32_minus_shift))   in handleShift()
    260   bld.mkOp2(OP_UNION, TYPE_U32, (dst[1] = bld.getSSA()), hi1, hi2);   in handleShift()
    268   Instruction *hi = new_Instruction(func, op, TYPE_U32);   in handleShift()
    272   lo->dType = TYPE_U32;   in handleShift()
    [all …]
|
D | nv50_ir_lowering_gv100.cpp |
     39   bld.mkOp3(OP_SELP, TYPE_U32, i->getDef(0), i->getSrc(0), i->getSrc(1), pred);   in handleCMP()
     61   bld.mkOp2(OP_ADD, TYPE_U32, def[0], src[0][0], src[1][0])->   in handleIADD64()
     63   bld.mkOp2(OP_ADD, TYPE_U32, def[1], src[0][1], src[1][1])->   in handleIADD64()
    138   bld.mkOp3(OP_LOP3_LUT, TYPE_U32, i->getDef(0), i->getSrc(0), i->getSrc(1),   in handleLOP2()
    146   bld.mkOp3(OP_LOP3_LUT, TYPE_U32, i->getDef(0), bld.mkImm(0), i->getSrc(0),   in handleNOT()
    193   i = bld.mkOp3(OP_SELP, TYPE_U32, i->getDef(0), bld.mkImm(0), met, pred);   in handleSET()
    330   bld.mkCmp(OP_SET, (i->op == OP_MIN) ? CC_LT : CC_GT, TYPE_U32, pred,   in handleDMNMX()
    335   bld.mkOp3(OP_SELP, TYPE_U32, dest[0], src0[0], src1[0], pred);   in handleDMNMX()
    336   bld.mkOp3(OP_SELP, TYPE_U32, dest[1], src0[1], src1[1], pred);   in handleDMNMX()
    349   bld.mkOp3(OP_PERMT, TYPE_U32, bit, i->getSrc(1), bld.mkImm(0x4440), zero);   in handleEXTBF()
    [all …]
|
D | nv50_ir_lowering_nv50.cpp |
     52   case TYPE_S32: fTy = TYPE_U32; break;   in expandIntegerMUL()
     59   case TYPE_U32: hTy = TYPE_U16; break;   in expandIntegerMUL()
     60   case TYPE_U64: hTy = TYPE_U32; break;   in expandIntegerMUL()
    125   bld->mkOp2(OP_UNION, TYPE_U32, r[2], r[1], r[3]);   in expandIntegerMUL()
    448   arl = bld.mkOp2(OP_SHL, TYPE_U32, i->getDef(0), bld.getSSA(), bld.mkImm(0));   in handleAddrDef()
    489   if (ty != TYPE_U32 && ty != TYPE_S32)   in handleDIV()
    515   bf = bld.mkOp2v(OP_ADD, TYPE_U32, bld.getSSA(), bf, bld.mkImm(-2));   in handleDIV()
    522   bld.mkOp2(OP_MUL, TYPE_U32, (t = bld.getSSA()), q0, b));   in handleDIV()
    523   bld.mkOp2(OP_SUB, TYPE_U32, (aRf = bld.getSSA()), a, t);   in handleDIV()
    525   bld.mkCvt(OP_CVT, TYPE_F32, (aR = bld.getSSA()), TYPE_U32, aRf);   in handleDIV()
    [all …]
|
D | nv50_ir_lowering_gm107.cpp |
     58   bld.mkOp2(OP_ADD , TYPE_U32, src0, i->getSrc(0), i->getSrc(1));   in handlePFETCH()
     60   bld.mkOp1(OP_MOV , TYPE_U32, src0, i->getSrc(0));   in handlePFETCH()
    195   Instruction *u = bld.mkOp(OP_UNION, TYPE_U32, i->getDef(c));   in handleManualTXD()
    241   bld.mkOp1(OP_RDSV, TYPE_U32, tmp0, bld.mkSysVal(SV_INVOCATION_INFO, 0));   in handlePFETCH()
    242   bld.mkOp3(OP_PERMT, TYPE_U32, tmp1, tmp0, bld.mkImm(0x4442), bld.mkImm(0));   in handlePFETCH()
    243   bld.mkOp3(OP_PERMT, TYPE_U32, tmp0, tmp0, bld.mkImm(0x4440), bld.mkImm(0));   in handlePFETCH()
    245   bld.mkOp2(OP_ADD , TYPE_U32, tmp2, i->getSrc(0), i->getSrc(1));   in handlePFETCH()
    247   bld.mkOp1(OP_MOV , TYPE_U32, tmp2, i->getSrc(0));   in handlePFETCH()
    248   bld.mkOp3(OP_MAD , TYPE_U32, tmp0, tmp0, tmp1, tmp2);   in handlePFETCH()
    292   bld.mkOp2(OP_DIV, TYPE_U32, suq->getDef(d), suq->getDef(d),   in handleSUQ()
    [all …]
|
D | nv50_ir_from_nir.cpp |
    839   indirect = mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), indirect, loadImm(NULL, 4));   in getIndirect()
   1230   mkLoad(TYPE_U32, lo,   in loadFrom()
   1231   mkSymbol(file, i, TYPE_U32, base + c * tySize),   in loadFrom()
   1237   mkLoad(TYPE_U32, hi,   in loadFrom()
   1238   mkSymbol(file, i, TYPE_U32, base + c * tySize + 4),   in loadFrom()
   1270   mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address), indirect0,   in storeTo()
   1272   mkStore(op, TYPE_U32, mkSymbol(file, 0, TYPE_U32, address + 4), indirect0,   in storeTo()
   1361   OP_SUB, TYPE_U32, getSSA(),   in visit()
   1362   mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_LANEID, 0)),   in visit()
   1363   mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_INVOCATION_ID, 0)));   in visit()
   [all …]
|
D | nv50_ir_from_tgsi.cpp |
    555   return nv50_ir::TYPE_U32;   in inferSrcType()
    640   case TGSI_OPCODE_F2U: return nv50_ir::TYPE_U32;   in inferDstType()
    658   return nv50_ir::TYPE_U32;   in inferDstType()
   1858   vtxBase[s] = mkOp2v(OP_PFETCH, TYPE_U32, getSSA(4, FILE_ADDRESS),   in getVertexBase()
   1871   offset = mkOp2v(OP_ADD, TYPE_U32, getSSA(),   in getOutputBase()
   1875   vtxBase[s] = mkOp2v(OP_ADD, TYPE_U32, getSSA(), outBase, offset);   in getOutputBase()
   1980   return mkOp2v(OP_SHL, TYPE_U32, getSSA(4, FILE_ADDRESS), index, mkImm(4));   in shiftAddress()
   2023   return mkLoadv(TYPE_U32, srcToSym(src, c), shiftAddress(ptr));   in fetchSrc()
   2033   return mkOp1v(OP_RDSV, TYPE_U32, getSSA(), mkSysVal(SV_PRIMITIVE_ID, 0));   in fetchSrc()
   2040   return mkLoadv(TYPE_U32, srcToSym(src, c), ptr);   in fetchSrc()
   [all …]
|
D | nv50_ir_inlines.h |
     65   case TYPE_U32:   in typeSizeof()
     89   case TYPE_U32:   in typeSizeofLog2()
    116   return flt ? TYPE_F32 : (sgn ? TYPE_S32 : TYPE_U32);
    138   case TYPE_U32:   in isSignedType()
    152   case TYPE_U32: return TYPE_S32;   in intTypeToSigned()
|
D | nv50_ir_peephole.cpp |
    351   OP_SHL, TYPE_U32, bld.getSSA(), insn->getSrc(0), insn->getSrc(1)));   in visit()
    503   case TYPE_U32:   in applyTo()
    594   case TYPE_U32:   in expr()
    611   case TYPE_U32: res.data.u32 = a->data.u32 / b->data.u32; break;   in expr()
    621   case TYPE_U32: res.data.u32 = a->data.u32 + b->data.u32; break;   in expr()
    631   case TYPE_U32: res.data.u32 = a->data.u32 - b->data.u32; break;   in expr()
    649   case TYPE_U32: res.data.u32 = MAX2(a->data.u32, b->data.u32); break;   in expr()
    659   case TYPE_U32: res.data.u32 = MIN2(a->data.u32, b->data.u32); break;   in expr()
    679   case TYPE_U32: res.data.u32 = a->data.u32 >> b->data.u32; break;   in expr()
    708   case TYPE_U32: res.data.u32 = (res.data.u32 << lshift) >> rshift; break;   in expr()
    [all …]
|
D | nv50_ir_emit_nv50.cpp |
    587   case TYPE_U32: enc = 0x6; break;   in emitLoadStoreSizeLG()
    613   case TYPE_U32: code[1] |= 0xc000; break;   in emitLoadStoreSizeCS()
    955   case TYPE_U32: code[1] |= 0x84000000; break;   in emitMINMAX()
   1245   case TYPE_U32: code[1] = 0x04000000; break;   in emitISAD()
   1256   case TYPE_U32: code[0] = 0x50008000; break;   in emitISAD()
   1303   case TYPE_U32: code[1] |= 0x04000000; break;   in emitSET()
   1358   if (i->op == OP_NEG && i->dType == TYPE_U32)   in emitCVT()
   1373   case TYPE_U32: code[1] = 0x44400000; break;   in emitCVT()
   1404   case TYPE_U32: code[1] = 0x44004000; break;   in emitCVT()
   1417   case TYPE_U32: code[1] = 0x0c004000; break;   in emitCVT()
   [all …]
|
D | nv50_ir_build_util.cpp |
    157   ty = TYPE_U32;   in mkInterp()
    416   return mkOp1v(OP_MOV, TYPE_U32, dst ? dst : getScratch(), mkImm(u));   in loadImm()
    458   sym->reg.type = TYPE_U32;   in mkSysVal()
    568   case TYPE_U64: hTy = TYPE_U32; break;   in split64BitOpPostRA()
    572   hTy = TYPE_U32;   in split64BitOpPostRA()
|
D | nv50_ir_target_nvc0.cpp |
    411   case TYPE_U32:   in insnCanLoad()
    470   if (op == OP_SAD && ty != TYPE_S32 && ty != TYPE_U32)   in isOpSupported()
    542   if (insn->dType == TYPE_U32)   in isSatSupported()
    642   if (i->dType == TYPE_U32 || i->dType == TYPE_S32) {   in getThroughput()
|
D | nv50_ir_lowering_helper.cpp |
     94   (dTy == TYPE_U32 && sTy == TYPE_U64)) {   in handleCVT()
    104   } else if (dTy == TYPE_U64 && sTy == TYPE_U32) {   in handleCVT()
|
D | nv50_ir_emit_gv100.cpp |
    788   case TYPE_U32:   in emitSHF()
    837   case TYPE_U32 : dType = 0; break;   in emitATOM()
    853   case TYPE_U32: dType = 0; break;   in emitATOM()
    879   case TYPE_U32: dType = 0; break;   in emitATOMS()
    899   case TYPE_U32: dType = 0; break;   in emitATOMS()
   1090   case TYPE_U32: dType = 0; break;   in emitRED()
   1416   assert(insn->dType == TYPE_U32);   in emitSUATOM()
   1456   case TYPE_U32: type = 4; break;   in emitSULD()
|
D | nv50_ir.cpp |
    327   reg.type = TYPE_U32;   in ImmediateValue()
    396   case TYPE_U32:   in isInteger()
    417   case TYPE_U32: return reg.data.s32 < 0;   in isNegative()
    445   case TYPE_U32:   in applyLog2()
    914   sType = TYPE_U32;   in TexInstruction()
|
D | nv50_ir_emit_nvc0.cpp |
    648   if (isLIMM(i->src(1), TYPE_U32)) {   in emitUMUL()
    736   if (isLIMM(i->src(1), TYPE_U32)) {   in emitUADD()
    847   assert(i->dType == TYPE_S32 || i->dType == TYPE_U32);   in emitISAD()
    896   if (isLIMM(i->src(1), TYPE_U32)) {   in emitLogicOp()
   1100   if (i->op == OP_NEG && i->dType == TYPE_U32)   in emitCVT()
   1234   case TYPE_U32:   in emitSLCT()
   1808   case TYPE_U32:   in emitLoadStoreType()
   2120   if (i->dType == TYPE_U32) {   in emitATOM()
   2312   assert(ty == TYPE_U32);   in emitSUGType()
|
D | nv50_ir_emit_gk110.cpp |
    774   assert(i->dType == TYPE_S32 || i->dType == TYPE_U32);   in emitISAD()
   1013   case TYPE_U32:   in emitMINMAX()
   1078   if (i->op == OP_NEG && i->dType == TYPE_U32)   in emitCVT()
   1684   assert(ty == TYPE_U32);   in emitSUGType()
   2118   case TYPE_U32:   in emitLoadStoreType()
   2403   case TYPE_U32: break;   in emitATOM()
|
/external/mesa3d/src/freedreno/ir3/ |
D | ir3_a6xx.c |
     55   ldib->cat6.type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32;   in emit_intrinsic_load_ssbo()
     82   stib->cat6.type = intr->src[0].ssa->bit_size == 16 ? TYPE_U16 : TYPE_U32;   in emit_intrinsic_store_ssbo()
    112   type_t type = TYPE_U32;   in emit_intrinsic_atomic_ssbo()
    352   resinfo->cat6.type = TYPE_U32;   in emit_intrinsic_image_size()
    394   mov->cat1.src_type = TYPE_U32;   in get_atomic_dest_mov()
    395   mov->cat1.dst_type = TYPE_U32;   in get_atomic_dest_mov()
|
D | ir3_context.c |
    250   ctx->last_dst[i] = ir3_MOV(ctx->block, ctx->last_dst[i], TYPE_U32);   in ir3_put_dst()
    341   type_t type = (flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;   in ir3_create_collect()
    430   instr = ir3_COV(block, src, TYPE_U32, TYPE_S16);   in create_addr0()
    593   mov->cat1.src_type = TYPE_U32;   in ir3_create_array_load()
    594   mov->cat1.dst_type = TYPE_U32;   in ir3_create_array_load()
    654   mov->cat1.src_type = TYPE_U32;   in ir3_create_array_store()
    655   mov->cat1.dst_type = TYPE_U32;   in ir3_create_array_store()
|
D | ir3_compiler_nir.c |
     79   instr->cat6.type = TYPE_U32;   in create_frag_input()
    168   src_type = TYPE_U32;   in create_cov()
    186   src_type = TYPE_U32;   in create_cov()
    230   dst_type = TYPE_U32;   in create_cov()
    268   TYPE_U16 : TYPE_U32;   in emit_alu()
    638   cond = ir3_COV(b, cond, TYPE_U32, TYPE_U16);   in emit_alu()
    656   TYPE_U32, TYPE_U16);   in emit_alu()
    657   lo = ir3_COV(b, src[0], TYPE_U32, TYPE_U16);   in emit_alu()
    670   dst[0] = ir3_COV(b, dst[0], TYPE_U16, TYPE_U32);   in emit_alu()
    739   ldc->cat6.type = TYPE_U32;   in emit_intrinsic_load_ubo_ldc()
    [all …]
|
D | ir3_a4xx.c |
     62   ldgb->cat6.type = TYPE_U32;   in emit_intrinsic_load_ssbo()
     98   stgb->cat6.type = TYPE_U32;   in emit_intrinsic_store_ssbo()
    128   type_t type = TYPE_U32;   in emit_intrinsic_atomic_ssbo()
|
D | ir3_image.c |
    163   return bit_size == 16 ? TYPE_U16 : TYPE_U32;   in ir3_get_type_for_image_intrinsic()
|
D | ir3_group.c |
     38   (collect->regs[idx+1]->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32);   in insert_mov()
|
D | ir3_context.h |
    212   case 32: return TYPE_U32;   in utype_for_size()
|
D | instr-a3xx.h |
    274   TYPE_U32 = 3,   enumerator
    285   case TYPE_U32:   in type_size()
    308   return (type == TYPE_U32) || (type == TYPE_U16) || (type == TYPE_U8);   in type_uint()
|
D | ir3.h |
    979   case TYPE_U32: return TYPE_U16;   in half_type()
    995   case TYPE_U16: return TYPE_U32;   in full_type()
    998   case TYPE_U32:   in full_type()
   1390   return create_immed_typed(block, val, TYPE_U32);   in create_immed()
|