/external/llvm-project/llvm/test/Transforms/InstSimplify/

D | div-by-0-guard-before-umul_ov.ll
      8: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
      9: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     21: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
     22: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     35: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], …
     36: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     50: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[…
     51: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     65: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[…
     66: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     [all …]

D | div-by-0-guard-before-umul_ov-not.ll
      8: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
      9: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     23: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
     24: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     39: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], …
     40: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     56: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[…
     57: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     73: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[…
     74: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
     [all …]

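These two InstSimplify tests exercise a division-by-zero guard placed in front of an unsigned-multiply overflow check; the companion -not file appears to hold the variations that must not simplify. A minimal C sketch of the source-level idiom, with hypothetical names (will_overflow, size, nmemb): the size != 0 guard is required in C to keep the division defined, but once the check is expressed as @llvm.umul.with.overflow, the guard is redundant (a multiply by zero never overflows) and can fold away.

    #include <stdbool.h>

    /* Hypothetical sketch: does size * nmemb wrap? The guarded division is
     * the pre-canonicalization form; InstSimplify can drop the size != 0
     * guard once the check has become @llvm.umul.with.overflow. */
    static bool will_overflow(unsigned size, unsigned nmemb)
    {
        return size != 0 && (size * nmemb) / size != nmemb;
    }
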
/external/llvm-project/llvm/test/Transforms/InstCombine/

D | unsigned-mul-overflow-check-via-mul-udiv.ll
     11: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y:%.…
     12: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     23: ; CHECK-NEXT: [[UMUL:%.*]] = call { <2 x i8>, <2 x i1> } @llvm.umul.with.overflow.v2i8(<2 x i8> …
     24: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { <2 x i8>, <2 x i1> } [[UMUL]], 1
     38: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     39: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     52: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     53: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     66: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     67: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     [all …]

D | unsigned-mul-lack-of-overflow-check-via-mul-udiv.ll
     11: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y:%.…
     12: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     24: ; CHECK-NEXT: [[UMUL:%.*]] = call { <2 x i8>, <2 x i1> } @llvm.umul.with.overflow.v2i8(<2 x i8> …
     25: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { <2 x i8>, <2 x i1> } [[UMUL]], 1
     40: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     41: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     55: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     56: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     70: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     71: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     [all …]

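Both InstCombine tests canonicalize this mul-then-udiv overflow idiom (and its lack-of-overflow negation) into the intrinsic the CHECK lines match. For reference, __builtin_mul_overflow is the direct source-level route to the same IR; a minimal sketch with a hypothetical function name:

    #include <stdbool.h>

    /* For unsigned operands, clang lowers this builtin to a call to
     * @llvm.umul.with.overflow plus an extractvalue of the i1 overflow
     * bit, i.e. exactly the instruction pair matched above. */
    static bool mul_overflows(unsigned x, unsigned y)
    {
        unsigned product;
        return __builtin_mul_overflow(x, y, &product);
    }
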
D | unsigned-mul-overflow-check-via-udiv-of-allones.ll
     11: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y:%.…
     12: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     22: ; CHECK-NEXT: [[UMUL:%.*]] = call { <2 x i8>, <2 x i1> } @llvm.umul.with.overflow.v2i8(<2 x i8> …
     23: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { <2 x i8>, <2 x i1> } [[UMUL]], 1
     33: ; CHECK-NEXT: [[UMUL:%.*]] = call { <3 x i8>, <3 x i1> } @llvm.umul.with.overflow.v3i8(<3 x i8> …
     34: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { <3 x i8>, <3 x i1> } [[UMUL]], 1
     47: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     48: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1

D | unsigned-mul-lack-of-overflow-check-via-udiv-of-allones.ll
     11: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y:%.…
     12: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1
     23: ; CHECK-NEXT: [[UMUL:%.*]] = call { <2 x i8>, <2 x i1> } @llvm.umul.with.overflow.v2i8(<2 x i8> …
     24: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { <2 x i8>, <2 x i1> } [[UMUL]], 1
     35: ; CHECK-NEXT: [[UMUL:%.*]] = call { <3 x i8>, <3 x i1> } @llvm.umul.with.overflow.v3i8(<3 x i8> …
     36: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { <3 x i8>, <3 x i1> } [[UMUL]], 1
     50: ; CHECK-NEXT: [[UMUL:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 [[X:%.*]], i8 [[Y]])
     51: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i8, i1 } [[UMUL]], 1

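The udiv-of-allones variant tests the division-free form of the same check: for non-zero y, x * y overflows exactly when x > UINT_MAX / y. A hedged C sketch (names are mine; y must be non-zero, matching the IR, where a udiv by zero would be immediate undefined behavior):

    #include <limits.h>
    #include <stdbool.h>

    /* Overflow check without performing the multiply: the product wraps
     * iff x exceeds UINT_MAX / y. InstCombine rewrites this comparison
     * into @llvm.umul.with.overflow as well. */
    static bool mul_overflows_allones(unsigned x, unsigned y)
    {
        return x > UINT_MAX / y; /* y != 0 assumed */
    }
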
D | icmp-mul-zext.ll
     91: ; CHECK-NEXT: [[UMUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 [[A:%.*]], i16 [[…
     92: ; CHECK-NEXT: [[UMUL_VALUE:%.*]] = extractvalue { i16, i1 } [[UMUL]], 0
     93: ; CHECK-NEXT: [[DID_OVF:%.*]] = extractvalue { i16, i1 } [[UMUL]], 1

/external/llvm-project/llvm/test/Transforms/PhaseOrdering/

D | unsigned-multiply-overflow-check.ll
     38: ; INSTCOMBINEONLY-NEXT: [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]]…
     39: ; INSTCOMBINEONLY-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
     48: ; INSTCOMBINESIMPLIFYCFGONLY-NEXT: [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(…
     49: ; INSTCOMBINESIMPLIFYCFGONLY-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
     55: ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT: [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overfl…
     56: ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
     94: ; INSTCOMBINEONLY-NEXT: [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]]…
     95: ; INSTCOMBINEONLY-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
    105: ; INSTCOMBINESIMPLIFYCFGONLY-NEXT: [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(…
    106: ; INSTCOMBINESIMPLIFYCFGONLY-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
    [all …]

/external/llvm-project/llvm/test/Transforms/SimplifyCFG/

D | unsigned-multiplication-will-overflow.ll
     14: ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[SIZE]], i6…
     15: ; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1

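This SimplifyCFG test covers the branchy form of the check. A sketch of how that shape might look in C (function and parameter names are assumptions; the i64 operands suggest 64-bit types): the early return creates the two-block CFG that SimplifyCFG speculates and flattens, leaving the straight-line code the CHECK lines above expect.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical branchy source: the guarded comparison sits in its own
     * block until SimplifyCFG folds the control flow into a select. */
    static bool will_overflow(uint64_t size, uint64_t nmemb)
    {
        if (size == 0)
            return false; /* 0 * nmemb can never overflow */
        return nmemb > UINT64_MAX / size;
    }
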
/external/mesa3d/src/gallium/tests/graw/vertex-shader/

D | vert-imul_hi.sh
     11: UMUL TEMP[0], TEMP[0], IMM[0].wwww

/external/llvm-project/llvm/test/Transforms/GVN/

D | commute.ll
     56: ; CHECK-NEXT: [[UMUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 [[…
     57: ; CHECK-NEXT: ret { i32, i1 } [[UMUL]]

/external/virglrenderer/src/gallium/auxiliary/tgsi/

D | tgsi_opcode_tmp.h
    164: OP12(UMUL)

/external/mesa3d/src/gallium/auxiliary/tgsi/

D | tgsi_opcode_tmp.h
    155: OP12(UMUL)

D | tgsi_info_opcodes.h
    136: OPCODE(1, 2, COMP, UMUL)

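In these TGSI tables, OPCODE(1, 2, COMP, UMUL) registers UMUL in the opcode info table (reading the arguments as one destination, two sources, component-wise output), and OP12(UMUL) stamps out the matching ureg emit helper ureg_UMUL(). A simplified sketch of the helper the OP12 family generates, under the assumption that it can forward to ureg_insn(); the real macro in tgsi_opcode_tmp.h emits through ureg_emit_insn with more bookkeeping:

    /* Simplified sketch (assumption): the one-dst/two-src emit helper that
     * OP12(UMUL) would expand into, callable as ureg_UMUL(ureg, dst, a, b). */
    #define OP12(op)                                              \
    static inline void ureg_##op(struct ureg_program *ureg,       \
                                 struct ureg_dst dst,             \
                                 struct ureg_src src0,            \
                                 struct ureg_src src1)            \
    {                                                             \
       struct ureg_src src[2] = { src0, src1 };                   \
       ureg_insn(ureg, TGSI_OPCODE_##op, &dst, 1, src, 2);        \
    }
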
/external/llvm/lib/Target/AMDGPU/

D | AMDGPUISelLowering.h
    223: UMUL, // 32bit unsigned multiplication    (enumerator)

/external/llvm-project/polly/lib/External/isl/imath/

D | imath.c
    182: #define UMUL(X, Y, Z) \    (macro)
   2719: UMUL(q1, mu, q2);    (in s_reduce())
   2729: UMUL(q2, m, q1);    (in s_reduce())
   2774: UMUL(c, a, TEMP(0));    (in s_embar())
   2798: UMUL(c, a, TEMP(0));    (in s_embar())

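imath's UMUL macro is an unsigned bignum multiply into a preallocated temporary; the call sites above sit in Barrett reduction (s_reduce) and modular exponentiation (s_embar). A hedged sketch of the macro's likely shape, not its verbatim definition; the accessors and the low-level s_kmul multiply are assumptions about imath's internals:

    /* Sketch: Z = |X| * |Y|, with Z's digit storage preallocated by the
     * caller. MP_DIGITS/MP_USED, ZERO, CLAMP and s_kmul are imath-style
     * helpers assumed here, not copied from imath.c. */
    #define UMUL(X, Y, Z)                                                 \
      do {                                                                \
        mp_size ua_ = MP_USED(X), ub_ = MP_USED(Y);                       \
        ZERO(MP_DIGITS(Z), ua_ + ub_);                                    \
        (void)s_kmul(MP_DIGITS(X), MP_DIGITS(Y), MP_DIGITS(Z), ua_, ub_); \
        MP_USED(Z) = ua_ + ub_;                                           \
        CLAMP(Z);                                                         \
      } while (0)
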
/external/mesa3d/docs/relnotes/

D | 10.3.3.rst
     72: - freedreno/ir3: implement UMUL correctly

/external/llvm-project/llvm/lib/Target/AMDGPU/

D | AMDGPUISelLowering.h
    349: UMUL, // 32bit unsigned multiplication    (enumerator)

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/

D | AMDGPUISelLowering.h
    336: UMUL, // 32bit unsigned multiplication    (enumerator)

/external/llvm/lib/Target/X86/

D | X86ISelLowering.h
    349: UMUL,    (enumerator)

D | X86FastISel.cpp
   2742: BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;    (in fastLowerIntrinsicCall())
   2782: if (BaseOpc == X86ISD::UMUL && !ResultReg) {    (in fastLowerIntrinsicCall())

/external/pcre/dist2/src/sljit/

D | sljitNativeSPARC_common.c
    194: #define UMUL (OPC1(0x2) | OPC3(0x0a))    (macro)
    852: …FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(…    (in sljit_emit_op0())

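sljit assembles the SPARC instruction word for umul directly from opcode fields. A sketch of the field packing consistent with the macro above, assuming OPC1/OPC3 place the SPARC format-3 op field in bits 31:30 and op3 in bits 24:19; op 0x2 with op3 0x0a is the architectural encoding of umul, the 32x32->64 unsigned multiply whose high word lands in %y:

    #include <stdint.h>

    /* Assumed field packing for a SPARC format-3 arithmetic instruction;
     * register fields (rd, rs1, rs2) are OR'd in separately, as in the
     * sljit_emit_op0 call above. */
    #define OPC1(a) ((uint32_t)(a) << 30) /* op  : bits 31:30 */
    #define OPC3(a) ((uint32_t)(a) << 19) /* op3 : bits 24:19 */
    #define UMUL (OPC1(0x2) | OPC3(0x0a)) /* umul rd, rs1, rs2 */
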
/external/llvm-project/llvm/lib/Target/X86/

D | X86ISelLowering.h
    398: UMUL,    (enumerator)

D | X86FastISel.cpp
   2922: BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;    (in fastLowerIntrinsicCall())
   2964: if (BaseOpc == X86ISD::UMUL && !ResultReg) {    (in fastLowerIntrinsicCall())

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/

D | X86ISelLowering.h
    341: ADD, SUB, ADC, SBB, SMUL, UMUL,    (enumerator)