/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | div-by-0-guard-before-umul_ov.ll |
    4    declare { i4, i1 } @llvm.umul.with.overflow.i4(i4, i4) #1
    8    ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    13   %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
    14   %umul.ov = extractvalue { i4, i1 } %umul, 1
    15   %and = and i1 %umul.ov, %cmp
    21   ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    26   %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
    27   %umul.ov = extractvalue { i4, i1 } %umul, 1
    28   %and = and i1 %cmp, %umul.ov ; swapped
    35   ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], …
    [all …]
|
D | div-by-0-guard-before-umul_ov-not.ll |
    4    declare { i4, i1 } @llvm.umul.with.overflow.i4(i4, i4) #1
    8    ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    14   %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
    15   %umul.ov = extractvalue { i4, i1 } %umul, 1
    16   %phitmp = xor i1 %umul.ov, true
    23   ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    29   %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
    30   %umul.ov = extractvalue { i4, i1 } %umul, 1
    31   %phitmp = xor i1 %umul.ov, true
    39   ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], …
    [all …]
|
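Note: the two InstSimplify files above test the same idiom, an overflow flag and-ed with a non-zero guard on one multiplicand; since a zero operand can never overflow, the guard is redundant and the expression folds to the overflow bit alone (or its negation in the -not variant). A minimal sketch of the idiom, with illustrative names and i64 chosen arbitrarily rather than the i4 the tests use:

    declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)

    define i1 @will_overflow(i64 %size, i64 %nmemb) {
      ; guard: only report overflow when %size is non-zero
      %cmp = icmp ne i64 %size, 0
      %pair = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
      %ov = extractvalue { i64, i1 } %pair, 1
      ; overflow already implies %size != 0, so this 'and' simplifies to just %ov
      %and = and i1 %ov, %cmp
      ret i1 %and
    }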
/external/llvm/test/CodeGen/ARM/ |
D | umulo-32.ll |
    3    %umul.ty = type { i32, i1 }
    8    %tmp0 = tail call %umul.ty @llvm.umul.with.overflow.i32(i32 %a, i32 37)
    9    %tmp1 = extractvalue %umul.ty %tmp0, 0
    14   declare %umul.ty @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
    32   %5 = call %umul.ty @llvm.umul.with.overflow.i32(i32 %4, i32 8)
    33   %6 = extractvalue %umul.ty %5, 1
    34   %7 = extractvalue %umul.ty %5, 0
|
/external/llvm-project/llvm/test/Transforms/SimplifyCFG/ |
D | unsigned-multiplication-will-overflow.ll |
    8    ; produced llvm.umul.with.overflow.
    14   ; CHECK-NEXT: [[UMUL:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[SIZE]], i6…
    25   %umul = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
    26   %umul.ov = extractvalue { i64, i1 } %umul, 1
    27   %umul.not.ov = xor i1 %umul.ov, true
    31   %0 = phi i1 [ true, %entry ], [ %umul.not.ov, %land.rhs ]
    36   declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0
|
/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | arith-fix.ll |
    248  declare i64 @llvm.umul.fix.i64(i64, i64, i32)
    249  declare <2 x i64> @llvm.umul.fix.v2i64(<2 x i64>, <2 x i64>, i32)
    250  declare <4 x i64> @llvm.umul.fix.v4i64(<4 x i64>, <4 x i64>, i32)
    251  declare <8 x i64> @llvm.umul.fix.v8i64(<8 x i64>, <8 x i64>, i32)
    253  declare i32 @llvm.umul.fix.i32(i32, i32, i32)
    254  declare <4 x i32> @llvm.umul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
    255  declare <8 x i32> @llvm.umul.fix.v8i32(<8 x i32>, <8 x i32>, i32)
    256  declare <16 x i32> @llvm.umul.fix.v16i32(<16 x i32>, <16 x i32>, i32)
    258  declare i16 @llvm.umul.fix.i16(i16, i16, i32)
    259  declare <8 x i16> @llvm.umul.fix.v8i16(<8 x i16>, <8 x i16>, i32)
    [all …]
|
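Note: unlike the overflow intrinsic, llvm.umul.fix is an unsigned fixed-point multiply: the third operand is a constant scale giving the number of fractional bits, and the result is the full-width product shifted right by that scale. A minimal sketch assuming a Q16.16 layout (function name and format are illustrative, not taken from the listed tests):

    declare i32 @llvm.umul.fix.i32(i32, i32, i32)

    define i32 @fixmul_q16(i32 %a, i32 %b) {
      ; (%a * %b) >> 16, with the multiply done at full width so no high bits are lost
      %r = call i32 @llvm.umul.fix.i32(i32 %a, i32 %b, i32 16)
      ret i32 %r
    }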
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | umulo-32.ll |
    4    %umul.ty = type { i32, i1 }
    22   %tmp0 = tail call %umul.ty @llvm.umul.with.overflow.i32(i32 %a, i32 37)
    23   %tmp1 = extractvalue %umul.ty %tmp0, 0
    28   declare %umul.ty @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
    55   %res = call %umul.ty @llvm.umul.with.overflow.i32(i32 %val, i32 8)
    56   %ov = extractvalue %umul.ty %res, 1
    57   %mul = extractvalue %umul.ty %res, 0
|
/external/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
    186  ;; umul
    190  %umul = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
    191  %cmp = extractvalue { i8, i1 } %umul, 1
    192  %umul.result = extractvalue { i8, i1 } %umul, 0
    193  %X = select i1 %cmp, i8 %umul.result, i8 42
    197  declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
    201  %umul = tail call { i16, i1 } @llvm.umul.with.overflow.i16(i16 %a, i16 %b)
    202  %cmp = extractvalue { i16, i1 } %umul, 1
    203  %umul.result = extractvalue { i16, i1 } %umul, 0
    204  %X = select i1 %cmp, i16 %umul.result, i16 42
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
    186  ;; umul
    190  %umul = tail call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
    191  %cmp = extractvalue { i8, i1 } %umul, 1
    192  %umul.result = extractvalue { i8, i1 } %umul, 0
    193  %X = select i1 %cmp, i8 %umul.result, i8 42
    197  declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
    201  %umul = tail call { i16, i1 } @llvm.umul.with.overflow.i16(i16 %a, i16 %b)
    202  %cmp = extractvalue { i16, i1 } %umul, 1
    203  %umul.result = extractvalue { i16, i1 } %umul, 0
    204  %X = select i1 %cmp, i16 %umul.result, i16 42
    [all …]
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | arith-mul-umulo.ll |
    23   declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64)
    24   declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)
    25   declare {i16, i1} @llvm.umul.with.overflow.i16(i16, i16)
    26   declare {i8 , i1} @llvm.umul.with.overflow.i8 (i8 , i8 )
    46   ; CHECK-NEXT: [[C0:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A0]], i64 [[B0]])
    47   ; CHECK-NEXT: [[C1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A1]], i64 [[B1]])
    48   ; CHECK-NEXT: [[C2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A2]], i64 [[B2]])
    49   ; CHECK-NEXT: [[C3:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A3]], i64 [[B3]])
    50   ; CHECK-NEXT: [[C4:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A4]], i64 [[B4]])
    51   ; CHECK-NEXT: [[C5:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A5]], i64 [[B5]])
    [all …]
|
D | arith-fix.ll |
    952  declare i64 @llvm.umul.fix.i64(i64, i64, i32)
    953  declare i32 @llvm.umul.fix.i32(i32, i32, i32)
    954  declare i16 @llvm.umul.fix.i16(i16, i16, i32)
    955  declare i8 @llvm.umul.fix.i8 (i8 , i8 , i32)
    967  ; SSE-NEXT: [[TMP9:%.*]] = call <2 x i64> @llvm.umul.fix.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[T…
    968  ; SSE-NEXT: [[TMP10:%.*]] = call <2 x i64> @llvm.umul.fix.v2i64(<2 x i64> [[TMP2]], <2 x i64> [[…
    969  ; SSE-NEXT: [[TMP11:%.*]] = call <2 x i64> @llvm.umul.fix.v2i64(<2 x i64> [[TMP3]], <2 x i64> [[…
    970  ; SSE-NEXT: [[TMP12:%.*]] = call <2 x i64> @llvm.umul.fix.v2i64(<2 x i64> [[TMP4]], <2 x i64> [[…
    986  ; SLM-NEXT: [[TMP9:%.*]] = call <2 x i64> @llvm.umul.fix.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[…
    987  ; SLM-NEXT: [[TMP10:%.*]] = call <2 x i64> @llvm.umul.fix.v2i64(<2 x i64> [[TMP2]], <2 x i64> [[…
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | mulfix_combine.ll |
    5    declare i32 @llvm.umul.fix.i32(i32, i32, i32 immarg)
    7    declare i32 @llvm.umul.fix.sat.i32(i32, i32, i32 immarg)
    10   declare <4 x i32> @llvm.umul.fix.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
    12   declare <4 x i32> @llvm.umul.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
    37   %tmp = call i32 @llvm.umul.fix.i32(i32 undef, i32 %y, i32 2)
    46   %tmp = call i32 @llvm.umul.fix.i32(i32 0, i32 %y, i32 2)
    73   %tmp = call i32 @llvm.umul.fix.sat.i32(i32 undef, i32 %y, i32 2)
    82   %tmp = call i32 @llvm.umul.fix.sat.i32(i32 0, i32 %y, i32 2)
    109  %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> undef, <4 x i32> %y, i32 2)
    118  …%tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> %y, i…
    [all …]
|
D | umul-with-overflow.ll |
    5    declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
    22   %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
    43   %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
    67   %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
    74   declare {i300, i1} @llvm.umul.with.overflow.i300(i300 %a, i300 %b)
    76   %x = call {i300, i1} @llvm.umul.with.overflow.i300(i300 %a, i300 %b)
|
/external/llvm-project/llvm/test/Transforms/GVN/ |
D | commute.ll |
    46   declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
    56   ; CHECK-NEXT: [[UMUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 [[…
    67   %umul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    68   ret { i32, i1 } %umul
    72   declare i16 @llvm.umul.fix.i16(i16, i16, i32)
    89   ; CHECK-NEXT: [[M1:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[X:%.*]], i16 [[Y:%.*]], i32 2)
    90   ; CHECK-NEXT: [[M2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[Y]], i16 [[X]], i32 1)
    94   %m1 = call i16 @llvm.umul.fix.i16(i16 %x, i16 %y, i32 2)
    95   %m2 = call i16 @llvm.umul.fix.i16(i16 %y, i16 %x, i32 1)
|
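Note: commute.ll checks that GVN treats llvm.umul.with.overflow as commutative, so a call with swapped operands can be value-numbered to an earlier one (the fixed-point calls in the same file are kept because their scales differ). A sketch of the kind of redundancy this enables GVN to remove, with illustrative names not taken from the test:

    declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)

    define i1 @redundant_overflow_checks(i32 %x, i32 %y) {
      %m1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
      %m2 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %y, i32 %x)
      %o1 = extractvalue { i32, i1 } %m1, 1
      %o2 = extractvalue { i32, i1 } %m2, 1
      ; once %m2 is recognized as the same value as %m1, this xor folds to false
      %ne = xor i1 %o1, %o2
      ret i1 %ne
    }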
D | 2011-07-07-MatchIntrinsicExtract.ll |
    32   %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    33   %umul.0 = extractvalue %0 %umul, 0
    81   declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | umul-sign-check.ll |
    4    ; Check that we simplify llvm.umul.with.overflow, if the overflow check is
    12   declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0
    24   %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    44   %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    64   %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    78   ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i6…
    86   %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    98   ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i6…
    108  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    123  ; CHECK-NEXT: [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i6…
    [all …]
|
D | overflow-mul.ll |
    22   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    37   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    53   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    71   ; CHECK-NOT: umul.with.overflow.i32
    85   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    101  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    119  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 [[Y]])
    134  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    150  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    167  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    [all …]
|
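Note: the overflow-mul.ll tests (here and in the older /external/llvm copy further down) check that InstCombine rewrites a hand-widened multiply-and-compare into a single overflow intrinsic. The input shape is roughly the following sketch (not copied from the test; names are illustrative); after the transform the compare is answered by the i1 field of @llvm.umul.with.overflow.i32:

    define i1 @mul_would_overflow(i32 %x, i32 %y) {
      %xw = zext i32 %x to i64
      %yw = zext i32 %y to i64
      %prod = mul i64 %xw, %yw
      ; true iff the product does not fit back into 32 bits
      %ov = icmp ugt i64 %prod, 4294967295
      ret i1 %ov
    }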
D | commutative-intrinsics.ll |
    51   ; CHECK-NEXT: [[R:%.*]] = call i5 @llvm.umul.fix.i5(i5 [[X:%.*]], i5 10, i32 3)
    54   %r = call i5 @llvm.umul.fix.i5(i5 42, i5 %x, i32 3)
    69   ; CHECK-NEXT: [[R:%.*]] = call <3 x i35> @llvm.umul.fix.sat.v3i35(<3 x i35> [[X:%.*]], <3 x i35>…
    72   …%r = call <3 x i35> @llvm.umul.fix.sat.v3i35(<3 x i35> <i35 undef, i35 42, i35 43>, <3 x i35> %x, …
    81   declare i5 @llvm.umul.fix.i5(i5, i5, i32)
    83   declare <3 x i35> @llvm.umul.fix.sat.v3i35(<3 x i35>, <3 x i35>, i32)
|
/external/llvm/test/Transforms/InstCombine/ |
D | overflow-mul.ll |
    11   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    26   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    42   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    60   ; CHECK-NOT: umul.with.overflow.i32
    74   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    90   ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    108  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 [[Y]])
    123  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    139  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    156  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | umul-with-overflow.ll |
    3    declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
    5    %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
    18   %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
    30   %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
|
D | xmulo.ll |
    5    declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
    16   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 8)
    30   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 0)
    44   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 -1)
|
D | umul-with-carry.ll |
    4    ; FIXME: umul-with-overflow not supported yet.
    11   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
    26   declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)
|
/external/llvm-project/llvm/test/Transforms/NewGVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
    34   %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    35   %umul.0 = extractvalue %0 %umul, 0
    37   %add2 = add i64 %mul1, %umul.0
    87   declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | pr45448.ll |
    70   %umul = call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %2, i128 %5)
    71   %umul.ov = extractvalue { i128, i1 } %umul, 1
    72   %value_phi102 = and i1 %1, %umul.ov
    90   declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1
|
/external/llvm/test/Transforms/GVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
    32   %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
    33   %umul.0 = extractvalue %0 %umul, 0
    81   declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | vec_umulo.ll |
    4    declare {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32>, <1 x i32>)
    5    declare {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32>, <2 x i32>)
    6    declare {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32>, <3 x i32>)
    7    declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>)
    8    declare {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32>, <6 x i32>)
    9    declare {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
    11   declare {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8>, <16 x i8>)
    12   declare {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16>, <8 x i16>)
    13   declare {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64>, <2 x i64>)
    15   declare {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24>, <4 x i24>)
    [all …]
|
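Note: vec_umulo.ll exercises the vector forms, which return a vector of products together with a per-lane overflow mask. A minimal sketch of a lane-wise checked multiply (illustrative names; the v4i32 form is one of those declared above):

    declare { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>)

    define <4 x i32> @checked_mul_v4(<4 x i32> %a, <4 x i32> %b) {
      %pair = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
      %prod = extractvalue { <4 x i32>, <4 x i1> } %pair, 0
      %ov = extractvalue { <4 x i32>, <4 x i1> } %pair, 1
      ; zero out the lanes whose 32-bit product overflowed
      %res = select <4 x i1> %ov, <4 x i32> zeroinitializer, <4 x i32> %prod
      ret <4 x i32> %res
    }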