/external/llvm-project/llvm/test/Transforms/InstSimplify/ |
D | div-by-0-guard-before-smul_ov.ll |
     4  declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
     8  ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    13  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
    14  %smul.ov = extractvalue { i4, i1 } %smul, 1
    15  %and = and i1 %smul.ov, %cmp
    21  ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    26  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
    27  %smul.ov = extractvalue { i4, i1 } %smul, 1
    28  %and = and i1 %cmp, %smul.ov ; swapped
    35  ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], …
    [all …]
|
D | div-by-0-guard-before-smul_ov-not.ll |
     4  declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
     8  ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    14  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
    15  %smul.ov = extractvalue { i4, i1 } %smul, 1
    16  %phitmp = xor i1 %smul.ov, true
    23  ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i…
    29  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
    30  %smul.ov = extractvalue { i4, i1 } %smul, 1
    31  %phitmp = xor i1 %smul.ov, true
    39  ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], …
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/WebAssembly/ |
D | muloti4.ll |
     3  ; Test that 128-bit smul.with.overflow assembles as expected.
    10  %smul = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %a, i128 %b)
    11  %cmp = extractvalue { i128, i1 } %smul, 1
    12  %smul.result = extractvalue { i128, i1 } %smul, 0
    13  %X = select i1 %cmp, i128 %smul.result, i128 42
    19  declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128) nounwind readnone
|
/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | arith-fix.ll |
    14  declare i64 @llvm.smul.fix.i64(i64, i64, i32)
    15  declare <2 x i64> @llvm.smul.fix.v2i64(<2 x i64>, <2 x i64>, i32)
    16  declare <4 x i64> @llvm.smul.fix.v4i64(<4 x i64>, <4 x i64>, i32)
    17  declare <8 x i64> @llvm.smul.fix.v8i64(<8 x i64>, <8 x i64>, i32)
    19  declare i32 @llvm.smul.fix.i32(i32, i32, i32)
    20  declare <4 x i32> @llvm.smul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
    21  declare <8 x i32> @llvm.smul.fix.v8i32(<8 x i32>, <8 x i32>, i32)
    22  declare <16 x i32> @llvm.smul.fix.v16i32(<16 x i32>, <16 x i32>, i32)
    24  declare i16 @llvm.smul.fix.i16(i16, i16, i32)
    25  declare <8 x i16> @llvm.smul.fix.v8i16(<8 x i16>, <8 x i16>, i32)
    [all …]
|
/external/speex/libspeexdsp/ |
D | _kiss_fft_guts.h |
    62  # define smul(a,b) ( (SAMPPROD)(a)*(b) )   macro
    65  # define S_MUL(a,b) sround( smul(a,b) )
    68  do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \
    69  (m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0)
    72  do{ (m).r = PSHR32( smul((a).r,(b).r) - smul((a).i,(b).i),17 ); \
    73  (m).i = PSHR32( smul((a).r,(b).i) + smul((a).i,(b).r),17 ); }while(0)
    76  (x) = sround( smul( x, SAMP_MAX/k ) )
    83  do{ (c).r = sround( smul( (c).r , s ) ) ;\
    84  (c).i = sround( smul( (c).i , s ) ) ; }while(0)
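These speexdsp macros implement fixed-point complex arithmetic: smul widens both operands to SAMPPROD before multiplying, and sround (or PSHR32 in the wider-sample configuration) rounds and shifts the product back down to the sample width. Below is a minimal C++ sketch of that pattern, assuming Q15 samples (16-bit scalar, 32-bit SAMPPROD, FRACBITS = 15); the type widths and rounding constant are assumptions for illustration, not copied from the header.

// Sketch of the speexdsp fixed-point multiply pattern, assuming Q15 samples.
#include <cstdint>
#include <cstdio>

using sample_t  = int16_t;   // stand-in for kiss_fft_scalar (assumed 16-bit)
using samprod_t = int32_t;   // stand-in for SAMPPROD (assumed 32-bit)
constexpr int FRACBITS = 15; // assumed fractional bits for Q15

struct cpx { sample_t r, i; };

// Widening multiply: keep the full 32-bit product (the smul macro).
constexpr samprod_t smul(sample_t a, sample_t b) { return (samprod_t)a * b; }

// Round and shift back to Q15 (the sround macro, as assumed here).
constexpr sample_t sround(samprod_t x) {
  return (sample_t)((x + (1 << (FRACBITS - 1))) >> FRACBITS);
}

// Complex multiply in Q15, mirroring the structure of the C_MUL macro.
constexpr cpx c_mul(cpx a, cpx b) {
  return { sround(smul(a.r, b.r) - smul(a.i, b.i)),
           sround(smul(a.r, b.i) + smul(a.i, b.r)) };
}

int main() {
  cpx half = { 1 << 14, 0 };        // 0.5 + 0i in Q15
  cpx v    = { 1 << 14, 1 << 14 };  // 0.5 + 0.5i in Q15
  cpx p    = c_mul(half, v);        // expect 0.25 + 0.25i, i.e. 8192 8192
  std::printf("%d %d\n", p.r, p.i);
}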
|
/external/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
   150  ;; smul
   154  %smul = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 %b)
   155  %cmp = extractvalue { i8, i1 } %smul, 1
   156  %smul.result = extractvalue { i8, i1 } %smul, 0
   157  %X = select i1 %cmp, i8 %smul.result, i8 42
   161  declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
   165  %smul = tail call { i16, i1 } @llvm.smul.with.overflow.i16(i16 %a, i16 %b)
   166  %cmp = extractvalue { i16, i1 } %smul, 1
   167  %smul.result = extractvalue { i16, i1 } %smul, 0
   168  %X = select i1 %cmp, i16 %smul.result, i16 42
   [all …]
|
/external/llvm-project/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
   150  ;; smul
   154  %smul = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 %b)
   155  %cmp = extractvalue { i8, i1 } %smul, 1
   156  %smul.result = extractvalue { i8, i1 } %smul, 0
   157  %X = select i1 %cmp, i8 %smul.result, i8 42
   161  declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
   165  %smul = tail call { i16, i1 } @llvm.smul.with.overflow.i16(i16 %a, i16 %b)
   166  %cmp = extractvalue { i16, i1 } %smul, 1
   167  %smul.result = extractvalue { i16, i1 } %smul, 0
   168  %X = select i1 %cmp, i16 %smul.result, i16 42
   [all …]
|
/external/llvm-project/llvm/test/Transforms/InstSimplify/ConstProp/ |
D | smul-fix.ll |
     8  declare i32 @llvm.smul.fix.i32(i32, i32, i32)
    14  %r = call i32 @llvm.smul.fix.i32(i32 1073741824, i32 1073741824, i32 31) ; 0.5 * 0.5
    23  declare <8 x i3> @llvm.smul.fix.v8i3(<8 x i3>, <8 x i3>, i32)
    29  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    40  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    51  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    62  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    73  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    84  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    95  %r = call <8 x i3> @llvm.smul.fix.v8i3(
    [all …]
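For reference on what constant folding computes in the line-14 snippet above: llvm.smul.fix multiplies its operands as signed fixed-point numbers with the given number of fractional bits, so the result is the double-width signed product shifted right arithmetically by the scale. A small stand-alone C++ model of the i32 case (an illustration of the arithmetic, not LLVM code):

// Plain C++ model of @llvm.smul.fix.i32(a, b, scale) constant folding.
#include <cstdint>

// Sign-extended 64-bit product, arithmetically shifted right, truncated to i32.
constexpr int32_t smul_fix_i32(int32_t a, int32_t b, unsigned scale) {
  return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> scale);
}

// 1073741824 is 0.5 in Q31, so the folded result is 0.25 in Q31 (536870912).
static_assert(smul_fix_i32(1073741824, 1073741824, 31) == 536870912, "0.5 * 0.5");

int main() { return 0; }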
|
D | smul-fix-sat.ll |
     8  declare i32 @llvm.smul.fix.sat.i32(i32, i32, i32)
    14  %r = call i32 @llvm.smul.fix.sat.i32(i32 1073741824, i32 1073741824, i32 31) ; 0.5 * 0.5
    23  declare <8 x i3> @llvm.smul.fix.sat.v8i3(<8 x i3>, <8 x i3>, i32)
    29  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    40  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    51  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    62  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    73  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    84  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    95  %r = call <8 x i3> @llvm.smul.fix.sat.v8i3(
    [all …]
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | arith-mul-smulo.ll |
    23  declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64)
    24  declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
    25  declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16)
    26  declare {i8 , i1} @llvm.smul.with.overflow.i8 (i8 , i8 )
    46  ; CHECK-NEXT: [[C0:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A0]], i64 [[B0]])
    47  ; CHECK-NEXT: [[C1:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A1]], i64 [[B1]])
    48  ; CHECK-NEXT: [[C2:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A2]], i64 [[B2]])
    49  ; CHECK-NEXT: [[C3:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A3]], i64 [[B3]])
    50  ; CHECK-NEXT: [[C4:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A4]], i64 [[B4]])
    51  ; CHECK-NEXT: [[C5:%.*]] = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 [[A5]], i64 [[B5]])
    [all …]
|
D | arith-fix.ll |
    23  declare i64 @llvm.smul.fix.i64(i64, i64, i32)
    24  declare i32 @llvm.smul.fix.i32(i32, i32, i32)
    25  declare i16 @llvm.smul.fix.i16(i16, i16, i32)
    26  declare i8 @llvm.smul.fix.i8 (i8 , i8 , i32)
    38  ; SSE-NEXT: [[TMP9:%.*]] = call <2 x i64> @llvm.smul.fix.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[T…
    39  ; SSE-NEXT: [[TMP10:%.*]] = call <2 x i64> @llvm.smul.fix.v2i64(<2 x i64> [[TMP2]], <2 x i64> [[…
    40  ; SSE-NEXT: [[TMP11:%.*]] = call <2 x i64> @llvm.smul.fix.v2i64(<2 x i64> [[TMP3]], <2 x i64> [[…
    41  ; SSE-NEXT: [[TMP12:%.*]] = call <2 x i64> @llvm.smul.fix.v2i64(<2 x i64> [[TMP4]], <2 x i64> [[…
    57  ; SLM-NEXT: [[TMP9:%.*]] = call <2 x i64> @llvm.smul.fix.v2i64(<2 x i64> [[TMP1]], <2 x i64> [[T…
    58  ; SLM-NEXT: [[TMP10:%.*]] = call <2 x i64> @llvm.smul.fix.v2i64(<2 x i64> [[TMP2]], <2 x i64> [[…
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | smul_fix_sat_constants.ll |
     6  declare i4 @llvm.smul.fix.sat.i4 (i4, i4, i32)
     7  declare i32 @llvm.smul.fix.sat.i32 (i32, i32, i32)
     8  declare i64 @llvm.smul.fix.sat.i64 (i64, i64, i32)
     9  declare <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)
    10  declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64)
    26  %tmp = call i64 @llvm.smul.fix.sat.i64(i64 3, i64 2, i32 2)
    43  %tmp = call i64 @llvm.smul.fix.sat.i64(i64 3, i64 2, i32 0)
    61  %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 2)
    79  %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 32)
    99  %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 63)
|
D | mulfix_combine.ll |
     4  declare i32 @llvm.smul.fix.i32(i32, i32, i32 immarg)
     6  declare i32 @llvm.smul.fix.sat.i32(i32, i32, i32 immarg)
     9  declare <4 x i32> @llvm.smul.fix.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
    11  declare <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32 immarg)
    19  %tmp = call i32 @llvm.smul.fix.i32(i32 undef, i32 %y, i32 2)
    28  %tmp = call i32 @llvm.smul.fix.i32(i32 0, i32 %y, i32 2)
    55  %tmp = call i32 @llvm.smul.fix.sat.i32(i32 undef, i32 %y, i32 2)
    64  %tmp = call i32 @llvm.smul.fix.sat.i32(i32 0, i32 %y, i32 2)
    91  %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> undef, <4 x i32> %y, i32 2)
   100  …%tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> %y, i…
    [all …]
|
D | smul-with-overflow.ll |
     8  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    27  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    45  declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
    50  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
    62  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
    71  declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone
    75  %res = call { i63, i1 } @llvm.smul.with.overflow.i63(i63 4, i63 4611686018427387903)
|
D | smul_fix.ll |
     5  declare i4 @llvm.smul.fix.i4 (i4, i4, i32)
     6  declare i32 @llvm.smul.fix.i32 (i32, i32, i32)
     7  declare i64 @llvm.smul.fix.i64 (i64, i64, i32)
     8  declare <4 x i32> @llvm.smul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
    28  %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2)
    93  %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2)
   131  %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
   196  %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2)
   213  %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0)
   240  %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0)
   [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | smul-with-overflow.ll |
     8  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    27  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    45  declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
    50  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
    62  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
    71  declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone
    75  %res = call { i63, i1 } @llvm.smul.with.overflow.i63(i63 4, i63 4611686018427387903)
|
/external/llvm-project/llvm/test/Transforms/NewGVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
    73  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
    74  %smul.0 = extractvalue %0 %smul, 0
    76  %add2 = add i64 %mul1, %smul.0
    90  declare %0 @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm/test/Analysis/ValueTracking/ |
D | pr23011.ll |
     3  declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
     9  %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
    10  ; CHECK: %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
|
/external/llvm-project/llvm/test/Analysis/ValueTracking/ |
D | pr23011.ll |
     3  declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
     9  %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
    10  ; CHECK: %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
|
/external/llvm/test/Transforms/GVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
    68  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
    69  %smul.0 = extractvalue %0 %smul, 0
    84  declare %0 @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm-project/llvm/test/Transforms/GVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
    68  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
    69  %smul.0 = extractvalue %0 %smul, 0
    84  declare %0 @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm-project/clang/test/SemaCXX/ |
D | builtins-overflow.cpp |
    88  constexpr Result<int> smul(int lhs, int rhs) {   in smul() function
    93  static_assert(smul(17,22) == Result<int>{false, 374});
    94  static_assert(smul(INT_MAX / 22, 23) == Result<int>{true, -2049870757});
    95  static_assert(smul(INT_MIN / 22, -23) == Result<int>{true, -2049870757});
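The snippet above is from clang's constant-evaluation test for the overflow builtins: smul returns both the (possibly wrapped) product and an overflow flag, and the static_asserts check that this folds at compile time. A minimal sketch of how such a helper could be written is shown below; the Result<T> layout and the use of __builtin_smul_overflow are assumptions for illustration, not the test's actual definitions.

// Sketch only: constexpr overflow-checked multiply, assuming clang with -std=c++17.
#include <climits>

template <typename T>
struct Result {
  bool overflowed;  // true if lhs * rhs did not fit in T
  T value;          // wrapped two's-complement product
  constexpr bool operator==(const Result &o) const {
    return overflowed == o.overflowed && value == o.value;
  }
};

constexpr Result<int> smul(int lhs, int rhs) {
  int res = 0;
  // Clang can evaluate this builtin in constant expressions.
  bool ovf = __builtin_smul_overflow(lhs, rhs, &res);
  return {ovf, res};
}

static_assert(smul(17, 22) == Result<int>{false, 374});
static_assert(smul(INT_MAX / 22, 23) == Result<int>{true, -2049870757});

int main() { return 0; }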
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | commutative-intrinsics.ll |
    42  ; CHECK-NEXT: [[R:%.*]] = call i35 @llvm.smul.fix.i35(i35 [[X:%.*]], i35 42, i32 2)
    45  %r = call i35 @llvm.smul.fix.i35(i35 42, i35 %x, i32 2)
    60  ; CHECK-NEXT: [[R:%.*]] = call <2 x i35> @llvm.smul.fix.sat.v2i35(<2 x i35> [[X:%.*]], <2 x i35>…
    63  %r = call <2 x i35> @llvm.smul.fix.sat.v2i35(<2 x i35> <i35 42, i35 43>, <2 x i35> %x, i32 4)
    80  declare i35 @llvm.smul.fix.i35(i35, i35, i32)
    82  declare <2 x i35> @llvm.smul.fix.sat.v2i35(<2 x i35>, <2 x i35>, i32)
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | unschedule-first-call.ll |
    27  %20 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %18, i64 %19)
    31  %24 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %23, i64 undef)
    48  %41 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %39, i64 %40)
    53  %46 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %44, i64 %45)
    69  %62 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %61, i64 undef)
    76  %69 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %67, i64 %68)
   123  declare { i64, i1 } @llvm.smul.with.overflow.i64(i64, i64) #1
|
/external/llvm-project/llvm/test/Analysis/CostModel/ARM/ |
D | arith-overflow.ll |
   641  declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64)
   642  declare {<2 x i64>, <2 x i1>} @llvm.smul.with.overflow.v2i64(<2 x i64>, <2 x i64>)
   643  declare {<4 x i64>, <4 x i1>} @llvm.smul.with.overflow.v4i64(<4 x i64>, <4 x i64>)
   644  declare {<8 x i64>, <8 x i1>} @llvm.smul.with.overflow.v8i64(<8 x i64>, <8 x i64>)
   646  declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
   647  declare {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32>, <4 x i32>)
   648  declare {<8 x i32>, <8 x i1>} @llvm.smul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
   649  declare {<16 x i32>, <16 x i1>} @llvm.smul.with.overflow.v16i32(<16 x i32>, <16 x i32>)
   651  declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16)
   652  declare {<8 x i16>, <8 x i1>} @llvm.smul.with.overflow.v8i16(<8 x i16>, <8 x i16>)
   [all …]
|