/external/python/cpython3/Modules/_decimal/libmpdec/ |
D | memory.c |
     58  mpd_size_t overflow;  in mpd_callocfunc_em() local
     68  &overflow);  in mpd_callocfunc_em()
     69  if (overflow) {  in mpd_callocfunc_em()
     88  mpd_size_t req, overflow;  in mpd_alloc() local
     90  req = mul_size_t_overflow(nmemb, size, &overflow);  in mpd_alloc()
     91  if (overflow) {  in mpd_alloc()
    102  mpd_size_t overflow;  in mpd_calloc() local
    104  (void)mul_size_t_overflow(nmemb, size, &overflow);  in mpd_calloc()
    105  if (overflow) {  in mpd_calloc()
    117  mpd_size_t req, overflow;  in mpd_realloc() local
    [all …]
|
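The memory.c hits above show libmpdec's allocation wrappers multiplying nmemb by size through an overflow-reporting helper (mul_size_t_overflow) and refusing the request when the flag is set, instead of handing a wrapped-around byte count to the allocator. A minimal C sketch of that pattern, using a hypothetical checked_mul helper rather than libmpdec's own function:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for an overflow-reporting multiply: returns
     * nmemb * size and sets *overflow when the product does not fit. */
    static size_t checked_mul(size_t nmemb, size_t size, int *overflow)
    {
        if (size != 0 && nmemb > SIZE_MAX / size) {
            *overflow = 1;
            return 0;
        }
        *overflow = 0;
        return nmemb * size;
    }

    /* Allocation wrapper in the spirit of mpd_alloc(): fail cleanly on
     * overflow instead of calling malloc() with a wrapped size. */
    void *alloc_checked(size_t nmemb, size_t size)
    {
        int overflow;
        size_t req = checked_mul(nmemb, size, &overflow);
        if (overflow) {
            return NULL;
        }
        return malloc(req);
    }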
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/ConstProp/ |
D | overflow-ops.ll |
      4  declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
      5  declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
      6  declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8)
      8  declare {i8, i1} @llvm.sadd.with.overflow.i8(i8, i8)
      9  declare {i8, i1} @llvm.ssub.with.overflow.i8(i8, i8)
     10  declare {i8, i1} @llvm.smul.with.overflow.i8(i8, i8)
     18  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 42, i8 100)
     27  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 120)
     40  %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 2)
     49  %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 6)
     [all …]
|
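The llvm.*.with.overflow.* intrinsics declared above each return a {result, i1} pair, and the ConstProp test feeds them constant operands so that both the value and the overflow bit fold at compile time. At the C level, clang's checked-arithmetic builtins lower to this intrinsic family; a small sketch reproducing the first two uadd cases (the expected flags are stated here as an assumption, not copied from the test's CHECK lines):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t sum;
        /* 42 + 100 = 142 still fits in an unsigned 8-bit value, so the
         * overflow bit stays clear; 142 + 120 = 262 wraps past 255. */
        bool ov1 = __builtin_add_overflow((uint8_t)42,  (uint8_t)100, &sum);
        bool ov2 = __builtin_add_overflow((uint8_t)142, (uint8_t)120, &sum);
        printf("ov1=%d ov2=%d\n", ov1, ov2);  /* expected: ov1=0 ov2=1 */
        return 0;
    }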
/external/llvm/test/Transforms/ConstProp/ |
D | overflow-ops.ll |
      4  declare {i8, i1} @llvm.uadd.with.overflow.i8(i8, i8)
      5  declare {i8, i1} @llvm.usub.with.overflow.i8(i8, i8)
      6  declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8)
      8  declare {i8, i1} @llvm.sadd.with.overflow.i8(i8, i8)
      9  declare {i8, i1} @llvm.ssub.with.overflow.i8(i8, i8)
     10  declare {i8, i1} @llvm.smul.with.overflow.i8(i8, i8)
     18  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 42, i8 100)
     27  %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 120)
     40  %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 2)
     49  %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 6)
     [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | xaluo.ll |
      5  ; Get the actual value of the overflow bit.
     13  %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 %v2)
     25  %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 %v2)
     37  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
     49  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
     62  %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v1, i8 1)
     74  %t = call {i16, i1} @llvm.sadd.with.overflow.i16(i16 %v1, i16 1)
     86  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 1)
     98  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 1)
    116  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 2, i64 %v1)
    [all …]
|
D | smul-with-overflow.ll |
      8  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
     11  br i1 %obit, label %overflow, label %normal
     17  overflow:
     27  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
     30  br i1 %obit, label %overflow, label %normal
     32  overflow:
     45  declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
     50  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
     62  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
     71  declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone
     [all …]
|
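smul-with-overflow.ll extracts the i1 overflow bit of a signed 32-bit multiply and branches to an overflow: or normal: block on it. A hedged C equivalent of that control-flow shape (function and message names are illustrative, not taken from the test):

    #include <stdio.h>

    /* Branch on the overflow bit of a signed 32-bit multiply, mirroring
     * the "br i1 %obit, label %overflow, label %normal" pattern above. */
    int mul_or_report(int v1, int v2)
    {
        int prod;
        if (__builtin_mul_overflow(v1, v2, &prod)) {
            /* "overflow:" block */
            fprintf(stderr, "signed multiply overflowed\n");
            return -1;
        }
        /* "normal:" block */
        return prod;
    }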
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-xaluo.ll |
      5  ; Get the actual value of the overflow bit.
     12  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
     25  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
     38  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
     51  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
     65  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %lsl)
     77  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
     89  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
    101  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
    113  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
      2  ; Verify codegen's don't crash on overflow intrinsics.
      8  %sadd = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
     15  declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
     19  %sadd = tail call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
     26  declare { i16, i1 } @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
     30  %sadd = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
     37  declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
     44  %uadd = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
     51  declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
     55  %uadd = tail call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
     [all …]
|
/external/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
      2  ; Verify codegen's don't crash on overflow intrinsics.
      8  %sadd = tail call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
     15  declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
     19  %sadd = tail call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
     26  declare { i16, i1 } @llvm.sadd.with.overflow.i16(i16, i16) nounwind readnone
     30  %sadd = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
     37  declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
     44  %uadd = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
     51  declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
     55  %uadd = tail call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
     [all …]
|
/external/llvm/test/tools/llvm-profdata/ |
D | overflow-instr.test |
      1  Tests for overflow when merging instrumented profiles.
      3  1- Merge profile having maximum counts with itself and verify overflow detected and saturation occu…
      4  RUN: llvm-profdata merge -instr %p/Inputs/overflow-instr.proftext %p/Inputs/overflow-instr.proftext…
      6  MERGE_OVERFLOW: {{.*}}: overflow: Counter overflow
     11  2- Merge profile having maximum counts by itself and verify no overflow
     12  RUN: llvm-profdata merge -instr %p/Inputs/overflow-instr.proftext -o %t.out 2>&1 | FileCheck %s -ch…
     14  MERGE_NO_OVERFLOW-NOT: {{.*}}: overflow: Counter overflow
|
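The test above merges a profile whose counters are already at the maximum with itself and expects a "Counter overflow" diagnostic plus saturation, while a single copy merges cleanly. A sketch of saturating counter accumulation of that kind (llvm-profdata's internal representation is not shown in these hits, so the types and names here are assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    /* Saturating merge of one counter: clamp at the maximum and report
     * the overflow instead of silently wrapping around. */
    static bool merge_counter(uint64_t *dst, uint64_t src)
    {
        uint64_t sum;
        if (__builtin_add_overflow(*dst, src, &sum)) {
            *dst = UINT64_MAX;   /* saturate */
            return true;         /* caller emits "Counter overflow" */
        }
        *dst = sum;
        return false;
    }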
/external/swiftshader/third_party/llvm-7.0/llvm/test/tools/llvm-profdata/ |
D | overflow-instr.test |
      1  Tests for overflow when merging instrumented profiles.
      3  1- Merge profile having maximum counts with itself and verify overflow detected and saturation occu…
      4  RUN: llvm-profdata merge -instr %p/Inputs/overflow-instr.proftext %p/Inputs/overflow-instr.proftext…
      6  MERGE_OVERFLOW: {{.*}}: overflow: Counter overflow
     11  2- Merge profile having maximum counts by itself and verify no overflow
     12  RUN: llvm-profdata merge -instr %p/Inputs/overflow-instr.proftext -o %t.out 2>&1 | FileCheck %s -ch…
     14  MERGE_NO_OVERFLOW-NOT: {{.*}}: overflow: Counter overflow
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | arm64-xaluo.ll |
      5  ; Get the actual value of the overflow bit.
     12  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
     25  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
     38  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
     51  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
     65  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %lsl)
     77  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
     89  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
    101  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
    113  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
    [all …]
|
/external/llvm/test/Transforms/InstCombine/ |
D | overflow-mul.ll |
     11  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     12  %overflow = icmp ugt i64 %mul64, 4294967295
     14  %retval = zext i1 %overflow to i32
     26  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     27  %overflow = icmp uge i64 %mul64, 4294967296
     29  %retval = zext i1 %overflow to i32
     42  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     43  %overflow = icmp ugt i64 %mul64, 4294967295
     47  %retval = select i1 %overflow, i32 %mul32, i32 111
     60  ; CHECK-NOT: umul.with.overflow.i32
     [all …]
|
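overflow-mul.ll asserts that a multiply widened to i64 and compared against 4294967295 (UINT32_MAX) is rewritten into @llvm.umul.with.overflow.i32. The C-level idiom being recognized looks roughly like the helper below; whether a given compiler performs the rewrite is exactly what the test checks, and the saturation policy here is illustrative rather than what the test's own code returns:

    #include <stdint.h>

    /* Widen the multiply, then treat any product above UINT32_MAX as an
     * overflow -- the pattern InstCombine can turn into a 32-bit
     * multiply-with-overflow (icmp ugt i64 %mul64, 4294967295 above). */
    uint32_t mul_u32_saturating(uint32_t x, uint32_t y)
    {
        uint64_t wide = (uint64_t)x * (uint64_t)y;
        if (wide > UINT32_MAX) {
            return UINT32_MAX;       /* overflow path */
        }
        return (uint32_t)wide;       /* in-range product */
    }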
D | intrinsics.ll |
      3  %overflow.result = type {i8, i1}
      7  declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
      8  declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
      9  declare %ov.result.32 @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
     10  declare %ov.result.32 @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
     11  declare %ov.result.32 @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
     12  declare %ov.result.32 @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
     13  declare %ov.result.32 @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
     14  declare %ov.result.32 @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
     29  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | overflow-mul.ll |
     11  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     12  %overflow = icmp ugt i64 %mul64, 4294967295
     14  %retval = zext i1 %overflow to i32
     26  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     27  %overflow = icmp uge i64 %mul64, 4294967296
     29  %retval = zext i1 %overflow to i32
     42  ; CHECK: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     43  %overflow = icmp ugt i64 %mul64, 4294967295
     47  %retval = select i1 %overflow, i32 %mul32, i32 111
     60  ; CHECK-NOT: umul.with.overflow.i32
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | unschedule-first-call.ll |
     20  %13 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 undef, i64 undef)
     22  %15 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %14, i64 1)
     27  %20 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %18, i64 %19)
     29  %22 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %21, i64 0)
     31  %24 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %23, i64 undef)
     33  %26 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %25, i64 0)
     41  %34 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 0, i64 undef)
     43  %36 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %35, i64 1)
     48  %41 = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %39, i64 %40)
     50  %43 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %42, i64 0)
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | overflow-intrinsic-setcc-fold.ll |
     11  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
     24  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
     37  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
     50  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
     63  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
     76  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
     89  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
    102  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
    115  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    128  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    [all …]
|
D | smul-with-overflow.ll |
      8  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
     11  br i1 %obit, label %overflow, label %normal
     17  overflow:
     27  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
     30  br i1 %obit, label %overflow, label %normal
     32  overflow:
     45  declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
     50  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
     62  %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
     71  declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone
     [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CorrelatedValuePropagation/ |
D | overflows.ll |
      3  declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
      5  declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
      7  declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
      9  declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
     16  ; CHECK-NOT: @llvm.ssub.with.overflow.i32
     17  ; CHECK: @llvm.sadd.with.overflow.i32
     23  %0 = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 2147483647, i32 %y)
     41  %3 = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 -2147483648, i32 %y)
     51  %6 = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
     63  ; CHECK-NOT: @llvm.usub.with.overflow.i32
     [all …]
|
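The CHECK-NOT/CHECK pair above asserts that CorrelatedValuePropagation replaces a with.overflow call whose flag is provably clear (here a checked subtraction from INT_MAX, 2147483647) with plain arithmetic. A C sketch of the kind of range reasoning involved; the guard condition below is an assumption for illustration, since the test's actual guards are not visible in these hits:

    #include <limits.h>
    #include <stdbool.h>

    /* With y known non-negative, INT_MAX - y lies in [0, INT_MAX], so the
     * checked subtraction can never overflow and the check is dead. */
    int headroom(int y)
    {
        if (y >= 0) {
            int diff;
            bool ov = __builtin_sub_overflow(INT_MAX, y, &diff);
            return ov ? -1 : diff;   /* ov is provably false on this path */
        }
        return 0;
    }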
/external/llvm/test/Transforms/GVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
      8  %uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
     20  %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
     32  %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
     44  %sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
     56  %ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
     68  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
     79  declare %0 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
     80  declare %0 @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
     81  declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
     82  declare %0 @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
     [all …]
|
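The GVN test above checks that extracting the value half of a *.with.overflow call is recognized as the same value as the plain binary operation on the identical operands, so the redundant computation can be eliminated. A C sketch of that redundancy (names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* The checked add already produces a + b; the separate plain addition
     * computes the same (wrapping) value, so a GVN-style pass can reuse
     * 'sum' instead of keeping both computations. */
    uint64_t add_and_flag(uint64_t a, uint64_t b, bool *ov)
    {
        uint64_t sum;
        *ov = __builtin_add_overflow(a, b, &sum);
        uint64_t plain = a + b;   /* redundant with 'sum' */
        return plain;
    }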
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
      8  %uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
     20  %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
     32  %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
     44  %sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
     56  %ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
     68  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
     79  declare %0 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
     80  declare %0 @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
     81  declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
     82  declare %0 @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
     [all …]
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | fwd_txfm_sse2.h |
    176  int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);  in k_check_epi32_overflow_8() local
    177  if (!overflow) {  in k_check_epi32_overflow_8()
    178  overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);  in k_check_epi32_overflow_8()
    180  return overflow;  in k_check_epi32_overflow_8()
    190  int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero);  in k_check_epi32_overflow_16() local
    191  if (!overflow) {  in k_check_epi32_overflow_16()
    192  overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero);  in k_check_epi32_overflow_16()
    193  if (!overflow) {  in k_check_epi32_overflow_16()
    194  overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, zero);  in k_check_epi32_overflow_16()
    195  if (!overflow) {  in k_check_epi32_overflow_16()
    [all …]
|
/external/one-true-awk/testdir/ |
D | T.overflow |
      1  echo T.overflow: test some overflow conditions
     11  cmp -s foo1 foo2 || echo 'BAD: T.overflow record 1'
     19  cmp -s foo1 foo2 || echo 'BAD: T.overflow abcdef'
     31  cmp -s foo1 foo2 || echo 'BAD: T.overflow -mr -mf set $1'
     44  cmp -s foo1 foo2 || echo 'BAD: T.overflow -mr -mf NF'
     53  test -r core && echo 1>&2 "BAD: T.overflow too long char class dropped core"
     64  cmp -s foo1 foo2 || echo 'BAD: T.overflow huge sprintfs'
     78  cmp -s foo1 foo2 || echo 'BAD: T.overflow big array'
     82  grep "out of range field" foo >/dev/null || echo 1>&2 "BAD: T.overflow \$400000"
     86  ls /tmp/awktestfoo* | grep '1000' >/dev/null || echo 1>&2 "BAD: T.overflow openfiles"
|
/external/arm-neon-tests/ |
D | ref-rvct-all.txt |
    2243  VRSHL/VRSHLQ (checking round_const overflow: shift by -1) output:
    2269  VRSHL/VRSHLQ (checking round_const overflow: shift by -3) output:
    4639  VABAL test intermediate overflow output:
    6207  VRSHR_N (overflow test: max shift amount, positive input) output:
    6233  VRSHR_N (overflow test: shift by 1, with negative input) output:
    6259  VRSHR_N (overflow test: shift by 3, positive input) output:
    6285  VRSHR_N (overflow test: shift by 1, with negative input) output:
    6311  VRSHR_N (overflow test: shift by 3, with negative input) output:
    6363  VRSRA_N (checking overflow: shift by 1, positive input) output:
    6389  VRSRA_N (checking overflow: shift by 3, positive input) output:
    [all …]
|
/external/libgsm/add-test/ |
D | add_test.dta |
     36  ' positive overflow
     76  ' positive overflow
    115  ' positive overflow
    155  ' positive overflow
    184  ' overflow
    212  ' overflow
    248  ' overflow
    289  ' overflow
    311  ' overflow
    359  ' overflow
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/NewGVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
      8  %uadd = tail call %0 @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
     21  %usub = tail call %0 @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
     34  %umul = tail call %0 @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
     47  %sadd = tail call %0 @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
     60  %ssub = tail call %0 @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
     73  %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
     85  declare %0 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
     86  declare %0 @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
     87  declare %0 @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
     88  declare %0 @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
     [all …]
|