/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | shift-i128.ll |
     8  define void @test_lshr_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
    10    %0 = lshr i128 %x, %a
    11    store i128 %0, i128* %r, align 16
    15  define void @test_ashr_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
    17    %0 = ashr i128 %x, %a
    18    store i128 %0, i128* %r, align 16
    22  define void @test_shl_i128(i128 %x, i128 %a, i128* nocapture %r) nounwind {
    24    %0 = shl i128 %x, %a
    25    store i128 %0, i128* %r, align 16
    29  define void @test_lshr_i128_outofrange(i128 %x, i128* nocapture %r) nounwind {
    [all …]
|
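The file above pins down how a 64-bit x86 target, which has no native 128-bit shift, must split an lshr i128 into two word-sized shifts. As a hypothetical, self-contained sketch (not taken from the test file; all names are illustrative), the expansion amounts to the IR below. The (hi << 1) << (63 - %amt) step stands in for hi << (64 - %amt) so that an amount of 0 never produces an out-of-range shift by 64:

define i128 @lshr_i128_expanded(i64 %lo, i64 %hi, i64 %amt) {
entry:
  %big = icmp uge i64 %amt, 64
  br i1 %big, label %ge64, label %lt64

lt64:                                        ; 0 <= %amt < 64
  %lo.shr = lshr i64 %lo, %amt
  %hi.shl1 = shl i64 %hi, 1
  %inv = sub i64 63, %amt
  %carry = shl i64 %hi.shl1, %inv            ; == hi << (64 - %amt), and 0 when %amt == 0
  %lo.res = or i64 %lo.shr, %carry
  %hi.res = lshr i64 %hi, %amt
  br label %done

ge64:                                        ; 64 <= %amt < 128: only the high word survives
  %amt.m64 = sub i64 %amt, 64
  %lo.only = lshr i64 %hi, %amt.m64
  br label %done

done:
  %r.lo = phi i64 [ %lo.res, %lt64 ], [ %lo.only, %ge64 ]
  %r.hi = phi i64 [ %hi.res, %lt64 ], [ 0, %ge64 ]
  %r.lo.x = zext i64 %r.lo to i128
  %r.hi.x = zext i64 %r.hi to i128
  %r.hi.sh = shl i128 %r.hi.x, 64
  %r = or i128 %r.hi.sh, %r.lo.x
  ret i128 %r
}

ashr differs only in using arithmetic shifts and sign-filling the high word; shl is the mirror image.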
D | muloti.ll |
     3  %1 = type { i128, i1 }
     8    %tmp16 = zext i64 %a.coerce0 to i128
     9    %tmp11 = zext i64 %a.coerce1 to i128
    10    %tmp12 = shl nuw i128 %tmp11, 64
    11    %ins14 = or i128 %tmp12, %tmp16
    12    %tmp6 = zext i64 %b.coerce0 to i128
    13    %tmp3 = zext i64 %b.coerce1 to i128
    14    %tmp4 = shl nuw i128 %tmp3, 64
    15    %ins = or i128 %tmp4, %tmp6
    16    %0 = tail call %1 @llvm.smul.with.overflow.i128(i128 %ins14, i128 %ins)
    [all …]
|
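The %1 = type { i128, i1 } line is the result type of @llvm.smul.with.overflow.i128: the 128-bit product plus an i1 that is set when the signed multiply wrapped. A minimal, hypothetical caller (the function is illustrative, not part of the test) looks like this:

declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128)

define i128 @mul_or_zero(i128 %a, i128 %b) {
  %res = call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %a, i128 %b)
  %prod = extractvalue { i128, i1 } %res, 0
  %ovf = extractvalue { i128, i1 } %res, 1
  %r = select i1 %ovf, i128 0, i128 %prod    ; yield 0 when the product overflowed
  ret i128 %r
}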
D | addcarry.ll |
     4  define i128 @add128(i128 %a, i128 %b) nounwind {
    13    %0 = add i128 %a, %b
    14    ret i128 %0
    44    %0 = zext i64 %a to i128
    45    %1 = zext i64 %b to i128
    46    %2 = add i128 %1, %0
    47    %3 = zext i64 %c to i128
    48    %4 = shl i128 %3, 64
    49    %5 = add i128 %4, %2
    50    %6 = lshr i128 %5, 64
    [all …]
|
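Lines 44-50 are the idiom the backend is expected to collapse into an add/adc pair: widen to i128, add, and pull the carry out with a 64-bit shift. The same computation written with the explicit i64 carry intrinsic, as a hedged sketch with illustrative names:

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define void @add128_parts(i64 %alo, i64 %ahi, i64 %blo, i64 %bhi, i64* %rlo, i64* %rhi) {
  %lo = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %alo, i64 %blo)
  %lo.sum = extractvalue { i64, i1 } %lo, 0
  %carry = extractvalue { i64, i1 } %lo, 1
  %carry.w = zext i1 %carry to i64
  %hi.tmp = add i64 %ahi, %bhi
  %hi.sum = add i64 %hi.tmp, %carry.w        ; high words plus the low word's carry-out
  store i64 %lo.sum, i64* %rlo
  store i64 %hi.sum, i64* %rhi
  ret void
}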
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | vec_add_sub_quadword.ll |
     6  define <1 x i128> @out_of_bounds_insertelement(<1 x i128> %x, i128 %val) nounwind {
     7    %tmpvec = insertelement <1 x i128> <i128 0>, i128 %val, i32 1
     8    %result = add <1 x i128> %x, %tmpvec
     9    ret <1 x i128> %result
    15  define <1 x i128> @test_add(<1 x i128> %x, <1 x i128> %y) nounwind {
    16    %result = add <1 x i128> %x, %y
    17    ret <1 x i128> %result
    22  define <1 x i128> @increment_by_one(<1 x i128> %x) nounwind {
    23    %result = add <1 x i128> %x, <i128 1>
    24    ret <1 x i128> %result
    [all …]
|
D | shift128.ll |
    16  define i128 @lshr(i128 %x, i128 %y) {
    17    %r = lshr i128 %x, %y
    18    ret i128 %r
    31  define i128 @ashr(i128 %x, i128 %y) {
    32    %r = ashr i128 %x, %y
    33    ret i128 %r
    45  define i128 @shl(i128 %x, i128 %y) {
    46    %r = shl i128 %x, %y
    47    ret i128 %r
    59  define i128 @shl_v1i128(i128 %arg, i128 %amt) local_unnamed_addr #0 {
    [all …]
|
/external/llvm/test/CodeGen/PowerPC/ |
D | vec_add_sub_quadword.ll |
     6  define <1 x i128> @test_add(<1 x i128> %x, <1 x i128> %y) nounwind {
     7    %result = add <1 x i128> %x, %y
     8    ret <1 x i128> %result
    13  define <1 x i128> @increment_by_one(<1 x i128> %x) nounwind {
    14    %result = add <1 x i128> %x, <i128 1>
    15    ret <1 x i128> %result
    20  define <1 x i128> @increment_by_val(<1 x i128> %x, i128 %val) nounwind {
    21    %tmpvec = insertelement <1 x i128> <i128 0>, i128 %val, i32 0
    22    %tmpvec2 = insertelement <1 x i128> %tmpvec, i128 %val, i32 1
    23    %result = add <1 x i128> %x, %tmpvec2
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/ |
D | int-sub-05.ll |
     6  declare i128 *@foo()
     9  define void @f1(i128 *%ptr, i64 %high, i64 %low) {
    14    %a = load i128, i128 *%ptr
    15    %highx = zext i64 %high to i128
    16    %lowx = zext i64 %low to i128
    17    %bhigh = shl i128 %highx, 64
    18    %b = or i128 %bhigh, %lowx
    19    %sub = sub i128 %a, %b
    20    store i128 %sub, i128 *%ptr
    30    %bptr = inttoptr i64 %addr to i128 *
    [all …]
|
D | int-add-08.ll |
     6  declare i128 *@foo()
     9  define void @f1(i128 *%ptr) {
    14    %value = load i128, i128 *%ptr
    15    %add = add i128 %value, %value
    16    store i128 %add, i128 *%ptr
    22  define void @f2(i128 *%aptr, i64 %addr) {
    27    %bptr = inttoptr i64 %addr to i128 *
    28    %a = load volatile i128, i128 *%aptr
    29    %b = load i128, i128 *%bptr
    30    %add = add i128 %a, %b
    [all …]
|
D | cmpxchg-06.ll |
     6  define i128 @f1(i128 %cmp, i128 %swap, i128 *%src) {
    16    %pairval = cmpxchg i128 *%src, i128 %cmp, i128 %swap seq_cst seq_cst
    17    %val = extractvalue { i128, i1 } %pairval, 0
    18    ret i128 %val
    22  define i128 @f2(i128 %cmp, i128 %swap, i128 *%src) {
    26    %ptr = getelementptr i128, i128 *%src, i128 32767
    27    %pairval = cmpxchg i128 *%ptr, i128 %cmp, i128 %swap seq_cst seq_cst
    28    %val = extractvalue { i128, i1 } %pairval, 0
    29    ret i128 %val
    34  define i128 @f3(i128 %cmp, i128 %swap, i128 *%src) {
    [all …]
|
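f1-f3 exercise SystemZ's 128-bit compare double and swap (CDSG) across its addressing range. For context, here is a minimal, hypothetical retry loop of the kind frontends (and atomicrmw expansion) build on top of cmpxchg i128; a production version would make the initial load atomic as well:

define i128 @atomic_inc128(i128 *%p) {
entry:
  %init = load i128, i128 *%p                ; sketch only: ought to be an atomic load
  br label %loop

loop:
  %old = phi i128 [ %init, %entry ], [ %seen, %loop ]
  %new = add i128 %old, 1
  %pair = cmpxchg i128 *%p, i128 %old, i128 %new seq_cst seq_cst
  %seen = extractvalue { i128, i1 } %pair, 0
  %ok = extractvalue { i128, i1 } %pair, 1
  br i1 %ok, label %done, label %loop

done:
  ret i128 %old                              ; fetch-and-add semantics: the pre-increment value
}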
D | int-sub-06.ll |
     7  define void @f1(i128 *%aptr, i32 %b) {
    12    %a = load i128, i128 *%aptr
    13    %xor = xor i128 %a, 127
    14    %bext = zext i32 %b to i128
    15    %sub = sub i128 %xor, %bext
    16    store i128 %sub, i128 *%aptr
    21  define void @f2(i128 *%aptr, i64 %b) {
    26    %a = load i128, i128 *%aptr
    27    %xor = xor i128 %a, 127
    29    %bext = zext i32 %trunc to i128
    [all …]
|
D | int-add-10.ll |
     7  define void @f1(i128 *%aptr, i32 %b) {
    12    %a = load i128, i128 *%aptr
    13    %xor = xor i128 %a, 127
    14    %bext = zext i32 %b to i128
    15    %add = add i128 %xor, %bext
    16    store i128 %add, i128 *%aptr
    21  define void @f2(i128 *%aptr, i64 %b) {
    26    %a = load i128, i128 *%aptr
    27    %xor = xor i128 %a, 127
    29    %bext = zext i32 %trunc to i128
    [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | int-sub-05.ll |
     6  declare i128 *@foo()
     9  define void @f1(i128 *%ptr, i64 %high, i64 %low) {
    14    %a = load i128 , i128 *%ptr
    15    %highx = zext i64 %high to i128
    16    %lowx = zext i64 %low to i128
    17    %bhigh = shl i128 %highx, 64
    18    %b = or i128 %bhigh, %lowx
    19    %sub = sub i128 %a, %b
    20    store i128 %sub, i128 *%ptr
    30    %bptr = inttoptr i64 %addr to i128 *
    [all …]
|
D | int-add-08.ll |
     6  declare i128 *@foo()
     9  define void @f1(i128 *%ptr) {
    14    %value = load i128 , i128 *%ptr
    15    %add = add i128 %value, %value
    16    store i128 %add, i128 *%ptr
    22  define void @f2(i128 *%aptr, i64 %addr) {
    27    %bptr = inttoptr i64 %addr to i128 *
    28    %a = load volatile i128 , i128 *%aptr
    29    %b = load i128 , i128 *%bptr
    30    %add = add i128 %a, %b
    [all …]
|
D | int-sub-06.ll |
     7  define void @f1(i128 *%aptr, i32 %b) {
    12    %a = load i128 , i128 *%aptr
    13    %xor = xor i128 %a, 127
    14    %bext = zext i32 %b to i128
    15    %sub = sub i128 %xor, %bext
    16    store i128 %sub, i128 *%aptr
    21  define void @f2(i128 *%aptr, i64 %b) {
    26    %a = load i128 , i128 *%aptr
    27    %xor = xor i128 %a, 127
    29    %bext = zext i32 %trunc to i128
    [all …]
|
D | int-add-10.ll |
     7  define void @f1(i128 *%aptr, i32 %b) {
    12    %a = load i128 , i128 *%aptr
    13    %xor = xor i128 %a, 127
    14    %bext = zext i32 %b to i128
    15    %add = add i128 %xor, %bext
    16    store i128 %add, i128 *%aptr
    21  define void @f2(i128 *%aptr, i64 %b) {
    26    %a = load i128 , i128 *%aptr
    27    %xor = xor i128 %a, 127
    29    %bext = zext i32 %trunc to i128
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/WebAssembly/ |
D | i128.ll |
     8  declare i128 @llvm.ctlz.i128(i128, i1)
     9  declare i128 @llvm.cttz.i128(i128, i1)
    10  declare i128 @llvm.ctpop.i128(i128)
    20  define i128 @add128(i128 %x, i128 %y) {
    21    %a = add i128 %x, %y
    22    ret i128 %a
    33  define i128 @sub128(i128 %x, i128 %y) {
    34    %a = sub i128 %x, %y
    35    ret i128 %a
    43  define i128 @mul128(i128 %x, i128 %y) {
    [all …]
|
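The declares at the top are LLVM's generic bit-counting intrinsics instantiated at i128; the i1 flag on ctlz/cttz chooses whether a zero input is undefined. A small hypothetical caller, passing i1 false to request the defined result (128) for a zero input:

declare i128 @llvm.ctpop.i128(i128)
declare i128 @llvm.ctlz.i128(i128, i1)

define i128 @count_bits(i128 %x) {
  %pop = call i128 @llvm.ctpop.i128(i128 %x)
  %lz = call i128 @llvm.ctlz.i128(i128 %x, i1 false)   ; defined (128) when %x == 0
  %r = add i128 %pop, %lz
  ret i128 %r
}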
/external/llvm/test/CodeGen/WebAssembly/ |
D | i128.ll |
     8  declare i128 @llvm.ctlz.i128(i128, i1)
     9  declare i128 @llvm.cttz.i128(i128, i1)
    10  declare i128 @llvm.ctpop.i128(i128)
    20  define i128 @add128(i128 %x, i128 %y) {
    21    %a = add i128 %x, %y
    22    ret i128 %a
    33  define i128 @sub128(i128 %x, i128 %y) {
    34    %a = sub i128 %x, %y
    35    ret i128 %a
    43  define i128 @mul128(i128 %x, i128 %y) {
    [all …]
|
/external/llvm/test/CodeGen/Generic/ |
D | i128-addsub.ll |
     5    %tmp1 = zext i64 %AL to i128           ; <i128> [#uses=1]
     6    %tmp23 = zext i64 %AH to i128          ; <i128> [#uses=1]
     7    %tmp4 = shl i128 %tmp23, 64            ; <i128> [#uses=1]
     8    %tmp5 = or i128 %tmp4, %tmp1           ; <i128> [#uses=1]
     9    %tmp67 = zext i64 %BL to i128          ; <i128> [#uses=1]
    10    %tmp89 = zext i64 %BH to i128          ; <i128> [#uses=1]
    11    %tmp11 = shl i128 %tmp89, 64           ; <i128> [#uses=1]
    12    %tmp12 = or i128 %tmp11, %tmp67        ; <i128> [#uses=1]
    13    %tmp15 = add i128 %tmp12, %tmp5        ; <i128> [#uses=2]
    14    %tmp1617 = trunc i128 %tmp15 to i64    ; <i64> [#uses=1]
    [all …]
|
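The [#uses=N] comments date this file to LLVM's early auto-annotated IR. The body concatenates two i64 halves into an i128, adds, and truncates out the low half. A hedged sketch completing the round trip (illustrative names, not from the file; the high half returns via an lshr by 64 plus trunc, the mirror image of the zext/shl/or construction):

define void @add_halves(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
  %a.lo = zext i64 %AL to i128
  %a.hi = zext i64 %AH to i128
  %a.hi.sh = shl i128 %a.hi, 64
  %a = or i128 %a.hi.sh, %a.lo
  %b.lo = zext i64 %BL to i128
  %b.hi = zext i64 %BH to i128
  %b.hi.sh = shl i128 %b.hi, 64
  %b = or i128 %b.hi.sh, %b.lo
  %sum = add i128 %a, %b
  %lo = trunc i128 %sum to i64
  %hi.sh = lshr i128 %sum, 64
  %hi = trunc i128 %hi.sh to i64
  store i64 %lo, i64* %RL
  store i64 %hi, i64* %RH
  ret void
}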
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Generic/ |
D | i128-addsub.ll |
     5    %tmp1 = zext i64 %AL to i128           ; <i128> [#uses=1]
     6    %tmp23 = zext i64 %AH to i128          ; <i128> [#uses=1]
     7    %tmp4 = shl i128 %tmp23, 64            ; <i128> [#uses=1]
     8    %tmp5 = or i128 %tmp4, %tmp1           ; <i128> [#uses=1]
     9    %tmp67 = zext i64 %BL to i128          ; <i128> [#uses=1]
    10    %tmp89 = zext i64 %BH to i128          ; <i128> [#uses=1]
    11    %tmp11 = shl i128 %tmp89, 64           ; <i128> [#uses=1]
    12    %tmp12 = or i128 %tmp11, %tmp67        ; <i128> [#uses=1]
    13    %tmp15 = add i128 %tmp12, %tmp5        ; <i128> [#uses=2]
    14    %tmp1617 = trunc i128 %tmp15 to i64    ; <i64> [#uses=1]
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | cmpxchg-i128-i1.ll |
     3  define i1 @try_cmpxchg(i128* %addr, i128 %desired, i128 %new) {
     9    %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
    10    %success = extractvalue { i128, i1 } %pair, 1
    14  define void @cmpxchg_flow(i128* %addr, i128 %desired, i128 %new) {
    20    %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
    21    %success = extractvalue { i128, i1 } %pair, 1
    34  define i1 @cmpxchg_arithcmp(i128* %addr, i128 %desired, i128 %new) {
    39    %pair = cmpxchg i128* %addr, i128 %desired, i128 %new seq_cst seq_cst
    40    %oldval = extractvalue { i128, i1 } %pair, 0
    41    %success = icmp sge i128 %oldval, %desired
    [all …]
|
D | muloti.ll |
     3  %1 = type { i128, i1 }
     8    %tmp16 = zext i64 %a.coerce0 to i128
     9    %tmp11 = zext i64 %a.coerce1 to i128
    10    %tmp12 = shl nuw i128 %tmp11, 64
    11    %ins14 = or i128 %tmp12, %tmp16
    12    %tmp6 = zext i64 %b.coerce0 to i128
    13    %tmp3 = zext i64 %b.coerce1 to i128
    14    %tmp4 = shl nuw i128 %tmp3, 64
    15    %ins = or i128 %tmp4, %tmp6
    16    %0 = tail call %1 @llvm.smul.with.overflow.i128(i128 %ins14, i128 %ins)
    [all …]
|
D | atomic128.ll |
     3  @var = global i128 0
     5  define i128 @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
    18    %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
    19    %val = extractvalue { i128, i1 } %pair, 0
    20    ret i128 %val
    23  define void @fetch_and_nand(i128* %p, i128 %bits) {
    43    %val = atomicrmw nand i128* %p, i128 %bits release
    44    store i128 %val, i128* @var, align 16
    48  define void @fetch_and_or(i128* %p, i128 %bits) {
    67    %val = atomicrmw or i128* %p, i128 %bits seq_cst
    [all …]
|
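atomic128.ll checks that i128 atomicrmw operations lower to a cmpxchg16b retry loop on x86-64. As a hypothetical sketch, the nand case expands to the same retry shape as the compare-and-swap loop shown earlier, with the nand computed in the loop body:

define i128 @expand_nand(i128* %p, i128 %bits) {
entry:
  %init = load i128, i128* %p                ; sketch only: ought to be an atomic load
  br label %loop

loop:
  %old = phi i128 [ %init, %entry ], [ %seen, %loop ]
  %tmp = and i128 %old, %bits
  %new = xor i128 %tmp, -1                   ; nand: ~(old & bits)
  %pair = cmpxchg i128* %p, i128 %old, i128 %new release monotonic
  %seen = extractvalue { i128, i1 } %pair, 0
  %ok = extractvalue { i128, i1 } %pair, 1
  br i1 %ok, label %done, label %loop

done:
  ret i128 %old
}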
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | add_i128.ll |
     9  …efine amdgpu_kernel void @test_i128_vreg(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noali…
    11    %a_ptr = getelementptr i128, i128 addrspace(1)* %inA, i32 %tid
    12    %b_ptr = getelementptr i128, i128 addrspace(1)* %inB, i32 %tid
    13    %a = load i128, i128 addrspace(1)* %a_ptr
    14    %b = load i128, i128 addrspace(1)* %b_ptr
    15    %result = add i128 %a, %b
    16    store i128 %result, i128 addrspace(1)* %out
    26  define amdgpu_kernel void @sgpr_operand(i128 addrspace(1)* noalias %out, i128 addrspace(1)* noalias…
    27    %foo = load i128, i128 addrspace(1)* %in, align 8
    28    %result = add i128 %foo, %a
    [all …]
|
D | shift-and-i128-ubfe.ll |
    14  define amdgpu_kernel void @v_uextract_bit_31_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) …
    16    %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    17    %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    18    %ld.64 = load i128, i128 addrspace(1)* %in.gep
    19    %srl = lshr i128 %ld.64, 31
    20    %bit = and i128 %srl, 1
    21    store i128 %bit, i128 addrspace(1)* %out.gep
    37  define amdgpu_kernel void @v_uextract_bit_63_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) …
    39    %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    40    %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | shift-and-i128-ubfe.ll |
    14  define void @v_uextract_bit_31_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
    16    %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    17    %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    18    %ld.64 = load i128, i128 addrspace(1)* %in.gep
    19    %srl = lshr i128 %ld.64, 31
    20    %bit = and i128 %srl, 1
    21    store i128 %bit, i128 addrspace(1)* %out.gep
    36  define void @v_uextract_bit_63_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
    38    %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    39    %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    [all …]
|