/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/SystemZ/ |
D | scalar-cmp-cmp-log-sel.ll |
      6  i8 %val5, i8 %val6) {
     10  %sel = select i1 %and, i8 %val5, i8 %val6
     17  ; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6
     21  i16 %val5, i16 %val6) {
     25  %sel = select i1 %and, i16 %val5, i16 %val6
     32  ; CHECK: cost of 1 for instruction: %sel = select i1 %and, i16 %val5, i16 %val6
     36  i32 %val5, i32 %val6) {
     40  %sel = select i1 %and, i32 %val5, i32 %val6
     47  ; CHECK: cost of 1 for instruction: %sel = select i1 %and, i32 %val5, i32 %val6
     51  i64 %val5, i64 %val6) {
     [all …]
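Note: these matches come from a SystemZ cost-model test that chains two compares, a logical op, and a select; the CHECK lines assert that each such select is costed at 1. A minimal sketch of the shape the fragments above suggest (the function name and the icmp predicates are assumptions, not taken from the file):

    ; Sketch only: @cmp_cmp_and_sel and the eq predicates are assumed.
    define i8 @cmp_cmp_and_sel(i8 %val1, i8 %val2, i8 %val3, i8 %val4,
                               i8 %val5, i8 %val6) {
      %cmp0 = icmp eq i8 %val1, %val2
      %cmp1 = icmp eq i8 %val3, %val4
      %and = and i1 %cmp0, %cmp1
      %sel = select i1 %and, i8 %val5, i8 %val6
      ret i8 %sel
    }
    ; CHECK: cost of 1 for instruction: %sel = select i1 %and, i8 %val5, i8 %val6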
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SystemZ/ |
D | vec-cmp-cmp-logic-select.ll |
      8  …0(<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i8> %val5, <2 x i8> %val6) {
     19  %sel = select <2 x i1> %and, <2 x i8> %val5, <2 x i8> %val6
     23  …<2 x i8> %val1, <2 x i8> %val2, <2 x i8> %val3, <2 x i8> %val4, <2 x i16> %val5, <2 x i16> %val6) {
     35  %sel = select <2 x i1> %and, <2 x i16> %val5, <2 x i16> %val6
     39  …i8> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i8> %val5, <16 x i8> %val6) {
     52  %sel = select <16 x i1> %and, <16 x i8> %val5, <16 x i8> %val6
     56  …> %val1, <16 x i8> %val2, <16 x i16> %val3, <16 x i16> %val4, <16 x i16> %val5, <16 x i16> %val6) {
     75  %sel = select <16 x i1> %and, <16 x i16> %val5, <16 x i16> %val6
     79  …x i8> %val1, <32 x i8> %val2, <32 x i8> %val3, <32 x i8> %val4, <32 x i8> %val5, <32 x i8> %val6) {
     98  %sel = select <32 x i1> %and, <32 x i8> %val5, <32 x i8> %val6
     [all …]
|
D | spill-01.ll |
     52  %val5 = load i32, i32 *%ptr5
     62  store i32 %val5, i32 *%ptr5
     90  %val5 = load i32, i32 *%ptr5
    102  store i32 %val5, i32 *%ptr5
    132  %val5 = load i64, i64 *%ptr5
    144  store i64 %val5, i64 *%ptr5
    178  %val5 = load float, float *%ptr5
    191  store float %val5, float *%ptr5
    222  %val5 = load double, double *%ptr5
    235  store double %val5, double *%ptr5
    [all …]
|
/external/llvm/test/CodeGen/SystemZ/ |
D | spill-01.ll |
     52  %val5 = load i32 , i32 *%ptr5
     62  store i32 %val5, i32 *%ptr5
     90  %val5 = load i32 , i32 *%ptr5
    102  store i32 %val5, i32 *%ptr5
    132  %val5 = load i64 , i64 *%ptr5
    144  store i64 %val5, i64 *%ptr5
    178  %val5 = load float , float *%ptr5
    191  store float %val5, float *%ptr5
    222  %val5 = load double , double *%ptr5
    235  store double %val5, double *%ptr5
    [all …]
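Note: both spill-01.ll listings above (the external/llvm copy and the llvm-7.0 copy vendored in SwiftShader) repeat one shape per element type (i32, i64, float, double): load a batch of values, make a call that clobbers registers, then store the values back, forcing the register allocator to spill. A minimal sketch of that shape (@foo and the %ptrN names are assumptions, and the real functions load many more values):

    ; Sketch of the spill pattern; @foo and the pointer names are assumed.
    declare void @foo()

    define void @f1(i32 *%ptr0, i32 *%ptr5) {
      %val0 = load i32, i32 *%ptr0
      %val5 = load i32, i32 *%ptr5
      call void @foo()              ; clobbers registers, forcing spills
      store i32 %val0, i32 *%ptr0
      store i32 %val5, i32 *%ptr5
      ret void
    }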
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | floatdp_2source.ll |
     19  %val5 = fsub float %val4, %val2
     22  store volatile float %val5, float* @varfloat
     47  %val5 = fsub double %val4, %val2
     50  store volatile double %val5, double* @vardouble
|
D | extract.ll |
      7  %val5 = or i64 %left, %right
      9  ret i64 %val5
     16  %val5 = or i32 %left, %right
     18  ret i32 %val5
|
D | arm64-extract.ll |
      7  %val5 = or i64 %left, %right
      9  ret i64 %val5
     16  %val5 = or i32 %left, %right
     18  ret i32 %val5
|
D | loop-micro-op-buffer-size-t99.ll |
     24  ; CHECK: %val5 = add nuw nsw i32 %counter, 10
     67  %val5 = add i32 %counter, 10
     69  store i32 %val5, i32* %xptr5
    106  %val5.2 = add i32 %counter.2, 10
    108  store i32 %val5.2, i32* %xptr5.2
    110  store i32 %val5.2, i32* %xptr6.2
|
D | addsub-shifted.ll |
     42  %val5 = add i64 %lhs64, %shift5
     43  store volatile i64 %val5, i64* @var64
    105  %val5 = add i64 %lhs64, %shift5
    106  store volatile i64 %val5, i64* @var64
    164  %val5 = add i64 %lhs64, %shift5
    165  store volatile i64 %val5, i64* @var64
    282  %val5 = sub i64 0, %shift5
    283  %tst5 = icmp ne i64 %lhs64, %val5
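Note: the addsub-shifted.ll matches (the same file appears again under external/llvm below) pair an add or sub of a pre-shifted operand with a volatile store or a compare, so FileCheck can verify that the shift folds into AArch64's ADD/SUB (shifted register) forms. A minimal sketch, with a hypothetical shift kind and amount:

    ; Sketch only: the shl and its amount (18) are assumed for illustration.
    @var64 = global i64 0

    define void @add_shifted(i64 %lhs64, i64 %rhs64) {
      %shift5 = shl i64 %rhs64, 18
      %val5 = add i64 %lhs64, %shift5      ; expect: add xN, xM, xK, lsl #18
      store volatile i64 %val5, i64* @var64
      ret void
    }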
|
D | regress-w29-reserved-with-fp.ll |
     14  %val5 = load volatile i32, i32* @var
     29  store volatile i32 %val5, i32* @var
|
/external/llvm/test/CodeGen/AArch64/ |
D | floatdp_2source.ll |
     19  %val5 = fsub float %val4, %val2
     22  store volatile float %val5, float* @varfloat
     47  %val5 = fsub double %val4, %val2
     50  store volatile double %val5, double* @vardouble
|
D | arm64-extract.ll |
      8  %val5 = or i64 %left, %right
     10  ret i64 %val5
     17  %val5 = or i32 %left, %right
     19  ret i32 %val5
|
D | extract.ll |
      7  %val5 = or i64 %left, %right
      9  ret i64 %val5
     16  %val5 = or i32 %left, %right
     18  ret i32 %val5
|
D | addsub-shifted.ll |
     42  %val5 = add i64 %lhs64, %shift5
     43  store volatile i64 %val5, i64* @var64
    105  %val5 = add i64 %lhs64, %shift5
    106  store volatile i64 %val5, i64* @var64
    164  %val5 = add i64 %lhs64, %shift5
    165  store volatile i64 %val5, i64* @var64
    282  %val5 = sub i64 0, %shift5
    283  %tst5 = icmp ne i64 %lhs64, %val5
|
D | regress-w29-reserved-with-fp.ll |
     14  %val5 = load volatile i32, i32* @var
     29  store volatile i32 %val5, i32* @var
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopUnroll/X86/ |
D | store_cost.ll |
     47  %val5 = add i32 %counter, 10
     49  store i32 %val5, i32* %xptr5
     86  %val5.2 = add i32 %counter.2, 10
     88  store i32 %val5.2, i32* %xptr5.2
     90  store i32 %val5.2, i32* %xptr6.2
|
/external/swiftshader/third_party/LLVM/test/Transforms/SimplifyLibCalls/ |
D | ToAscii.ll |
     12  %val5 = call i32 @toascii( i32 255 )  ; <i32> [#uses=1]
     16  %rslt3 = add i32 %val5, %val6         ; <i32> [#uses=1]
|
D | FFS.ll |
     20  %val5 = call i32 @ffsll( i64 17179869184 )  ; <i32> [#uses=1]
     24  %rslt3 = add i32 %val5, %val6               ; <i32> [#uses=1]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/X86/ |
D | sink-addrmode-two-phi.ll |
     13  %val2 = phi i64 * [ null, %entry ], [ %val5, %exit ]
     24  %val5 = phi i64 * [ undef, %slowpath ], [ %val2, %start ]
|
/external/llvm/test/CodeGen/ARM/ |
D | inlineasm-64bit.ll |
     13  define void @multi_writes(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6…
     37  …"r,r,r,r,r,r,r"(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
     39  …r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
     40  …r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
|
D | gpr-paired-spill-thumbinst.ll |
     13  %val5 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
     26  store volatile i64 %val5, i64* %addr
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | inlineasm-64bit.ll |
     13  define void @multi_writes(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6…
     37  …"r,r,r,r,r,r,r"(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
     39  …r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
     40  …r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
|
D | gpr-paired-spill-thumbinst.ll |
     13  %val5 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
     26  store volatile i64 %val5, i64* %addr
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/RISCV/ |
D | select-cc.ll |
     67  %val5 = load volatile i32, i32* %b
     68  %tst3 = icmp ugt i32 %val4, %val5
     69  %val6 = select i1 %tst3, i32 %val4, i32 %val5
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | ds_read2_offset_order.ll |
     37  %val5 = load float, float addrspace(3)* %ptr5
     38  %add5 = fadd float %add4, %val5
|