/external/llvm-project/llvm/test/CodeGen/Mips/ |
D | 2008-06-05-Carry.ll |
      3  define i64 @add64(i64 %u, i64 %v) nounwind {
      5  ; CHECK-LABEL: add64:
|
/external/llvm/test/CodeGen/XCore/ |
D | addsub64.ll |
      2  define i64 @add64(i64 %a, i64 %b) {
      6  ; CHECK: add64
|
/external/llvm-project/llvm/test/CodeGen/XCore/ |
D | addsub64.ll |
      2  define i64 @add64(i64 %a, i64 %b) {
      6  ; CHECK: add64
|
/external/llvm/test/CodeGen/Mips/ |
D | 2008-06-05-Carry.ll |
      3  define i64 @add64(i64 %u, i64 %v) nounwind {
|
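The Mips and XCore hits above are codegen tests for 64-bit addition on 32-bit targets, where an i64 add must be lowered into two 32-bit adds with explicit carry propagation (the Mips test name, 2008-06-05-Carry.ll, refers to exactly that). The sketch below is a plain C++ model of the expansion these tests check, not the backend code itself; the function name is made up for illustration.

    #include <cstdint>

    // Hypothetical model of i64 addition expanded for a 32-bit target:
    // add the low halves, detect the carry, and fold it into the high halves.
    static uint64_t add64_expanded(uint64_t a, uint64_t b) {
        uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
        uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);
        uint32_t lo = alo + blo;
        uint32_t carry = lo < alo;      // sltu-style carry detection
        uint32_t hi = ahi + bhi + carry;
        return ((uint64_t)hi << 32) | lo;
    }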
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | r600.add.ll |
    103  ; FUNC-LABEL: {{^}}add64:
    110  define amdgpu_kernel void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
|
D | add.ll |
    114  ; FUNC-LABEL: {{^}}add64:
    117  define amdgpu_kernel void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | add.ll |
    122  ; FUNC-LABEL: {{^}}add64:
    132  define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
|
/external/llvm-project/llvm/test/CodeGen/WebAssembly/ |
D | i64.ll |
     12  ; CHECK-LABEL: add64:
     13  ; CHECK-NEXT: .functype add64 (i64, i64) -> (i64){{$}}
     18  define i64 @add64(i64 %x, i64 %y) {
|
/external/llvm/test/CodeGen/X86/ |
D | lsr-delayed-fold.ll |
     92  %add64 = trunc i64 %tmp29 to i32 ; <i32> [#uses=1]
    110  tail call void undef(i32 %add64)
|
D | haddsub-2.ll |
    808  %add64 = add i16 %vecext60, %vecext62
    809  %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 9
   1442  %add64 = add i16 %vecext60, %vecext62
   1443  %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 5
|
/external/llvm/test/CodeGen/WebAssembly/ |
D | i64.ll |
     12  ; CHECK-LABEL: add64:
     17  define i64 @add64(i64 %x, i64 %y) {
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | lsr-delayed-fold.ll |
     92  %add64 = trunc i64 %tmp29 to i32 ; <i32> [#uses=1]
    110  tail call void undef(i32 %add64)
|
D | haddsub-2.ll |
    776  %add64 = add i16 %vecext60, %vecext62
    777  %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 9
   1386  %add64 = add i16 %vecext60, %vecext62
   1387  %vecinit66 = insertelement <16 x i16> %vecinit59, i16 %add64, i32 5
|
/external/llvm-project/lld/COFF/ |
D | Chunks.cpp |
     62  static void add64(uint8_t *p, int64_t v) { write64le(p, read64le(p) + v); }      in add64() function
    104  case IMAGE_REL_AMD64_ADDR64: add64(off, s + config->imageBase); break;            in applyRelX64()
    304  case IMAGE_REL_ARM64_ADDR64: add64(off, s + config->imageBase); break;            in applyRelARM64()
|
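In lld's COFF writer, add64 is the helper that applies a 64-bit additive relocation: it reads the little-endian value already stored at the relocation site, adds the target address (for ADDR64, the symbol value plus the image base), and writes the result back. A minimal standalone sketch of that read-add-write pattern follows, with local stand-ins for LLVM's read64le/write64le helpers.

    #include <cstdint>
    #include <cstring>

    // Local stand-ins for llvm::support::endian read/write (assumes a
    // little-endian host for brevity; the real helpers byte-swap as needed).
    static uint64_t read64le(const uint8_t *p) { uint64_t v; std::memcpy(&v, p, 8); return v; }
    static void write64le(uint8_t *p, uint64_t v) { std::memcpy(p, &v, 8); }

    // Apply an additive 64-bit relocation: new value = stored addend + v.
    static void add64(uint8_t *p, int64_t v) { write64le(p, read64le(p) + v); }

    // Usage as in applyRelX64/applyRelARM64 for the ADDR64 relocation kinds:
    //   add64(off, symbolValue + imageBase);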
/external/llvm/test/CodeGen/ARM/ |
D | coalesce-subregs.ll |
    352  %add64 = fadd <2 x double> undef, %add63
    353  %add67 = fadd <2 x double> undef, %add64
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | coalesce-subregs.ll |
    352  %add64 = fadd <2 x double> undef, %add63
    353  %add67 = fadd <2 x double> undef, %add64
|
/external/llvm/test/Analysis/DependenceAnalysis/ |
D | ExactRDIV.ll |
     42  %add64 = or i64 %mul5, 1
     43  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add64
|
/external/llvm-project/llvm/test/Analysis/DependenceAnalysis/ |
D | ExactRDIV.ll |
     44  %add64 = or i64 %mul5, 1
     45  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add64
|
/external/OpenCL-CTS/test_conformance/math_brute_force/ |
D | reference_math.cpp |
    216  static inline void add64(cl_ulong *p, cl_ulong c, int *exponent)      in add64() function
    544  add64(&product, addend, &exponent);                                   in reference_fma()
    555  add64(&addend, product, &expC);                                       in reference_fma()
    910  add64(&addendA, addendB, &expA);                                      in reference_add()
    921  add64(&addendB, addendA, &expB);                                      in reference_add()
|
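The OpenCL CTS reference math library keeps intermediate results as a 64-bit significand with a separately tracked exponent, and its add64 adds one significand into another, adjusting the exponent when the sum carries out of 64 bits. The sketch below only illustrates that renormalization idea and is an assumption about the scheme, not the CTS routine itself; the real implementation also has to align the operands and preserve rounding/sticky information, which is omitted here.

    #include <cstdint>

    // Hypothetical sketch of significand addition with exponent tracking:
    // on carry-out of the 64-bit sum, shift right one bit (dropping the low
    // bit) and bump the exponent so the represented magnitude is unchanged.
    static inline void add64_sketch(uint64_t *p, uint64_t c, int *exponent) {
        uint64_t sum = *p + c;
        if (sum < c) {                          // carry out of bit 63
            sum = (sum >> 1) | (1ULL << 63);    // reinsert the carried-out bit at the top
            *exponent += 1;
        }
        *p = sum;
    }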