/external/llvm/test/CodeGen/Mips/

  ra-allocatable.ll
      101  %0 = load i32, i32* @a0, align 4
      102  %1 = load i32*, i32** @b0, align 4
      104  %2 = load i32, i32* @a1, align 4
      105  %3 = load i32*, i32** @b1, align 4
      107  %4 = load i32, i32* @a2, align 4
      108  %5 = load i32*, i32** @b2, align 4
      110  %6 = load i32, i32* @a3, align 4
      111  %7 = load i32*, i32** @b3, align 4
      113  %8 = load i32, i32* @a4, align 4
      114  %9 = load i32*, i32** @b4, align 4
      [all …]

  hf16call32.ll
      70  %0 = load float, float* @lx, align 4
      72  %1 = load float, float* @x, align 4
      74  %2 = load float, float* @lx, align 4
      76  %3 = load float, float* @x, align 4
      77  %4 = load float, float* @lx, align 4
      83  %5 = load double, double* @lxd, align 8
      85  %6 = load double, double* @xd, align 8
      86  %7 = load double, double* @lxd, align 8
      87  %8 = load double, double* @xd, align 8
      88  %9 = load double, double* @lxd, align 8
      [all …]

/external/gemmlowp/meta/generators/

  transform_kernels_common.py
      63  load = [registers.QuadRegister() for unused_i in range(register_count)]
      64  emitter.EmitVLoadAE(8, elements, load, input_address, None)
      67  for register in load:
      70  for register in load:
      74  emitter.EmitVStoreAE(8, elements, load, output_address, None)
      76  registers.FreeRegisters(load)
      109  load = [registers.QuadRegister() for unused_i in range(register_count)]
      110  emitter.EmitVLoadAE(8, elements, load, input_address, None)
      113  if len(load) is 1:
      114  emitter.EmitVMovl('u8', load[0], load[0])
      [all …]

/external/llvm/test/CodeGen/AMDGPU/

  load-constant-i1.ll
      13  %load = load i1, i1 addrspace(2)* %in
      14  store i1 %load, i1 addrspace(1)* %out
      20  %load = load <2 x i1>, <2 x i1> addrspace(2)* %in
      21  store <2 x i1> %load, <2 x i1> addrspace(1)* %out
      27  %load = load <3 x i1>, <3 x i1> addrspace(2)* %in
      28  store <3 x i1> %load, <3 x i1> addrspace(1)* %out
      34  %load = load <4 x i1>, <4 x i1> addrspace(2)* %in
      35  store <4 x i1> %load, <4 x i1> addrspace(1)* %out
      41  %load = load <8 x i1>, <8 x i1> addrspace(2)* %in
      42  store <8 x i1> %load, <8 x i1> addrspace(1)* %out
      [all …]

  load-global-i1.ll
      13  %load = load i1, i1 addrspace(1)* %in
      14  store i1 %load, i1 addrspace(1)* %out
      20  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
      21  store <2 x i1> %load, <2 x i1> addrspace(1)* %out
      27  %load = load <3 x i1>, <3 x i1> addrspace(1)* %in
      28  store <3 x i1> %load, <3 x i1> addrspace(1)* %out
      34  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
      35  store <4 x i1> %load, <4 x i1> addrspace(1)* %out
      41  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
      42  store <8 x i1> %load, <8 x i1> addrspace(1)* %out
      [all …]

  load-local-i1.ll
      14  %load = load i1, i1 addrspace(3)* %in
      15  store i1 %load, i1 addrspace(3)* %out
      21  %load = load <2 x i1>, <2 x i1> addrspace(3)* %in
      22  store <2 x i1> %load, <2 x i1> addrspace(3)* %out
      28  %load = load <3 x i1>, <3 x i1> addrspace(3)* %in
      29  store <3 x i1> %load, <3 x i1> addrspace(3)* %out
      35  %load = load <4 x i1>, <4 x i1> addrspace(3)* %in
      36  store <4 x i1> %load, <4 x i1> addrspace(3)* %out
      42  %load = load <8 x i1>, <8 x i1> addrspace(3)* %in
      43  store <8 x i1> %load, <8 x i1> addrspace(3)* %out
      [all …]

  load-constant-i8.ll
      14  %ld = load i8, i8 addrspace(2)* %in
      26  %ld = load <2 x i8>, <2 x i8> addrspace(2)* %in
      37  %ld = load <3 x i8>, <3 x i8> addrspace(2)* %in
      48  %ld = load <4 x i8>, <4 x i8> addrspace(2)* %in
      59  %ld = load <8 x i8>, <8 x i8> addrspace(2)* %in
      70  %ld = load <16 x i8>, <16 x i8> addrspace(2)* %in
      81  %a = load i8, i8 addrspace(2)* %in
      95  %ld = load i8, i8 addrspace(2)* %in
      103  %load = load <1 x i8>, <1 x i8> addrspace(2)* %in
      104  %ext = zext <1 x i8> %load to <1 x i32>
      [all …]

  load-local-i8.ll
      14  %ld = load i8, i8 addrspace(3)* %in
      27  %ld = load <2 x i8>, <2 x i8> addrspace(3)* %in
      38  %ld = load <3 x i8>, <3 x i8> addrspace(3)* %in
      49  %ld = load <4 x i8>, <4 x i8> addrspace(3)* %in
      61  %ld = load <8 x i8>, <8 x i8> addrspace(3)* %in
      76  %ld = load <16 x i8>, <16 x i8> addrspace(3)* %in
      88  %a = load i8, i8 addrspace(3)* %in
      102  %ld = load i8, i8 addrspace(3)* %in
      110  %load = load <1 x i8>, <1 x i8> addrspace(3)* %in
      111  %ext = zext <1 x i8> %load to <1 x i32>
      [all …]

  load-global-i8.ll
      15  %ld = load i8, i8 addrspace(1)* %in
      27  %ld = load <2 x i8>, <2 x i8> addrspace(1)* %in
      39  %ld = load <3 x i8>, <3 x i8> addrspace(1)* %in
      51  %ld = load <4 x i8>, <4 x i8> addrspace(1)* %in
      63  %ld = load <8 x i8>, <8 x i8> addrspace(1)* %in
      76  %ld = load <16 x i8>, <16 x i8> addrspace(1)* %in
      87  %a = load i8, i8 addrspace(1)* %in
      101  %ld = load i8, i8 addrspace(1)* %in
      109  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
      110  %ext = zext <1 x i8> %load to <1 x i32>
      [all …]

/external/swiftshader/third_party/subzero/tests_lit/llvm2ice_tests/

  load_cast.ll
      1  ; Tests desired and undesired folding of load instructions into cast
      14  %load = load i8, i8* %addr, align 1
      15  %result = zext i8 %load to i32
      25  %load = load i8, i8* %addr, align 1
      26  %tmp1 = zext i8 %load to i32
      27  %tmp2 = zext i8 %load to i32
      31  ; Test that load folding does not happen.
      39  %load = load i8, i8* %addr, align 1
      40  %result = sext i8 %load to i32
      50  %load = load i8, i8* %addr, align 1
      [all …]
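
  Note: a minimal sketch (my own illustration, not taken from load_cast.ll;
  the function names are made up) of the two shapes the comments above
  distinguish: a load whose only use is a widening cast, which is a
  candidate for folding into an extending load, and a load with more than
  one use, which is generally left as a separate instruction.

      ; hypothetical illustration
      define i32 @single_use(i8* %addr) {
      entry:
        %load = load i8, i8* %addr, align 1
        %result = zext i8 %load to i32     ; sole use: foldable into a zero-extending load
        ret i32 %result
      }

      define i32 @two_uses(i8* %addr) {
      entry:
        %load = load i8, i8* %addr, align 1
        %tmp1 = zext i8 %load to i32       ; two uses of %load, so the load
        %tmp2 = zext i8 %load to i32       ; is expected to stay un-folded
        %result = add i32 %tmp1, %tmp2
        ret i32 %result
      }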

/external/linux-kselftest/tools/testing/selftests/powerpc/ptrace/

  ptrace-vsx.h
      17  int validate_vsx(unsigned long *vsx, unsigned long *load) in validate_vsx() argument
      22  if (vsx[i] != load[2 * i + 1]) { in validate_vsx()
      24  i, vsx[i], 2 * i + 1, load[2 * i + 1]); in validate_vsx()
      35  int validate_vmx(unsigned long vmx[][2], unsigned long *load) in validate_vmx() argument
      41  if ((vmx[i][0] != load[64 + 2 * i]) || in validate_vmx()
      42  (vmx[i][1] != load[65 + 2 * i])) { in validate_vmx()
      45  load[64 + 2 * i]); in validate_vmx()
      48  load[65 + 2 * i]); in validate_vmx()
      55  if ((vmx[i][0] != load[65 + 2 * i]) || in validate_vmx()
      56  (vmx[i][1] != load[64 + 2 * i])) { in validate_vmx()
      [all …]

/external/llvm/test/Analysis/CostModel/X86/

  load_store.ll
      36  ;CHECK: cost of 1 {{.*}} load
      37  load i8, i8* undef, align 4
      38  ;CHECK: cost of 1 {{.*}} load
      39  load i16, i16* undef, align 4
      40  ;CHECK: cost of 1 {{.*}} load
      41  load i32, i32* undef, align 4
      42  ;CHECK: cost of 1 {{.*}} load
      43  load i64, i64* undef, align 4
      44  ;CHECK: cost of 2 {{.*}} load
      45  load i128, i128* undef, align 4
      [all …]
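
  Note: a hedged reading of the CHECK lines above: loads of types that are
  legal on x86-64 (i8 through i64) are modeled as one memory operation,
  while an illegal type such as i128 is costed as the two i64 loads it is
  split into. A tiny fragment in the same style (my own, not part of the
  test):

      ; hypothetical illustration
      define void @scalar_load_costs() {
        load i64, i64* undef, align 4     ; legal type, expected cost 1
        load i128, i128* undef, align 4   ; split into two i64 loads, expected cost 2
        ret void
      }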

/external/llvm/test/Transforms/EarlyCSE/

  invariant-loads.ll
      7  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      13  %val0 = load i32, i32* %ptr, !invariant.load !{}
      15  %val1 = load i32, i32* %ptr, !invariant.load !{}
      17  %val2 = load i32, i32* %ptr, !invariant.load !{}
      24  ; invariant load has executed, the location loaded from is known to be
      28  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      32  %val0 = load i32, i32* %ptr, !invariant.load !{}
      34  %val1 = load i32, i32* %ptr
      40  ; Negative test -- we can't forward a non-invariant load into an
      41  ; invariant load.
      [all …]
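
  Note: a self-contained sketch (mine, not the test's code; @clobber and the
  function name are made up) of the forwarding described in the comments
  above: once a load tagged !invariant.load has executed, the location is
  known not to change, so EarlyCSE may reuse its value for a later plain
  load of the same pointer, even across a call.

      ; hypothetical illustration
      declare void @clobber(i32*)

      define i32 @forward_from_invariant(i32* %ptr) {
      entry:
        %val0 = load i32, i32* %ptr, !invariant.load !0
        call void @clobber(i32* %ptr)
        %val1 = load i32, i32* %ptr        ; may be replaced by %val0
        %sum = add i32 %val0, %val1
        ret i32 %sum
      }

      !0 = !{}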

/external/llvm/test/Transforms/Inline/

  inline-cold.ll
      20  %a1 = load volatile i32, i32* @a
      22  %a2 = load volatile i32, i32* @a
      24  %a3 = load volatile i32, i32* @a
      26  %a4 = load volatile i32, i32* @a
      28  %a5 = load volatile i32, i32* @a
      30  %a6 = load volatile i32, i32* @a
      32  %a7 = load volatile i32, i32* @a
      34  %a8 = load volatile i32, i32* @a
      36  %a9 = load volatile i32, i32* @a
      38  %a10 = load volatile i32, i32* @a
      [all …]

/external/llvm/test/Transforms/GVN/

  volatile.ll
      5  ; Check that we can bypass a volatile load when searching
      6  ; for dependencies of a non-volatile load
      9  ; CHECK: %0 = load volatile i32, i32* %q
      12  %x = load i32, i32* %p
      13  load volatile i32, i32* %q
      14  %y = load i32, i32* %p
      20  ; volatile, this would be (in effect) removing the volatile load
      23  ; CHECK: %x = load i32, i32* %p
      24  ; CHECK-NEXT: %y = load volatile i32, i32* %p
      27  %x = load i32, i32* %p
      [all …]
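
  Note: a minimal sketch (my own, not the test's code; the function name is
  made up) of the first property described above: the volatile load of %q
  must stay, but it does not block GVN from proving the two ordinary loads
  of %p redundant.

      ; hypothetical illustration
      define i32 @bypass_volatile(i32* %p, i32* %q) {
      entry:
        %x = load i32, i32* %p
        %v = load volatile i32, i32* %q    ; kept: volatile accesses may not be removed
        %y = load i32, i32* %p             ; may be replaced by %x
        %d = sub i32 %x, %y
        ret i32 %d
      }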

  atomic.ll
      14  %x = load i32, i32* @y
      16  %y = load i32, i32* @y
      21  ; GVN across unordered load (allowed)
      26  %x = load i32, i32* @y
      27  %y = load atomic i32, i32* @x unordered, align 4
      28  %z = load i32, i32* @y
      34  ; GVN load to unordered load (allowed)
      39  %x = load atomic i32, i32* @x unordered, align 4
      40  %y = load i32, i32* @x
      45  ; GVN unordered load to load (unordered load must not be removed)
      [all …]
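
  Note: a minimal sketch (my own illustration, not taken from atomic.ll; the
  function name is made up) of the "across unordered" case named in the
  comments above: an unordered atomic load of @x sitting between two plain
  loads of @y does not stop GVN from treating the second load of @y as
  redundant.

      ; hypothetical illustration
      @x = global i32 0
      @y = global i32 0

      define i32 @across_unordered() {
      entry:
        %a = load i32, i32* @y
        %u = load atomic i32, i32* @x unordered, align 4
        %b = load i32, i32* @y             ; may be replaced by %a
        %t = add i32 %a, %b
        %r = add i32 %t, %u
        ret i32 %r
      }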

  invariant-load.ll
      1  ; Test if the !invariant.load metadata is maintained by GVN.
      6  ; CHECK: %x = load i32, i32* %p, align 4, !invariant.load !0
      7  ; CHECK-NOT: %y = load
      9  %x = load i32, i32* %p, align 4, !invariant.load !0
      12  %y = load i32, i32* %p, align 4, !invariant.load !0
      19  ; CHECK-NOT: !invariant.load
      20  ; CHECK-NOT: %y = load
      22  %x = load i32, i32* %p, align 4
      25  %y = load i32, i32* %p, align 4, !invariant.load !0
      30  ; With the invariant.load metadata, what would otherwise
      [all …]
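
  Note: a small sketch (mine, not the test's code; the function name is
  hypothetical) of the second case checked above. The earlier, untagged
  load survives and the tagged load is removed, and the CHECK-NOT lines
  require that its !invariant.load tag does not reappear on the merged
  result.

      ; hypothetical illustration
      define i32 @drop_tag(i32* %p) {
      entry:
        %x = load i32, i32* %p, align 4                      ; survives, stays untagged
        %y = load i32, i32* %p, align 4, !invariant.load !0  ; redundant, expected to be removed
        %s = add i32 %x, %y
        ret i32 %s
      }

      !0 = !{}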

/external/libavc/common/arm/

  ih264_default_weighted_pred_a9q.s
      124  vld1.32 d0[0], [r0], r3 @load row 1 in source 1
      125  vld1.32 d0[1], [r0], r3 @load row 2 in source 1
      126  vld1.32 d2[0], [r1], r4 @load row 1 in source 2
      127  vld1.32 d2[1], [r1], r4 @load row 2 in source 2
      129  vld1.32 d1[0], [r0], r3 @load row 3 in source 1
      130  vld1.32 d1[1], [r0], r3 @load row 4 in source 1
      132  vld1.32 d3[0], [r1], r4 @load row 3 in source 2
      133  vld1.32 d3[1], [r1], r4 @load row 4 in source 2
      136  vst1.32 d0[0], [r2], r5 @load row 1 in destination
      137  vst1.32 d0[1], [r2], r5 @load row 2 in destination
      [all …]

/external/llvm/test/CodeGen/SystemZ/

  frame-19.ll
      30  %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
      31  %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
      32  %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
      33  %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
      34  %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
      35  %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
      36  %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
      37  %v7 = load volatile <16 x i8>, <16 x i8> *%ptr
      38  %v8 = load volatile <16 x i8>, <16 x i8> *%ptr
      39  %v9 = load volatile <16 x i8>, <16 x i8> *%ptr
      [all …]

/external/llvm/test/Transforms/InstCombine/

  loadstore-metadata.ll
      6  ; Ensure (cast (load (...))) -> (load (cast (...))) preserves TBAA.
      8  ; CHECK: load i32, i32* %{{.*}}, !tbaa !0
      10  %l = load float, float* %ptr, !tbaa !0
      16  ; Ensure (cast (load (...))) -> (load (cast (...))) preserves no-alias metadata.
      18  ; CHECK: load i32, i32* %{{.*}}, !alias.scope !2, !noalias !1
      20  %l = load float, float* %ptr, !alias.scope !2, !noalias !1
      26  ; Ensure (cast (load (...))) -> (load (cast (...))) drops range metadata. It
      30  ; CHECK: load float, float* %{{.*}}
      34  %l = load i32, i32* %ptr, !range !6
      40  ; Ensure (cast (load (...))) -> (load (cast (...))) preserves invariant metadata.
      [all …]
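
  Note: a minimal sketch (an assumed shape, not the test's own code; the
  function name is made up) of the "drops range metadata" case mentioned
  above: once the canonicalization (cast (load ...)) -> (load (cast ...))
  turns the i32 load into a float load, an integer value range no longer
  applies, so !range is expected to be dropped rather than carried over.

      ; hypothetical illustration
      define float @int_load_then_cast(i32* %ptr) {
      entry:
        %l = load i32, i32* %ptr, align 4, !range !0   ; i32 load annotated with a value range
        %c = bitcast i32 %l to float                   ; sole use is a cast to float
        ret float %c
      }

      !0 = !{i32 0, i32 256}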

/external/llvm/test/Transforms/LoadCombine/

  load-combine.ll
      1  ; RUN: opt < %s -load-combine -instcombine -S | FileCheck %s
      9  %2 = load i8, i8* %1, align 1
      13  %6 = load i8, i8* %5, align 1
      18  %11 = load i8, i8* %10, align 1
      23  %16 = load i8, i8* %15, align 1
      28  %21 = load i8, i8* %20, align 1
      33  %26 = load i8, i8* %25, align 1
      38  %31 = load i8, i8* %30, align 1
      43  %36 = load i8, i8* %35, align 1
      48  ; CHECK: load i64, i64* %{{.*}}, align 1
      [all …]

/external/swiftshader/third_party/LLVM/test/CodeGen/ARM/

  vbits.ll
      6  %tmp1 = load <8 x i8>* %A
      7  %tmp2 = load <8 x i8>* %B
      15  %tmp1 = load <4 x i16>* %A
      16  %tmp2 = load <4 x i16>* %B
      24  %tmp1 = load <2 x i32>* %A
      25  %tmp2 = load <2 x i32>* %B
      33  %tmp1 = load <1 x i64>* %A
      34  %tmp2 = load <1 x i64>* %B
      42  %tmp1 = load <16 x i8>* %A
      43  %tmp2 = load <16 x i8>* %B
      [all …]

/external/llvm/test/CodeGen/AArch64/

  callee-save.ll
      15  %val1 = load volatile float, float* @var
      16  %val2 = load volatile float, float* @var
      17  %val3 = load volatile float, float* @var
      18  %val4 = load volatile float, float* @var
      19  %val5 = load volatile float, float* @var
      20  %val6 = load volatile float, float* @var
      21  %val7 = load volatile float, float* @var
      22  %val8 = load volatile float, float* @var
      23  %val9 = load volatile float, float* @var
      24  %val10 = load volatile float, float* @var
      [all …]

  tbi.ll
      12  %load = load i32, i32* %cast
      13  ret i32 %load
      16  ; load (r & MASK) + 4
      24  %load = load i32, i32* %gep
      25  ret i32 %load
      28  ; load (r & WIDER_MASK)
      35  %load = load i32, i32* %cast
      36  ret i32 %load
      45  %load = load i64, i64* %cast
      46  ret i64 %load
      [all …]
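
  Note: tbi.ll appears to exercise AArch64 top-byte-ignore addressing, where
  bits 56-63 of a pointer are ignored by loads and stores. A sketch (my own
  reconstruction of the "r & MASK" pattern hinted at in the comments; the
  function name is made up) in which the mask feeds only a memory access and
  can therefore be dropped by the backend.

      ; hypothetical illustration
      define i32 @ld_via_masked_pointer(i64 %addr) {
      entry:
        %masked = and i64 %addr, 72057594037927935   ; 0x00ffffffffffffff clears the top byte
        %cast = inttoptr i64 %masked to i32*
        %load = load i32, i32* %cast
        ret i32 %load
      }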

/external/swiftshader/third_party/LLVM/test/Transforms/GVN/

  atomic.ll
      14  %x = load i32* @y
      16  %y = load i32* @y
      26  %x = load i32* @y
      28  %y = load i32* @y
      33  ; GVN across unordered load (allowed)
      38  %x = load i32* @y
      39  %y = load atomic i32* @x unordered, align 4
      40  %z = load i32* @y
      46  ; GVN across acquire load (load after atomic load must not be removed)
      49  ; CHECK: load atomic i32* @x
      [all …]
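
  Note: this older copy of the GVN test uses the pre-3.7 load syntax without
  an explicit result type. A sketch (not from the file; names are made up)
  of the acquire case named in the comments: the second load of @y must
  stay, because the intervening acquire load may synchronize with a release
  store elsewhere and make a new value of @y visible.

      ; hypothetical illustration, in the same older IR dialect
      @x = common global i32 0
      @y = common global i32 0

      define i32 @across_acquire() {
      entry:
        %a = load i32* @y
        %b = load atomic i32* @x acquire, align 4
        %c = load i32* @y                  ; must not be replaced by %a
        %s = add i32 %a, %c
        ret i32 %s
      }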