/external/llvm/test/CodeGen/Mips/ |
D | ra-allocatable.ll |
  101  %0 = load i32, i32* @a0, align 4
  102  %1 = load i32*, i32** @b0, align 4
  104  %2 = load i32, i32* @a1, align 4
  105  %3 = load i32*, i32** @b1, align 4
  107  %4 = load i32, i32* @a2, align 4
  108  %5 = load i32*, i32** @b2, align 4
  110  %6 = load i32, i32* @a3, align 4
  111  %7 = load i32*, i32** @b3, align 4
  113  %8 = load i32, i32* @a4, align 4
  114  %9 = load i32*, i32** @b4, align 4
  [all …]
|
D | hf16call32.ll |
  70  %0 = load float, float* @lx, align 4
  72  %1 = load float, float* @x, align 4
  74  %2 = load float, float* @lx, align 4
  76  %3 = load float, float* @x, align 4
  77  %4 = load float, float* @lx, align 4
  83  %5 = load double, double* @lxd, align 8
  85  %6 = load double, double* @xd, align 8
  86  %7 = load double, double* @lxd, align 8
  87  %8 = load double, double* @xd, align 8
  88  %9 = load double, double* @lxd, align 8
  [all …]
|
D | selpat.ll |
  15  %0 = load i32, i32* @a, align 4
  16  %1 = load i32, i32* @b, align 4
  18  %2 = load i32, i32* @f, align 4
  19  %3 = load i32, i32* @t, align 4
  26  %4 = load i32, i32* @c, align 4
  37  %0 = load i32, i32* @a, align 4
  39  %1 = load i32, i32* @t, align 4
  40  %2 = load i32, i32* @f, align 4
  49  %3 = load i32, i32* @b, align 4
  64  %0 = load i32, i32* @a, align 4
  [all …]
|
/external/v8/test/mjsunit/ |
D | polymorph-arrays.js |
  45  function load(a, i) {    function
  59  assertEquals(1, load(object_array, 1));
  60  assertEquals(1, load(js_array, 1));
  61  assertEquals(1, load(sparse_object_array, 1));
  62  assertEquals(1, load(sparse_js_array, 1));
  64  return load;
  77  load = make_polymorphic_load_function();
  78  assertEquals(undefined, load(js_array, new Object()));
  79  load = make_polymorphic_load_function();
  80  assertEquals(undefined, load(object_array, new Object()));
  [all …]
|
/external/llvm/test/Analysis/CostModel/X86/ |
D | load_store.ll |
  36  ;CHECK: cost of 1 {{.*}} load
  37  load i8, i8* undef, align 4
  38  ;CHECK: cost of 1 {{.*}} load
  39  load i16, i16* undef, align 4
  40  ;CHECK: cost of 1 {{.*}} load
  41  load i32, i32* undef, align 4
  42  ;CHECK: cost of 1 {{.*}} load
  43  load i64, i64* undef, align 4
  44  ;CHECK: cost of 2 {{.*}} load
  45  load i128, i128* undef, align 4
  [all …]
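This entry shows the cost-model test idiom: each load is immediately preceded by a CHECK line matching the "cost of N" text printed by the cost-model analysis. A minimal sketch of the same shape, assuming a RUN line drives opt's cost-model printer for an x86 target; the function name, arguments, and matched costs below are illustrative placeholders, not verified output:

    ; Hypothetical test sketch; regexes accept any cost since the real
    ; values depend on the target CPU.
    define void @loads_sketch(i32* %p, <4 x i32>* %q) {
      ;CHECK: cost of {{[0-9]+}} {{.*}} load
      %s = load i32, i32* %p, align 4
      ;CHECK: cost of {{[0-9]+}} {{.*}} load
      %v = load <4 x i32>, <4 x i32>* %q, align 16
      ret void
    }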
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | global-extload-i8.ll |
  10  %a = load i8, i8 addrspace(1)* %in
  21  %a = load i8, i8 addrspace(1)* %in
  30  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
  31  %ext = zext <1 x i8> %load to <1 x i32>
  39  %load = load <1 x i8>, <1 x i8> addrspace(1)* %in
  40  %ext = sext <1 x i8> %load to <1 x i32>
  48  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in
  49  %ext = zext <2 x i8> %load to <2 x i32>
  57  %load = load <2 x i8>, <2 x i8> addrspace(1)* %in
  58  %ext = sext <2 x i8> %load to <2 x i32>
  [all …]
|
D | global-extload-i16.ll |
  11  %a = load i16, i16 addrspace(1)* %in
  22  %a = load i16, i16 addrspace(1)* %in
  32  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
  33  %ext = zext <1 x i16> %load to <1 x i32>
  42  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
  43  %ext = sext <1 x i16> %load to <1 x i32>
  51  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
  52  %ext = zext <2 x i16> %load to <2 x i32>
  60  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
  61  %ext = sext <2 x i16> %load to <2 x i32>
  [all …]
|
D | global-extload-i1.ll |
  11  %a = load i1, i1 addrspace(1)* %in
  23  %a = load i1, i1 addrspace(1)* %in
  32  %load = load <1 x i1>, <1 x i1> addrspace(1)* %in
  33  %ext = zext <1 x i1> %load to <1 x i32>
  41  %load = load <1 x i1>, <1 x i1> addrspace(1)* %in
  42  %ext = sext <1 x i1> %load to <1 x i32>
  50  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
  51  %ext = zext <2 x i1> %load to <2 x i32>
  59  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
  60  %ext = sext <2 x i1> %load to <2 x i32>
  [all …]
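The three extload files above all follow one pattern: load a narrow type from global (addrspace 1) memory, widen it with zext or sext, and check that the backend selects a single extending load. A minimal sketch of the zext flavor, assuming the usual out/in argument shape of these tests (function and value names are illustrative):

    define void @zextload_sketch(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
      %a = load i8, i8 addrspace(1)* %in    ; narrow load from global memory
      %ext = zext i8 %a to i32              ; foldable into an unsigned extending load
      store i32 %ext, i32 addrspace(1)* %out
      ret void
    }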
|
/external/llvm/test/Transforms/Inline/ |
D | inline-cold.ll |
  20  %a1 = load volatile i32, i32* @a
  22  %a2 = load volatile i32, i32* @a
  24  %a3 = load volatile i32, i32* @a
  26  %a4 = load volatile i32, i32* @a
  28  %a5 = load volatile i32, i32* @a
  30  %a6 = load volatile i32, i32* @a
  32  %a7 = load volatile i32, i32* @a
  34  %a8 = load volatile i32, i32* @a
  36  %a9 = load volatile i32, i32* @a
  38  %a10 = load volatile i32, i32* @a
  [all …]
|
/external/libavc/common/arm/ |
D | ih264_default_weighted_pred_a9q.s |
  124  vld1.32 d0[0], [r0], r3    @load row 1 in source 1
  125  vld1.32 d0[1], [r0], r3    @load row 2 in source 1
  126  vld1.32 d2[0], [r1], r4    @load row 1 in source 2
  127  vld1.32 d2[1], [r1], r4    @load row 2 in source 2
  129  vld1.32 d1[0], [r0], r3    @load row 3 in source 1
  130  vld1.32 d1[1], [r0], r3    @load row 4 in source 1
  132  vld1.32 d3[0], [r1], r4    @load row 3 in source 2
  133  vld1.32 d3[1], [r1], r4    @load row 4 in source 2
  136  vst1.32 d0[0], [r2], r5    @store row 1 in destination (vst1 stores; the original comment says "load")
  137  vst1.32 d0[1], [r2], r5    @store row 2 in destination (vst1 stores; the original comment says "load")
  [all …]
|
/external/llvm/test/Transforms/GVN/ |
D | invariant-load.ll |
   1  ; Test if the !invariant.load metadata is maintained by GVN.
   6  ; CHECK: %x = load i32, i32* %p, align 4, !invariant.load !0
   7  ; CHECK-NOT: %y = load
   9  %x = load i32, i32* %p, align 4, !invariant.load !0
  12  %y = load i32, i32* %p, align 4, !invariant.load !0
  19  ; CHECK-NOT: !invariant.load
  20  ; CHECK-NOT: %y = load
  22  %x = load i32, i32* %p, align 4
  25  %y = load i32, i32* %p, align 4, !invariant.load !0
  30  ; With the invariant.load metadata, what would otherwise
  [all …]
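For reference, !invariant.load takes an empty metadata node and asserts that the loaded location does not change while the program can observe the load's result, which is what lets GVN replace the second load with the first above. A minimal self-contained sketch (names are illustrative):

    define i32 @invariant_sketch(i32* %p) {
      %x = load i32, i32* %p, align 4, !invariant.load !0
      %y = load i32, i32* %p, align 4, !invariant.load !0  ; GVN may reuse %x here
      %s = add i32 %x, %y
      ret i32 %s
    }
    !0 = !{}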
|
D | volatile.ll |
   5  ; Check that we can bypass a volatile load when searching
   6  ; for dependencies of a non-volatile load
   9  ; CHECK: %0 = load volatile i32, i32* %q
  12  %x = load i32, i32* %p
  13  load volatile i32, i32* %q
  14  %y = load i32, i32* %p
  20  ; volatile, this would be (in effect) removing the volatile load
  23  ; CHECK: %x = load i32, i32* %p
  24  ; CHECK-NEXT: %y = load volatile i32, i32* %p
  27  %x = load i32, i32* %p
  [all …]
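The point of the first test is that a volatile load of an unrelated pointer does not clobber *%p, so the later non-volatile load can still be forwarded from the earlier one. A minimal sketch of that bypass, assuming no intervening writes to %p (names are illustrative):

    define i32 @bypass_sketch(i32* %p, i32* %q) {
      %x = load i32, i32* %p
      load volatile i32, i32* %q    ; must be kept, but does not block reuse of %x
      %y = load i32, i32* %p        ; GVN may rewrite uses of %y to %x
      %sum = add i32 %x, %y
      ret i32 %sum
    }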
|
D | atomic.ll |
  14  %x = load i32, i32* @y
  16  %y = load i32, i32* @y
  21  ; GVN across unordered load (allowed)
  26  %x = load i32, i32* @y
  27  %y = load atomic i32, i32* @x unordered, align 4
  28  %z = load i32, i32* @y
  34  ; GVN load to unordered load (allowed)
  39  %x = load atomic i32, i32* @x unordered, align 4
  40  %y = load i32, i32* @x
  45  ; GVN unordered load to load (unordered load must not be removed)
  [all …]
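The "unordered" cases above rely on the load atomic syntax, which requires an explicit ordering keyword and alignment; unordered is the weakest atomic ordering, which is why GVN may look through such a load but must never delete it. A minimal sketch of the two forms side by side (the global is illustrative):

    @g = global i32 0

    define i32 @orderings_sketch() {
      %plain = load i32, i32* @g                            ; ordinary load
      %unord = load atomic i32, i32* @g unordered, align 4  ; atomic, weakest ordering
      %sum = add i32 %plain, %unord
      ret i32 %sum
    }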
|
/external/llvm/test/Transforms/InstCombine/ |
D | loadstore-metadata.ll |
   6  ; Ensure (cast (load (...))) -> (load (cast (...))) preserves TBAA.
   8  ; CHECK: load i32, i32* %{{.*}}, !tbaa !0
  10  %l = load float, float* %ptr, !tbaa !0
  16  ; Ensure (cast (load (...))) -> (load (cast (...))) preserves no-alias metadata.
  18  ; CHECK: load i32, i32* %{{.*}}, !alias.scope !2, !noalias !1
  20  %l = load float, float* %ptr, !alias.scope !2, !noalias !1
  26  ; Ensure (cast (load (...))) -> (load (cast (...))) drops range metadata. It
  30  ; CHECK: load float, float* %{{.*}}
  34  %l = load i32, i32* %ptr, !range !6
  40  ; Ensure (cast (load (...))) -> (load (cast (...))) preserves invariant metadata.
  [all …]
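Each case above feeds a load into a cast so instcombine can commute them; type-independent metadata such as !tbaa must survive the rewrite, while value-specific metadata such as !range must be dropped once the loaded type changes. A sketch of the TBAA case with a complete metadata graph; the node layout follows the common scalar-TBAA shape and the names are illustrative:

    define i32 @tbaa_sketch(float* %ptr) {
      %l = load float, float* %ptr, !tbaa !0  ; instcombine may turn this into
      %c = bitcast float %l to i32            ; 'load i32' carrying the same !tbaa
      ret i32 %c
    }
    !0 = !{!1, !1, i64 0}
    !1 = !{!"float", !2, i64 0}
    !2 = !{!"Simple C/C++ TBAA"}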
|
/external/llvm/test/CodeGen/SystemZ/ |
D | frame-19.ll |
  30  %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
  31  %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
  32  %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
  33  %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
  34  %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
  35  %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
  36  %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
  37  %v7 = load volatile <16 x i8>, <16 x i8> *%ptr
  38  %v8 = load volatile <16 x i8>, <16 x i8> *%ptr
  39  %v9 = load volatile <16 x i8>, <16 x i8> *%ptr
  [all …]
|
D | frame-20.ll |
  48  %l0 = load volatile double, double *%ptr
  49  %l1 = load volatile double, double *%ptr
  50  %l2 = load volatile double, double *%ptr
  51  %l3 = load volatile double, double *%ptr
  52  %l4 = load volatile double, double *%ptr
  53  %l5 = load volatile double, double *%ptr
  54  %l6 = load volatile double, double *%ptr
  55  %l7 = load volatile double, double *%ptr
  56  %l8 = load volatile double, double *%ptr
  57  %l9 = load volatile double, double *%ptr
  [all …]
|
D | frame-18.ll |
  19  %l0 = load volatile i32 , i32 *%ptr
  20  %l1 = load volatile i32 , i32 *%ptr
  21  %l3 = load volatile i32 , i32 *%ptr
  22  %l4 = load volatile i32 , i32 *%ptr
  23  %l5 = load volatile i32 , i32 *%ptr
  24  %l6 = load volatile i32 , i32 *%ptr
  25  %l7 = load volatile i32 , i32 *%ptr
  26  %l8 = load volatile i32 , i32 *%ptr
  27  %l9 = load volatile i32 , i32 *%ptr
  28  %l10 = load volatile i32 , i32 *%ptr
  [all …]
|
D | spill-01.ll |
  47  %val0 = load i32 , i32 *%ptr0
  48  %val1 = load i32 , i32 *%ptr1
  49  %val2 = load i32 , i32 *%ptr2
  50  %val3 = load i32 , i32 *%ptr3
  51  %val4 = load i32 , i32 *%ptr4
  52  %val5 = load i32 , i32 *%ptr5
  53  %val6 = load i32 , i32 *%ptr6
  68  ; Test a case where at least one i32 load and at least one i32 store
  85  %val0 = load i32 , i32 *%ptr0
  86  %val1 = load i32 , i32 *%ptr1
  [all …]
|
/external/llvm/test/Transforms/LoadCombine/ |
D | load-combine.ll |
   1  ; RUN: opt < %s -load-combine -instcombine -S | FileCheck %s
   9  %2 = load i8, i8* %1, align 1
  13  %6 = load i8, i8* %5, align 1
  18  %11 = load i8, i8* %10, align 1
  23  %16 = load i8, i8* %15, align 1
  28  %21 = load i8, i8* %20, align 1
  33  %26 = load i8, i8* %25, align 1
  38  %31 = load i8, i8* %30, align 1
  43  %36 = load i8, i8* %35, align 1
  48  ; CHECK: load i64, i64* %{{.*}}, align 1
  [all …]
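The pass under test stitches adjacent narrow loads that are shifted and or'ed together into one wide load, which is why the final CHECK expects a single i64 load in place of the eight i8 loads. A two-byte sketch of the input shape it recognizes, assuming a little-endian target (names are illustrative):

    define i32 @combine_sketch(i8* %p) {
      %p1  = getelementptr i8, i8* %p, i64 1
      %b0  = load i8, i8* %p, align 1
      %b1  = load i8, i8* %p1, align 1
      %z0  = zext i8 %b0 to i32
      %z1  = zext i8 %b1 to i32
      %hi  = shl i32 %z1, 8
      %val = or i32 %z0, %hi   ; -load-combine can fold %b0/%b1 into one i16 load
      ret i32 %val
    }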
|
/external/llvm/test/CodeGen/AArch64/ |
D | tbi.ll |
  12  %load = load i32, i32* %cast
  13  ret i32 %load
  16  ; load (r & MASK) + 4
  24  %load = load i32, i32* %gep
  25  ret i32 %load
  28  ; load (r & WIDER_MASK)
  35  %load = load i32, i32* %cast
  36  ret i32 %load
  45  %load = load i64, i64* %cast
  46  ret i64 %load
  [all …]
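These tests cover AArch64's top-byte-ignore (TBI) feature: the hardware ignores bits 56-63 of an address used for a load or store, so an 'and' that only clears the top byte is redundant and can be folded away. A minimal sketch of the pattern; the mask constant 72057594037927935 is 0x00ffffffffffffff:

    define i32 @tbi_sketch(i64 %addr) {
      %masked = and i64 %addr, 72057594037927935  ; clear bits 56-63
      %cast = inttoptr i64 %masked to i32*
      %load = load i32, i32* %cast                ; with TBI the mask can be elided
      ret i32 %load
    }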
|
D | callee-save.ll |
  15  %val1 = load volatile float, float* @var
  16  %val2 = load volatile float, float* @var
  17  %val3 = load volatile float, float* @var
  18  %val4 = load volatile float, float* @var
  19  %val5 = load volatile float, float* @var
  20  %val6 = load volatile float, float* @var
  21  %val7 = load volatile float, float* @var
  22  %val8 = load volatile float, float* @var
  23  %val9 = load volatile float, float* @var
  24  %val10 = load volatile float, float* @var
  [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | big-endian-neon-bitconv.ll |
  22  %w = load <8 x i8>, <8 x i8>* @v8i8
  28  define void @conv_v8i8_to_i64( <8 x i8>* %load, <8 x i8>* %store ) {
  31  %v = load <8 x i8>, <8 x i8>* %load
  32  %w = load <8 x i8>, <8 x i8>* @v8i8
  43  %w = load <4 x i16>, <4 x i16>* @v4i16
  49  define void @conv_v4i16_to_i64( <4 x i16>* %load, <4 x i16>* %store ) {
  52  %v = load <4 x i16>, <4 x i16>* %load
  53  %w = load <4 x i16>, <4 x i16>* @v4i16
  64  %w = load <2 x i32>, <2 x i32>* @v2i32
  70  define void @conv_v2i32_to_i64( <2 x i32>* %load, <2 x i32>* %store ) {
  [all …]
|
D | vbits.ll |
   6  %tmp1 = load <8 x i8>, <8 x i8>* %A
   7  %tmp2 = load <8 x i8>, <8 x i8>* %B
  15  %tmp1 = load <4 x i16>, <4 x i16>* %A
  16  %tmp2 = load <4 x i16>, <4 x i16>* %B
  24  %tmp1 = load <2 x i32>, <2 x i32>* %A
  25  %tmp2 = load <2 x i32>, <2 x i32>* %B
  33  %tmp1 = load <1 x i64>, <1 x i64>* %A
  34  %tmp2 = load <1 x i64>, <1 x i64>* %B
  42  %tmp1 = load <16 x i8>, <16 x i8>* %A
  43  %tmp2 = load <16 x i8>, <16 x i8>* %B
  [all …]
|
/external/llvm/test/Instrumentation/DataFlowSanitizer/ |
D | load.ll |
   1  ; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-load=1 -S | FileCheck %s --check-prefix=COM…
   2  ; RUN: opt < %s -dfsan -dfsan-combine-pointer-labels-on-load=0 -S | FileCheck %s --check-prefix=NO_…
   8  ; COMBINE_PTR_LABEL: load
   9  ; COMBINE_PTR_LABEL-NOT: load
  12  ; NO_COMBINE_PTR_LABEL: load
  13  ; NO_COMBINE_PTR_LABEL-NOT: load
  14  %a = load {}, {}* %p
  20  ; COMBINE_PTR_LABEL: load i16, i16*
  25  ; COMBINE_PTR_LABEL: load i16, i16*
  28  ; COMBINE_PTR_LABEL: load i8, i8*
  [all …]
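The two RUN lines differ only in -dfsan-combine-pointer-labels-on-load: with =1, DataFlowSanitizer unions the pointer operand's shadow label into the loaded value's label; with =0, only the loaded memory's shadow propagates. A sketch of the kind of pre-instrumentation input such a test exercises (names are illustrative, and the label behavior in the comments is the assumption being described):

    define i8 @dfsan_sketch(i8* %p) {
      ; Under =1 the result label is union(label(%p), label(*%p));
      ; under =0 it is just label(*%p).
      %a = load i8, i8* %p
      ret i8 %a
    }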
|
/external/llvm/test/Bitcode/ |
D | memInstructions.3.2.ll |
  25  define void @load(){
  30  ; CHECK: %res1 = load i8, i8* %ptr1
  31  %res1 = load i8, i8* %ptr1
  33  ; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
  34  %res2 = load volatile i8, i8* %ptr1
  36  ; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
  37  %res3 = load i8, i8* %ptr1, align 1
  39  ; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
  40  %res4 = load volatile i8, i8* %ptr1, align 1
  42  ; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
  [all …]
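This bitcode-compatibility test enumerates the load variants the 3.2 format must keep round-tripping: plain, volatile, explicitly aligned, and metadata-annotated. Atomic loads add an ordering keyword and a mandatory alignment; a sketch of those remaining forms, assuming the same %ptr1-style setup as the snippet (names are illustrative):

    define void @load_forms_sketch(i8* %ptr1) {
      %r1 = load i8, i8* %ptr1, !nontemporal !0           ; metadata-annotated load
      %r2 = load atomic i8, i8* %ptr1 unordered, align 1  ; orderings valid on load:
      %r3 = load atomic i8, i8* %ptr1 monotonic, align 1  ; unordered, monotonic,
      %r4 = load atomic i8, i8* %ptr1 acquire, align 1    ; acquire,
      %r5 = load atomic i8, i8* %ptr1 seq_cst, align 1    ; seq_cst
      ret void
    }
    !0 = !{i32 1}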
|