Search results for refs:gep2: 1 – 25 of 108, sorted by relevance

/external/swiftshader/third_party/subzero/tests_lit/llvm2ice_tests/
addr-opt-multi-def-var.ll
33 %gep2 = add i32 %1, %gep_array
34 %gep2.asptr = inttoptr i32 %gep2 to i32*
35 %2 = load i32, i32* %gep2.asptr, align 1
39 ; [ 15] %__13 = load i32, i32* %gep2.asptr, align 1
40 ; Instruction: [ 14] %gep2.asptr = i32 %gep2
41 ; results in Base=%gep2, Index=<null>, Shift=0, Offset=0
42 ; Instruction: [ 13] %gep2 = add i32 %__9, %gep_array
51 ; [ 15] %__13 = load i32, i32* %gep2.asptr, align 1
52 ; Instruction: [ 14] %gep2.asptr = i32 %gep2
53 ; results in Base=%gep2, Index=<null>, Shift=0, Offset=0
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
falkor-hwpf.ll
8 ; CHECK: load i32, i32* %gep2, !falkor.strided.access !0
12 ; NOHWPF: load i32, i32* %gep2{{$}}
23 %gep2 = getelementptr inbounds i32, i32* %p2, i32 %iv
24 %load2 = load i32, i32* %gep2
37 ; CHECK: load i32, i32* %gep2{{$}}
41 ; NOHWPF: load i32, i32* %gep2{{$}}
65 %gep2 = getelementptr inbounds i32, i32* %p, i32 %iv1
66 %load2 = load i32, i32* %gep2
82 ; CHECK: load i32, i32* %gep2{{$}}
86 ; NOHWPF: load i32, i32* %gep2{{$}}
[all …]
ldst-opt.ll
296 %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
299 %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
314 %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
317 %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
332 %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
335 %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
350 %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
353 %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
368 %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
371 %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/BasicAA/
sequential-gep.ll
4 ; CHECK: NoAlias: i32* %gep1, i32* %gep2
9 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %add
14 ; CHECK: MayAlias: i32* %gep1, i32* %gep2
19 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 0, i32 %add
24 ; CHECK: MustAlias: i32* %gep1, i32* %gep2
29 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 0, i32 %add
34 ; CHECK: MayAlias: i32* %gep1, i32* %gep2
39 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 %add, i32 %add
44 ; CHECK: MayAlias: i32* %gep2, i64* %bc
49 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %add
/external/llvm/test/Analysis/BasicAA/
sequential-gep.ll
4 ; CHECK: NoAlias: i32* %gep1, i32* %gep2
9 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %add
14 ; CHECK: PartialAlias: i32* %gep1, i32* %gep2
19 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 0, i32 %add
24 ; CHECK: MustAlias: i32* %gep1, i32* %gep2
29 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 0, i32 %add
34 ; CHECK: PartialAlias: i32* %gep1, i32* %gep2
39 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 %add, i32 %add
44 ; CHECK: PartialAlias: i32* %gep2, i64* %bc
49 %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %add
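For orientation, the sequential-GEP pattern these two BasicAA tests exercise reduces to roughly the function below. This is a hypothetical sketch in the typed-pointer syntax of the trees above: only the %gep2 lines and the CHECK results come from the hits, while the function name, signature, and %gep1 indices are chosen for illustration. Note that the same test expects MayAlias in the llvm-7.0 tree but PartialAlias in the older /external/llvm tree for two of the queries.

define i32 @gep2_sequential([8 x i32]* %p, i32 %j) {
  %add = add nsw i32 %j, 1
  ; %gep1 is illustrative; the snippets above only show %gep2
  %gep1 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 1
  %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %add
  %v1 = load i32, i32* %gep1
  %v2 = load i32, i32* %gep2
  %sum = add i32 %v1, %v2
  ret i32 %sum
}

Tests of this shape are typically driven with opt -aa-eval -print-all-alias-modref-info, which prints the NoAlias/MayAlias/PartialAlias/MustAlias lines that the CHECK directives match.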
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
select-gep.ll
12 %gep2 = getelementptr i32, i32* %q, i64 4
14 %select = select i1 %cmp, i32* %gep1, i32* %gep2
26 %gep2 = getelementptr i32, i32* %q, i64 4
28 %select = select i1 %cmp, i32* %gep1, i32* %gep2
40 %gep2 = getelementptr inbounds i32, i32* %q, i64 4
42 %select = select i1 %cmp, i32* %gep1, i32* %gep2
54 %gep2 = getelementptr inbounds i32, i32* %q, i64 4
56 %select = select i1 %cmp, i32* %gep1, i32* %gep2
68 %gep2 = getelementptr inbounds i32, i32* %p, i64 %y
70 %select = select i1 %cmp, i32* %gep1, i32* %gep2
[all …]
icmp-custom-dl.ll
18 %gep2 = getelementptr inbounds i8, i8* %foo, i64 10
20 %cmp = icmp ult i8* %cast1, %gep2
36 %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 10
38 %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
54 %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
56 %cmp = icmp ult i8* %cast1, %gep2
70 %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 %j
72 %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
88 %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
90 %cmp = icmp ult i8* %cast1, %gep2
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/ARM/
sink-addrmode.ll
30 ; CHECK-NOT: phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
41 %gep2 = getelementptr inbounds i32, i32* %ptr, i32 2
45 %phi = phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
72 ; CHECK-NOT: phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
83 %gep2 = getelementptr inbounds i32, i32* %ptr, i32 %off2
87 %phi = phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
95 ; CHECK-NOT: phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
106 %gep2 = getelementptr inbounds i32, i32* %ptr2, i32 1
110 %phi = phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
118 ; CHECK-NOT: phi i32* [ %gep1, %if.then ], [ %gep2, %if.else ]
[all …]
/external/llvm/test/Transforms/LoadStoreVectorizer/X86/
subchain-interleaved.ll
16 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
23 %l4 = load i32, i32* %next.gep2, align 4
36 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
43 %l4 = load i32, i32* %next.gep2, align 4
55 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
61 %l4 = load i32, i32* %next.gep2, align 4
74 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
80 store i32 0, i32* %next.gep2, align 4
82 %l4 = load i32, i32* %next.gep2, align 4
/external/llvm/test/CodeGen/AArch64/
ldst-opt.ll
295 %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
298 %retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
313 %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
316 %retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
331 %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
334 %retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
349 %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
352 %retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
367 %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
370 %retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
[all …]
/external/llvm/test/CodeGen/AMDGPU/
frem.ll
20 %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
22 %r1 = load float, float addrspace(1)* %gep2, align 4
39 %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
41 %r1 = load float, float addrspace(1)* %gep2, align 4
85 %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
87 %r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
95 %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
97 %r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
105 %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
107 %r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
mad-sub.ll
19 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
23 %c = load volatile float, float addrspace(1)* %gep2, align 4
43 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
47 %c = load volatile float, float addrspace(1)* %gep2, align 4
64 %gep2 = getelementptr double, double addrspace(1)* %ptr, i64 %add2
68 %c = load volatile double, double addrspace(1)* %gep2, align 8
88 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
92 %c = load volatile float, float addrspace(1)* %gep2, align 4
113 %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
117 %c = load volatile float, float addrspace(1)* %gep2, align 4
[all …]
min3.ll
11 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
15 %c = load i32, i32 addrspace(1)* %gep2, align 4
30 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
34 %c = load i32, i32 addrspace(1)* %gep2, align 4
51 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
62 %c = load i32, i32 addrspace(1)* %gep2, align 4
85 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
96 %c = load i32, i32 addrspace(1)* %gep2, align 4
promote-alloca-no-opts.ll
15 %gep2 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
16 %load = load i32, i32* %gep2
31 %gep2 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
32 %load = load i32, i32* %gep2
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
frem.ll
20 %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
22 %r1 = load float, float addrspace(1)* %gep2, align 4
38 %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
40 %r1 = load float, float addrspace(1)* %gep2, align 4
84 %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
86 %r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
94 %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
96 %r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
104 %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
106 %r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
min3.ll
11 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
15 %c = load i32, i32 addrspace(1)* %gep2
30 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
34 %c = load i32, i32 addrspace(1)* %gep2
51 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
62 %c = load i32, i32 addrspace(1)* %gep2
85 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
96 %c = load i32, i32 addrspace(1)* %gep2
124 %gep2 = getelementptr i16, i16 addrspace(1)* %cptr, i32 %tid
128 %c = load i16, i16 addrspace(1)* %gep2
[all …]
max3.ll
11 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
15 %c = load i32, i32 addrspace(1)* %gep2
30 %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
34 %c = load i32, i32 addrspace(1)* %gep2
54 %gep2 = getelementptr i16, i16 addrspace(1)* %cptr, i32 %tid
58 %c = load i16, i16 addrspace(1)* %gep2
78 %gep2 = getelementptr i16, i16 addrspace(1)* %cptr, i32 %tid
82 %c = load i16, i16 addrspace(1)* %gep2
fmed3.ll
176 %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
180 %c = load volatile float, float addrspace(1)* %gep2
199 %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
203 %c = load volatile float, float addrspace(1)* %gep2
222 %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
226 %c = load volatile float, float addrspace(1)* %gep2
245 %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
249 %c = load volatile float, float addrspace(1)* %gep2
274 %gep2 = getelementptr float, float addrspace(1)* %cptr, i32 %tid
278 %c = load volatile float, float addrspace(1)* %gep2
[all …]
local-stack-slot-offset.ll
22 %gep2.store = getelementptr [513 x float], [513 x float] addrspace(5)* %m2, i32 0, i32 %idx2
23 store float %v2, float addrspace(5)* %gep2.store
30 %gep2.load = getelementptr [513 x float], [513 x float] addrspace(5)* %m2, i32 0, i32 512
31 %out2 = load float, float addrspace(5)* %gep2.load
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoadStoreVectorizer/X86/
subchain-interleaved.ll
16 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
23 %l4 = load i32, i32* %next.gep2, align 4
36 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
43 %l4 = load i32, i32* %next.gep2, align 4
55 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
61 %l4 = load i32, i32* %next.gep2, align 4
74 %next.gep2 = getelementptr i32, i32* %ptr, i64 2
80 store i32 0, i32* %next.gep2, align 4
82 %l4 = load i32, i32* %next.gep2, align 4
/external/swiftshader/third_party/LLVM/test/Transforms/LoopStrengthReduce/
dont_insert_redundant_ops.ll
13 %gep2 = getelementptr { i32, i32 }* %P, i32 %INDVAR, i32 1 ; <i32*> [#uses=1]
14 store i32 0, i32* %gep2
29 %gep2 = getelementptr [2 x i32]* %P, i32 %INDVAR, i64 1 ; <i32*> [#uses=1]
30 store i32 0, i32* %gep2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopStrengthReduce/
dont_insert_redundant_ops.ll
17 %gep2 = getelementptr { i32, i32 }, { i32, i32 }* %P, i32 %INDVAR, i32 1 ; <i32*> [#uses=1]
18 store i32 0, i32* %gep2
33 %gep2 = getelementptr [2 x i32], [2 x i32]* %P, i32 %INDVAR, i64 1 ; <i32*> [#uses=1]
34 store i32 0, i32* %gep2
/external/llvm/test/Transforms/LoopStrengthReduce/
dont_insert_redundant_ops.ll
17 %gep2 = getelementptr { i32, i32 }, { i32, i32 }* %P, i32 %INDVAR, i32 1 ; <i32*> [#uses=1]
18 store i32 0, i32* %gep2
33 %gep2 = getelementptr [2 x i32], [2 x i32]* %P, i32 %INDVAR, i64 1 ; <i32*> [#uses=1]
34 store i32 0, i32* %gep2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SROA/
fca.ll
18 %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
19 store i32 %y, i32* %gep2
42 %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
43 store i32 %y, i32* %gep2
/external/llvm/test/Transforms/SROA/
fca.ll
18 %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
19 store i32 %y, i32* %gep2
42 %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
43 store i32 %y, i32* %gep2
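
The two SROA fca.ll hits above come from a first-class-aggregate pattern along the lines of the sketch below. Only the %gep2 GEP and store lines are taken from the matches; the rest (the function name, the %gep1 store, the final aggregate load) is a hypothetical reduction added for context, under the assumption that SROA is expected to scalarize the alloca away.

define { i32, i32 } @fca_sketch(i32 %x, i32 %y) {
entry:
  %a = alloca { i32, i32 }
  ; store each field through a struct GEP, then reload the whole aggregate
  %gep1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 0
  store i32 %x, i32* %gep1
  %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
  store i32 %y, i32* %gep2
  %result = load { i32, i32 }, { i32, i32 }* %a
  ret { i32, i32 } %result
}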
