; RUN: llc < %s -mtriple=x86_64-linux -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32

; GEP indices are interpreted as signed integers, so they
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
; An i32 GEP index on a 64-bit target must be sign-extended (movslq)
; before it can be used as the index register of the addressing mode.
define i32 @test1(i32 %t3, i32* %t1) nounwind {
       %t9 = getelementptr i32* %t1, i32 %t3           ; <i32*> [#uses=1]
       %t15 = load i32* %t9            ; <i32> [#uses=1]
       ret i32 %t15
; X32: test1:
; X32:  	movl	(%eax,%ecx,4), %eax
; X32:  	ret

; X64: test1:
; X64:  	movslq	%e[[A0:di|cx]], %rax
; X64:  	movl	(%r[[A1:si|dx]],%rax,4), %eax
; X64:  	ret

}
; An i64 GEP index is already pointer-width on x86-64: no extension
; instruction should be emitted, the index is folded directly.
; [[A0]]/[[A1]] reuse the argument-register captures from test1, so this
; test also covers both the linux and win32 calling conventions.
define i32 @test2(i64 %t3, i32* %t1) nounwind {
       %t9 = getelementptr i32* %t1, i64 %t3           ; <i32*> [#uses=1]
       %t15 = load i32* %t9            ; <i32> [#uses=1]
       ret i32 %t15
; X32: test2:
; X32:  	movl	(%edx,%ecx,4), %e
; X32:  	ret

; X64: test2:
; X64:  	movl	(%r[[A1]],%r[[A0]],4), %eax
; X64:  	ret
}


; PR4984
; A negative constant GEP offset should be folded into the load's
; displacement (-2(...)), not materialized separately.
define i8 @test3(i8* %start) nounwind {
entry:
  %A = getelementptr i8* %start, i64 -2               ; <i8*> [#uses=1]
  %B = load i8* %A, align 1                       ; <i8> [#uses=1]
  ret i8 %B


; X32: test3:
; X32:  	movl	4(%esp), %eax
; X32:  	movb	-2(%eax), %al
; X32:  	ret

; X64: test3:
; X64:  	movb	-2(%r[[A0]]), %al
; X64:  	ret

}

; A constant added to the index (%x + 16, scaled by 8) should be folded
; into the displacement: 16 * 8 = 128(base,index,8).
define double @test4(i64 %x, double* %p) nounwind {
entry:
  %x.addr = alloca i64, align 8                   ; <i64*> [#uses=2]
  %p.addr = alloca double*, align 8               ; <double**> [#uses=2]
  store i64 %x, i64* %x.addr
  store double* %p, double** %p.addr
  %tmp = load i64* %x.addr                        ; <i64> [#uses=1]
  %add = add nsw i64 %tmp, 16                     ; <i64> [#uses=1]
  %tmp1 = load double** %p.addr                   ; <double*> [#uses=1]
  %arrayidx = getelementptr inbounds double* %tmp1, i64 %add ; <double*> [#uses=1]
  %tmp2 = load double* %arrayidx                  ; <double> [#uses=1]
  ret double %tmp2

; X32: test4:
; X32: 128(%e{{.*}},%e{{.*}},8)
; X64: test4:
; X64: 128(%r{{.*}},%r{{.*}},8)
}

; PR8961 - Make sure the sext for the GEP addressing comes before the load that
; is folded.
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
  %v8 = getelementptr i8* %A, i32 %I
  %v9 = bitcast i8* %v8 to i64*
  %v10 = load i64* %v9
  %v11 = add i64 %B, %v10
  ret i64 %v11
; X64: test5:
; X64: movslq	%e[[A1]], %rax
; X64-NEXT: movq	(%r[[A0]],%rax), %rax
; X64-NEXT: addq	%{{rdx|r8}}, %rax
; X64-NEXT: ret
}

; PR9500, rdar://9156159 - Don't do non-local address mode folding,
; because it may require values which wouldn't otherwise be live out
; of their blocks.
; (%dec is defined in %if.end but used for addressing in %invoke.cont16,
; across an invoke; folding it would extend its live range illegally.)
define void @test6() {
if.end:                                           ; preds = %if.then, %invoke.cont
  %tmp15 = load i64* undef
  %dec = add i64 %tmp15, 13
  store i64 %dec, i64* undef
  %call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
          to label %invoke.cont16 unwind label %lpad

invoke.cont16:                                    ; preds = %if.then14
  %arrayidx18 = getelementptr inbounds i8* %call17, i64 %dec
  store i8 0, i8* %arrayidx18
  unreachable

lpad:                                             ; preds = %if.end19, %if.then14, %if.end, %entry
  unreachable
}
; External callee for test6 (mangled: G__FastAllocString::data() const).
declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind