; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu                       -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST
; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY

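; The first RUN line uses the default (fast) RegBankSelect mode; the second
; enables greedy mode via -regbankselect-greedy. Checks common to both outputs
; use the ALL prefix, while SSE_FAST/SSE_GREEDY cover the cases that differ.
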
define i1 @test_load_i1(i1 * %p1) {
; ALL-LABEL: test_load_i1:
; ALL:       # %bb.0:
; ALL-NEXT:    movb (%rdi), %al
; ALL-NEXT:    retq
  %r = load i1, i1* %p1
  ret i1 %r
}

define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
; ALL:       # %bb.0:
; ALL-NEXT:    movb (%rdi), %al
; ALL-NEXT:    retq
  %r = load i8, i8* %p1
  ret i8 %r
}

define i16 @test_load_i16(i16 * %p1) {
; ALL-LABEL: test_load_i16:
; ALL:       # %bb.0:
; ALL-NEXT:    movzwl (%rdi), %eax
; ALL-NEXT:    retq
  %r = load i16, i16* %p1
  ret i16 %r
}

define i32 @test_load_i32(i32 * %p1) {
; ALL-LABEL: test_load_i32:
; ALL:       # %bb.0:
; ALL-NEXT:    movl (%rdi), %eax
; ALL-NEXT:    retq
  %r = load i32, i32* %p1
  ret i32 %r
}

define i64 @test_load_i64(i64 * %p1) {
; ALL-LABEL: test_load_i64:
; ALL:       # %bb.0:
; ALL-NEXT:    movq (%rdi), %rax
; ALL-NEXT:    retq
  %r = load i64, i64* %p1
  ret i64 %r
}

define float @test_load_float(float * %p1) {
; SSE-LABEL: test_load_float:
; SSE:       # %bb.0:
; SSE-NEXT:    movl (%rdi), %eax
; SSE-NEXT:    movd %eax, %xmm0
; SSE-NEXT:    retq
;
; ALL-LABEL: test_load_float:
; ALL:       # %bb.0:
; ALL-NEXT:    movl (%rdi), %eax
; ALL-NEXT:    movd %eax, %xmm0
; ALL-NEXT:    retq
  %r = load float, float* %p1
  ret float %r
}

define double @test_load_double(double * %p1) {
; SSE-LABEL: test_load_double:
; SSE:       # %bb.0:
; SSE-NEXT:    movq (%rdi), %rax
; SSE-NEXT:    movq %rax, %xmm0
; SSE-NEXT:    retq
;
; ALL-LABEL: test_load_double:
; ALL:       # %bb.0:
; ALL-NEXT:    movq (%rdi), %rax
; ALL-NEXT:    movq %rax, %xmm0
; ALL-NEXT:    retq
  %r = load double, double* %p1
  ret double %r
}

define i1 * @test_store_i1(i1 %val, i1 * %p1) {
; ALL-LABEL: test_store_i1:
; ALL:       # %bb.0:
; ALL-NEXT:    movq %rsi, %rax
; ALL-NEXT:    andb $1, %dil
; ALL-NEXT:    movb %dil, (%rsi)
; ALL-NEXT:    retq
  store i1 %val, i1* %p1
  ret i1 * %p1;
}

define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
; ALL:       # %bb.0:
; ALL-NEXT:    movq %rsi, %rax
; ALL-NEXT:    movl %edi, (%rsi)
; ALL-NEXT:    retq
  store i32 %val, i32* %p1
  ret i32 * %p1;
}

define i64 * @test_store_i64(i64 %val, i64 * %p1) {
; ALL-LABEL: test_store_i64:
; ALL:       # %bb.0:
; ALL-NEXT:    movq %rsi, %rax
; ALL-NEXT:    movq %rdi, (%rsi)
; ALL-NEXT:    retq
  store i64 %val, i64* %p1
  ret i64 * %p1;
}

define float * @test_store_float(float %val, float * %p1) {
;
; SSE_FAST-LABEL: test_store_float:
; SSE_FAST:       # %bb.0:
; SSE_FAST-NEXT:    movq %rdi, %rax
; SSE_FAST-NEXT:    movd %xmm0, %ecx
; SSE_FAST-NEXT:    movl %ecx, (%rdi)
; SSE_FAST-NEXT:    retq
;
; SSE_GREEDY-LABEL: test_store_float:
; SSE_GREEDY:       # %bb.0:
; SSE_GREEDY-NEXT:    movq %rdi, %rax
; SSE_GREEDY-NEXT:    movss %xmm0, (%rdi)
; SSE_GREEDY-NEXT:    retq
  store float %val, float* %p1
  ret float * %p1;
}

define double * @test_store_double(double %val, double * %p1) {
;
; SSE_FAST-LABEL: test_store_double:
; SSE_FAST:       # %bb.0:
; SSE_FAST-NEXT:    movq %rdi, %rax
; SSE_FAST-NEXT:    movq %xmm0, %rcx
; SSE_FAST-NEXT:    movq %rcx, (%rdi)
; SSE_FAST-NEXT:    retq
;
; SSE_GREEDY-LABEL: test_store_double:
; SSE_GREEDY:       # %bb.0:
; SSE_GREEDY-NEXT:    movq %rdi, %rax
; SSE_GREEDY-NEXT:    movsd %xmm0, (%rdi)
; SSE_GREEDY-NEXT:    retq
  store double %val, double* %p1
  ret double * %p1;
}

define i32* @test_load_ptr(i32** %ptr1) {
; ALL-LABEL: test_load_ptr:
; ALL:       # %bb.0:
; ALL-NEXT:    movq (%rdi), %rax
; ALL-NEXT:    retq
  %p = load i32*, i32** %ptr1
  ret i32* %p
}

define void @test_store_ptr(i32** %ptr1, i32* %a) {
; ALL-LABEL: test_store_ptr:
; ALL:       # %bb.0:
; ALL-NEXT:    movq %rsi, (%rdi)
; ALL-NEXT:    retq
  store i32* %a, i32** %ptr1
  ret void
}

define i32 @test_gep_folding(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding:
; ALL:       # %bb.0:
; ALL-NEXT:    movl %esi, 20(%rdi)
; ALL-NEXT:    movl 20(%rdi), %eax
; ALL-NEXT:    retq
  %arrayidx = getelementptr i32, i32* %arr, i32 5
  store i32 %val, i32* %arrayidx
  %r = load i32, i32* %arrayidx
  ret i32 %r
}

; Check that the gep index isn't folded into the memory operand.
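; The byte offset is 57179869180 * 4 = 228719476720 (0x3540BE3FF0), which does
; not fit in a signed 32-bit displacement, so it is materialized with movabsq
; and added to the base pointer separately.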
define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding_largeGepIndex:
; ALL:       # %bb.0:
; ALL-NEXT:    movabsq $228719476720, %rax # imm = 0x3540BE3FF0
; ALL-NEXT:    addq %rdi, %rax
; ALL-NEXT:    movl %esi, (%rax)
; ALL-NEXT:    movl (%rax), %eax
; ALL-NEXT:    retq
  %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
  store i32 %val, i32* %arrayidx
  %r = load i32, i32* %arrayidx
  ret i32 %r
}