; CellSPU store-instruction codegen tests (stores.ll)
; RUN: llc < %s -march=cellspu > %t1.s
; RUN: grep {stqd.*0(\$3)}      %t1.s | count 4
; RUN: grep {stqd.*16(\$3)}     %t1.s | count 4
; RUN: grep 16256               %t1.s | count 2
; RUN: grep 16384               %t1.s | count 1
; RUN: grep 771                 %t1.s | count 4
; RUN: grep 515                 %t1.s | count 2
; RUN: grep 1799                %t1.s | count 2
; RUN: grep 1543                %t1.s | count 5
; RUN: grep 1029                %t1.s | count 3
; RUN: grep {shli.*, 4}         %t1.s | count 4
; RUN: grep stqx                %t1.s | count 4
; RUN: grep ilhu                %t1.s | count 11
; RUN: grep iohl                %t1.s | count 8
; RUN: grep shufb               %t1.s | count 15
; RUN: grep frds                %t1.s | count 1
; RUN: llc < %s -march=cellspu | FileCheck %s
; ModuleID = 'stores.bc'
; Big-endian SPU layout: every scalar/vector type is padded to 128-bit
; preferred alignment, matching the 16-byte quadword memory of CellSPU.
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"
; Store a constant <16 x i8> pattern at the base pointer (offset 0).
define void @store_v16i8_1(<16 x i8>* %a) nounwind {
entry:
	store <16 x i8> < i8 1, i8 2, i8 1, i8 1, i8 1, i8 2, i8 1, i8 1, i8 1, i8 2, i8 1, i8 1, i8 1, i8 2, i8 1, i8 1 >, <16 x i8>* %a
	ret void
}
; Store a constant <16 x i8> splat at a fixed element offset (+1 vector,
; i.e. byte offset 16 — exercises the d-form stqd with displacement).
define void @store_v16i8_2(<16 x i8>* %a) nounwind {
entry:
	%arrayidx = getelementptr <16 x i8>* %a, i32 1
	store <16 x i8> < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 >, <16 x i8>* %arrayidx
	ret void
}
; Store a constant <16 x i8> splat at a variable index — the index must be
; scaled by 16 (shli ..., 4) and lowered to an x-form stqx store.
define void @store_v16i8_3(<16 x i8>* %a, i32 %i) nounwind {
entry:
	%arrayidx = getelementptr <16 x i8>* %a, i32 %i
	store <16 x i8> < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 >, <16 x i8>* %arrayidx
	ret void
}
; Store a constant <8 x i16> pattern at the base pointer (offset 0).
define void @store_v8i16_1(<8 x i16>* %a) nounwind {
entry:
	store <8 x i16> < i16 1, i16 2, i16 1, i16 1, i16 1, i16 2, i16 1, i16 1 >, <8 x i16>* %a
	ret void
}
; Store a constant <8 x i16> splat at a fixed +1 vector offset (byte 16).
; NOTE(review): the GEP index is i16 here while the sibling tests use i32 —
; presumably intentional, to exercise a non-i32 index type; confirm.
define void @store_v8i16_2(<8 x i16>* %a) nounwind {
entry:
	%arrayidx = getelementptr <8 x i16>* %a, i16 1
	store <8 x i16> < i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2 >, <8 x i16>* %arrayidx
	ret void
}
; Store a constant <8 x i16> splat at a variable index (scaled, stqx form).
define void @store_v8i16_3(<8 x i16>* %a, i32 %i) nounwind {
entry:
	%arrayidx = getelementptr <8 x i16>* %a, i32 %i
	store <8 x i16> < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 >, <8 x i16>* %arrayidx
	ret void
}
; Store a constant <4 x i32> pattern at the base pointer (offset 0).
define void @store_v4i32_1(<4 x i32>* %a) nounwind {
entry:
	store <4 x i32> < i32 1, i32 2, i32 1, i32 1 >, <4 x i32>* %a
	ret void
}
; Store a constant <4 x i32> splat at a fixed +1 vector offset (byte 16).
define void @store_v4i32_2(<4 x i32>* %a) nounwind {
entry:
	%arrayidx = getelementptr <4 x i32>* %a, i32 1
	store <4 x i32> < i32 2, i32 2, i32 2, i32 2 >, <4 x i32>* %arrayidx
	ret void
}
; Store a constant <4 x i32> splat at a variable index (scaled, stqx form).
define void @store_v4i32_3(<4 x i32>* %a, i32 %i) nounwind {
entry:
	%arrayidx = getelementptr <4 x i32>* %a, i32 %i
	store <4 x i32> < i32 1, i32 1, i32 1, i32 1 >, <4 x i32>* %arrayidx
	ret void
}
; Store a constant <4 x float> splat of 1.0 at the base pointer (offset 0).
define void @store_v4f32_1(<4 x float>* %a) nounwind {
entry:
	store <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x float>* %a
	ret void
}
; Store a constant <4 x float> splat of 2.0 at a fixed +1 vector offset.
define void @store_v4f32_2(<4 x float>* %a) nounwind {
entry:
	%arrayidx = getelementptr <4 x float>* %a, i32 1
	store <4 x float> < float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00 >, <4 x float>* %arrayidx
	ret void
}
; Store a constant <4 x float> splat at a variable index (scaled, stqx form).
define void @store_v4f32_3(<4 x float>* %a, i32 %i) nounwind {
entry:
	%arrayidx = getelementptr <4 x float>* %a, i32 %i
	store <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x float>* %arrayidx
	ret void
}
; Test truncating stores:
; Truncating store i16 -> i8; also returns the truncated value (zero-extended).
define zeroext i8 @tstore_i16_i8(i16 signext %val, i8* %dest) nounwind {
entry:
	%conv = trunc i16 %val to i8
	store i8 %conv, i8* %dest
	ret i8 %conv
}
; Truncating store i32 -> i8; also returns the truncated value (zero-extended).
define zeroext i8 @tstore_i32_i8(i32 %val, i8* %dest) nounwind {
entry:
	%conv = trunc i32 %val to i8
	store i8 %conv, i8* %dest
	ret i8 %conv
}
; Truncating store i32 -> i16; also returns the truncated value (sign-extended).
define signext i16 @tstore_i32_i16(i32 %val, i16* %dest) nounwind {
entry:
	%conv = trunc i32 %val to i16
	store i16 %conv, i16* %dest
	ret i16 %conv
}
; Truncating store i64 -> i8; also returns the truncated value (zero-extended).
define zeroext i8 @tstore_i64_i8(i64 %val, i8* %dest) nounwind {
entry:
	%conv = trunc i64 %val to i8
	store i8 %conv, i8* %dest
	ret i8 %conv
}
; Truncating store i64 -> i16; also returns the truncated value (sign-extended).
define signext i16 @tstore_i64_i16(i64 %val, i16* %dest) nounwind {
entry:
	%conv = trunc i64 %val to i16
	store i16 %conv, i16* %dest
	ret i16 %conv
}
; Truncating store i64 -> i32; also returns the truncated value.
define i32 @tstore_i64_i32(i64 %val, i32* %dest) nounwind {
entry:
	%conv = trunc i64 %val to i32
	store i32 %conv, i32* %dest
	ret i32 %conv
}
; Truncating FP store double -> float (lowers via frds per the RUN checks);
; also returns the rounded value.
define float @tstore_f64_f32(double %val, float* %dest) nounwind {
entry:
	%conv = fptrunc double %val to float
	store float %conv, float* %dest
	ret float %conv
}
; Check stores that might span two 16-byte memory blocks: an under-aligned
; i32 store must be lowered as load-quadword / merge / store-quadword on
; both potentially-touched quadwords.
define void @store_misaligned( i32 %val, i32* %ptr) {
;CHECK: store_misaligned
;CHECK: lqd
;CHECK: lqd
;CHECK: stqd
;CHECK: stqd
;CHECK: bi $lr
	store i32 %val, i32* %ptr, align 2
	ret void
}
; A <8 x float> value spans two 128-bit registers, so storing it must emit
; two quadword stores.
define void @store_v8( <8 x float> %val, <8 x float>* %ptr )
{
;CHECK: stq
;CHECK: stq
;CHECK: bi $lr
	store <8 x float> %val, <8 x float>* %ptr
	ret void
}
; Store through a null pointer constant (address 0 is valid local store on SPU).
define void @store_null_vec( <4 x i32> %val ) {
; FIXME - this is for some reason compiled into a il+stqd, not a sta.
;CHECK: stqd
;CHECK: bi $lr
	store <4 x i32> %val, <4 x i32>* null
	ret void
}