; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails, please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

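; A fixed-size [16 x i32] array alloca is bitcast to a scalable vector
; pointer: the expected output keeps the original alloca type and the bitcast.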
define void @fixed_array16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast [16 x i32]* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca [16 x i32], align 16
  %cast = bitcast [16 x i32]* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

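; The opposite direction: a scalable alloca accessed through a fixed-width
; vector pointer. The bitcast again survives, and the expected output shows
; the scalable alloca's alignment raised to 64.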
define void @scalable4i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 4 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 4 x i32>, align 16
  %cast = bitcast <vscale x 4 x i32>* %tmp to <16 x i32>*
  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

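; As above, but the fixed-size side is a vector type rather than an array.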
define void @fixed16i32_to_scalable4i32(<vscale x 4 x i32>* %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <16 x i32>* [[TMP]] to <vscale x 4 x i32>*
; CHECK-NEXT:    store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* [[CAST]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[RELOAD]], <vscale x 4 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <16 x i32>, align 16
  %cast = bitcast <16 x i32>* %tmp to <vscale x 4 x i32>*
  store volatile <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32>* %cast, align 16
  %reload = load volatile <vscale x 4 x i32>, <vscale x 4 x i32>* %cast, align 16
  store <vscale x 4 x i32> %reload, <vscale x 4 x i32>* %out, align 16
  ret void
}

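; A wider scalable alloca accessed as a fixed-width vector; the alloca type
; is unchanged and the bitcast is preserved.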
define void @scalable16i32_to_fixed16i32(<16 x i32>* %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 16 x i32>* [[TMP]] to <16 x i32>*
; CHECK-NEXT:    store volatile <16 x i32> zeroinitializer, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <16 x i32>, <16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <16 x i32> [[RELOAD]], <16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 16 x i32>, align 16
  %cast = bitcast <vscale x 16 x i32>* %tmp to <16 x i32>*
  store volatile <16 x i32> zeroinitializer, <16 x i32>* %cast, align 16
  %reload = load volatile <16 x i32>, <16 x i32>* %cast, align 16
  store <16 x i32> %reload, <16 x i32>* %out, align 16
  ret void
}

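; Both sides are scalable but differ in element count; the bitcast between
; the two scalable pointer types is preserved.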
define void @scalable32i32_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i32>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i32>, align 16
  %cast = bitcast <vscale x 32 x i32>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

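; Two scalable types with the same total width (<vscale x 32 x i16> and
; <vscale x 16 x i32>): here the expected output rewrites the alloca to the
; access type and drops the bitcast entirely.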
define void @scalable32i16_to_scalable16i32(<vscale x 16 x i32>* %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  ret void
}

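; The same same-sized cast, but the alloca is also loaded directly through
; its original type; with users of both types the alloca keeps its original
; type and the bitcast remains.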
define void @scalable32i16_to_scalable16i32_multiuse(<vscale x 16 x i32>* %out, <vscale x 32 x i16>* %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <vscale x 32 x i16>* [[TMP]] to <vscale x 16 x i32>*
; CHECK-NEXT:    store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* [[CAST]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> [[RELOAD]], <vscale x 16 x i32>* [[OUT:%.*]], align 16
; CHECK-NEXT:    [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* [[TMP]], align 64
; CHECK-NEXT:    store <vscale x 32 x i16> [[RELOAD2]], <vscale x 32 x i16>* [[OUT2:%.*]], align 16
; CHECK-NEXT:    ret void
;
entry:
  %tmp = alloca <vscale x 32 x i16>, align 16
  %cast = bitcast <vscale x 32 x i16>* %tmp to <vscale x 16 x i32>*
  store volatile <vscale x 16 x i32> zeroinitializer, <vscale x 16 x i32>* %cast, align 16
  %reload = load volatile <vscale x 16 x i32>, <vscale x 16 x i32>* %cast, align 16
  store <vscale x 16 x i32> %reload, <vscale x 16 x i32>* %out, align 16
  %reload2 = load volatile <vscale x 32 x i16>, <vscale x 32 x i16>* %tmp, align 16
  store <vscale x 32 x i16> %reload2, <vscale x 32 x i16>* %out2, align 16
  ret void
}