; RUN: llc -verify-machineinstrs -o - %s -mtriple=arm64 | FileCheck %s

; Global scratch variables: each test loads its operands from and stores its
; result to these, so the shift/divide values are never constant-foldable.
@var32_0 = global i32 0
@var32_1 = global i32 0
@var64_0 = global i64 0
@var64_1 = global i64 0
7
; A 64-bit rotate-right by a variable amount, written as
; (x >> n) | (x << (64 - n)), should select to a single ROR/RORV.
define void @rorv_i64() {
; CHECK-LABEL: rorv_i64:
    %val0_tmp = load i64, i64* @var64_0
    %val1_tmp = load i64, i64* @var64_1
    %val2_tmp = sub i64 64, %val1_tmp
    %val3_tmp = shl i64 %val0_tmp, %val2_tmp
    %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
    %val5_tmp = or i64 %val3_tmp, %val4_tmp
; CHECK: {{ror|rorv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
    store volatile i64 %val5_tmp, i64* @var64_0
    ret void
}
20
; Variable arithmetic shift right (64-bit) should select to ASR/ASRV.
define void @asrv_i64() {
; CHECK-LABEL: asrv_i64:
    %val0_tmp = load i64, i64* @var64_0
    %val1_tmp = load i64, i64* @var64_1
    %val4_tmp = ashr i64 %val0_tmp, %val1_tmp
; CHECK: {{asr|asrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
    store volatile i64 %val4_tmp, i64* @var64_1
    ret void
}
30
; Variable logical shift right (64-bit) should select to LSR/LSRV.
define void @lsrv_i64() {
; CHECK-LABEL: lsrv_i64:
    %val0_tmp = load i64, i64* @var64_0
    %val1_tmp = load i64, i64* @var64_1
    %val4_tmp = lshr i64 %val0_tmp, %val1_tmp
; CHECK: {{lsr|lsrv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
    store volatile i64 %val4_tmp, i64* @var64_0
    ret void
}
40
; Variable logical shift left (64-bit) should select to LSL/LSLV.
define void @lslv_i64() {
; CHECK-LABEL: lslv_i64:
    %val0_tmp = load i64, i64* @var64_0
    %val1_tmp = load i64, i64* @var64_1
    %val4_tmp = shl i64 %val0_tmp, %val1_tmp
; CHECK: {{lsl|lslv}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
    store volatile i64 %val4_tmp, i64* @var64_1
    ret void
}
50
; 64-bit unsigned division should select to UDIV.
define void @udiv_i64() {
; CHECK-LABEL: udiv_i64:
    %val0_tmp = load i64, i64* @var64_0
    %val1_tmp = load i64, i64* @var64_1
    %val4_tmp = udiv i64 %val0_tmp, %val1_tmp
; CHECK: udiv	{{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
    store volatile i64 %val4_tmp, i64* @var64_0
    ret void
}
60
; 64-bit signed division should select to SDIV.
define void @sdiv_i64() {
; CHECK-LABEL: sdiv_i64:
    %val0_tmp = load i64, i64* @var64_0
    %val1_tmp = load i64, i64* @var64_1
    %val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
; CHECK: sdiv	{{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
    store volatile i64 %val4_tmp, i64* @var64_1
    ret void
}
70
71
; Variable logical shift right (32-bit). The `add 1` keeps the shift amount
; non-constant so the variable-shift instruction must be used.
define void @lsrv_i32() {
; CHECK-LABEL: lsrv_i32:
    %val0_tmp = load i32, i32* @var32_0
    %val1_tmp = load i32, i32* @var32_1
    %val2_tmp = add i32 1, %val1_tmp
    %val4_tmp = lshr i32 %val0_tmp, %val2_tmp
; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
    store volatile i32 %val4_tmp, i32* @var32_0
    ret void
}
82
; Variable logical shift left (32-bit) should select to LSL/LSLV.
define void @lslv_i32() {
; CHECK-LABEL: lslv_i32:
    %val0_tmp = load i32, i32* @var32_0
    %val1_tmp = load i32, i32* @var32_1
    %val2_tmp = add i32 1, %val1_tmp
    %val4_tmp = shl i32 %val0_tmp, %val2_tmp
; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
    store volatile i32 %val4_tmp, i32* @var32_1
    ret void
}
93
; A 32-bit rotate-right by a variable amount, written as
; (x >> n) | (x << (32 - n)), should select to a single ROR/RORV.
define void @rorv_i32() {
; CHECK-LABEL: rorv_i32:
    %val0_tmp = load i32, i32* @var32_0
    %val6_tmp = load i32, i32* @var32_1
    %val1_tmp = add i32 1, %val6_tmp
    %val2_tmp = sub i32 32, %val1_tmp
    %val3_tmp = shl i32 %val0_tmp, %val2_tmp
    %val4_tmp = lshr i32 %val0_tmp, %val1_tmp
    %val5_tmp = or i32 %val3_tmp, %val4_tmp
; CHECK: {{ror|rorv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
    store volatile i32 %val5_tmp, i32* @var32_0
    ret void
}
107
; Variable arithmetic shift right (32-bit) should select to ASR/ASRV.
define void @asrv_i32() {
; CHECK-LABEL: asrv_i32:
    %val0_tmp = load i32, i32* @var32_0
    %val1_tmp = load i32, i32* @var32_1
    %val2_tmp = add i32 1, %val1_tmp
    %val4_tmp = ashr i32 %val0_tmp, %val2_tmp
; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
    store volatile i32 %val4_tmp, i32* @var32_1
    ret void
}
118
; 32-bit signed division should select to SDIV.
define void @sdiv_i32() {
; CHECK-LABEL: sdiv_i32:
    %val0_tmp = load i32, i32* @var32_0
    %val1_tmp = load i32, i32* @var32_1
    %val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
; CHECK: sdiv	{{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
    store volatile i32 %val4_tmp, i32* @var32_1
    ret void
}
128
; 32-bit unsigned division should select to UDIV.
define void @udiv_i32() {
; CHECK-LABEL: udiv_i32:
    %val0_tmp = load i32, i32* @var32_0
    %val1_tmp = load i32, i32* @var32_1
    %val4_tmp = udiv i32 %val0_tmp, %val1_tmp
; CHECK: udiv	{{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
    store volatile i32 %val4_tmp, i32* @var32_0
    ret void
}
138
; The point of this test is that we may not actually see (shl GPR32:$Val, (zext GPR32:$Val2))
; in the DAG (the RHS may be natively 64-bit), but we should still use the lsl instructions.
define i32 @test_lsl32() {
; CHECK-LABEL: test_lsl32:

  %val = load i32, i32* @var32_0
  %ret = shl i32 1, %val
; CHECK: {{lsl|lslv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}

  ret i32 %ret
}
150
; As test_lsl32, but for a variable logical shift right of a constant LHS.
define i32 @test_lsr32() {
; CHECK-LABEL: test_lsr32:

  %val = load i32, i32* @var32_0
  %ret = lshr i32 1, %val
; CHECK: {{lsr|lsrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}

  ret i32 %ret
}
160
; As test_lsl32, but for a variable arithmetic shift right of an argument.
define i32 @test_asr32(i32 %in) {
; CHECK-LABEL: test_asr32:

  %val = load i32, i32* @var32_0
  %ret = ashr i32 %in, %val
; CHECK: {{asr|asrv}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}

  ret i32 %ret
}
170