; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s

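; The GEP index is scaled by the i16 element size, so the load should fold it
; directly as a shifted register offset (lsl #1) rather than computing the
; address with a separate add.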
define i8 @test_64bit_add(i16* %a, i64 %b) {
; CHECK-LABEL: test_64bit_add:
; CHECK: ldrh w0, [x0, x1, lsl #1]
; CHECK: ret
  %tmp1 = getelementptr inbounds i16, i16* %a, i64 %b
  %tmp2 = load i16, i16* %tmp1
  %tmp3 = trunc i16 %tmp2 to i8
  ret i8 %tmp3
}

; These tests build sign- and zero-extensions of a 32-bit offset entirely in
; i64 arithmetic, to make sure LLVM still recognises the pattern and folds it
; into the sxtw/uxtw extended-register addressing modes. The volatile loads
; and stores keep each access from being optimised away.
define void @ldst_8bit(i8* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_8bit:

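; shl-by-32 followed by ashr-by-32 is the canonical in-register sign
; extension of the low 32 bits; it should be absorbed into the load as a
; sxtw register offset instead of being computed separately.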
  %off32.sext.tmp = shl i64 %offset, 32
  %off32.sext = ashr i64 %off32.sext.tmp, 32
  %addr8_sxtw = getelementptr i8, i8* %base, i64 %off32.sext
  %val8_sxtw = load volatile i8, i8* %addr8_sxtw
  %val32_signed = sext i8 %val8_sxtw to i32
  store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

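; Masking with 0xffffffff zero-extends the low 32 bits of the offset; this
; should fold into the load as a uxtw register offset.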
  %addrint_uxtw = ptrtoint i8* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
  %val8_uxtw = load volatile i8, i8* %addr_uxtw
  %newval8 = add i8 %val8_uxtw, 1
  store volatile i8 %newval8, i8* @var_8bit
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  ret void
}

define void @ldst_16bit(i16* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_16bit:

  %addrint_uxtw = ptrtoint i16* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
  %val16_uxtw = load volatile i16, i16* %addr_uxtw
  %newval16 = add i16 %val16_uxtw, 1
  store volatile i16 %newval16, i16* @var_16bit
; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

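; The sext of the loaded i16 to i64 should merge into the load itself,
; selecting ldrsh with a 64-bit destination register.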
  %base_sxtw = ptrtoint i16* %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
  %val16_sxtw = load volatile i16, i16* %addr_sxtw
  %val64_signed = sext i16 %val16_sxtw to i64
  store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

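; An extra shl #1 scales the zero-extended offset by the i16 element size and
; should fold as uxtw #1.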
  %base_uxtwN = ptrtoint i16* %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 1
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
  %val32 = load volatile i32, i32* @var_32bit
  %val16_trunc32 = trunc i32 %val32 to i16
  store volatile i16 %val16_trunc32, i16* %addr_uxtwN
; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
  ret void
}

define void @ldst_32bit(i32* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_32bit:

  %addrint_uxtw = ptrtoint i32* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
  %val32_uxtw = load volatile i32, i32* %addr_uxtw
  %newval32 = add i32 %val32_uxtw, 1
  store volatile i32 %newval32, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  %base_sxtw = ptrtoint i32* %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
  %val32_sxtw = load volatile i32, i32* %addr_sxtw
  %val64_signed = sext i32 %val32_sxtw to i64
  store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

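; Here the shl #2 matches the i32 element size, so the store should use
; uxtw #2.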
  %base_uxtwN = ptrtoint i32* %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 2
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
  %val32 = load volatile i32, i32* @var_32bit
  store volatile i32 %val32, i32* %addr_uxtwN
; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
  ret void
}

define void @ldst_64bit(i64* %base, i64 %offset) minsize {
; CHECK-LABEL: ldst_64bit:

  %addrint_uxtw = ptrtoint i64* %base to i64
  %offset_uxtw = and i64 %offset, 4294967295
  %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
  %addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
  %val64_uxtw = load volatile i64, i64* %addr_uxtw
  %newval64 = add i64 %val64_uxtw, 1
  store volatile i64 %newval64, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]

  %base_sxtw = ptrtoint i64* %base to i64
  %offset_sxtw.tmp = shl i64 %offset, 32
  %offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
  %addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
  %addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
  %val64_sxtw = load volatile i64, i64* %addr_sxtw
  store volatile i64 %val64_sxtw, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]

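; For i64 elements the scale is shl #3, which should select uxtw #3.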
  %base_uxtwN = ptrtoint i64* %base to i64
  %offset_uxtwN = and i64 %offset, 4294967295
  %offset2_uxtwN = shl i64 %offset_uxtwN, 3
  %addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
  %addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
  %val64 = load volatile i64, i64* @var_64bit
  store volatile i64 %val64, i64* %addr_uxtwN
; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
  ret void
}

@var_8bit = global i8 0
@var_16bit = global i16 0
@var_32bit = global i32 0
@var_64bit = global i64 0