; Test basic EfficiencySanitizer slowpath instrumentation.
;
; RUN: opt < %s -esan -esan-working-set -esan-instrument-fastpath=false -S | FileCheck %s

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Aligned loads:

define i8 @loadAligned1(i8* %a) {
entry:
  %tmp1 = load i8, i8* %a, align 1
  ret i8 %tmp1
; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
; CHECK:        call void @__esan_aligned_load1(i8* %a)
; CHECK-NEXT:   %tmp1 = load i8, i8* %a, align 1
; CHECK-NEXT:   ret i8 %tmp1
}

define i16 @loadAligned2(i16* %a) {
entry:
  %tmp1 = load i16, i16* %a, align 2
  ret i16 %tmp1
; CHECK:        %0 = bitcast i16* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_load2(i8* %0)
; CHECK-NEXT:   %tmp1 = load i16, i16* %a, align 2
; CHECK-NEXT:   ret i16 %tmp1
}

define i32 @loadAligned4(i32* %a) {
entry:
  %tmp1 = load i32, i32* %a, align 4
  ret i32 %tmp1
; CHECK:        %0 = bitcast i32* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_load4(i8* %0)
; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT:   ret i32 %tmp1
}

define i64 @loadAligned8(i64* %a) {
entry:
  %tmp1 = load i64, i64* %a, align 8
  ret i64 %tmp1
; CHECK:        %0 = bitcast i64* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_load8(i8* %0)
; CHECK-NEXT:   %tmp1 = load i64, i64* %a, align 8
; CHECK-NEXT:   ret i64 %tmp1
}

define i128 @loadAligned16(i128* %a) {
entry:
  %tmp1 = load i128, i128* %a, align 16
  ret i128 %tmp1
; CHECK:        %0 = bitcast i128* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_load16(i8* %0)
; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 16
; CHECK-NEXT:   ret i128 %tmp1
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Aligned stores:

define void @storeAligned1(i8* %a) {
entry:
  store i8 1, i8* %a, align 1
  ret void
; CHECK:        call void @__esan_aligned_store1(i8* %a)
; CHECK-NEXT:   store i8 1, i8* %a, align 1
; CHECK-NEXT:   ret void
}

define void @storeAligned2(i16* %a) {
entry:
  store i16 1, i16* %a, align 2
  ret void
; CHECK:        %0 = bitcast i16* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_store2(i8* %0)
; CHECK-NEXT:   store i16 1, i16* %a, align 2
; CHECK-NEXT:   ret void
}

define void @storeAligned4(i32* %a) {
entry:
  store i32 1, i32* %a, align 4
  ret void
; CHECK:        %0 = bitcast i32* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_store4(i8* %0)
; CHECK-NEXT:   store i32 1, i32* %a, align 4
; CHECK-NEXT:   ret void
}

define void @storeAligned8(i64* %a) {
entry:
  store i64 1, i64* %a, align 8
  ret void
; CHECK:        %0 = bitcast i64* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_store8(i8* %0)
; CHECK-NEXT:   store i64 1, i64* %a, align 8
; CHECK-NEXT:   ret void
}

define void @storeAligned16(i128* %a) {
entry:
  store i128 1, i128* %a, align 16
  ret void
; CHECK:        %0 = bitcast i128* %a to i8*
; CHECK-NEXT:   call void @__esan_aligned_store16(i8* %0)
; CHECK-NEXT:   store i128 1, i128* %a, align 16
; CHECK-NEXT:   ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Unaligned loads:

define i16 @loadUnaligned2(i16* %a) {
entry:
  %tmp1 = load i16, i16* %a, align 1
  ret i16 %tmp1
; CHECK:        %0 = bitcast i16* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load2(i8* %0)
; CHECK-NEXT:   %tmp1 = load i16, i16* %a, align 1
; CHECK-NEXT:   ret i16 %tmp1
}

define i32 @loadUnaligned4(i32* %a) {
entry:
  %tmp1 = load i32, i32* %a, align 1
  ret i32 %tmp1
; CHECK:        %0 = bitcast i32* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load4(i8* %0)
; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 1
; CHECK-NEXT:   ret i32 %tmp1
}

define i64 @loadUnaligned8(i64* %a) {
entry:
  %tmp1 = load i64, i64* %a, align 1
  ret i64 %tmp1
; CHECK:        %0 = bitcast i64* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load8(i8* %0)
; CHECK-NEXT:   %tmp1 = load i64, i64* %a, align 1
; CHECK-NEXT:   ret i64 %tmp1
}

define i128 @loadUnaligned16(i128* %a) {
entry:
  %tmp1 = load i128, i128* %a, align 1
  ret i128 %tmp1
; CHECK:        %0 = bitcast i128* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load16(i8* %0)
; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 1
; CHECK-NEXT:   ret i128 %tmp1
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Unaligned stores:

define void @storeUnaligned2(i16* %a) {
entry:
  store i16 1, i16* %a, align 1
  ret void
; CHECK:        %0 = bitcast i16* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_store2(i8* %0)
; CHECK-NEXT:   store i16 1, i16* %a, align 1
; CHECK-NEXT:   ret void
}

define void @storeUnaligned4(i32* %a) {
entry:
  store i32 1, i32* %a, align 1
  ret void
; CHECK:        %0 = bitcast i32* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_store4(i8* %0)
; CHECK-NEXT:   store i32 1, i32* %a, align 1
; CHECK-NEXT:   ret void
}

define void @storeUnaligned8(i64* %a) {
entry:
  store i64 1, i64* %a, align 1
  ret void
; CHECK:        %0 = bitcast i64* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_store8(i8* %0)
; CHECK-NEXT:   store i64 1, i64* %a, align 1
; CHECK-NEXT:   ret void
}

define void @storeUnaligned16(i128* %a) {
entry:
  store i128 1, i128* %a, align 1
  ret void
; CHECK:        %0 = bitcast i128* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_store16(i8* %0)
; CHECK-NEXT:   store i128 1, i128* %a, align 1
; CHECK-NEXT:   ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Unusual loads and stores:

define x86_fp80 @loadUnalignedFP(x86_fp80* %a) {
entry:
  %tmp1 = load x86_fp80, x86_fp80* %a, align 1
  ret x86_fp80 %tmp1
; CHECK:        %0 = bitcast x86_fp80* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_loadN(i8* %0, i64 10)
; CHECK-NEXT:   %tmp1 = load x86_fp80, x86_fp80* %a, align 1
; CHECK-NEXT:   ret x86_fp80 %tmp1
}

define void @storeUnalignedFP(x86_fp80* %a) {
entry:
  store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
  ret void
; CHECK:        %0 = bitcast x86_fp80* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_storeN(i8* %0, i64 10)
; CHECK-NEXT:   store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
; CHECK-NEXT:   ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Ensure that esan converts memcpy intrinsics to calls:

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)

define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
entry:
    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
    ret void
; CHECK: define void @memCpyTest
; CHECK: call i8* @memcpy
; CHECK: ret void
}

define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
entry:
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
    ret void
; CHECK: define void @memMoveTest
; CHECK: call i8* @memmove
; CHECK: ret void
}

define void @memSetTest(i8* nocapture %x) {
entry:
    tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
    ret void
; CHECK: define void @memSetTest
; CHECK: call i8* @memset
; CHECK: ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Top-level:

; CHECK: define internal void @esan.module_ctor()
; CHECK: call void @__esan_init(i32 2, i8* null)
; CHECK: define internal void @esan.module_dtor()
; CHECK: call void @__esan_exit(i8* null)