; Test EfficiencySanitizer working set instrumentation without aggressive
; optimization flags.
;
; RUN: opt < %s -esan -esan-working-set -esan-assume-intra-cache-line=0 -S | FileCheck %s

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Intra-cache-line

define i8 @aligned1(i8* %a) {
entry:
  %tmp1 = load i8, i8* %a, align 1
  ret i8 %tmp1
; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
; CHECK:        %0 = ptrtoint i8* %a to i64
; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
; CHECK-NEXT:   %3 = lshr i64 %2, 6
; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   %5 = load i8, i8* %4
; CHECK-NEXT:   %6 = and i8 %5, -127
; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
; CHECK-NEXT:   br i1 %7, label %8, label %11
; CHECK:        %9 = or i8 %5, -127
; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   store i8 %9, i8* %10
; CHECK-NEXT:   br label %11
; CHECK:        %tmp1 = load i8, i8* %a, align 1
; CHECK-NEXT:   ret i8 %tmp1
}

define i16 @aligned2(i16* %a) {
entry:
  %tmp1 = load i16, i16* %a, align 2
  ret i16 %tmp1
; CHECK:        %0 = ptrtoint i16* %a to i64
; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
; CHECK-NEXT:   %3 = lshr i64 %2, 6
; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   %5 = load i8, i8* %4
; CHECK-NEXT:   %6 = and i8 %5, -127
; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
; CHECK-NEXT:   br i1 %7, label %8, label %11
; CHECK:        %9 = or i8 %5, -127
; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   store i8 %9, i8* %10
; CHECK-NEXT:   br label %11
; CHECK:        %tmp1 = load i16, i16* %a, align 2
; CHECK-NEXT:   ret i16 %tmp1
}

define i32 @aligned4(i32* %a) {
entry:
  %tmp1 = load i32, i32* %a, align 4
  ret i32 %tmp1
; CHECK:        %0 = ptrtoint i32* %a to i64
; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
; CHECK-NEXT:   %3 = lshr i64 %2, 6
; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   %5 = load i8, i8* %4
; CHECK-NEXT:   %6 = and i8 %5, -127
; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
; CHECK-NEXT:   br i1 %7, label %8, label %11
; CHECK:        %9 = or i8 %5, -127
; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   store i8 %9, i8* %10
; CHECK-NEXT:   br label %11
; CHECK:        %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT:   ret i32 %tmp1
}

define i64 @aligned8(i64* %a) {
entry:
  %tmp1 = load i64, i64* %a, align 8
  ret i64 %tmp1
; CHECK:        %0 = ptrtoint i64* %a to i64
; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
; CHECK-NEXT:   %3 = lshr i64 %2, 6
; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   %5 = load i8, i8* %4
; CHECK-NEXT:   %6 = and i8 %5, -127
; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
; CHECK-NEXT:   br i1 %7, label %8, label %11
; CHECK:        %9 = or i8 %5, -127
; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   store i8 %9, i8* %10
; CHECK-NEXT:   br label %11
; CHECK:        %tmp1 = load i64, i64* %a, align 8
; CHECK-NEXT:   ret i64 %tmp1
}

define i128 @aligned16(i128* %a) {
entry:
  %tmp1 = load i128, i128* %a, align 16
  ret i128 %tmp1
; CHECK:        %0 = ptrtoint i128* %a to i64
; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
; CHECK-NEXT:   %3 = lshr i64 %2, 6
; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   %5 = load i8, i8* %4
; CHECK-NEXT:   %6 = and i8 %5, -127
; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
; CHECK-NEXT:   br i1 %7, label %8, label %11
; CHECK:        %9 = or i8 %5, -127
; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
; CHECK-NEXT:   store i8 %9, i8* %10
; CHECK-NEXT:   br label %11
; CHECK:        %tmp1 = load i128, i128* %a, align 16
; CHECK-NEXT:   ret i128 %tmp1
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Not guaranteed to be intra-cache-line

define i16 @unaligned2(i16* %a) {
entry:
  %tmp1 = load i16, i16* %a, align 1
  ret i16 %tmp1
; CHECK:        %0 = bitcast i16* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load2(i8* %0)
; CHECK-NEXT:   %tmp1 = load i16, i16* %a, align 1
; CHECK-NEXT:   ret i16 %tmp1
}

define i32 @unaligned4(i32* %a) {
entry:
  %tmp1 = load i32, i32* %a, align 2
  ret i32 %tmp1
; CHECK:        %0 = bitcast i32* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load4(i8* %0)
; CHECK-NEXT:   %tmp1 = load i32, i32* %a, align 2
; CHECK-NEXT:   ret i32 %tmp1
}

define i64 @unaligned8(i64* %a) {
entry:
  %tmp1 = load i64, i64* %a, align 4
  ret i64 %tmp1
; CHECK:        %0 = bitcast i64* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load8(i8* %0)
; CHECK-NEXT:   %tmp1 = load i64, i64* %a, align 4
; CHECK-NEXT:   ret i64 %tmp1
}

define i128 @unaligned16(i128* %a) {
entry:
  %tmp1 = load i128, i128* %a, align 8
  ret i128 %tmp1
; CHECK:        %0 = bitcast i128* %a to i8*
; CHECK-NEXT:   call void @__esan_unaligned_load16(i8* %0)
; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 8
; CHECK-NEXT:   ret i128 %tmp1
}