; Test that direct loads and stores of local variables are not checked.
; Also test that redundant checks of the same variable are elided.

; REQUIRES: allow_dump

; RUN: %p2i -i %s --args -verbose=inst -threads=0 -fsanitize-address \
; RUN:     | FileCheck --check-prefix=DUMP %s
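
; The translator is run with the AddressSanitizer instrumentation pass
; enabled (-fsanitize-address) and dumps the instrumented instructions
; (-verbose=inst); FileCheck then matches the DUMP lines below against
; that dump.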

define internal void @foo() {
  %ptr8 = alloca i8, i32 1, align 4
  %ptr16 = alloca i8, i32 2, align 4
  %ptr32 = alloca i8, i32 4, align 4
  %ptr64 = alloca i8, i32 8, align 4
  %ptr128 = alloca i8, i32 16, align 4

  %target8 = bitcast i8* %ptr8 to i8*
  %target16 = bitcast i8* %ptr16 to i16*
  %target32 = bitcast i8* %ptr32 to i32*
  %target64 = bitcast i8* %ptr64 to i64*
  %target128 = bitcast i8* %ptr128 to <4 x i32>*
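
  ; The bitcasts only retype the byte allocations to the width being
  ; accessed; every pointer still refers directly to its alloca, so the
  ; instrumentation can prove each of the accesses below in bounds and
  ; emits no check calls for them.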

  ; unchecked loads
  %loaded8 = load i8, i8* %target8, align 1
  %loaded16 = load i16, i16* %target16, align 1
  %loaded32 = load i32, i32* %target32, align 1
  %loaded64 = load i64, i64* %target64, align 1
  %loaded128 = load <4 x i32>, <4 x i32>* %target128, align 4

  ; unchecked stores
  store i8 %loaded8, i8* %target8, align 1
  store i16 %loaded16, i16* %target16, align 1
  store i32 %loaded32, i32* %target32, align 1
  store i64 %loaded64, i64* %target64, align 1
  store <4 x i32> %loaded128, <4 x i32>* %target128, align 4

  %addr8 = ptrtoint i8* %ptr8 to i32
  %addr16 = ptrtoint i8* %ptr16 to i32
  %addr32 = ptrtoint i8* %ptr32 to i32
  %addr64 = ptrtoint i8* %ptr64 to i32
  %addr128 = ptrtoint i8* %ptr128 to i32

  %off8 = add i32 %addr8, -1
  %off16 = add i32 %addr16, -1
  %off32 = add i32 %addr32, -1
  %off64 = add i32 %addr64, -1
  %off128 = add i32 %addr128, -1

  %offtarget8 = inttoptr i32 %off8 to i8*
  %offtarget16 = inttoptr i32 %off16 to i16*
  %offtarget32 = inttoptr i32 %off32 to i32*
  %offtarget64 = inttoptr i32 %off64 to i64*
  %offtarget128 = inttoptr i32 %off128 to <4 x i32>*
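
  ; Round-tripping each pointer through an integer and subtracting 1 yields
  ; an address one byte before its allocation. The pass cannot prove these
  ; accesses safe, so the first load or store through each pointer below
  ; must be preceded by a check.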

  ; checked stores
  store i8 42, i8* %offtarget8, align 1
  store i16 42, i16* %offtarget16, align 1
  store i32 42, i32* %offtarget32, align 1

  ; checked loads
  %offloaded64 = load i64, i64* %offtarget64, align 1
  %offloaded128 = load <4 x i32>, <4 x i32>* %offtarget128, align 4

  ; loads and stores with elided redundant checks
  %offloaded8 = load i8, i8* %offtarget8, align 1
  %offloaded16 = load i16, i16* %offtarget16, align 1
  %offloaded32 = load i32, i32* %offtarget32, align 1
  store i64 %offloaded64, i64* %offtarget64, align 1
  store <4 x i32> %offloaded128, <4 x i32>* %offtarget128, align 4

  ret void
}
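
; For reference, a sketch of the rewrite the pass performs on an access it
; cannot prove safe, taken from the expected dump below. The check call
; receives the address and the access size in bytes:
;
;   store i8 42, i8* %offtarget8, align 1
;
; becomes
;
;   call void @__asan_check_store(i32 %off8, i32 1)
;   store i8 42, i8* %off8, align 1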

; DUMP-LABEL: ================ Instrumented CFG ================
; DUMP-NEXT: define internal void @foo() {

; Direct unchecked loads and stores
; DUMP: %loaded8 = load i8, i8* %ptr8, align 1
; DUMP-NEXT: %loaded16 = load i16, i16* %ptr16, align 1
; DUMP-NEXT: %loaded32 = load i32, i32* %ptr32, align 1
; DUMP-NEXT: %loaded64 = load i64, i64* %ptr64, align 1
; DUMP-NEXT: %loaded128 = load <4 x i32>, <4 x i32>* %ptr128, align 4
; DUMP-NEXT: store i8 %loaded8, i8* %ptr8, align 1
; DUMP-NEXT: store i16 %loaded16, i16* %ptr16, align 1
; DUMP-NEXT: store i32 %loaded32, i32* %ptr32, align 1
; DUMP-NEXT: store i64 %loaded64, i64* %ptr64, align 1
; DUMP-NEXT: store <4 x i32> %loaded128, <4 x i32>* %ptr128, align 4
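
; Note that the dump refers to %ptrN and %offN directly; the bitcast and
; inttoptr temporaries from the source above do not appear in the dumped CFG.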

; Checked stores
; DUMP: call void @__asan_check_store(i32 %off8, i32 1)
; DUMP-NEXT: store i8 42, i8* %off8, align 1
; DUMP-NEXT: call void @__asan_check_store(i32 %off16, i32 2)
; DUMP-NEXT: store i16 42, i16* %off16, align 1
; DUMP-NEXT: call void @__asan_check_store(i32 %off32, i32 4)
; DUMP-NEXT: store i32 42, i32* %off32, align 1

; Checked loads
; DUMP-NEXT: call void @__asan_check_load(i32 %off64, i32 8)
; DUMP-NEXT: %offloaded64 = load i64, i64* %off64, align 1
; DUMP-NEXT: call void @__asan_check_load(i32 %off128, i32 16)
; DUMP-NEXT: %offloaded128 = load <4 x i32>, <4 x i32>* %off128, align 4

; Loads and stores with elided redundant checks
; DUMP-NEXT: %offloaded8 = load i8, i8* %off8, align 1
; DUMP-NEXT: %offloaded16 = load i16, i16* %off16, align 1
; DUMP-NEXT: %offloaded32 = load i32, i32* %off32, align 1
; DUMP-NEXT: store i64 %offloaded64, i64* %off64, align 1, beacon %offloaded64
; DUMP-NEXT: store <4 x i32> %offloaded128, <4 x i32>* %off128, align 4, beacon %offloaded128