; RUN: opt -basicaa -gvn -S < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.7.0"

@x = common global i32 0, align 4
@y = common global i32 0, align 4

; GVN across unordered store (allowed)
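; The unordered store does not act as a barrier for the loads of @y, so GVN
; replaces %y with %x and the CHECK expects the add to fold to %x + %x.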
define i32 @test1() nounwind uwtable ssp {
; CHECK: test1
; CHECK: add i32 %x, %x
entry:
  %x = load i32* @y
  store atomic i32 %x, i32* @x unordered, align 4
  %y = load i32* @y
  %z = add i32 %x, %y
  ret i32 %z
}

; GVN across seq_cst store (allowed in theory; not implemented at the moment)
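; Because GVN does not yet look across seq_cst stores, both loads of @y
; remain distinct and the CHECK expects add i32 %x, %y to survive.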
define i32 @test2() nounwind uwtable ssp {
; CHECK: test2
; CHECK: add i32 %x, %y
entry:
  %x = load i32* @y
  store atomic i32 %x, i32* @x seq_cst, align 4
  %y = load i32* @y
  %z = add i32 %x, %y
  ret i32 %z
}

; GVN across unordered load (allowed)
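; The unordered atomic load does not order the surrounding plain loads, so
; %z is replaced with %x and the CHECK expects add i32 %x, %x.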
define i32 @test3() nounwind uwtable ssp {
; CHECK: test3
; CHECK: add i32 %x, %x
entry:
  %x = load i32* @y
  %y = load atomic i32* @x unordered, align 4
  %z = load i32* @y
  %a = add i32 %x, %z
  %b = add i32 %y, %a
  ret i32 %b
}

; GVN across seq_cst load (load after the atomic load must not be removed)
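; The seq_cst load has acquire semantics, so the later load of @y cannot be
; folded into the earlier one; both loads must appear in the output.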
define i32 @test4() nounwind uwtable ssp {
; CHECK: test4
; CHECK: load atomic i32* @x
; CHECK: load i32* @y
entry:
  %x = load i32* @y
  %y = load atomic i32* @x seq_cst, align 4
  %x2 = load i32* @y
  %x3 = add i32 %x, %x2
  %y2 = add i32 %y, %x3
  ret i32 %y2
}

; GVN load to unordered load (allowed)
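; A plain load from the same address may reuse the earlier unordered atomic
; load's value, so %y is replaced with %x and the add folds to %x + %x.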
define i32 @test5() nounwind uwtable ssp {
; CHECK: test5
; CHECK: add i32 %x, %x
entry:
  %x = load atomic i32* @x unordered, align 4
  %y = load i32* @x
  %z = add i32 %x, %y
  ret i32 %z
}

; GVN unordered load to load (unordered load must not be removed)
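; The unordered atomic load must stay in the output; GVN may not replace it
; with the result of the preceding plain load.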
define i32 @test6() nounwind uwtable ssp {
; CHECK: test6
; CHECK: load atomic i32* @x unordered
entry:
  %x = load i32* @x
  %x2 = load atomic i32* @x unordered, align 4
  %x3 = add i32 %x, %x2
  ret i32 %x3
}
