// RUN: %clang_cc1 -triple arm-linux-gnueabi -emit-llvm %s -o - | FileCheck %s -check-prefix=ARM
// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=PPC32
// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=PPC64
// RUN: %clang_cc1 -triple mipsel-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=MIPS32
// RUN: %clang_cc1 -triple mips64el-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=MIPS64
// RUN: %clang_cc1 -triple sparc-unknown-eabi -emit-llvm %s -o - | FileCheck %s -check-prefix=SPARC
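// Check that __atomic_load and __atomic_store are lowered to native LLVM
// atomic load/store instructions when the access fits the target's inline
// atomic width, and to libcalls otherwise: the sized __atomic_load_N /
// __atomic_store_N helpers for scalar accesses, and the generic
// __atomic_load / __atomic_store for the 100-byte arrays.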

unsigned char c1, c2;
unsigned short s1, s2;
unsigned int i1, i2;
unsigned long long ll1, ll2;
unsigned char a1[100], a2[100];

enum memory_order {
  memory_order_relaxed,
  memory_order_consume,
  memory_order_acquire,
  memory_order_release,
  memory_order_acq_rel,
  memory_order_seq_cst
};

void test1(void) {
  (void)__atomic_load(&c1, &c2, memory_order_seq_cst);
  (void)__atomic_store(&c1, &c2, memory_order_seq_cst);
  (void)__atomic_load(&s1, &s2, memory_order_seq_cst);
  (void)__atomic_store(&s1, &s2, memory_order_seq_cst);
  (void)__atomic_load(&i1, &i2, memory_order_seq_cst);
  (void)__atomic_store(&i1, &i2, memory_order_seq_cst);
  (void)__atomic_load(&ll1, &ll2, memory_order_seq_cst);
  (void)__atomic_store(&ll1, &ll2, memory_order_seq_cst);
  (void)__atomic_load(&a1, &a2, memory_order_seq_cst);
  (void)__atomic_store(&a1, &a2, memory_order_seq_cst);

// ARM-LABEL: define{{.*}} void @test1
// ARM: = call{{.*}} zeroext i8 @__atomic_load_1(i8* @c1
// ARM: call{{.*}} void @__atomic_store_1(i8* @c1, i8 zeroext
// ARM: = call{{.*}} zeroext i16 @__atomic_load_2(i8* bitcast (i16* @s1 to i8*)
// ARM: call{{.*}} void @__atomic_store_2(i8* bitcast (i16* @s1 to i8*), i16 zeroext
// ARM: = call{{.*}} i32 @__atomic_load_4(i8* bitcast (i32* @i1 to i8*)
// ARM: call{{.*}} void @__atomic_store_4(i8* bitcast (i32* @i1 to i8*), i32
// ARM: = call{{.*}} i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// ARM: call{{.*}} void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// ARM: call{{.*}} void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// ARM: call{{.*}} void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// PPC32-LABEL: define void @test1
// PPC32: = load atomic i8, i8* @c1 seq_cst
// PPC32: store atomic i8 {{.*}}, i8* @c1 seq_cst
// PPC32: = load atomic i16, i16* @s1 seq_cst
// PPC32: store atomic i16 {{.*}}, i16* @s1 seq_cst
// PPC32: = load atomic i32, i32* @i1 seq_cst
// PPC32: store atomic i32 {{.*}}, i32* @i1 seq_cst
// PPC32: = call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// PPC32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// PPC32: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// PPC32: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// PPC64-LABEL: define void @test1
// PPC64: = load atomic i8, i8* @c1 seq_cst
// PPC64: store atomic i8 {{.*}}, i8* @c1 seq_cst
// PPC64: = load atomic i16, i16* @s1 seq_cst
// PPC64: store atomic i16 {{.*}}, i16* @s1 seq_cst
// PPC64: = load atomic i32, i32* @i1 seq_cst
// PPC64: store atomic i32 {{.*}}, i32* @i1 seq_cst
// PPC64: = load atomic i64, i64* @ll1 seq_cst
// PPC64: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// PPC64: call void @__atomic_load(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// PPC64: call void @__atomic_store(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// MIPS32-LABEL: define void @test1
// MIPS32: = load atomic i8, i8* @c1 seq_cst
// MIPS32: store atomic i8 {{.*}}, i8* @c1 seq_cst
// MIPS32: = load atomic i16, i16* @s1 seq_cst
// MIPS32: store atomic i16 {{.*}}, i16* @s1 seq_cst
// MIPS32: = load atomic i32, i32* @i1 seq_cst
// MIPS32: store atomic i32 {{.*}}, i32* @i1 seq_cst
// MIPS32: call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// MIPS32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// MIPS32: call void @__atomic_load(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// MIPS32: call void @__atomic_store(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// MIPS64-LABEL: define void @test1
// MIPS64: = load atomic i8, i8* @c1 seq_cst
// MIPS64: store atomic i8 {{.*}}, i8* @c1 seq_cst
// MIPS64: = load atomic i16, i16* @s1 seq_cst
// MIPS64: store atomic i16 {{.*}}, i16* @s1 seq_cst
// MIPS64: = load atomic i32, i32* @i1 seq_cst
// MIPS64: store atomic i32 {{.*}}, i32* @i1 seq_cst
// MIPS64: = load atomic i64, i64* @ll1 seq_cst
// MIPS64: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// MIPS64: call void @__atomic_load(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0)
// MIPS64: call void @__atomic_store(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// SPARC-LABEL: define void @test1
// SPARC: = load atomic i8, i8* @c1 seq_cst
// SPARC: store atomic i8 {{.*}}, i8* @c1 seq_cst
// SPARC: = load atomic i16, i16* @s1 seq_cst
// SPARC: store atomic i16 {{.*}}, i16* @s1 seq_cst
// SPARC: = load atomic i32, i32* @i1 seq_cst
// SPARC: store atomic i32 {{.*}}, i32* @i1 seq_cst
// SPARC: = load atomic i64, i64* @ll1 seq_cst
// SPARC: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// SPARC: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// SPARC: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
}