/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>
// Fool stdatomic.h into not using <atomic>.
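// (When _USING_LIBCXX is defined, bionic's stdatomic.h defers to the C++
// <atomic> header; undefining it here ensures these tests exercise the C
// implementation directly.)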
#undef _USING_LIBCXX
#include <stdatomic.h>
#include <pthread.h>
#include <stdint.h>

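// Each ATOMIC_*_LOCK_FREE macro expands to 0, 1, or 2; asserting that it is
// non-zero checks that the corresponding type is at least sometimes lock-free.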
TEST(stdatomic, LOCK_FREE) {
  ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR16_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR32_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_INT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LLONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_POINTER_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_SHORT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_WCHAR_T_LOCK_FREE);
}

TEST(stdatomic, init) {
  atomic_int v = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_load(&v));

  atomic_init(&v, 456);
  ASSERT_EQ(456, atomic_load(&v));

  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
}

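// The two fence tests only check that each memory_order value is accepted and
// that the calls do not crash; they make no attempt to verify actual ordering.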
TEST(stdatomic, atomic_thread_fence) {
  atomic_thread_fence(memory_order_relaxed);
  atomic_thread_fence(memory_order_consume);
  atomic_thread_fence(memory_order_acquire);
  atomic_thread_fence(memory_order_release);
  atomic_thread_fence(memory_order_acq_rel);
  atomic_thread_fence(memory_order_seq_cst);
}

TEST(stdatomic, atomic_signal_fence) {
  atomic_signal_fence(memory_order_relaxed);
  atomic_signal_fence(memory_order_consume);
  atomic_signal_fence(memory_order_acquire);
  atomic_signal_fence(memory_order_release);
  atomic_signal_fence(memory_order_acq_rel);
  atomic_signal_fence(memory_order_seq_cst);
}

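// Unlike the compile-time ATOMIC_*_LOCK_FREE macros, atomic_is_lock_free() is
// a runtime query on a particular object.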
TEST(stdatomic, atomic_is_lock_free) {
  atomic_char small;
  ASSERT_TRUE(atomic_is_lock_free(&small));
  atomic_intmax_t big;
  // atomic_intmax_t (64 bits) is not lock-free on 32-bit MIPS.
#if defined(__mips__) && !defined(__LP64__)
  ASSERT_FALSE(atomic_is_lock_free(&big));
#else
  ASSERT_TRUE(atomic_is_lock_free(&big));
#endif
}

TEST(stdatomic, atomic_flag) {
  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
  ASSERT_TRUE(atomic_flag_test_and_set(&f));

  atomic_flag_clear(&f);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));

  atomic_flag_clear_explicit(&f, memory_order_relaxed);
  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
}

TEST(stdatomic, atomic_store) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_load(&i));
  atomic_store_explicit(&i, 123, memory_order_relaxed);
  ASSERT_EQ(123, atomic_load_explicit(&i, memory_order_relaxed));
}

TEST(stdatomic, atomic_exchange) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_exchange(&i, 456));
  ASSERT_EQ(456, atomic_exchange_explicit(&i, 123, memory_order_relaxed));
}

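// atomic_compare_exchange_strong() never fails spuriously; the _weak variant
// may, which is why the _weak calls below are retried in a loop. On failure,
// both variants update 'expected' to the value actually observed.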
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
                                                      memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
                                                       memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);  // Arbitrary limit on spurious compare_exchange failures.
    ASSERT_EQ(123, expected);
  } while (!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(123, expected);
  } while (!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
                                                  memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
                                                     memory_order_relaxed));
  ASSERT_EQ(456, expected);
}

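// Each atomic_fetch_* operation returns the value the object held immediately
// before the operation was applied.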
TEST(stdatomic, atomic_fetch_add) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_add(&i, 1));
  ASSERT_EQ(124, atomic_fetch_add_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(125, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_sub) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_sub(&i, 1));
  ASSERT_EQ(122, atomic_fetch_sub_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(121, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_or) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_or(&i, 0x020));
  ASSERT_EQ(0x120, atomic_fetch_or_explicit(&i, 0x003, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_xor) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_xor(&i, 0x120));
  ASSERT_EQ(0x020, atomic_fetch_xor_explicit(&i, 0x103, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_and) {
  atomic_int i = ATOMIC_VAR_INIT(0x123);
  ASSERT_EQ(0x123, atomic_fetch_and(&i, 0x00f));
  ASSERT_EQ(0x003, atomic_fetch_and_explicit(&i, 0x2, memory_order_relaxed));
  ASSERT_EQ(0x002, atomic_load(&i));
}

// And a rudimentary test of acquire-release memory ordering:

constexpr static uint_least32_t BIG = 10000000ul;  // Assumed even below.

struct three_atomics {
  atomic_uint_least32_t x;
  char a[123];  // Padding to put everything in different cache lines,
                // increasing the chance of exposing alignment bugs.
  atomic_uint_least32_t y;
  char b[4013];
  atomic_uint_least32_t z;
};

// Very simple acquire/release memory ordering sanity check.
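// The writer publishes x and z with relaxed stores, then stores y last with
// release semantics; a reader that acquires the new y value must therefore
// also see the x and z values written before it.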
static void* writer(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  for (uint_least32_t i = 0; i <= BIG; i += 2) {
    atomic_store_explicit(&a->x, i, memory_order_relaxed);
    atomic_store_explicit(&a->z, i, memory_order_relaxed);
    atomic_store_explicit(&a->y, i, memory_order_release);
    atomic_store_explicit(&a->x, i+1, memory_order_relaxed);
    atomic_store_explicit(&a->z, i+1, memory_order_relaxed);
    atomic_store_explicit(&a->y, i+1, memory_order_release);
  }
  return nullptr;
}

static void* reader(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  uint_least32_t xval = 0, yval = 0, zval = 0;
  size_t repeat = 0;
  size_t repeat_limit = 1000;
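  // 'repeat' counts loop iterations (capped at repeat_limit) so that we can
  // check below that the reader actually ran concurrently with the writer.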
  while (yval != BIG + 1) {
    yval = atomic_load_explicit(&a->y, memory_order_acquire);
    zval = atomic_load_explicit(&a->z, memory_order_relaxed);
    xval = atomic_load_explicit(&a->x, memory_order_relaxed);
    // If we see a given value of y, the immediately preceding
    // stores to z and x, or later ones, should also be visible.
    if (zval < yval) {
      // Can't just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << zval << " < " << yval << ", " << xval << "\n";
      return nullptr;  // Only report once.
    }
    if (xval < yval) {
      // Can't just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << xval << " < " << yval << ", " << zval << "\n";
      return nullptr;  // Only report once.
    }
    if (repeat < repeat_limit) ++repeat;
  }
  // The following assertion is not technically guaranteed to hold.
  // But if it fails to hold, this test was useless, and we have a
  // serious scheduling issue that we should probably know about.
  EXPECT_EQ(repeat, repeat_limit);
  return nullptr;
}

TEST(stdatomic, ordering) {
  // Run a memory ordering sanity test.
  void* result;
  three_atomics a;
  atomic_init(&a.x, 0ul);
  atomic_init(&a.y, 0ul);
  atomic_init(&a.z, 0ul);
  pthread_t t1, t2;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, reader, &a));
  ASSERT_EQ(0, pthread_create(&t2, nullptr, writer, &a));
  ASSERT_EQ(0, pthread_join(t1, &result));
  EXPECT_EQ(nullptr, result);
  ASSERT_EQ(0, pthread_join(t2, &result));
  EXPECT_EQ(nullptr, result);
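  // Both threads have been joined, so the final values must be visible here
  // whatever memory_order we load with; consume, seq_cst, and the default
  // (seq_cst) are all exercised.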
  EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
  EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
  EXPECT_EQ(atomic_load(&a.z), BIG + 1);
}
262