/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 1000000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28;  // Mirroring RefBase definition.
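
// A freshly constructed RefBase object reports this sentinel as its strong
// count until the first strong reference adopts it; StrongMoves below checks
// exactly that.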

class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};
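
// Minimal sketch of the deleted_check pattern used throughout this file
// (illustrative; this test name is new, not part of the original suite):
// the flag lets a test observe the destructor running without reading
// freed memory.
TEST(RefBase, DeletedCheckSketch) {
    bool isDeleted;
    {
        sp<Foo> s = sp<Foo>::make(&isDeleted);
        ASSERT_FALSE(isDeleted);
    }  // Last strong reference dropped here; ~Foo() sets the flag.
    ASSERT_TRUE(isDeleted);
}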

// A version of Foo that ensures that all objects are allocated at the same
// address. No more than one can be allocated at a time. Thread-hostile.
class FooFixedAlloc : public RefBase {
public:
    static void* operator new(size_t size) {
        if (mAllocCount != 0) {
            abort();
        }
        mAllocCount = 1;
        if (theMemory == nullptr) {
            theMemory = malloc(size);
        }
        return theMemory;
    }

    static void operator delete(void *p) {
        if (mAllocCount != 1 || p != theMemory) {
            abort();
        }
        mAllocCount = 0;
    }

    FooFixedAlloc(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~FooFixedAlloc() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
    static int mAllocCount;
    static void* theMemory;
};

int FooFixedAlloc::mAllocCount(0);
void* FooFixedAlloc::theMemory(nullptr);
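
// The abort() calls above turn any overlapping or misplaced (de)allocation
// into a hard failure, so ReplacedComparison below can rely on the second
// object reusing the first object's exact address.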

TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count isn't increasing; double-check that the moved-from
        // pointer is properly reset and doesn't delete early.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now double-check that it deletes on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}
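
// Sketch of the promote() semantics touched on briefly in StrongMoves above
// (illustrative; this test name is new, not part of the original suite):
// promotion yields a non-null sp<> only while a strong reference still keeps
// the object alive.
TEST(RefBase, PromoteSketch) {
    bool isDeleted;
    sp<Foo> s = sp<Foo>::make(&isDeleted);
    wp<Foo> w = s;
    ASSERT_NE(nullptr, w.promote().get());  // Alive: promotion succeeds.
    s = nullptr;                            // Drop the last strong reference.
    ASSERT_TRUE(isDeleted);
    ASSERT_EQ(nullptr, w.promote().get());  // Dead: promotion fails.
}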

TEST(RefBase, Comparisons) {
    bool isDeleted, isDeleted2, isDeleted3;
    Foo* foo = new Foo(&isDeleted);
    Foo* foo2 = new Foo(&isDeleted2);
    sp<Foo> sp1(foo);
    sp<Foo> sp2(foo2);
    wp<Foo> wp1(sp1);
    wp<Foo> wp2(sp1);
    wp<Foo> wp3(sp2);
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_TRUE(wp1 == sp1);
    ASSERT_TRUE(wp3 == sp2);
    ASSERT_TRUE(wp1 != sp2);
    ASSERT_TRUE(wp1 <= wp2);
    ASSERT_TRUE(wp1 >= wp2);
    ASSERT_FALSE(wp1 != wp2);
    ASSERT_FALSE(wp1 > wp2);
    ASSERT_FALSE(wp1 < wp2);
    ASSERT_FALSE(sp1 == sp2);
    ASSERT_TRUE(sp1 != sp2);
    bool sp1_smaller = sp1 < sp2;
    wp<Foo> wp_smaller = sp1_smaller ? wp1 : wp3;
    wp<Foo> wp_larger = sp1_smaller ? wp3 : wp1;
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    sp2 = nullptr;
    ASSERT_TRUE(isDeleted2);
    ASSERT_FALSE(isDeleted);
    ASSERT_FALSE(wp3 == sp2);
    // Comparison results on weak pointers should not be affected.
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    wp2 = nullptr;
    ASSERT_FALSE(wp1 == wp2);
    ASSERT_TRUE(wp1 != wp2);
    wp1.clear();
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
    wp3.clear();
    ASSERT_TRUE(wp1 == wp3);
    ASSERT_FALSE(wp1 != wp3);
    ASSERT_FALSE(isDeleted);
    sp1.clear();
    ASSERT_TRUE(isDeleted);
    ASSERT_TRUE(sp1 == sp2);
    // Try to check that null pointers are properly initialized.
    {
        // Try once with non-null, to maximize chances of getting junk on the
        // stack.
        sp<Foo> sp3(new Foo(&isDeleted3));
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_FALSE(wp4 == wp5);
        ASSERT_TRUE(wp4 != wp5);
        ASSERT_FALSE(sp3 == wp5);
        ASSERT_FALSE(wp5 == sp3);
        ASSERT_TRUE(sp3 != wp5);
        ASSERT_TRUE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
    {
        sp<Foo> sp3;
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_TRUE(wp4 == wp5);
        ASSERT_FALSE(wp4 != wp5);
        ASSERT_TRUE(sp3 == wp5);
        ASSERT_TRUE(wp5 == sp3);
        ASSERT_FALSE(sp3 != wp5);
        ASSERT_FALSE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
}

// Check whether comparison against dead wp works, even if the object referenced
// by the new wp happens to be at the same address.
TEST(RefBase, ReplacedComparison) {
    bool isDeleted, isDeleted2;
    FooFixedAlloc* foo = new FooFixedAlloc(&isDeleted);
    sp<FooFixedAlloc> sp1(foo);
    wp<FooFixedAlloc> wp1(sp1);
    ASSERT_TRUE(wp1 == sp1);
    sp1.clear();  // Deallocates the object.
    ASSERT_TRUE(isDeleted);
    FooFixedAlloc* foo2 = new FooFixedAlloc(&isDeleted2);
    ASSERT_FALSE(isDeleted2);
    ASSERT_EQ(foo, foo2);  // Not technically a legal comparison, but ...
    sp<FooFixedAlloc> sp2(foo2);
    wp<FooFixedAlloc> wp2(sp2);
    ASSERT_TRUE(sp2 == wp2);
    ASSERT_FALSE(sp2 != wp2);
    ASSERT_TRUE(sp2 != wp1);
    ASSERT_FALSE(sp2 == wp1);
    ASSERT_FALSE(sp2 == sp1);  // sp1 is null.
    ASSERT_FALSE(wp1 == wp2);  // wp1 refers to the old object.
    ASSERT_TRUE(wp1 != wp2);
    ASSERT_TRUE(wp1 > wp2 || wp1 < wp2);
    ASSERT_TRUE(wp1 >= wp2 || wp1 <= wp2);
    ASSERT_FALSE(wp1 >= wp2 && wp1 <= wp2);
    ASSERT_FALSE(wp1 == nullptr);
    wp1 = sp2;
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
}

TEST(RefBase, AssertWeakRefExistsSuccess) {
    bool isDeleted;
    sp<Foo> foo = sp<Foo>::make(&isDeleted);
    wp<Foo> weakFoo = foo;

    EXPECT_EQ(weakFoo, wp<Foo>::fromExisting(foo.get()));
    EXPECT_EQ(weakFoo.unsafe_get(), wp<Foo>::fromExisting(foo.get()).unsafe_get());

    EXPECT_FALSE(isDeleted);
    foo = nullptr;
    EXPECT_TRUE(isDeleted);
}

TEST(RefBase, AssertWeakRefExistsDeath) {
    // This object uses some other refcounting method, or none at all.
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);

    // You can only get a valid wp<> object when you construct it from an sp<>.
    EXPECT_DEATH(wp<Foo>::fromExisting(foo), "");

    delete foo;
}

// Set up a situation in which we race with visit2AndRemove() to delete
// 2 strong references. The Bar destructor checks that there are no early
// deletions and that prior updates are visible to the destructor.
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};

static sp<Bar> buffer;
static std::atomic<bool> bufferFull(false);

// Wait until bufferFull has value val.
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}
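
// The bare spin in waitFor() above needs no extra fencing: bufferFull is a
// std::atomic, and its default sequentially consistent operations ensure the
// producer's write to buffer is visible once the flag flip is observed.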

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
                             cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
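    // CPU_OR of a zeroed set with origCpus is just a copy; both sets start as
    // the full original affinity mask before being partitioned below.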
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // I get the even cores, the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}

static wp<Bar> wpBuffer;
static std::atomic<bool> wpBufferFull(false);

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // We implicitly check that sp1 != NULL.
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr;  // No longer destroys the object.
                wp1 = bar;
            }
            wpBuffer = wp1;
            ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
            wpBufferFull = true;
            // Promotion races with that in visit3AndRemove.
            // This may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false);  // Waits for the other thread to drop its strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}