/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 1000000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28;  // Mirroring RefBase definition.
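// RefBase starts the strong count at this sentinel rather than at zero so the
// very first incStrong() can be detected; that first call subtracts the
// sentinel back off, leaving the count at 1. StrongMoves below relies on
// observing the sentinel before any sp<> has been attached.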

class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};

// A version of Foo that ensures that all objects are allocated at the same
// address. No more than one can be allocated at a time. Thread-hostile.
class FooFixedAlloc : public RefBase {
public:
    static void* operator new(size_t size) {
        if (mAllocCount != 0) {
            abort();
        }
        mAllocCount = 1;
        if (theMemory == nullptr) {
            theMemory = malloc(size);
        }
        return theMemory;
    }

    static void operator delete(void *p) {
        if (mAllocCount != 1 || p != theMemory) {
            abort();
        }
        mAllocCount = 0;
    }

    FooFixedAlloc(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~FooFixedAlloc() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
    static int mAllocCount;
    static void* theMemory;
};

int FooFixedAlloc::mAllocCount(0);
void* FooFixedAlloc::theMemory(nullptr);

TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count didn't increase, so double-check that the moved-from
        // pointer was properly reset and didn't trigger an early delete.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now let's double check it deletes on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}

TEST(RefBase, Comparisons) {
    bool isDeleted, isDeleted2, isDeleted3;
    Foo* foo = new Foo(&isDeleted);
    Foo* foo2 = new Foo(&isDeleted2);
    sp<Foo> sp1(foo);
    sp<Foo> sp2(foo2);
    wp<Foo> wp1(sp1);
    wp<Foo> wp2(sp1);
    wp<Foo> wp3(sp2);
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_TRUE(wp1 == sp1);
    ASSERT_TRUE(wp3 == sp2);
    ASSERT_TRUE(wp1 != sp2);
    ASSERT_TRUE(wp1 <= wp2);
    ASSERT_TRUE(wp1 >= wp2);
    ASSERT_FALSE(wp1 != wp2);
    ASSERT_FALSE(wp1 > wp2);
    ASSERT_FALSE(wp1 < wp2);
    ASSERT_FALSE(sp1 == sp2);
    ASSERT_TRUE(sp1 != sp2);
    bool sp1_smaller = sp1 < sp2;
    wp<Foo> wp_smaller = sp1_smaller ? wp1 : wp3;
    wp<Foo> wp_larger = sp1_smaller ? wp3 : wp1;
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    sp2 = nullptr;
    ASSERT_TRUE(isDeleted2);
    ASSERT_FALSE(isDeleted);
    ASSERT_FALSE(wp3 == sp2);
    // Comparison results on weak pointers should not be affected.
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    wp2 = nullptr;
    ASSERT_FALSE(wp1 == wp2);
    ASSERT_TRUE(wp1 != wp2);
    wp1.clear();
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
    wp3.clear();
    ASSERT_TRUE(wp1 == wp3);
    ASSERT_FALSE(wp1 != wp3);
    ASSERT_FALSE(isDeleted);
    sp1.clear();
    ASSERT_TRUE(isDeleted);
    ASSERT_TRUE(sp1 == sp2);
    // Try to check that null pointers are properly initialized.
    {
        // Try once with non-null, to maximize chances of getting junk on the
        // stack.
        sp<Foo> sp3(new Foo(&isDeleted3));
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_FALSE(wp4 == wp5);
        ASSERT_TRUE(wp4 != wp5);
        ASSERT_FALSE(sp3 == wp5);
        ASSERT_FALSE(wp5 == sp3);
        ASSERT_TRUE(sp3 != wp5);
        ASSERT_TRUE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
    {
        sp<Foo> sp3;
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_TRUE(wp4 == wp5);
        ASSERT_FALSE(wp4 != wp5);
        ASSERT_TRUE(sp3 == wp5);
        ASSERT_TRUE(wp5 == sp3);
        ASSERT_FALSE(sp3 != wp5);
        ASSERT_FALSE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
}

// Check whether comparison against dead wp works, even if the object referenced
// by the new wp happens to be at the same address.
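// This is an ABA-style scenario: FooFixedAlloc reuses a single fixed address,
// so a dead wp and a new wp can wrap the same raw pointer. The checks below
// rely on wp<> equality comparing the underlying weakref object as well as the
// raw pointer, so address reuse alone must not make the two compare equal.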
TEST(RefBase, ReplacedComparison) {
    bool isDeleted, isDeleted2;
    FooFixedAlloc* foo = new FooFixedAlloc(&isDeleted);
    sp<FooFixedAlloc> sp1(foo);
    wp<FooFixedAlloc> wp1(sp1);
    ASSERT_TRUE(wp1 == sp1);
    sp1.clear();  // Deallocates the object.
    ASSERT_TRUE(isDeleted);
    FooFixedAlloc* foo2 = new FooFixedAlloc(&isDeleted2);
    ASSERT_FALSE(isDeleted2);
    ASSERT_EQ(foo, foo2);  // Not technically a legal comparison, but ...
    sp<FooFixedAlloc> sp2(foo2);
    wp<FooFixedAlloc> wp2(sp2);
    ASSERT_TRUE(sp2 == wp2);
    ASSERT_FALSE(sp2 != wp2);
    ASSERT_TRUE(sp2 != wp1);
    ASSERT_FALSE(sp2 == wp1);
    ASSERT_FALSE(sp2 == sp1);  // sp1 is null.
    ASSERT_FALSE(wp1 == wp2);  // wp1 refers to old object.
    ASSERT_TRUE(wp1 != wp2);
    ASSERT_TRUE(wp1 > wp2 || wp1 < wp2);
    ASSERT_TRUE(wp1 >= wp2 || wp1 <= wp2);
    ASSERT_FALSE(wp1 >= wp2 && wp1 <= wp2);
    ASSERT_FALSE(wp1 == nullptr);
    wp1 = sp2;
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
}

TEST(RefBase, AssertWeakRefExistsSuccess) {
    bool isDeleted;
    sp<Foo> foo = sp<Foo>::make(&isDeleted);
    wp<Foo> weakFoo = foo;

    EXPECT_EQ(weakFoo, wp<Foo>::fromExisting(foo.get()));
    EXPECT_EQ(weakFoo.unsafe_get(), wp<Foo>::fromExisting(foo.get()).unsafe_get());

    EXPECT_FALSE(isDeleted);
    foo = nullptr;
    EXPECT_TRUE(isDeleted);
}

TEST(RefBase, AssertWeakRefExistsDeath) {
    // uses some other refcounting method, or none at all
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);

    // can only get a valid wp<> object when you construct it from an sp<>
    EXPECT_DEATH(wp<Foo>::fromExisting(foo), "");

    delete foo;
}

TEST(RefBase, DoubleOwnershipDeath) {
    bool isDeleted;
    auto foo = sp<Foo>::make(&isDeleted);

    // if something else thinks it owns foo, should die
    EXPECT_DEATH(delete foo.get(), "");

    EXPECT_FALSE(isDeleted);
}

TEST(RefBase, StackOwnershipDeath) {
    bool isDeleted;
    EXPECT_DEATH({ Foo foo(&isDeleted); foo.incStrong(nullptr); }, "");
}

// Set up a situation in which we race with visit2AndRemove() to delete
// 2 strong references. The Bar destructor checks that there were no early
// deletions and that prior updates are visible to the destructor.
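// For the visibility check to be sound, the final decStrong() must release its
// thread's updates and acquire the other thread's before the destructor runs;
// this test exercises exactly that ordering under contention.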
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};

static sp<Bar> buffer;
static std::atomic<bool> bufferFull(false);

// Wait until bufferFull has value val. This spins, but that's cheap here:
// the two racing threads are pinned to disjoint CPU sets.
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
                             cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // I get the even cores, the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}

static wp<Bar> wpBuffer;
static std::atomic<bool> wpBufferFull(false);

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned:" << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // We implicitly check that sp1 != NULL
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned:" << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr;  // No longer destroys the object.
                wp1 = bar;
            }
            wpBuffer = wp1;
            ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
            wpBufferFull = true;
            // Promotion races with that in visit3AndRemove.
            // This may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false);  // Waits for other thread to drop strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}