/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <utils/StrongPointer.h>
#include <utils/RefBase.h>

#include <thread>
#include <atomic>
#include <sched.h>
#include <stdlib.h>
#include <errno.h>

// Enhanced version of StrongPointer_test, but using RefBase underneath.

using namespace android;

static constexpr int NITERS = 500000;

static constexpr int INITIAL_STRONG_VALUE = 1 << 28;  // Mirroring RefBase definition.

// Test object that reports its own deletion through *deleted_check, so tests
// can observe exactly when it is destroyed.
class Foo : public RefBase {
public:
    Foo(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~Foo() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
};

// A version of Foo that ensures that all objects are allocated at the same
// address. No more than one can be allocated at a time. Thread-hostile.
class FooFixedAlloc : public RefBase {
public:
    static void* operator new(size_t size) {
        if (mAllocCount != 0) {
            abort();
        }
        mAllocCount = 1;
        if (theMemory == nullptr) {
            theMemory = malloc(size);
        }
        return theMemory;
    }

    static void operator delete(void *p) {
        if (mAllocCount != 1 || p != theMemory) {
            abort();
        }
        mAllocCount = 0;
    }

    FooFixedAlloc(bool* deleted_check) : mDeleted(deleted_check) {
        *mDeleted = false;
    }

    ~FooFixedAlloc() {
        *mDeleted = true;
    }
private:
    bool* mDeleted;
    static int mAllocCount;
    static void* theMemory;
};

int FooFixedAlloc::mAllocCount(0);
void* FooFixedAlloc::theMemory(nullptr);

TEST(RefBase, StrongMoves) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    ASSERT_EQ(INITIAL_STRONG_VALUE, foo->getStrongCount());
    ASSERT_FALSE(isDeleted) << "Already deleted...?";
    sp<Foo> sp1(foo);
    wp<Foo> wp1(sp1);
    ASSERT_EQ(1, foo->getStrongCount());
    // Weak count includes both strong and weak references.
    ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    {
        sp<Foo> sp2 = std::move(sp1);
        ASSERT_EQ(1, foo->getStrongCount())
                << "std::move failed, incremented refcnt";
        ASSERT_EQ(nullptr, sp1.get()) << "std::move failed, sp1 is still valid";
        // The strong count didn't increase; double-check that the moved-from
        // pointer was properly reset and the object isn't deleted early.
        sp1 = std::move(sp2);
    }
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    {
        // Now double-check that deletion happens on time.
        sp<Foo> sp2 = std::move(sp1);
    }
    ASSERT_TRUE(isDeleted) << "foo was leaked!";
    ASSERT_TRUE(wp1.promote().get() == nullptr);
}

TEST(RefBase, WeakCopies) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);
    EXPECT_EQ(0, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "Foo (weak) already deleted...?";
    wp<Foo> wp1(foo);
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    {
        wp<Foo> wp2 = wp1;
        ASSERT_EQ(2, foo->getWeakRefs()->getWeakCount());
    }
    EXPECT_EQ(1, foo->getWeakRefs()->getWeakCount());
    ASSERT_FALSE(isDeleted) << "deleted too early! still has a reference!";
    wp1 = nullptr;
    ASSERT_FALSE(isDeleted) << "Deletion on wp destruction should no longer occur";
}

TEST(RefBase, Comparisons) {
    bool isDeleted, isDeleted2, isDeleted3;
    Foo* foo = new Foo(&isDeleted);
    Foo* foo2 = new Foo(&isDeleted2);
    sp<Foo> sp1(foo);
    sp<Foo> sp2(foo2);
    wp<Foo> wp1(sp1);
    wp<Foo> wp2(sp1);
    wp<Foo> wp3(sp2);
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_TRUE(wp1 == sp1);
    ASSERT_TRUE(wp3 == sp2);
    ASSERT_TRUE(wp1 != sp2);
    ASSERT_TRUE(wp1 <= wp2);
    ASSERT_TRUE(wp1 >= wp2);
    ASSERT_FALSE(wp1 != wp2);
    ASSERT_FALSE(wp1 > wp2);
    ASSERT_FALSE(wp1 < wp2);
    ASSERT_FALSE(sp1 == sp2);
    ASSERT_TRUE(sp1 != sp2);
    bool sp1_smaller = sp1 < sp2;
    wp<Foo> wp_smaller = sp1_smaller ? wp1 : wp3;
    wp<Foo> wp_larger = sp1_smaller ? wp3 : wp1;
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    sp2 = nullptr;
    ASSERT_TRUE(isDeleted2);
    ASSERT_FALSE(isDeleted);
    ASSERT_FALSE(wp3 == sp2);
    // Comparison results on weak pointers should not be affected.
    ASSERT_TRUE(wp_smaller < wp_larger);
    ASSERT_TRUE(wp_smaller != wp_larger);
    ASSERT_TRUE(wp_smaller <= wp_larger);
    ASSERT_FALSE(wp_smaller == wp_larger);
    ASSERT_FALSE(wp_smaller > wp_larger);
    ASSERT_FALSE(wp_smaller >= wp_larger);
    wp2 = nullptr;
    ASSERT_FALSE(wp1 == wp2);
    ASSERT_TRUE(wp1 != wp2);
    wp1.clear();
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
    wp3.clear();
    ASSERT_TRUE(wp1 == wp3);
    ASSERT_FALSE(wp1 != wp3);
    ASSERT_FALSE(isDeleted);
    sp1.clear();
    ASSERT_TRUE(isDeleted);
    ASSERT_TRUE(sp1 == sp2);
    // Try to check that null pointers are properly initialized.
    {
        // Try once with non-null, to maximize chances of getting junk on the
        // stack.
        sp<Foo> sp3(new Foo(&isDeleted3));
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_FALSE(wp4 == wp5);
        ASSERT_TRUE(wp4 != wp5);
        ASSERT_FALSE(sp3 == wp5);
        ASSERT_FALSE(wp5 == sp3);
        ASSERT_TRUE(sp3 != wp5);
        ASSERT_TRUE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
    {
        sp<Foo> sp3;
        wp<Foo> wp4(sp3);
        wp<Foo> wp5;
        ASSERT_TRUE(wp4 == wp5);
        ASSERT_FALSE(wp4 != wp5);
        ASSERT_TRUE(sp3 == wp5);
        ASSERT_TRUE(wp5 == sp3);
        ASSERT_FALSE(sp3 != wp5);
        ASSERT_FALSE(wp5 != sp3);
        ASSERT_TRUE(sp3 == wp4);
    }
}

// Check whether comparison against a dead wp works, even if the object
// referenced by the new wp happens to be at the same address.
TEST(RefBase, ReplacedComparison) {
    bool isDeleted, isDeleted2;
    FooFixedAlloc* foo = new FooFixedAlloc(&isDeleted);
    sp<FooFixedAlloc> sp1(foo);
    wp<FooFixedAlloc> wp1(sp1);
    ASSERT_TRUE(wp1 == sp1);
    sp1.clear();  // Deallocates the object.
    ASSERT_TRUE(isDeleted);
    FooFixedAlloc* foo2 = new FooFixedAlloc(&isDeleted2);
    ASSERT_FALSE(isDeleted2);
    ASSERT_EQ(foo, foo2);  // Not technically a legal comparison, but ...
    sp<FooFixedAlloc> sp2(foo2);
    wp<FooFixedAlloc> wp2(sp2);
    ASSERT_TRUE(sp2 == wp2);
    ASSERT_FALSE(sp2 != wp2);
    ASSERT_TRUE(sp2 != wp1);
    ASSERT_FALSE(sp2 == wp1);
    ASSERT_FALSE(sp2 == sp1);  // sp1 is null.
    ASSERT_FALSE(wp1 == wp2);  // wp1 refers to the old object.
    ASSERT_TRUE(wp1 != wp2);
    ASSERT_TRUE(wp1 > wp2 || wp1 < wp2);
    ASSERT_TRUE(wp1 >= wp2 || wp1 <= wp2);
    ASSERT_FALSE(wp1 >= wp2 && wp1 <= wp2);
    ASSERT_FALSE(wp1 == nullptr);
    wp1 = sp2;
    ASSERT_TRUE(wp1 == wp2);
    ASSERT_FALSE(wp1 != wp2);
}

TEST(RefBase, AssertWeakRefExistsSuccess) {
    bool isDeleted;
    sp<Foo> foo = sp<Foo>::make(&isDeleted);
    wp<Foo> weakFoo = foo;

    EXPECT_EQ(weakFoo, wp<Foo>::fromExisting(foo.get()));
    EXPECT_EQ(weakFoo.unsafe_get(), wp<Foo>::fromExisting(foo.get()).unsafe_get());

    EXPECT_FALSE(isDeleted);
    foo = nullptr;
    EXPECT_TRUE(isDeleted);
}

TEST(RefBase, AssertWeakRefExistsDeath) {
    // foo is owned by some other refcounting method, or by none at all.
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);

    // A valid wp<> can only be obtained for an object already owned by an sp<>.
    EXPECT_DEATH(wp<Foo>::fromExisting(foo), "");

    delete foo;
}

TEST(RefBase, NoStrongCountPromoteFromWeak) {
    bool isDeleted;
    Foo* foo = new Foo(&isDeleted);

    wp<Foo> weakFoo = wp<Foo>(foo);

    EXPECT_FALSE(isDeleted);

    {
        sp<Foo> strongFoo = weakFoo.promote();
        EXPECT_EQ(strongFoo, foo);
    }

    // This shows the justification for wp<>::fromExisting.
    // If you construct a wp<> (for instance in a constructor) and it is
    // accidentally promoted, that promoted sp<> will exclusively own the
    // object. If that happens during the initialization of the object, or in
    // this scope where 'Foo* foo' is otherwise unowned, we are left with a
    // deleted object that can never be put into an sp<>.
    //
    // Consider the alternative implementation, where promoting a wp<> with no
    // strong counts is disallowed. If promote() returned null here, the object
    // would be unpromotable even though it hasn't been deleted. That is also
    // error-prone.
    //
    // Making attemptIncStrong abort in this case would be a backwards-
    // incompatible change, due to the frequent use of wp<T>(this) in
    // constructors.
    EXPECT_TRUE(isDeleted);
}

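// A minimal sketch (not part of the original tests) of the wp<T>(this)
// constructor pattern referenced in the comment above. The class name
// SelfReferencing is hypothetical. If mSelf were promoted before any sp<>
// owned the object, the resulting sp<> would become the sole owner, and the
// object would be deleted as soon as that sp<> went out of scope.
class SelfReferencing : public RefBase {
public:
    SelfReferencing() : mSelf(this) {}  // wp<>(this) during construction.
private:
    wp<SelfReferencing> mSelf;
};
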
TEST(RefBase, DoubleOwnershipDeath) {
    bool isDeleted;
    auto foo = sp<Foo>::make(&isDeleted);

    // If something else thinks it owns foo, this should die.
    EXPECT_DEATH(delete foo.get(), "");

    EXPECT_FALSE(isDeleted);
}

TEST(RefBase, StackOwnershipDeath) {
    bool isDeleted;
    EXPECT_DEATH({ Foo foo(&isDeleted); foo.incStrong(nullptr); }, "");
}

// Set up a situation in which we race with visit2AndRemove() to delete
// two strong references. The Bar destructor checks that there are no early
// deletions and that prior updates are visible to the destructor.
class Bar : public RefBase {
public:
    Bar(std::atomic<int>* delete_count) : mVisited1(false), mVisited2(false),
            mDeleteCount(delete_count) {
    }

    ~Bar() {
        EXPECT_TRUE(mVisited1);
        EXPECT_TRUE(mVisited2);
        (*mDeleteCount)++;
    }
    bool mVisited1;
    bool mVisited2;
private:
    std::atomic<int>* mDeleteCount;
};

[[clang::no_destroy]] static constinit sp<Bar> buffer;
static constinit std::atomic<bool> bufferFull(false);

// Wait until bufferFull has value val.
static inline void waitFor(bool val) {
    while (bufferFull != val) {}
}

cpu_set_t otherCpus;

// Divide the cpus we're allowed to run on into myCpus and otherCpus.
// Set origCpus to the processors we were originally allowed to run on.
// Return false if origCpus doesn't include at least processors 0 and 1.
static bool setExclusiveCpus(cpu_set_t* origCpus /* out */,
        cpu_set_t* myCpus /* out */, cpu_set_t* otherCpus) {
    if (sched_getaffinity(0, sizeof(cpu_set_t), origCpus) != 0) {
        return false;
    }
    if (!CPU_ISSET(0, origCpus) || !CPU_ISSET(1, origCpus)) {
        return false;
    }
    CPU_ZERO(myCpus);
    CPU_ZERO(otherCpus);
    CPU_OR(myCpus, myCpus, origCpus);
    CPU_OR(otherCpus, otherCpus, origCpus);
    for (unsigned i = 0; i < CPU_SETSIZE; ++i) {
        // This thread gets the even cores; the other thread gets the odd ones.
        if (i & 1) {
            CPU_CLR(i, myCpus);
        } else {
            CPU_CLR(i, otherCpus);
        }
    }
    return true;
}

static void visit2AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned: " << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        waitFor(true);
        buffer->mVisited2 = true;
        buffer = nullptr;
        bufferFull = false;
    }
}

TEST(RefBase, RacingDestructors) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit2AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned: " << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            waitFor(false);
            Bar* bar = new Bar(&deleteCount);
            sp<Bar> sp3(bar);
            buffer = sp3;
            bufferFull = true;
            ASSERT_TRUE(bar->getStrongCount() >= 1);
            // Weak count includes the strong count.
            ASSERT_TRUE(bar->getWeakRefs()->getWeakCount() >= 1);
            sp3->mVisited1 = true;
            sp3 = nullptr;
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}

[[clang::no_destroy]] static constinit wp<Bar> wpBuffer;
static constinit std::atomic<bool> wpBufferFull(false);

// Wait until wpBufferFull has value val.
static inline void wpWaitFor(bool val) {
    while (wpBufferFull != val) {}
}

static void visit3AndRemove() {
    if (sched_setaffinity(0, sizeof(cpu_set_t), &otherCpus) != 0) {
        FAIL() << "setaffinity returned: " << errno;
    }
    for (int i = 0; i < NITERS; ++i) {
        wpWaitFor(true);
        {
            sp<Bar> sp1 = wpBuffer.promote();
            // Implicitly checks that sp1 != nullptr by dereferencing it.
            sp1->mVisited2 = true;
        }
        wpBuffer = nullptr;
        wpBufferFull = false;
    }
}

TEST(RefBase, RacingPromotions) {
    cpu_set_t origCpus;
    cpu_set_t myCpus;
    // Restrict us and the helper thread to disjoint cpu sets.
    // This prevents us from getting scheduled against each other,
    // which would be atrociously slow.
    if (setExclusiveCpus(&origCpus, &myCpus, &otherCpus)) {
        std::thread t(visit3AndRemove);
        std::atomic<int> deleteCount(0);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &myCpus) != 0) {
            FAIL() << "setaffinity returned: " << errno;
        }
        for (int i = 0; i < NITERS; ++i) {
            Bar* bar = new Bar(&deleteCount);
            wp<Bar> wp1(bar);
            bar->mVisited1 = true;
            if (i % (NITERS / 10) == 0) {
                // Do this rarely, since it generates a log message.
                wp1 = nullptr;  // No longer destroys the object.
                wp1 = bar;
            }
            wpBuffer = wp1;
            ASSERT_EQ(bar->getWeakRefs()->getWeakCount(), 2);
            wpBufferFull = true;
            // Promotion races with the one in visit3AndRemove.
            // This may or may not succeed, but it shouldn't interfere with
            // the concurrent one.
            sp<Bar> sp1 = wp1.promote();
            wpWaitFor(false);  // Waits for the other thread to drop its strong pointer.
            sp1 = nullptr;
            // No strong pointers here.
            sp1 = wp1.promote();
            ASSERT_EQ(sp1.get(), nullptr) << "Dead wp promotion succeeded!";
        }
        t.join();
        if (sched_setaffinity(0, sizeof(cpu_set_t), &origCpus) != 0) {
            FAIL();
        }
        ASSERT_EQ(NITERS, deleteCount) << "Deletions missed!";
    }  // Otherwise this is slow and probably pointless on a uniprocessor.
}