/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "RefBase"
// #define LOG_NDEBUG 0

#include <errno.h>   // errno, used by the DEBUG_REFS printRefs() path
#include <fcntl.h>   // open()
#include <string.h>  // strerror()
#include <unistd.h>  // write(), close()

#include <memory>

#include <android-base/macros.h>

#include <log/log.h>

#include <utils/RefBase.h>

#include <utils/Mutex.h>

#include <utils/String8.h>  // used by the DEBUG_REFS printRefs() path

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

// Compile with refcounting debugging enabled.
#define DEBUG_REFS 0

// The following three are ignored unless DEBUG_REFS is set.

// whether ref-tracking is enabled by default; if not, trackMe(true, false)
// needs to be called explicitly
#define DEBUG_REFS_ENABLED_BY_DEFAULT 0

// whether call stacks are collected (significantly slows things down)
#define DEBUG_REFS_CALLSTACK_ENABLED 1

// folder where stack traces are saved when DEBUG_REFS is enabled;
// this folder needs to exist and be writable
#define DEBUG_REFS_CALLSTACK_PATH "/data/debug"

// log all reference counting operations
#define PRINT_REFS 0

#if defined(__linux__)
// CallStack is only supported on Linux-type platforms.
#define CALLSTACK_ENABLED 1
#else
#define CALLSTACK_ENABLED 0
#endif

#if CALLSTACK_ENABLED
#include <utils/CallStack.h>
#endif

// ---------------------------------------------------------------------------

namespace android {

// Observations, invariants, etc:

// By default, objects are destroyed when the last strong reference disappears
// or, if the object never had a strong reference, when the last weak reference
// disappears.
//
// OBJECT_LIFETIME_WEAK changes this behavior to retain the object
// unconditionally until the last reference of either kind disappears.  The
// client ensures that the extendObjectLifetime call happens before the dec
// call that would otherwise have deallocated the object, or before an
// attemptIncStrong call that might rely on it.  We do not worry about
// concurrent changes to the object lifetime.
//
// AttemptIncStrong will succeed if the object has a strong reference, or if it
// has a weak reference and has never had a strong reference.
// AttemptIncWeak really does succeed only if there is already a WEAK
// reference, and thus may fail when attemptIncStrong would succeed.
//
// mStrong is the strong reference count.  mWeak is the weak reference count.
// Between calls, and ignoring memory ordering effects, mWeak includes strong
// references, and is thus >= mStrong.
//
// A weakref_impl holds all the information, including both reference counts,
// required to perform wp<> operations.  Thus these can continue to be performed
// after the RefBase object has been destroyed.
//
// A weakref_impl is allocated as the value of mRefs in a RefBase object on
// construction.
// In the OBJECT_LIFETIME_STRONG case, it is normally deallocated in decWeak,
// and hence lives as long as the last weak reference. (It can also be
// deallocated in the RefBase destructor iff the strong reference count was
// never incremented and the weak count is zero, e.g. if the RefBase object is
// explicitly destroyed without decrementing the strong count.  This should be
// avoided.) In this case, the RefBase destructor should be invoked from
// decStrong.
// In the OBJECT_LIFETIME_WEAK case, the weakref_impl is always deallocated in
// the RefBase destructor, which is always invoked by decWeak. DecStrong
// explicitly avoids the deletion in this case.
//
// Memory ordering:
// The client must ensure that every inc() call, together with all other
// accesses to the object, happens before the corresponding dec() call.
//
// We try to keep memory ordering constraints on atomics as weak as possible,
// since memory fences or ordered memory accesses are likely to be a major
// performance cost for this code. All accesses to mStrong, mWeak, and mFlags
// explicitly relax memory ordering in some way.
//
// The only operations that are not memory_order_relaxed are reference count
// decrements. All reference count decrements are release operations.  In
// addition, the final decrement leading to the deallocation is followed by an
// acquire fence, which we can view informally as also turning it into an
// acquire operation.  (See 29.8p4 [atomics.fences] for details. We could
// alternatively use acq_rel operations for all decrements. This is probably
// slower on most current (2016) hardware, especially on ARMv7, but that may
// not be true indefinitely.)
//
// This convention ensures that the second-to-last decrement synchronizes with
// (in the language of 1.10 in the C++ standard) the final decrement of a
// reference count. Since reference counts are only updated using atomic
// read-modify-write operations, this also extends to any earlier decrements.
// (See "release sequence" in 1.10.)
//
// Since all operations on an object happen before the corresponding reference
// count decrement, and all reference count decrements happen before the final
// one, we are guaranteed that all other object accesses happen before the
// object is destroyed.

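// A minimal usage sketch of the invariants above (MyObject is a hypothetical
// subclass; sp<>/wp<> are the smart pointers declared alongside RefBase):
//
//     struct MyObject : public RefBase {};     // default: OBJECT_LIFETIME_STRONG
//     sp<MyObject> s = new MyObject();         // mStrong becomes 1 after the
//                                              // INITIAL_STRONG_VALUE fixup;
//                                              // mWeak is 1, since strong refs
//                                              // are counted as weak refs too.
//     wp<MyObject> w = s;                      // mWeak becomes 2.
//     s.clear();                               // last strong ref: ~MyObject()
//                                              // runs, weakref_impl survives.
//     sp<MyObject> p = w.promote();            // attemptIncStrong fails;
//                                              // p == nullptr.
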

#define INITIAL_STRONG_VALUE (1<<28)

#define MAX_COUNT 0xfffff

// Test whether the argument is a clearly invalid strong reference count.
// Used only for error checking on the value before an atomic decrement.
// Intended to be very cheap.
// Note that we cannot just check for excess decrements by comparing to zero
// since the object would be deallocated before that.
#define BAD_STRONG(c) \
        ((c) == 0 || ((c) & (~(MAX_COUNT | INITIAL_STRONG_VALUE))) != 0)

// Same for weak counts.
#define BAD_WEAK(c) ((c) == 0 || ((c) & (~MAX_COUNT)) != 0)

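// For illustration, some concrete values these checks accept and reject:
// BAD_STRONG flags a zero count (the decrement would underflow) or bits
// outside MAX_COUNT | INITIAL_STRONG_VALUE (e.g. a count that wrapped
// negative); weak counts never contain INITIAL_STRONG_VALUE, so BAD_WEAK
// is stricter.
static_assert(BAD_STRONG(0), "decrementing from zero is an underflow");
static_assert(!BAD_STRONG(1), "an ordinary final strong count is fine");
static_assert(!BAD_STRONG(INITIAL_STRONG_VALUE + 1),
              "first real reference, before the fixup in incStrong()");
static_assert(BAD_WEAK(INITIAL_STRONG_VALUE + 1),
              "a weak count never contains INITIAL_STRONG_VALUE");
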
// ---------------------------------------------------------------------------

class RefBase::weakref_impl : public RefBase::weakref_type
{
public:
    std::atomic<int32_t>    mStrong;
    std::atomic<int32_t>    mWeak;
    RefBase* const          mBase;
    std::atomic<int32_t>    mFlags;

#if !DEBUG_REFS

    explicit weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(OBJECT_LIFETIME_STRONG)
    {
    }

    void addStrongRef(const void* /*id*/) { }
    void removeStrongRef(const void* /*id*/) { }
    void renameStrongRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void addWeakRef(const void* /*id*/) { }
    void removeWeakRef(const void* /*id*/) { }
    void renameWeakRefId(const void* /*old_id*/, const void* /*new_id*/) { }
    void printRefs() const { }
    void trackMe(bool, bool) { }

#else

    weakref_impl(RefBase* base)
        : mStrong(INITIAL_STRONG_VALUE)
        , mWeak(0)
        , mBase(base)
        , mFlags(OBJECT_LIFETIME_STRONG)
        , mStrongRefs(NULL)
        , mWeakRefs(NULL)
        , mTrackEnabled(!!DEBUG_REFS_ENABLED_BY_DEFAULT)
        , mRetain(false)
    {
    }

    ~weakref_impl()
    {
        bool dumpStack = false;
        if (!mRetain && mStrongRefs != NULL) {
            dumpStack = true;
            ALOGE("Strong references remain:");
            ref_entry* refs = mStrongRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }

        if (!mRetain && mWeakRefs != NULL) {
            dumpStack = true;
            ALOGE("Weak references remain!");
            ref_entry* refs = mWeakRefs;
            while (refs) {
                char inc = refs->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, refs->id, refs->ref);
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
                CallStack::logStack(LOG_TAG, refs->stack.get());
#endif
                refs = refs->next;
            }
        }
        if (dumpStack) {
            ALOGE("above errors at:");
#if CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        }
    }

    void addStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "addStrongRef: RefBase=%p, id=%p", mBase, id);
        addRef(&mStrongRefs, id, mStrong.load(std::memory_order_relaxed));
    }

    void removeStrongRef(const void* id) {
        //ALOGD_IF(mTrackEnabled,
        //        "removeStrongRef: RefBase=%p, id=%p", mBase, id);
        if (!mRetain) {
            removeRef(&mStrongRefs, id);
        } else {
            addRef(&mStrongRefs, id, -mStrong.load(std::memory_order_relaxed));
        }
    }

    void renameStrongRefId(const void* old_id, const void* new_id) {
        //ALOGD_IF(mTrackEnabled,
        //        "renameStrongRefId: RefBase=%p, oid=%p, nid=%p",
        //        mBase, old_id, new_id);
        renameRefsId(mStrongRefs, old_id, new_id);
    }

    void addWeakRef(const void* id) {
        addRef(&mWeakRefs, id, mWeak.load(std::memory_order_relaxed));
    }

    void removeWeakRef(const void* id) {
        if (!mRetain) {
            removeRef(&mWeakRefs, id);
        } else {
            addRef(&mWeakRefs, id, -mWeak.load(std::memory_order_relaxed));
        }
    }

    void renameWeakRefId(const void* old_id, const void* new_id) {
        renameRefsId(mWeakRefs, old_id, new_id);
    }

    void trackMe(bool track, bool retain) {
        mTrackEnabled = track;
        mRetain = retain;
    }

    void printRefs() const
    {
        String8 text;

        {
            Mutex::Autolock _l(mMutex);
            char buf[128];
            snprintf(buf, sizeof(buf),
                     "Strong references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mStrongRefs);
            snprintf(buf, sizeof(buf),
                     "Weak references on RefBase %p (weakref_type %p):\n",
                     mBase, this);
            text.append(buf);
            printRefsLocked(&text, mWeakRefs);
        }

        {
            char name[100];
            snprintf(name, sizeof(name), DEBUG_REFS_CALLSTACK_PATH "/%p.stack",
                     this);
            // Mode must be octal (rw-r--r--); a decimal 644 here would yield
            // bogus permission bits.
            int rc = open(name, O_RDWR | O_CREAT | O_APPEND, 0644);
            if (rc >= 0) {
                (void)write(rc, text.string(), text.length());
                close(rc);
                ALOGD("STACK TRACE for %p saved in %s", this, name);
            }
            else ALOGE("FAILED TO PRINT STACK TRACE for %p in %s: %s", this,
                      name, strerror(errno));
        }
    }

private:
    struct ref_entry
    {
        ref_entry* next;
        const void* id;
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
        CallStack::CallStackUPtr stack;
#endif
        int32_t ref;
    };

    void addRef(ref_entry** refs, const void* id, int32_t mRef)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* ref = new ref_entry;
            // Reference count at the time of the snapshot, but before the
            // update.  A positive value means we incremented the reference
            // count; a negative value means we decremented it.
            ref->ref = mRef;
            ref->id = id;
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
            ref->stack = CallStack::getCurrent(2);
#endif
            ref->next = *refs;
            *refs = ref;
        }
    }

    void removeRef(ref_entry** refs, const void* id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);

            ref_entry* const head = *refs;
            ref_entry* ref = head;
            while (ref != NULL) {
                if (ref->id == id) {
                    *refs = ref->next;
                    delete ref;
                    return;
                }
                refs = &ref->next;
                ref = *refs;
            }

            ALOGE("RefBase: removing id %p on RefBase %p "
                    "(weakref_type %p) that doesn't exist!",
                    id, mBase, this);

            ref = head;
            while (ref) {
                char inc = ref->ref >= 0 ? '+' : '-';
                ALOGD("\t%c ID %p (ref %d):", inc, ref->id, ref->ref);
                ref = ref->next;
            }

#if CALLSTACK_ENABLED
            CallStack::logStack(LOG_TAG);
#endif
        }
    }

    void renameRefsId(ref_entry* r, const void* old_id, const void* new_id)
    {
        if (mTrackEnabled) {
            AutoMutex _l(mMutex);
            ref_entry* ref = r;
            while (ref != NULL) {
                if (ref->id == old_id) {
                    ref->id = new_id;
                }
                ref = ref->next;
            }
        }
    }

    void printRefsLocked(String8* out, const ref_entry* refs) const
    {
        char buf[128];
        while (refs) {
            char inc = refs->ref >= 0 ? '+' : '-';
            snprintf(buf, sizeof(buf), "\t%c ID %p (ref %d):\n",
                     inc, refs->id, refs->ref);
            out->append(buf);
#if DEBUG_REFS_CALLSTACK_ENABLED && CALLSTACK_ENABLED
            out->append(CallStack::stackToString("\t\t", refs->stack.get()));
#else
            out->append("\t\t(call stacks disabled)\n");
#endif
            refs = refs->next;
        }
    }

    mutable Mutex mMutex;
    ref_entry* mStrongRefs;
    ref_entry* mWeakRefs;

    bool mTrackEnabled;
    // If true, retain the full add/remove history (removals are recorded as
    // entries with a negated count) instead of deleting the addref entry
    // whose id matches on removeref.
    bool mRetain;

#endif
};

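// How the DEBUG_REFS machinery above is typically driven (a sketch: requires
// rebuilding with DEBUG_REFS set to 1; MyObject is a hypothetical subclass):
//
//     sp<MyObject> obj = new MyObject();
//     obj->getWeakRefs()->trackMe(true /*track*/, true /*retain*/);
//     // ... exercise the object ...
//     obj->getWeakRefs()->printRefs();  // writes every recorded inc/dec to
//                                       // DEBUG_REFS_CALLSTACK_PATH/<ptr>.stack,
//                                       // with call stacks if
//                                       // DEBUG_REFS_CALLSTACK_ENABLED is set.
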
// ---------------------------------------------------------------------------

void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    int32_t old __unused = refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE, std::memory_order_relaxed);
    // A decStrong() must still happen after us.
    ALOG_ASSERT(old > INITIAL_STRONG_VALUE, "0x%x too small", old);
    refs->mBase->onFirstRef();
}

void RefBase::incStrongRequireStrong(const void* id) const {
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);

    LOG_ALWAYS_FATAL_IF(c <= 0 || c == INITIAL_STRONG_VALUE,
                        "incStrongRequireStrong() called on %p which isn't already owned", refs);
#if PRINT_REFS
    ALOGD("incStrong (requiring strong) of %p from %p: cnt=%d\n", this, id, c);
#endif
}

void RefBase::decStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->removeStrongRef(id);
    const int32_t c = refs->mStrong.fetch_sub(1, std::memory_order_release);
#if PRINT_REFS
    ALOGD("decStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    LOG_ALWAYS_FATAL_IF(BAD_STRONG(c), "decStrong() called on %p too many times",
            refs);
    if (c == 1) {
        std::atomic_thread_fence(std::memory_order_acquire);
        refs->mBase->onLastStrongRef(id);
        int32_t flags = refs->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            delete this;
            // The destructor does not delete refs in this case.
        }
    }
    // Note that even with only strong reference operations, the thread
    // deallocating this may not be the same as the thread deallocating refs.
    // That's OK: all accesses to this happen before its deletion here,
    // and all accesses to refs happen before its deletion in the final decWeak.
    // The destructor can safely access mRefs because either it's deleting
    // mRefs itself, or it's running entirely before the final mWeak decrement.
    //
    // Since we're doing atomic loads of `flags`, the static analyzer assumes
    // they can change between `delete this;` and `refs->decWeak(id);`. This is
    // not the case. The analyzer may become more okay with this pattern when
    // https://bugs.llvm.org/show_bug.cgi?id=34365 gets resolved. NOLINTNEXTLINE
    refs->decWeak(id);
}

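// A two-thread sketch of why the release decrement plus acquire fence in
// decStrong() suffices (mData is a hypothetical member; idA/idB are arbitrary
// cookies):
//
//     Thread A                             Thread B
//     obj->mData = 42;                     ...
//     obj->decStrong(idA);                 obj->decStrong(idB);
//     // fetch_sub returns 2: a release    // fetch_sub returns 1: B runs the
//     // operation, nothing more.          // acquire fence, then deletes obj.
//
// A's store to mData happens-before A's release decrement.  B's acquire
// fence, placed after B reads the count value produced by A's decrement (a
// release sequence of read-modify-writes), synchronizes with that release.
// Hence the store is visible to the destructor, and deletion cannot race
// with A's last use of the object.
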
void RefBase::forceIncStrong(const void* id) const
{
    // Allows initial mStrong of 0 in addition to INITIAL_STRONG_VALUE.
    // TODO: Better document assumptions.
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = refs->mStrong.fetch_add(1, std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count underflow",
               refs);
#if PRINT_REFS
    ALOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        refs->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
        FALLTHROUGH_INTENDED;
    case 0:
        refs->mBase->onFirstRef();
    }
}

int32_t RefBase::getStrongCount() const
{
    // Debugging only; no memory ordering guarantees.
    return mRefs->mStrong.load(std::memory_order_relaxed);
}

RefBase* RefBase::weakref_type::refBase() const
{
    return static_cast<const weakref_impl*>(this)->mBase;
}

void RefBase::weakref_type::incWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    ALOG_ASSERT(c >= 0, "incWeak called on %p after last weak ref", this);
}

void RefBase::weakref_type::incWeakRequireWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->addWeakRef(id);
    const int32_t c __unused = impl->mWeak.fetch_add(1,
            std::memory_order_relaxed);
    LOG_ALWAYS_FATAL_IF(c <= 0, "incWeakRequireWeak called on %p which has no weak refs", this);
}

void RefBase::weakref_type::decWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    impl->removeWeakRef(id);
    const int32_t c = impl->mWeak.fetch_sub(1, std::memory_order_release);
    LOG_ALWAYS_FATAL_IF(BAD_WEAK(c), "decWeak called on %p too many times",
            this);
    if (c != 1) return;
    std::atomic_thread_fence(std::memory_order_acquire);

    int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
    if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
        // This is the regular lifetime case. The object is destroyed
        // when the last strong reference goes away. Since weakref_impl
        // outlives the object, it is not destroyed in the dtor, and
        // we'll have to do it here.
        if (impl->mStrong.load(std::memory_order_relaxed)
                == INITIAL_STRONG_VALUE) {
            // Decrementing a weak count to zero when the object never had a
            // strong reference.  We assume it acquired a weak reference early,
            // e.g. in the constructor, and will eventually be properly
            // destroyed, usually via incrementing and decrementing the strong
            // count.  Thus we no longer do anything here.  We log this case,
            // since it seems to be extremely rare, and should not normally
            // occur. We used to deallocate mBase here, so this may now
            // indicate a leak.
            ALOGW("RefBase: Object at %p lost last weak reference "
                    "before it had a strong reference", impl->mBase);
        } else {
            // ALOGV("Freeing refs %p of old RefBase %p\n", this, impl->mBase);
            delete impl;
        }
    } else {
        // This is the OBJECT_LIFETIME_WEAK case. The last weak reference
        // is gone, so we can destroy the object.
        impl->mBase->onLastWeakRef(id);
        delete impl->mBase;
    }
}

bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);

    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    int32_t curCount = impl->mStrong.load(std::memory_order_relaxed);

    ALOG_ASSERT(curCount >= 0,
            "attemptIncStrong called on %p after underflow", this);

    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        // we're in the easy/common case of promoting a weak-reference
        // from an existing strong reference.
        if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // the strong count has changed on us, we need to re-assert our
        // situation. curCount was updated by compare_exchange_weak.
    }

    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        // we're now in the harder case of either:
        // - there never was a strong reference on us
        // - or, all strong references have been released
        int32_t flags = impl->mFlags.load(std::memory_order_relaxed);
        if ((flags&OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_STRONG) {
            // this object has a "normal" life-time, i.e.: it gets destroyed
            // when the last strong reference goes away
            if (curCount <= 0) {
                // the last strong-reference got released, the object cannot
                // be revived.
                decWeak(id);
                return false;
            }

            // here, curCount == INITIAL_STRONG_VALUE, which means
            // there never was a strong-reference, so we can try to
            // promote this object; we need to do that atomically.
            while (curCount > 0) {
                if (impl->mStrong.compare_exchange_weak(curCount, curCount+1,
                        std::memory_order_relaxed)) {
                    break;
                }
                // the strong count has changed on us, we need to re-assert our
                // situation (e.g.: another thread has inc/decStrong'ed us)
                // curCount has been updated.
            }

            if (curCount <= 0) {
                // promote() failed, some other thread destroyed us in the
                // meantime (i.e.: strong count reached zero).
                decWeak(id);
                return false;
            }
        } else {
            // this object has an "extended" life-time, i.e.: it can be
            // revived from a weak-reference only.
            // Ask the object's implementation if it agrees to be revived
            if (!impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id)) {
                // it didn't, so give up.
                decWeak(id);
                return false;
            }
            // grab a strong-reference, which is always safe due to the
            // extended life-time.
            curCount = impl->mStrong.fetch_add(1, std::memory_order_relaxed);
            // If the strong reference count has already been incremented by
            // someone else, the implementor of onIncStrongAttempted() is holding
            // an unneeded reference.  So call onLastStrongRef() here to remove it.
            // (No, this is not pretty.)  Note that we MUST NOT do this if we
            // are in fact acquiring the first reference.
            if (curCount != 0 && curCount != INITIAL_STRONG_VALUE) {
                impl->mBase->onLastStrongRef(id);
            }
        }
    }

    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    // curCount is the value of mStrong before we incremented it.
    // Now we need to fix up the count if it was INITIAL_STRONG_VALUE.
    // This must be done safely, i.e.: handle the case where several threads
    // were here in attemptIncStrong().
    // curCount > INITIAL_STRONG_VALUE is OK, and can happen if we're doing
    // this in the middle of another incStrong.  The subtraction is handled
    // by the thread that started with INITIAL_STRONG_VALUE.
    if (curCount == INITIAL_STRONG_VALUE) {
        impl->mStrong.fetch_sub(INITIAL_STRONG_VALUE,
                std::memory_order_relaxed);
    }

    return true;
}

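// attemptIncStrong() is the engine behind weak-pointer promotion.  A
// wp<T>::promote() amounts to the following sketch (assuming a private
// sp<> helper, here called set_pointer(), that adopts the pointer without
// a further increment):
//
//     template <typename T>
//     sp<T> wp<T>::promote() const {
//         sp<T> result;
//         if (m_ptr && m_refs->attemptIncStrong(&result)) {
//             result.set_pointer(m_ptr);
//         }
//         return result;  // empty if the object could not be revived
//     }
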
bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak.load(std::memory_order_relaxed);
    ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
               this);
    while (curCount > 0) {
        if (impl->mWeak.compare_exchange_weak(curCount, curCount+1,
                std::memory_order_relaxed)) {
            break;
        }
        // curCount has been updated.
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}

int32_t RefBase::weakref_type::getWeakCount() const
{
    // Debug only!
    return static_cast<const weakref_impl*>(this)->mWeak
            .load(std::memory_order_relaxed);
}

void RefBase::weakref_type::printRefs() const
{
    static_cast<const weakref_impl*>(this)->printRefs();
}

void RefBase::weakref_type::trackMe(bool enable, bool retain)
{
    static_cast<weakref_impl*>(this)->trackMe(enable, retain);
}

RefBase::weakref_type* RefBase::createWeak(const void* id) const
{
    mRefs->incWeak(id);
    return mRefs;
}

RefBase::weakref_type* RefBase::getWeakRefs() const
{
    return mRefs;
}

RefBase::RefBase()
    : mRefs(new weakref_impl(this))
{
}

RefBase::~RefBase()
{
    int32_t flags = mRefs->mFlags.load(std::memory_order_relaxed);
    // If the life-time of this object has been extended to WEAK, then
    // weakref_impl doesn't out-live the object and we can free it now.
    if ((flags & OBJECT_LIFETIME_MASK) == OBJECT_LIFETIME_WEAK) {
        // It's possible that the weak count is not 0 if the object
        // re-acquired a weak reference in its destructor.
        if (mRefs->mWeak.load(std::memory_order_relaxed) == 0) {
            delete mRefs;
        }
    } else if (mRefs->mStrong.load(std::memory_order_relaxed) == INITIAL_STRONG_VALUE) {
        // We never acquired a strong reference on this object.

        // TODO: make this fatal, but too much code in Android manages RefBase with
        // new/delete manually (or using other mechanisms such as std::make_unique).
        // However, this is dangerous because it's also common for code to use the
        // sp<T>(T*) constructor, assuming that if the object is around, it is already
        // owned by an sp<>.
        ALOGW("RefBase: Explicit destruction, weak count = %d (in %p). Use sp<> to manage this "
              "object.",
              mRefs->mWeak.load(), this);

#if CALLSTACK_ENABLED
        CallStack::logStack(LOG_TAG);
#endif
    }
    // For debugging purposes, clear mRefs.  Ineffective against outstanding wp's.
    const_cast<weakref_impl*&>(mRefs) = nullptr;
}

void RefBase::extendObjectLifetime(int32_t mode)
{
    // Must be happens-before ordered with respect to construction or any
    // operation that could destroy the object.
    mRefs->mFlags.fetch_or(mode, std::memory_order_relaxed);
}

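// A sketch of a class opting into the extended lifetime discussed at the top
// of this file (MyCookie is hypothetical).  Calling extendObjectLifetime() in
// the constructor satisfies the ordering requirement noted above, since
// construction happens-before any decStrong()/attemptIncStrong() on the
// object:
//
//     class MyCookie : public RefBase {
//     public:
//         MyCookie() { extendObjectLifetime(OBJECT_LIFETIME_WEAK); }
//         // The object now survives until the last reference of *either*
//         // kind disappears, and the destructor is invoked from decWeak()
//         // rather than decStrong().
//     };
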
void RefBase::onFirstRef()
{
}

void RefBase::onLastStrongRef(const void* /*id*/)
{
}

bool RefBase::onIncStrongAttempted(uint32_t flags, const void* /*id*/)
{
    return (flags & FIRST_INC_STRONG) != 0;
}

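// A sketch of overriding this hook (Entry is a hypothetical subclass): an
// object with WEAK lifetime can veto revival, since attemptIncStrong() above
// consults onIncStrongAttempted() before re-acquiring a strong reference:
//
//     class Entry : public RefBase {
//     public:
//         Entry() { extendObjectLifetime(OBJECT_LIFETIME_WEAK); }
//     protected:
//         bool onIncStrongAttempted(uint32_t flags, const void* /*id*/) override {
//             // Refuse revival once invalidated; the default implementation
//             // accepts whenever FIRST_INC_STRONG is set.
//             return !mInvalidated && (flags & FIRST_INC_STRONG);
//         }
//     private:
//         std::atomic<bool> mInvalidated{false};
//     };
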
void RefBase::onLastWeakRef(const void* /*id*/)
{
}

// ---------------------------------------------------------------------------

#if DEBUG_REFS
void RefBase::renameRefs(size_t n, const ReferenceRenamer& renamer) {
    for (size_t i=0 ; i<n ; i++) {
        renamer(i);
    }
}
#else
void RefBase::renameRefs(size_t /*n*/, const ReferenceRenamer& /*renamer*/) { }
#endif

void RefBase::renameRefId(weakref_type* ref,
        const void* old_id, const void* new_id) {
    weakref_impl* const impl = static_cast<weakref_impl*>(ref);
    impl->renameStrongRefId(old_id, new_id);
    impl->renameWeakRefId(old_id, new_id);
}

void RefBase::renameRefId(RefBase* ref,
        const void* old_id, const void* new_id) {
    ref->mRefs->renameStrongRefId(old_id, new_id);
    ref->mRefs->renameWeakRefId(old_id, new_id);
}

} // namespace android