// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "common/RefCounted.h"

#include "common/Assert.h"

#include <atomic>
#include <cstddef>
#include <cstdint>

static constexpr size_t kPayloadBits = 1;
static constexpr uint64_t kPayloadMask = (uint64_t(1) << kPayloadBits) - 1;
static constexpr uint64_t kRefCountIncrement = (uint64_t(1) << kPayloadBits);

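// With kPayloadBits == 1, the low bit of mRefCount stores the payload and the
// actual reference count lives in the remaining bits, so every reference adds
// kRefCountIncrement == 2. These asserts just restate the constants above:
static_assert(kPayloadMask == 1, "payload occupies the low bit");
static_assert(kRefCountIncrement == 2, "each reference adds 2 to mRefCount");
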
RefCounted::RefCounted(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
    ASSERT((payload & kPayloadMask) == payload);
}
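
// For example, RefCounted(0) yields mRefCount == kRefCountIncrement: an
// external reference count of 1 and an empty payload.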

uint64_t RefCounted::GetRefCountForTesting() const {
    return mRefCount >> kPayloadBits;
}

uint64_t RefCounted::GetRefCountPayload() const {
    // We only care about the payload bits of the refcount. These never change after
    // initialization so we can use the relaxed memory order. The order doesn't guarantee
    // anything except the atomicity of the load, which is enough since any past values of the
    // atomic will have the correct payload bits.
    return kPayloadMask & mRefCount.load(std::memory_order_relaxed);
}
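
// For example, a subclass constructed as RefCounted(1) can later recover that
// bit through GetRefCountPayload() without any extra storage.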

void RefCounted::Reference() {
    ASSERT((mRefCount & ~kPayloadMask) != 0);

    // The relaxed ordering guarantees only the atomicity of the update, which is enough here
    // because the reference we are copying from still exists and makes sure other threads
    // don't delete `this`.
    // See the explanation in the Boost documentation:
    //     https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
    mRefCount.fetch_add(kRefCountIncrement, std::memory_order_relaxed);
}
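
// The precondition in practice: callers of Reference() already own a live
// reference, as in this hypothetical copy helper:
//
//     RefCounted* Duplicate(RefCounted* owned) {
//         owned->Reference();  // `owned` keeps the object alive during the add
//         return owned;
//     }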

void RefCounted::Release() {
    ASSERT((mRefCount & ~kPayloadMask) != 0);

    // The release fence here is to make sure all accesses to the object on a thread A
    // happen-before the object is deleted on a thread B. The release memory order ensures that
    // all accesses on thread A happen-before the refcount is decreased and the atomic variable
    // makes sure the refcount decrease in A happens-before the refcount decrease in B. Finally
    // the acquire fence in the destruction case makes sure the refcount decrease in B
    // happens-before the `delete this`.
    //
    // See the explanation in the Boost documentation:
    //     https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
    uint64_t previousRefCount = mRefCount.fetch_sub(kRefCountIncrement, std::memory_order_release);

    // Check that the previous reference count was strictly less than 2, ignoring payload bits:
    // if so, `this` held the last reference and the object must be destroyed.
    if (previousRefCount < 2 * kRefCountIncrement) {
        // Note that on ARM64 this will generate a `dmb ish` instruction which is a global
        // memory barrier, when an acquire load on mRefCount (using the `ldar` instruction)
        // should be enough and could end up being faster.
        std::atomic_thread_fence(std::memory_order_acquire);
        DeleteThis();
    }
}
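
// A sketch of the acquire-load alternative mentioned above (not what this file
// does; shown only to illustrate the trade-off):
//
//     if (previousRefCount < 2 * kRefCountIncrement) {
//         // Synchronizes-with the release fetch_sub in other threads' calls
//         // to Release(), without a full barrier (ldar instead of dmb ish
//         // on ARM64).
//         (void)mRefCount.load(std::memory_order_acquire);
//         DeleteThis();
//     }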

void RefCounted::APIReference() {
    Reference();
}

void RefCounted::APIRelease() {
    Release();
}

void RefCounted::DeleteThis() {
    delete this;
}
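
// Usage sketch (illustrative only; `MyObject` is hypothetical):
//
//     class MyObject : public RefCounted {
//       public:
//         MyObject() : RefCounted(0) {}
//     };
//
//     MyObject* object = new MyObject();  // refcount == 1
//     object->Reference();                // refcount == 2
//     object->Release();                  // refcount == 1
//     object->Release();                  // refcount == 0, DeleteThis() runs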