//===------------------------ memory.cpp ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MEMORY
#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#endif
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

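// The virtual destructors of __shared_count and __shared_weak_count are
// defined out of line here rather than in the header so that the vtables
// for these classes are emitted in the library, keeping them part of the
// stable libc++ ABI.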
__shared_count::~__shared_count()
{
}

__shared_weak_count::~__shared_weak_count()
{
}

#if defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
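// These out-of-line definitions exist only for backward ABI compatibility:
// older releases exported these symbols from the library, while newer ones
// define the corresponding functions inline in the header.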
void
__shared_count::__add_shared() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}

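// __shared_owners_ stores the number of shared owners minus one, so a
// decrement that reaches -1 means the last shared owner has gone away and
// the owned object must be destroyed via __on_zero_shared().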
bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

#endif // _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS

void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case. Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE? Yes. During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once. The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

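// Implements weak_ptr<T>::lock(): atomically increment the shared count only
// if the object is still alive (__shared_owners_ != -1). The CAS loop reloads
// the current count on failure; a null return means the object has already
// expired.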
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

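// The base class knows nothing about the deleter; derived control blocks
// (e.g. __shared_ptr_pointer) override this to hand the stored deleter back
// to std::get_deleter when the requested type_info matches.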
const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return nullptr;
}

#endif // !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

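// Backing store for the locks used by the atomic free functions on
// shared_ptr (atomic_load, atomic_store, atomic_exchange, ...). A fixed
// pool of 16 mutexes is shared by all shared_ptr objects; each pointer is
// hashed onto one of them in __get_sp_mut() below.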
_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
    : __lx(p)
{
}

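// Spin briefly before blocking: try the lock up to 16 times, yielding the
// thread between attempts, then fall back to a full blocking lock. The
// critical sections guarded by __sp_mut are tiny, so a short spin usually
// succeeds without a context switch.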
void
__sp_mut::lock() _NOEXCEPT
{
    auto m = static_cast<__libcpp_mutex_t*>(__lx);
    unsigned count = 0;
    while (__libcpp_mutex_trylock(m) != 0)
    {
        if (++count > 16)
        {
            __libcpp_mutex_lock(m);
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}

__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}
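
// Illustrative sketch (kept as a comment, not compiled here) of how the
// atomic shared_ptr overloads in the <memory> header are expected to use
// this mutex pool, roughly:
//
//     template <class T>
//     shared_ptr<T> atomic_load(const shared_ptr<T>* p)
//     {
//         __sp_mut& m = __get_sp_mut(p);   // pick the mutex hashed for p
//         m.lock();
//         shared_ptr<T> q = *p;            // copy under the lock
//         m.unlock();
//         return q;
//     }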

#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

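// C++11 garbage collection support ([util.dynamic.safety]). libc++ does not
// implement a collector, so these functions are no-ops and pointer safety is
// reported as "relaxed".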
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

#if !defined(_LIBCPP_ABI_POINTER_SAFETY_ENUM_TYPE)
pointer_safety get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}
#endif

void*
__undeclare_reachable(void* p)
{
    return p;
}

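// std::align: round ptr up to the next multiple of alignment. If the aligned
// block of `size` bytes still fits within `space`, update ptr, shrink space
// by the number of bytes consumed for alignment, and return the aligned
// pointer; otherwise return nullptr and leave ptr and space untouched.
// Example: alignment=8, ptr=0x1003, size=4, space=16 -> returns 0x1008,
// ptr=0x1008, space=11.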
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}

_LIBCPP_END_NAMESPACE_STD