// © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
**********************************************************************
*   Copyright (C) 1997-2015, International Business Machines
*   Corporation and others.  All Rights Reserved.
**********************************************************************
*
* File UMUTEX.H
*
* Modification History:
*
*   Date        Name        Description
*   04/02/97    aliu        Creation.
*   04/07/99    srl         rewrite - C interface, multiple mutices
*   05/13/99    stephen     Changed to umutex (from cmutex)
******************************************************************************
*/

#ifndef UMUTEX_H
#define UMUTEX_H

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <type_traits>

#include "unicode/utypes.h"
#include "unicode/uclean.h"
#include "unicode/uobject.h"

#include "putilimp.h"

#if defined(U_USER_ATOMICS_H) || defined(U_USER_MUTEX_H)
// Support for including an alternate implementation of atomic & mutex operations has been withdrawn.
// See issue ICU-20185.
#error U_USER_ATOMICS and U_USER_MUTEX_H are not supported
#endif

// Export an explicit template instantiation of std::atomic<int32_t>.
// When building DLLs for Windows this is required as it is used as a data member of the exported SharedObject class.
// See digitlst.h, pluralaffix.h, datefmt.h, and others for similar examples.
//
// Similar story for std::atomic<std::mutex *>, and the exported UMutex class.
#if U_PF_WINDOWS <= U_PLATFORM && U_PLATFORM <= U_PF_CYGWIN && !defined(U_IN_DOXYGEN)
#if defined(__clang__) || defined(_MSC_VER)
  #if defined(__clang__)
    // Suppress the warning that the explicit instantiation after explicit specialization has no effect.
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Winstantiation-after-specialization"
  #endif
template struct U_COMMON_API std::atomic<int32_t>;
template struct U_COMMON_API std::atomic<std::mutex *>;
  #if defined(__clang__)
    #pragma clang diagnostic pop
  #endif
#elif defined(__GNUC__)
// For GCC this class is already exported/visible, so no need for U_COMMON_API.
template struct std::atomic<int32_t>;
template struct std::atomic<std::mutex *>;
#endif
#endif


U_NAMESPACE_BEGIN

/****************************************************************************
 *
 *   Low Level Atomic Operations, ICU wrappers for.
 *
 ****************************************************************************/

typedef std::atomic<int32_t> u_atomic_int32_t;
#define ATOMIC_INT32_T_INITIALIZER(val) ATOMIC_VAR_INIT(val)

inline int32_t umtx_loadAcquire(u_atomic_int32_t &var) {
    return var.load(std::memory_order_acquire);
}

inline void umtx_storeRelease(u_atomic_int32_t &var, int32_t val) {
    var.store(val, std::memory_order_release);
}

inline int32_t umtx_atomic_inc(u_atomic_int32_t *var) {
    return var->fetch_add(1) + 1;
}

inline int32_t umtx_atomic_dec(u_atomic_int32_t *var) {
    return var->fetch_sub(1) - 1;
}

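// Usage sketch for the atomic wrappers above (illustration only, not part of this
// header's API; the names gDataReady, gRefCount, publish, isReady, addRef, and
// removeRef are hypothetical).
//
//     static u_atomic_int32_t gDataReady = ATOMIC_INT32_T_INITIALIZER(0);
//     static u_atomic_int32_t gRefCount  = ATOMIC_INT32_T_INITIALIZER(0);
//
//     void publish() {
//         // ... fill in the shared data ...
//         umtx_storeRelease(gDataReady, 1);           // publish with release ordering
//     }
//     UBool isReady() {
//         return umtx_loadAcquire(gDataReady) == 1;   // observe with acquire ordering
//     }
//
//     void addRef()    { umtx_atomic_inc(&gRefCount); }    // returns the new count
//     void removeRef() {
//         if (umtx_atomic_dec(&gRefCount) == 0) {
//             // last reference gone
//         }
//     }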
/*************************************************************************************************
 *
 *  UInitOnce Definitions.
 *
 *************************************************************************************************/

struct UInitOnce {
    u_atomic_int32_t   fState;
    UErrorCode         fErrCode;
    void reset() {fState = 0;}
    UBool isReset() {return umtx_loadAcquire(fState) == 0;}
// Note: isReset() is used by service registration code.
//       Thread safety of this usage needs review.
};

#define U_INITONCE_INITIALIZER {ATOMIC_INT32_T_INITIALIZER(0), U_ZERO_ERROR}


U_COMMON_API UBool U_EXPORT2 umtx_initImplPreInit(UInitOnce &);
U_COMMON_API void  U_EXPORT2 umtx_initImplPostInit(UInitOnce &);

// umtx_initOnce variant for member functions: the init function is a non-static
//            member of class T, invoked on the object passed in. No context parameter.
template<class T> void umtx_initOnce(UInitOnce &uio, T *obj, void (U_CALLCONV T::*fp)()) {
    if (umtx_loadAcquire(uio.fState) == 2) {
        return;
    }
    if (umtx_initImplPreInit(uio)) {
        (obj->*fp)();
        umtx_initImplPostInit(uio);
    }
}


// umtx_initOnce variant for plain functions, or static class functions.
//            No context parameter.
inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)()) {
    if (umtx_loadAcquire(uio.fState) == 2) {
        return;
    }
    if (umtx_initImplPreInit(uio)) {
        (*fp)();
        umtx_initImplPostInit(uio);
    }
}

// umtx_initOnce variant for plain functions, or static class functions.
//            With UErrorCode, no context parameter.
inline void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(UErrorCode &), UErrorCode &errCode) {
    if (U_FAILURE(errCode)) {
        return;
    }
    if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) {
        // We run the initialization.
        (*fp)(errCode);
        uio.fErrCode = errCode;
        umtx_initImplPostInit(uio);
    } else {
        // Someone else already ran the initialization.
        if (U_FAILURE(uio.fErrCode)) {
            errCode = uio.fErrCode;
        }
    }
}

// umtx_initOnce variant for plain functions, or static class functions,
//            with a context parameter.
template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T), T context) {
    if (umtx_loadAcquire(uio.fState) == 2) {
        return;
    }
    if (umtx_initImplPreInit(uio)) {
        (*fp)(context);
        umtx_initImplPostInit(uio);
    }
}

// umtx_initOnce variant for plain functions, or static class functions,
//            with a context parameter and an error code.
template<class T> void umtx_initOnce(UInitOnce &uio, void (U_CALLCONV *fp)(T, UErrorCode &), T context, UErrorCode &errCode) {
    if (U_FAILURE(errCode)) {
        return;
    }
    if (umtx_loadAcquire(uio.fState) != 2 && umtx_initImplPreInit(uio)) {
        // We run the initialization.
        (*fp)(context, errCode);
        uio.fErrCode = errCode;
        umtx_initImplPostInit(uio);
    } else {
        // Someone else already ran the initialization.
        if (U_FAILURE(uio.fErrCode)) {
            errCode = uio.fErrCode;
        }
    }
}

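// Usage sketch for umtx_initOnce() (illustration only, not part of this header's
// API; gMyInitOnce, gMyValue, initMyValue, and getMyValue are hypothetical names).
//
//     static UInitOnce gMyInitOnce = U_INITONCE_INITIALIZER;
//     static int32_t   gMyValue = 0;
//
//     static void U_CALLCONV initMyValue(UErrorCode &status) {
//         if (U_FAILURE(status)) { return; }
//         gMyValue = 42;                    // stand-in for an expensive, one-time setup
//     }
//
//     int32_t getMyValue(UErrorCode &status) {
//         // Runs initMyValue() at most once; later callers see the stored UErrorCode.
//         umtx_initOnce(gMyInitOnce, &initMyValue, status);
//         return gMyValue;
//     }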
// UMutex should be constexpr-constructible, so that no initialization code
// is run during startup.
// This works on all C++ libraries except MS VS before VS2019.
#if (defined(_CPPLIB_VER) && !defined(_MSVC_STL_VERSION)) || \
    (defined(_MSVC_STL_VERSION) && _MSVC_STL_VERSION < 142)
    // (VS std lib older than VS2017) || (VS std lib version < VS2019)
#   define UMUTEX_CONSTEXPR
#else
#   define UMUTEX_CONSTEXPR constexpr
#endif

/**
 * UMutex - ICU Mutex class.
 *
 * This is the preferred Mutex class for use within ICU implementation code.
 * It is a thin wrapper over C++ std::mutex, with these additions:
 *    - Static instances are safe, not triggering static construction or destruction,
 *      avoiding the associated order-of-construction and order-of-destruction issues.
 *    - Plumbed into u_cleanup() for destructing the underlying std::mutex,
 *      which frees any OS level resources it may be holding.
 *
 * Limitations:
 *    - Static or global instances only. Cannot be heap allocated. Cannot appear as a
 *      member of another class.
 *    - No condition variables or other advanced features. If those are needed, use
 *      std::mutex and std::condition_variable directly. For an example, see unifiedcache.cpp
 *
 * Typical Usage:
 *    static UMutex myMutex;
 *
 *    {
 *        Mutex lock(myMutex);
 *        ...    // Do stuff that is protected by myMutex;
 *    }          // myMutex is released when lock goes out of scope.
 */

class U_COMMON_API UMutex {
public:
    UMUTEX_CONSTEXPR UMutex() {}
    ~UMutex() = default;

    UMutex(const UMutex &other) = delete;
    UMutex &operator =(const UMutex &other) = delete;
    void *operator new(size_t) = delete;

    // requirements for C++ BasicLockable, allows UMutex to work with std::lock_guard
    void lock() {
        std::mutex *m = fMutex.load(std::memory_order_acquire);
        if (m == nullptr) { m = getMutex(); }
        m->lock();
    }
    void unlock() { fMutex.load(std::memory_order_relaxed)->unlock(); }

    static void cleanup();

private:
    alignas(std::mutex) char fStorage[sizeof(std::mutex)] {};
    std::atomic<std::mutex *> fMutex { nullptr };

    /** All initialized UMutexes are kept in a linked list, so that they can be found,
     *  and the underlying std::mutex destructed, by u_cleanup().
     */
    UMutex *fListLink { nullptr };
    static UMutex *gListHead;

    /** Out-of-line function to lazily initialize a UMutex on first use.
     *  The initial fast check is inline, in lock(). The returned value is never nullptr.
     */
    std::mutex *getMutex();
};


/* Lock a mutex.
 * @param mutex The given mutex to be locked.  Pass NULL to specify
 *              the global ICU mutex.  Recursive locks are an error
 *              and may cause a deadlock on some platforms.
 */
U_CAPI void U_EXPORT2 umtx_lock(UMutex* mutex);

/* Unlock a mutex.
 * @param mutex The given mutex to be unlocked.  Pass NULL to specify
 *              the global ICU mutex.
 */
U_CAPI void U_EXPORT2 umtx_unlock (UMutex* mutex);


U_NAMESPACE_END

#endif /* UMUTEX_H */
/*eof*/