/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_H_
#define ART_RUNTIME_MONITOR_H_

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>  // For strerror(), used by operator new below.

#include <atomic>
#include <iosfwd>
#include <list>
#include <vector>

#include "base/allocator.h"
#include "base/atomic.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "lock_word.h"
#include "obj_ptr.h"
#include "read_barrier_option.h"
#include "runtime_callbacks.h"
#include "thread_state.h"

namespace art {

class ArtMethod;
class IsMarkedVisitor;
class LockWord;
template<class T> class Handle;
class StackVisitor;
class Thread;
using MonitorId = uint32_t;

namespace mirror {
class Object;
}  // namespace mirror

enum class LockReason {
  kForWait,
  kForLock,
};

class Monitor {
 public:
  // The default number of spins that are done before thread suspension is used to forcibly
  // inflate a lock word. See Runtime::max_spins_before_thin_lock_inflation_.
  static constexpr size_t kDefaultMaxSpinsBeforeThinLockInflation = 50;

  static constexpr int kDefaultMonitorTimeoutMs = 500;

  static constexpr int kMonitorTimeoutMinMs = 200;

  static constexpr int kMonitorTimeoutMaxMs = 1000;  // 1 second.

  ~Monitor();

  static void Init(uint32_t lock_profiling_threshold,
                   uint32_t stack_dump_lock_profiling_threshold);

  // Return the thread id of the lock owner, or 0 when there is no owner.
  static uint32_t GetLockOwnerThreadId(ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS;  // TODO: Reading lock owner without holding lock is racy.

  // NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
  static ObjPtr<mirror::Object> MonitorEnter(Thread* thread,
                                             ObjPtr<mirror::Object> obj,
                                             bool trylock)
      EXCLUSIVE_LOCK_FUNCTION(obj.Ptr())
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
  static bool MonitorExit(Thread* thread, ObjPtr<mirror::Object> obj)
      NO_THREAD_SAFETY_ANALYSIS
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(obj.Ptr());

  static void Notify(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, false);
  }

  static void NotifyAll(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DoNotify(self, obj, true);
  }
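
  // Usage sketch (illustrative only, not a verbatim excerpt of the runtime):
  // the moral equivalent of a Java `synchronized (obj) { obj.notify(); }`
  // block in terms of the entry points above. Assumes `self` is the current,
  // attached Thread* and `obj` is a live object; error handling is elided.
  //
  //   ObjPtr<mirror::Object> locked =
  //       Monitor::MonitorEnter(self, obj, /* trylock= */ false);
  //   if (locked != nullptr) {
  //     Monitor::Notify(self, locked);       // Requires the monitor to be held.
  //     Monitor::MonitorExit(self, locked);  // Balances the MonitorEnter.
  //   }
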
  // Object.wait(). Also called for class init.
  // NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
  static void Wait(Thread* self,
                   ObjPtr<mirror::Object> obj,
                   int64_t ms,
                   int32_t ns,
                   bool interruptShouldThrow,
                   ThreadState why)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

  static ThreadState FetchState(const Thread* thread,
                                /* out */ ObjPtr<mirror::Object>* monitor_object,
                                /* out */ uint32_t* lock_owner_tid)
      REQUIRES(!Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
  static ObjPtr<mirror::Object> GetContendedMonitor(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calls 'callback' once for each lock held in the single stack frame represented by
  // the current state of 'stack_visitor'.
  // Setting abort_on_failure to false lets the caller avoid aborting when the runtime state is
  // disorderly; this is necessary when we have already aborted but still want to dump as much
  // of the stack as we can.
  static void VisitLocks(StackVisitor* stack_visitor,
                         void (*callback)(ObjPtr<mirror::Object>, void*),
                         void* callback_context,
                         bool abort_on_failure = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsValidLockWord(LockWord lock_word);

  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> GetObject() REQUIRES_SHARED(Locks::mutator_lock_);

  void SetObject(ObjPtr<mirror::Object> object);

  // Provides no memory ordering guarantees.
  Thread* GetOwner() const {
    return owner_.load(std::memory_order_relaxed);
  }

  int32_t GetHashCode();

  // Is the monitor currently locked? Debug only, provides no memory ordering guarantees.
  bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);

  bool HasHashCode() const {
    return hash_code_.load(std::memory_order_relaxed) != 0;
  }

  MonitorId GetMonitorId() const {
    return monitor_id_;
  }

  // Inflate the lock on obj. May fail to inflate for spurious reasons; always re-check.
  static void InflateThinLocked(Thread* self,
                                Handle<mirror::Object> obj,
                                LockWord lock_word,
                                uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);

  // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
  // does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
  // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
  static bool Deflate(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;

#ifndef __LP64__
  void* operator new(size_t size) {
    // Align Monitor* as per the monitor ID field size in the lock word.
    void* result;
    int error = posix_memalign(&result, LockWord::kMonitorIdAlignment, size);
    CHECK_EQ(error, 0) << strerror(error);
    return result;
  }

  void operator delete(void* ptr) {
    free(ptr);
  }
#endif

 private:
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);
  Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code, MonitorId id)
      REQUIRES_SHARED(Locks::mutator_lock_);
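
  // Inflation sketch (illustrative; assumes MonitorPool::CreateMonitor forwards
  // to the constructors above): a freshly created monitor takes effect only
  // once Install() wins the race on the object's lock word.
  //
  //   Monitor* m = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
  //   if (!m->Install(self)) {
  //     // Lost to a concurrent inflation or hash code install; discard the
  //     // monitor and re-read the lock word.
  //   }
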
  // Install the monitor into its object; may fail if another thread installs a different
  // monitor first. The monitor remains in the same logical state as before, i.e. held the
  // same number of times.
  bool Install(Thread* self)
      REQUIRES(!monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of
  // this routine.
  void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of
  // this routine.
  void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);

  // Release the monitor lock and signal a waiting thread that has been notified and now needs
  // the lock. Assumes the monitor lock is held exactly once, and the owner_ field has been
  // reset to null. Caller may be suspended (Wait) or runnable (MonitorExit).
  void SignalWaiterAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);

  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
  // calling thread must own the lock or the owner must be suspended. There are races with
  // other threads inflating the lock or installing hash codes, as well as spurious failures.
  // The caller should re-read the lock word following the call.
  static void Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS;  // For m->Install(self).

  void LogContentionEvent(Thread* self,
                          uint32_t wait_ms,
                          uint32_t sample_percent,
                          ArtMethod* owner_method,
                          uint32_t owner_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void FailedUnlock(ObjPtr<mirror::Object> obj,
                           uint32_t expected_owner_thread_id,
                           uint32_t found_owner_thread_id,
                           Monitor* mon)
      REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Try to lock without blocking; returns true if we acquired the lock.
  // If spin is true, then we spin for a short period before failing.
  bool TryLock(Thread* self, bool spin = false)
      TRY_ACQUIRE(true, monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<LockReason reason = LockReason::kForLock>
  void Lock(Thread* self)
      ACQUIRE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool Unlock(Thread* thread)
      RELEASE(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all)
      REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.

  void Notify(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void NotifyAll(Thread* self)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static std::string PrettyContentionInfo(const std::string& owner_name,
                                          pid_t owner_tid,
                                          ArtMethod* owners_method,
                                          uint32_t owners_dex_pc,
                                          size_t num_waiters)
      REQUIRES_SHARED(Locks::mutator_lock_);
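
  // Notification hand-off sketch (assumed flow, inferred from the
  // wait_set_/wake_set_ comments below; see monitor.cc for the real sequence):
  //
  //   notifier: Notify(self);                             // wait_set_ -> wake_set_, no signal yet
  //   notifier: SignalWaiterAndReleaseMonitorLock(self);  // signal one wake_set_ thread, release
  //   waiter:   wakes up, re-acquires monitor_lock_, and returns from Wait()
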
  // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
  // (somewhat indirectly) Thread.sleep() and Thread.join().
  //
  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
  // immediately if one of the following is true:
  //  - blocked in wait(), wait(long), or wait(long, int) methods of Object
  //  - blocked in join(), join(long), or join(long, int) methods of Thread
  //  - blocked in sleep(long), or sleep(long, int) methods of Thread
  // Otherwise, we set the "interrupted" flag.
  //
  // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond)
  // and throws the appropriate exception if it isn't.
  //
  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so
  // in a loop. This appears to derive from concerns about pthread_cond_wait() on multiprocessor
  // systems. Some commentary on the web casts doubt on whether these can/should occur.
  //
  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the
  // end of the 32-bit time epoch.
  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
      REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Translates the provided method and pc into its declaring class' source file and line number.
  static void TranslateLocation(ArtMethod* method,
                                uint32_t pc,
                                const char** source_file,
                                int32_t* line_number)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Provides no memory ordering guarantees.
  uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);

  // Set lock_owner_method_ and lock_owner_dex_pc_ corresponding to owner's current stack.
  // owner is either self or suspended.
  void SetLockingMethod(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The same, but without checking for a proxy method. Currently requires owner == self.
  void SetLockingMethodNoProxy(Thread* owner) REQUIRES(monitor_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Support for systrace output of monitor operations.
  ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
                                              ObjPtr<mirror::Object> obj,
                                              bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void AtraceMonitorLockImpl(Thread* self,
                                    ObjPtr<mirror::Object> obj,
                                    bool is_wait)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE static void AtraceMonitorUnlock();

  static uint32_t lock_profiling_threshold_;
  static uint32_t stack_dump_lock_profiling_threshold_;
  static bool capture_method_eagerly_;

  // Guards the monitor's state. Recursive acquisitions of the monitor are tracked by
  // lock_count_ below; monitor_lock_ itself is held at most once by the owner.
  Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Pretend to unlock monitor lock.
  void FakeUnlockMonitorLock() RELEASE(monitor_lock_) NO_THREAD_SAFETY_ANALYSIS {}

  // Number of threads either waiting on the condition or waiting on a contended
  // monitor acquisition. Prevents deflation.
  std::atomic<size_t> num_waiters_;

  // Which thread currently owns the lock? monitor_lock_ only keeps the tid.
  // Only set while holding monitor_lock_. Non-locking readers only use it to
  // compare to self or for debugging.
  std::atomic<Thread*> owner_;

  // Owner's recursive lock depth. owner_ non-null and lock_count_ == 0 ==> held once.
  unsigned int lock_count_ GUARDED_BY(monitor_lock_);
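
  // Recursion sketch: per the comment above, re-entering an already-owned
  // monitor only bumps lock_count_; the final exit releases the lock.
  //
  //   synchronized (o) {    // owner_ = self, lock_count_ = 0 (held once)
  //     synchronized (o) {  // lock_count_ = 1
  //     }                   // lock_count_ = 0
  //   }                     // owner_ = null, lock released
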
  // What object are we part of. This is a weak root. Do not access
  // this directly; use GetObject() to read it, so it will be guarded
  // by a read barrier.
  GcRoot<mirror::Object> obj_;

  // Threads currently waiting on this monitor.
  Thread* wait_set_ GUARDED_BY(monitor_lock_);

  // Threads that were waiting on this monitor, but are now contending on it.
  Thread* wake_set_ GUARDED_BY(monitor_lock_);

  // Stored object hash code, generated lazily by GetHashCode.
  AtomicInteger hash_code_;

  // Data structure used to remember the method and dex pc of a recent holder of the
  // lock. Used for tracing and contention reporting. Setting these is expensive, since it
  // involves a partial stack walk. We set them only as follows, to minimize the cost:
  // - If tracing is enabled, they are needed immediately when we first notice contention, so
  //   we set them unconditionally when a monitor is acquired.
  // - If contention reporting is enabled, we use the lock_owner_request_ field to have the
  //   contending thread request them. The current owner then sets them when releasing the
  //   monitor, making them available when the contending thread acquires the monitor.
  // - If both tracing and contention reporting are enabled, we do both. This usually prevents
  //   us from switching between reporting the end and the beginning of critical sections for
  //   contention logging when tracing is enabled. We expect that tracing overhead is normally
  //   much higher than for contention logging, so the added cost should be small. It also
  //   minimizes glitches when enabling and disabling traces.
  // We're tolerant of missing information. E.g. when tracing is initially turned on, we may
  // not have the lock holder information if the holder acquired the lock with tracing off.
  //
  // We make this data unconditionally atomic; for contention logging all accesses are in fact
  // protected by the monitor, but for tracing, reads are not. Writes are always protected by
  // the monitor.
  //
  // The fields are always accessed without memory ordering. We store a checksum, and reread if
  // the checksum doesn't correspond to the values. This results in values that are correct
  // with very high probability, but not certainty.
  //
  // If we need lock owner information for a certain thread for contention logging, we store
  // its tid in lock_owner_request_. To satisfy the request, we store lock_owner_,
  // lock_owner_method_, and lock_owner_dex_pc_, and the corresponding checksum, while holding
  // the monitor.
  //
  // At all times, either lock_owner_ is null, the checksum is valid, or a thread is actively
  // in the process of establishing one of those states. Only one thread at a time can be
  // actively establishing such a state, since writes are protected by the monitor.
  std::atomic<Thread*> lock_owner_;  // *lock_owner_ may no longer exist!
  std::atomic<ArtMethod*> lock_owner_method_;
  std::atomic<uint32_t> lock_owner_dex_pc_;
  std::atomic<uintptr_t> lock_owner_sum_;

  // Request that the lock owner save the method and dex_pc. Written asynchronously.
  std::atomic<Thread*> lock_owner_request_;

  // Compute method, dex pc, and tid "checksum".
  uintptr_t LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t);
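
  // Read-protocol sketch (illustrative; the exact loop in monitor.cc may
  // differ): an unsynchronized reader rereads until the checksum matches,
  // as described in the comment block above.
  //
  //   Thread* t;
  //   ArtMethod* m;
  //   uint32_t pc;
  //   do {
  //     t = lock_owner_.load(std::memory_order_relaxed);
  //     m = lock_owner_method_.load(std::memory_order_relaxed);
  //     pc = lock_owner_dex_pc_.load(std::memory_order_relaxed);
  //   } while (lock_owner_sum_.load(std::memory_order_relaxed) !=
  //            LockOwnerInfoChecksum(m, pc, t));
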
  // Set owning method, dex pc, and tid. The owner_ field is set and points to the current
  // thread.
  void SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t)
      REQUIRES(monitor_lock_);

  // Get owning method and dex pc for the given thread, if available.
  void GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc, Thread* t);

  // Do the same, while holding the monitor. There are no concurrent updates.
  void GetLockOwnerInfoLocked(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
                              uint32_t thread_id)
      REQUIRES(monitor_lock_);

  // We never clear the lock_owner method and dex pc. Since they often reflect ownership as of
  // the time we last detected contention, they may be inconsistent with owner_, and are not
  // 100% reliable. For lock contention monitoring, in the absence of tracing, there is a small
  // risk that the current owner may finish before noticing the request, or that the
  // information will be overwritten by another intervening request and monitor release, so it
  // is also not 100% reliable there. But if we report information at all, it should generally
  // (modulo accidental checksum matches) pertain to an acquisition of the right monitor by the
  // right thread, so it is extremely unlikely to be seriously misleading. Since we track
  // threads by a pointer to the Thread structure, there is a small chance we may confuse
  // threads allocated at the same exact address, if a contending thread dies before we inquire
  // about it.

  // Check for and act on a pending lock_owner_request_.
  void CheckLockOwnerRequest(Thread* self)
      REQUIRES(monitor_lock_) REQUIRES_SHARED(Locks::mutator_lock_);

  void MaybeEnableTimeout() REQUIRES(Locks::mutator_lock_);

  // The denser encoded version of this monitor as stored in the lock word.
  MonitorId monitor_id_;

#ifdef __LP64__
  // Free list for monitor pool.
  Monitor* next_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif

  friend class MonitorInfo;
  friend class MonitorList;
  friend class MonitorPool;
  friend class mirror::Object;
  DISALLOW_COPY_AND_ASSIGN(Monitor);
};

class MonitorList {
 public:
  MonitorList();
  ~MonitorList();

  void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);

  void SweepMonitorList(IsMarkedVisitor* visitor)
      REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
  void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
  void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
  // Returns how many monitors were deflated.
  size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
  size_t Size() REQUIRES(!monitor_list_lock_);

  using Monitors = std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>>;
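
  // Sweep interaction sketch (assumed call sites; the real drivers are the
  // runtime's system-weak handling and the GC, not this header):
  //
  //   list->DisallowNewMonitors();      // Block racing inflation during sweep.
  //   list->SweepMonitorList(visitor);  // Free monitors whose objects are dead.
  //   list->AllowNewMonitors();         // Wake threads blocked in Add().
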
 private:
  // During sweeping we may free an object and, on a separate thread, have an object created
  // using the newly freed memory. That object may then have its lock word inflated and a
  // monitor created. If we allowed new monitor registration during sweeping, such a monitor
  // could be incorrectly freed, since its object was not marked when sweeping began.
  bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
  Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
  Monitors list_ GUARDED_BY(monitor_list_lock_);

  friend class Monitor;
  DISALLOW_COPY_AND_ASSIGN(MonitorList);
};

// Collects information about the current state of an object's monitor.
// This is very unsafe, and must only be called when all threads are suspended.
// For use only by the JDWP implementation.
class MonitorInfo {
 public:
  MonitorInfo() : owner_(nullptr), entry_count_(0) {}
  MonitorInfo(const MonitorInfo&) = default;
  MonitorInfo& operator=(const MonitorInfo&) = default;
  explicit MonitorInfo(ObjPtr<mirror::Object> o) REQUIRES(Locks::mutator_lock_);

  Thread* owner_;
  size_t entry_count_;
  std::vector<Thread*> waiters_;
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_H_