1 /**
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "runtime/monitor.h"
17
18 #include "libpandabase/os/thread.h"
19 #include "runtime/include/object_header.h"
20 #include "runtime/include/runtime.h"
21 #include "runtime/include/runtime_notification.h"
22 #include "runtime/include/thread_scopes.h"
23 #include "runtime/include/thread-inl.h"
24 #include "runtime/mark_word.h"
25 #include "runtime/monitor_pool.h"
26 #include "runtime/handle_base-inl.h"
27 #include "runtime/mem/vm_handle.h"
28
29 #include <cinttypes>
30 #include <string>
31 #include <sched.h>
32
33 namespace panda {
34
35 template <typename T>
36 template <typename Predicate>
RemoveIf(Predicate pred)37 bool ThreadList<T>::RemoveIf(Predicate pred)
38 {
39 bool found = false;
40 auto prev = head_;
41 for (auto current = head_; current != nullptr; current = current->GetNextWait()) {
42 if (pred(*current)) {
43 found = true;
44 EraseAfter(prev, current);
45 current = prev;
46 } else {
47 prev = current;
48 }
49 }
50 return found;
51 }
52
53 template <typename T>
Splice(ThreadList & other)54 void ThreadList<T>::Splice(ThreadList &other)
55 {
56 if (Empty()) {
57 head_ = other.head_;
58 } else {
59 T *last = head_;
60 for (; last->GetNextWait() != nullptr; last = last->GetNextWait()) {
61 }
62 last->SetWaitNext(other.head_);
63 }
64 other.Clear();
65 }
66
67 template <typename T>
EraseAfter(T * prev,T * current)68 void ThreadList<T>::EraseAfter(T *prev, T *current)
69 {
70 if (current == head_) {
71 head_ = current->GetNextWait();
72 } else {
73 prev->SetWaitNext(current->GetNextWait());
74 }
75 }
76
template <typename T>
void ThreadList<T>::PopFront()
{
    // Drop the first element. Caller must ensure the list is non-empty;
    // the popped node itself is left untouched (its next pointer is not cleared).
    head_ = head_->GetNextWait();
}
82
template <typename T>
void ThreadList<T>::PushFront(T &thread)
{
    // Link the new element in front of the current head, then make it the head.
    // Order matters: the next pointer must be set before head_ is updated.
    thread.SetWaitNext(head_);
    head_ = &thread;
}
89
/**
 * Inflates a thin (light-weight) lock that is currently held by another thread
 * into a heavy monitor. Called from MonitorEnter after light-lock acquisition
 * has been retried for too long.
 *
 * @param thread the current thread requesting inflation
 * @param obj_handle handle to the object whose lock should be inflated
 */
void Monitor::InflateThinLock(MTManagedThread *thread, [[maybe_unused]] const VMHandle<ObjectHeader> &obj_handle)
{
#if defined(PANDA_USE_FUTEX)
    // Futex inflation policy: suspend target thread, wait until it actually gets suspended
    // and try inflating light monitor (`Inflate` expects lock to still be acquired by target;
    // otherwise markword CAS fails). If it fails (i.e. thread got suspended when this monitor is
    // no longer taken), we restart lightlock acquisition policy again.
    // Compared to forced inflation (actively retry inflation once MAX_TRYLOCK_RETRY is reached
    // or inflate monitor once this thread acquires light lock), this policy yields much better
    // performance for short running synchronized blocks or functions, and is still expected to
    // succeed on longer blocks which should have safepoints and suspend successfully with
    // monitor still acquired.
    // We are trying to inflate light lock acquired by other thread, suspend it first
    MTManagedThread *owner = nullptr;
    ASSERT(obj_handle.GetPtr() != nullptr);
    MarkWord mark = obj_handle.GetPtr()->AtomicGetMark();
    os::thread::ThreadId owner_thread_id = mark.GetThreadId();
    {
        // Publish IS_WAITING_INFLATION while blocking on the owner's suspension.
        ScopedChangeThreadStatus sts(thread, ThreadStatus::IS_WAITING_INFLATION);
        owner = thread->GetVM()->GetThreadManager()->SuspendAndWaitThreadByInternalThreadId(owner_thread_id);
    }
    thread->SetEnterMonitorObject(nullptr);
    thread->SetWaitingMonitorOldStatus(ThreadStatus::FINISHED);
    // Thread could have finished by the time we tried stopping it
    if (owner != nullptr) {
        // NB! Inflate can do nothing if monitor is already unlocked or acquired by other thread.
        Inflate<true>(obj_handle.GetPtr(), owner);
        owner->ResumeImpl(true);
    }
#else
    // Non-futex inflation policy: Wait until light lock is released, acquire it and inflate
    // to heavy monitor
    {
        static constexpr uint64_t SLEEP_MS = 10;
        thread->TimedWait(ThreadStatus::IS_WAITING_INFLATION, SLEEP_MS, 0);
    }
    thread->SetEnterMonitorObject(nullptr);
    thread->SetWaitingMonitorOldStatus(ThreadStatus::FINISHED);
#endif
}
130
/**
 * Static call, which implements the basic functionality of monitors:
 * heavyweight, lightweight and so on.
 *
 * Loops over the object's mark word state and either acquires the heavy monitor,
 * takes/bumps the light lock via CAS, or inflates the lock when contention or
 * recursion limits demand it.
 *
 * @param obj an object header of corresponding object
 * @param trylock is true if the function should fail in case of lock was already acquired by other thread
 * @return state of function execution (ok, illegal)
 */
Monitor::State Monitor::MonitorEnter(ObjectHeader *obj, bool trylock)
{
    auto *thread = MTManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    // This function can unlock MutatorLock, so GC can run during lock acquire waiting
    // so we need to use handle to get updated header pointer
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<ObjectHeader> obj_handle(thread, obj);
    bool ret = false;
    bool should_inflate = false;
    uint32_t lightlock_retry_count = 0;

    while (true) {
        MarkWord mark = obj_handle.GetPtr()->AtomicGetMark();
        MarkWord new_mark = mark;
        MarkWord::ObjectState state = mark.GetState();

        LOG(DEBUG, RUNTIME) << "Try to enter monitor " << std::hex << obj << " with state " << std::dec << state;

        switch (state) {
            case MarkWord::STATE_HEAVY_LOCKED: {
                // Already inflated: acquire the heavy monitor directly.
                auto monitor = thread->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
                if (monitor == nullptr) {
                    // Not sure if it is possible
                    return State::ILLEGAL;
                }
                ret = monitor->Acquire(thread, obj_handle, trylock);
                if (ret) {
                    thread->PushLocalObjectLocked(obj_handle.GetPtr());
                }
                return ret ? State::OK : State::ILLEGAL;
            }
            case MarkWord::STATE_LIGHT_LOCKED: {
                os::thread::ThreadId owner_thread_id = mark.GetThreadId();
                if (owner_thread_id == thread->GetInternalId()) {
                    // Recursive acquisition by the current light-lock owner.
                    uint32_t new_count = mark.GetLockCount() + 1;
                    if (new_count < MarkWord::LIGHT_LOCK_LOCK_MAX_COUNT) {
                        new_mark = mark.DecodeFromLightLock(thread->GetInternalId(), new_count);
                        // Strong CAS as the loop iteration is large
                        ret = obj_handle.GetPtr()->AtomicSetMark(mark, new_mark);
                        if (ret) {
                            LOG(DEBUG, RUNTIME) << "The lightweight monitor was successfully recursively acquired";
                            TraceMonitorLock(obj_handle.GetPtr(), false);
                            thread->PushLocalObjectLocked(obj_handle.GetPtr());
                            return State::OK;
                        }
                    } else {
                        // Recursion counter would overflow the mark word: switch to a heavy monitor.
                        Inflate(obj_handle.GetPtr(), thread);
                        // Inflate set up recursive counter to just current amount, loop again.
                    }
                } else {
                    // Lock acquired by other thread.
                    if (trylock) {
                        return State::ILLEGAL;
                    }

                    // Retry acquiring light lock in loop first to avoid excessive inflation
                    static constexpr uint32_t MAX_TRYLOCK_RETRY = 100;
                    static constexpr uint32_t YIELD_AFTER = 50;

                    lightlock_retry_count++;
                    if (lightlock_retry_count < MAX_TRYLOCK_RETRY) {
                        if (lightlock_retry_count > YIELD_AFTER) {
                            // Back off so the owner gets CPU time to release the lock.
                            MTManagedThread::Yield();
                        }
                    } else {
                        // Retried acquiring light lock for too long, do inflation

                        thread->SetEnterMonitorObject(obj_handle.GetPtr());
                        thread->SetWaitingMonitorOldStatus(ThreadStatus::IS_WAITING_INFLATION);
                        InflateThinLock(thread, obj_handle);
#if defined(PANDA_USE_FUTEX)
                        // Futex policy may have failed to inflate; restart the retry budget.
                        lightlock_retry_count = 0;
#else
                        // Non-futex policy: inflate ourselves once the lock is observed unlocked.
                        should_inflate = true;
#endif
                    }
                }
                // Couldn't update mark.
                if (trylock) {
                    return State::ILLEGAL;
                }
                // Go to the next iteration
                continue;
            }
            case MarkWord::STATE_HASHED:
                // Hash lives in the mark word; move it into a monitor so the lock bits are free.
                if (Inflate(obj_handle.GetPtr(), thread)) {
                    thread->PushLocalObjectLocked(obj_handle.GetPtr());
                    return State::OK;
                }
                // Couldn't inflate.
                if (trylock) {
                    return State::ILLEGAL;
                }
                // Go to the next iteration
                continue;
            case MarkWord::STATE_UNLOCKED:
                if (should_inflate) {
                    if (Inflate(obj_handle.GetPtr(), thread)) {
                        thread->PushLocalObjectLocked(obj_handle.GetPtr());
                        return State::OK;
                    }
                    // Couldn't inflate.
                    if (trylock) {
                        return State::ILLEGAL;
                    }
                    // Go to the next iteration
                    continue;
                }

                // First acquisition: install a light lock with count 1.
                ASSERT(thread->GetInternalId() <= MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT);
                new_mark = mark.DecodeFromLightLock(thread->GetInternalId(), 1);
                // Strong CAS as the loop iteration is large
                ret = obj_handle.GetPtr()->AtomicSetMark(mark, new_mark);
                if (ret) {
                    LOG(DEBUG, RUNTIME) << "The lightweight monitor was successfully acquired for the first time";
                    TraceMonitorLock(obj_handle.GetPtr(), false);
                    thread->PushLocalObjectLocked(obj_handle.GetPtr());
                    return State::OK;
                }
                // Couldn't update mark.
                if (trylock) {
                    return State::ILLEGAL;
                }
                // Go to the next iteration
                continue;
            case MarkWord::STATE_GC:
                LOG(FATAL, RUNTIME) << "Not yet implemented";
                return State::ILLEGAL;
            default:
                LOG(FATAL, RUNTIME) << "Undefined object state";
                return State::ILLEGAL;
        }
    }
}
274
MonitorExit(ObjectHeader * obj)275 Monitor::State Monitor::MonitorExit(ObjectHeader *obj)
276 {
277 auto thread = MTManagedThread::GetCurrent();
278 bool ret = false;
279
280 while (true) {
281 MarkWord mark = obj->AtomicGetMark();
282 MarkWord new_mark = mark;
283 MarkWord::ObjectState state = mark.GetState();
284 LOG(DEBUG, RUNTIME) << "Try to exit monitor " << std::hex << obj << " with state " << std::dec << state;
285 switch (state) {
286 case MarkWord::STATE_HEAVY_LOCKED: {
287 auto monitor = thread->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
288 ret = monitor->Release(thread);
289 if (ret) {
290 thread->PopLocalObjectLocked(obj);
291 }
292 return ret ? State::OK : State::ILLEGAL;
293 }
294 case MarkWord::STATE_LIGHT_LOCKED: {
295 if (mark.GetThreadId() != thread->GetInternalId()) {
296 LOG(DEBUG, RUNTIME) << "Caling MonitorEnter on object which isn't owned by this thread";
297 return State::ILLEGAL;
298 }
299 uint32_t new_count = mark.GetLockCount() - 1;
300 if (new_count != 0) {
301 new_mark = mark.DecodeFromLightLock(thread->GetInternalId(), new_count);
302 } else {
303 new_mark = mark.DecodeFromUnlocked();
304 }
305 // Strong CAS as the loop iteration is large
306 ret = obj->AtomicSetMark(mark, new_mark);
307 if (ret) {
308 LOG(DEBUG, RUNTIME) << "Exited lightweight lock";
309 TraceMonitorUnLock();
310 thread->PopLocalObjectLocked(obj);
311 return State::OK;
312 }
313 // CAS failed, must have been heavy locked by other thread. Retry unlock.
314 continue;
315 }
316 case MarkWord::STATE_HASHED:
317 case MarkWord::STATE_UNLOCKED:
318 LOG(ERROR, RUNTIME) << "Try to perform monitor exit from unlocked state";
319 return State::ILLEGAL;
320 case MarkWord::STATE_GC:
321 LOG(FATAL, RUNTIME) << "Not yet implemented";
322 return State::ILLEGAL;
323 default:
324 LOG(FATAL, RUNTIME) << "Undefined object state";
325 return State::ILLEGAL;
326 }
327 }
328 }
329
/**
 * Blocks the current thread on the object's monitor until it is notified,
 * interrupted, or (for a non-zero timeout) the timeout expires.
 * Zero timeout is used as infinite wait (see docs).
 *
 * @param obj object whose monitor the calling thread must already own
 * @param status thread status to publish while waiting
 * @param timeout wait timeout in milliseconds (0 with nanos == 0 means wait forever)
 * @param nanos additional nanoseconds of the timeout
 * @param ignore_interruption if true, a pending interrupt does not abort the wait
 * @return State::OK, State::INTERRUPTED, or State::ILLEGAL when the monitor is not owned
 */
Monitor::State Monitor::Wait(ObjectHeader *obj, ThreadStatus status, uint64_t timeout, uint64_t nanos,
                             bool ignore_interruption)
{
    ASSERT(obj != nullptr);
    auto *thread = MTManagedThread::GetCurrent();
    ASSERT(thread != nullptr);
    State result_state = State::OK;

    // This function can unlock MutatorLock, so GC can run during wait
    // so we need to use handle to get updated header pointer
    [[maybe_unused]] HandleScope<ObjectHeader *> scope(thread);
    VMHandle<ObjectHeader> obj_handle(thread, obj);

    Runtime::GetCurrent()->GetNotificationManager()->MonitorWaitEvent(obj, timeout);

    while (true) {
        MarkWord mark = obj_handle->AtomicGetMark();
        MarkWord::ObjectState state = mark.GetState();
        LOG(DEBUG, RUNTIME) << "Try to wait with state " << state;
        switch (state) {
            case MarkWord::STATE_HEAVY_LOCKED: {
                auto monitor = thread->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());

                if (monitor->GetOwner() != thread) {
                    // The monitor is acquired by other thread
                    // throw an internal exception?
                    LOG(ERROR, RUNTIME) << "Illegal monitor state: try to wait with monitor acquired by other thread";
                    return State::ILLEGAL;
                }

                thread->GetWaitingMutex()->Lock();

                // A pending interrupt aborts the wait before the monitor is released.
                if (thread->IsInterruptedWithLockHeld() && !ignore_interruption) {
                    thread->GetWaitingMutex()->Unlock();
                    return State::INTERRUPTED;
                }

                // Use LockHolder inside scope
                // Remember the recursion depth so it can be restored after re-acquisition.
                uint64_t counter = monitor->recursive_counter_;
                // Wait should be called under the monitor. We checked it in the previous if.
                // Thus, the operation with queues are thread-safe
                monitor->waiters_.PushFront(*thread);
                thread->SetWaitingMonitor(monitor);
                thread->SetWaitingMonitorOldStatus(status);

                // Fully release the monitor (force recursion depth to 1) while waiting.
                monitor->recursive_counter_ = 1;
                // Atomic with relaxed order reason: memory access in monitor
                monitor->waiters_counter_.fetch_add(1, std::memory_order_relaxed);
                monitor->Release(thread);

                TraceMonitorLock(obj_handle.GetPtr(), true);
                bool is_timeout = false;
                if (timeout == 0 && nanos == 0) {
                    // Normal wait
                    thread->WaitWithLockHeld(status);
                } else {
                    is_timeout = thread->TimedWaitWithLockHeld(status, timeout, nanos, false);
                }
                TraceMonitorUnLock();  // End Wait().

                // Unlock before to avoid deadlock
                // Nothing happen, if the thread is rescheduled between,
                // As the monitor was already released for external users
                thread->GetWaitingMutex()->Unlock();
                [[maybe_unused]] bool ret = monitor->Acquire(thread, obj_handle, false);
                ASSERT(ret);
                // Atomic with relaxed order reason: memory access in monitor
                monitor->waiters_counter_.fetch_sub(1, std::memory_order_relaxed);
                monitor->recursive_counter_ = counter;

                if (thread->IsInterrupted()) {
                    // TODO(dtrubenkov): call panda::ThrowException when it will be implemented
                    result_state = State::INTERRUPTED;
                }

                // problems with equality of MTManagedThread's
                bool found = monitor->waiters_.RemoveIf(
                    [thread](MTManagedThread &t) { return thread->GetInternalId() == t.GetInternalId(); });
                // If no matching thread found in waiters_, it should have been moved to to_wakeup_
                // but this thread timed out or got interrupted
                if (!found) {
                    monitor->to_wakeup_.RemoveIf(
                        [thread](MTManagedThread &t) { return thread->GetInternalId() == t.GetInternalId(); });
                }

                thread->SetWaitingMonitor(nullptr);
                thread->SetWaitingMonitorOldStatus(ThreadStatus::FINISHED);
                Runtime::GetCurrent()->GetNotificationManager()->MonitorWaitedEvent(obj_handle.GetPtr(), is_timeout);

                return result_state;
            }
            case MarkWord::STATE_LIGHT_LOCKED:
                if (mark.GetThreadId() != thread->GetInternalId()) {
                    LOG(FATAL, RUNTIME) << "Illegal monitor state: try to wait with monitor acquired by other thread";
                    return result_state;
                }
                // Wait requires the heavy monitor (it owns the wait queues); inflate first.
                Inflate(obj_handle.GetPtr(), thread);
                // Go to the next iteration.
                continue;
            case MarkWord::STATE_UNLOCKED:
            case MarkWord::STATE_HASHED:
            case MarkWord::STATE_GC:
                LOG(ERROR, RUNTIME) << "Try to perform Wait from unsupported state";
                return State::ILLEGAL;
            default:
                LOG(FATAL, RUNTIME) << "Undefined object state";
                UNREACHABLE();
        }
    }
}
442
Notify(ObjectHeader * obj)443 Monitor::State Monitor::Notify(ObjectHeader *obj)
444 {
445 ASSERT(obj != nullptr);
446 MarkWord mark = obj->AtomicGetMark();
447 MarkWord::ObjectState state = mark.GetState();
448 auto thread = MTManagedThread::GetCurrent();
449 LOG(DEBUG, RUNTIME) << "Try to notify with state " << state;
450
451 switch (state) {
452 case MarkWord::STATE_HEAVY_LOCKED: {
453 auto monitor = thread->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
454
455 if (monitor->GetOwner() != thread) {
456 // The monitor is acquired by other thread
457 // throw an internal exception?
458 LOG(ERROR, RUNTIME) << "Illegal monitor state: try to notify with monitor acquired by other thread";
459 return State::ILLEGAL;
460 }
461
462 // Notify should be called under the monitor. We checked it in the previous if.
463 // Thus, the operation with queues are thread-safe
464
465 // Move one thread from waiters to wake_up
466 if (!monitor->waiters_.Empty()) {
467 // With current panda::List implementation this reference is valid.
468 // This can be broken with future changes.
469 auto &waiter = monitor->waiters_.Front();
470 monitor->waiters_.PopFront();
471 monitor->to_wakeup_.PushFront(waiter);
472 }
473 return State::OK; // Success
474 }
475 case MarkWord::STATE_LIGHT_LOCKED:
476 if (mark.GetThreadId() != thread->GetInternalId()) {
477 LOG(ERROR, RUNTIME) << "Illegal monitor state: try to notify with monitor acquired by other thread";
478 return State::ILLEGAL;
479 }
480 return State::OK; // Success
481 case MarkWord::STATE_UNLOCKED:
482 case MarkWord::STATE_HASHED:
483 case MarkWord::STATE_GC:
484 LOG(ERROR, RUNTIME) << "Try to perform Notify from unsupported state";
485 return State::ILLEGAL;
486 default:
487 LOG(FATAL, RUNTIME) << "Undefined object state";
488 UNREACHABLE();
489 }
490 }
491
NotifyAll(ObjectHeader * obj)492 Monitor::State Monitor::NotifyAll(ObjectHeader *obj)
493 {
494 ASSERT(obj != nullptr);
495 MarkWord mark = obj->AtomicGetMark();
496 MarkWord::ObjectState state = mark.GetState();
497 auto thread = MTManagedThread::GetCurrent();
498 LOG(DEBUG, RUNTIME) << "Try to notify all with state " << state;
499
500 switch (state) {
501 case MarkWord::STATE_HEAVY_LOCKED: {
502 auto monitor = thread->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
503
504 if (monitor->GetOwner() != thread) {
505 // The monitor is acquired by other thread
506 // throw an internal exception?
507 LOG(ERROR, RUNTIME) << "Illegal monitor state: try to notify with monitor acquired by other thread";
508 return State::ILLEGAL;
509 }
510
511 // NotifyAll should be called under the monitor. We checked it in the previous if.
512 // Thus, the operation with queues are thread-safe
513 if (monitor->to_wakeup_.Empty()) {
514 monitor->to_wakeup_.Swap(monitor->waiters_);
515 return State::OK;
516 }
517
518 // Concatenate two queues
519 if (!monitor->waiters_.Empty()) {
520 monitor->to_wakeup_.Splice(monitor->waiters_);
521 monitor->waiters_.Clear();
522 }
523 return State::OK; // Success
524 }
525 case MarkWord::STATE_LIGHT_LOCKED:
526 if (mark.GetThreadId() != thread->GetInternalId()) {
527 LOG(ERROR, RUNTIME) << "Illegal monitor state: try to notify with monitor acquired by other thread";
528 return State::ILLEGAL;
529 }
530 return State::OK; // Success
531 case MarkWord::STATE_UNLOCKED:
532 case MarkWord::STATE_HASHED:
533 case MarkWord::STATE_GC:
534 LOG(ERROR, RUNTIME) << "Try to perform NotifyAll from unsupported state";
535 return State::ILLEGAL;
536 default:
537 LOG(FATAL, RUNTIME) << "Undefined object state";
538 UNREACHABLE();
539 }
540 }
541
/**
 * Acquires this heavy monitor for the given thread.
 *
 * @param thread thread acquiring the monitor
 * @param obj_handle handle of the object the monitor belongs to (used for events/tracing)
 * @param trylock if true, fail immediately instead of blocking on contention
 * @return true on success; false only when trylock could not take the lock
 */
bool Monitor::Acquire(MTManagedThread *thread, const VMHandle<ObjectHeader> &obj_handle, bool trylock)
{
    ASSERT_MANAGED_CODE();

    MTManagedThread *owner = this->GetOwner();
    if (owner == thread) {
        // Recursive acquisition by the current owner: just bump the counter.
        // Do we need to hold a lock here?
        this->recursive_counter_++;
        LOG(DEBUG, RUNTIME) << "The fat monitor was successfully recursively acquired";
        TraceMonitorLock(obj_handle.GetPtr(), false);
        return true;
    }

    // Use trylock first
    if (trylock) {
        if (!lock_.TryLock()) {
            return false;
        }
    } else {
#ifdef PANDA_USE_FUTEX
        if (!lock_.TryLockWithSpinning()) {
#else
        if (!lock_.TryLock()) {
#endif  // PANDA_USE_FUTEX
            // Slow path: the lock is contended, block until it is released.
            Runtime::GetCurrent()->GetNotificationManager()->MonitorContendedEnterEvent(obj_handle.GetPtr());
            // If not trylock...
            // Do atomic add out of scope to prevent GC getting old waiters_counter_
            // Atomic with relaxed order reason: memory access in monitor
            waiters_counter_.fetch_add(1, std::memory_order_relaxed);
            thread->SetEnterMonitorObject(obj_handle.GetPtr());
            thread->SetWaitingMonitorOldStatus(ThreadStatus::IS_BLOCKED);
            {
                ScopedChangeThreadStatus sts(thread, ThreadStatus::IS_BLOCKED);
                // Save current monitor, on which the given thread is blocked.
                // It can be used to detect potential deadlock with daemon threads.
                thread->SetEnteringMonitor(this);
                lock_.Lock();
                // Deadlock is no longer possible with the given thread.
                thread->SetEnteringMonitor(nullptr);
                // Do this inside scope for thread to release this monitor during runtime destroy
                if (!this->SetOwner(nullptr, thread)) {
                    LOG(FATAL, RUNTIME) << "Set monitor owner failed in Acquire";
                }
                thread->AddMonitor(this);
                this->recursive_counter_++;
            }
            thread->SetEnterMonitorObject(nullptr);
            thread->SetWaitingMonitorOldStatus(ThreadStatus::FINISHED);
            // Atomic with relaxed order reason: memory access in monitor
            waiters_counter_.fetch_sub(1, std::memory_order_relaxed);
            // Even though these 2 warnings are valid, we suppress them. Reason is to have consistent logging.
            // Otherwise we would see that lock was done on one monitor address,
            // and unlock (after GC) - on a different one
            // SUPPRESS_CSA_NEXTLINE(alpha.core.WasteObjHeader)
            Runtime::GetCurrent()->GetNotificationManager()->MonitorContendedEnteredEvent(obj_handle.GetPtr());
            LOG(DEBUG, RUNTIME) << "The fat monitor was successfully acquired for the first time";
            // SUPPRESS_CSA_NEXTLINE(alpha.core.WasteObjHeader)
            TraceMonitorLock(obj_handle.GetPtr(), false);
            return true;
        }
    }

    // Fast path: TryLock succeeded, record ownership.
    if (!this->SetOwner(nullptr, thread)) {
        LOG(FATAL, RUNTIME) << "Set monitor owner failed in Acquire";
    }
    thread->AddMonitor(this);
    this->recursive_counter_++;
    LOG(DEBUG, RUNTIME) << "The fat monitor was successfully acquired for the first time";
    TraceMonitorLock(obj_handle.GetPtr(), false);
    return true;
}
613
/**
 * Initializes a freshly created monitor as already owned by `thread`.
 * Used during inflation so the monitor is never observable in an unlocked state.
 * With futexes the lock can be taken on behalf of a suspended thread;
 * otherwise the owner must be the current thread.
 */
void Monitor::InitWithOwner(MTManagedThread *thread, ObjectHeader *obj)
{
    ASSERT(this->GetOwner() == nullptr);

#ifdef PANDA_USE_FUTEX
    // The owner may be another, suspended thread (futex inflation policy).
    ASSERT(thread == MTManagedThread::GetCurrent() || thread->GetStatus() != ThreadStatus::RUNNING);
    lock_.LockForOther(thread->GetId());
#else
    ASSERT(thread == MTManagedThread::GetCurrent());
    // The monitor is brand new, so this TryLock cannot be contended.
    [[maybe_unused]] bool res = lock_.TryLock();
    ASSERT(res);
#endif  // PANDA_USE_FUTEX

    if (!this->SetOwner(nullptr, thread)) {
        LOG(FATAL, RUNTIME) << "Set monitor owner failed in InitWithOwner";
    }
    this->recursive_counter_++;
    LOG(DEBUG, RUNTIME) << "The fat monitor was successfully initialized for the first time";
    TraceMonitorLock(obj, false);
}
634
/**
 * Rolls back InitWithOwner when inflation lost the mark-word CAS race.
 * Drops ownership and unlocks the underlying mutex (possibly on behalf of
 * another, suspended thread when futexes are used).
 */
void Monitor::ReleaseOnFailedInflate(MTManagedThread *thread)
{
    if (thread != this->GetOwner()) {
        LOG(FATAL, RUNTIME) << "Releasing lock which isn't owned by this thread";
    }
    TraceMonitorUnLock();
    this->recursive_counter_--;
    // Only the single ownership taken by InitWithOwner may be outstanding here.
    ASSERT(this->recursive_counter_ == 0);
    // This should never fail
    [[maybe_unused]] bool success = this->SetOwner(thread, nullptr);
    ASSERT(success);
#ifdef PANDA_USE_FUTEX
    // The owner may be another, suspended thread (futex inflation policy).
    ASSERT(thread == MTManagedThread::GetCurrent() || thread->GetStatus() != ThreadStatus::RUNNING);
    lock_.UnlockForOther(thread->GetId());
#else
    ASSERT(thread == MTManagedThread::GetCurrent());
    lock_.Unlock();
#endif  // PANDA_USE_FUTEX
    LOG(DEBUG, RUNTIME) << "The fat monitor was successfully released after failed inflation";
}
655
/**
 * Releases one recursion level of this monitor on behalf of `thread`.
 * When the counter drops to zero, ownership is cleared, one thread from the
 * wake-up queue is signalled and the underlying mutex is unlocked.
 *
 * @return false (after a FATAL log) if the calling thread is not the owner
 */
bool Monitor::Release(MTManagedThread *thread)
{
    if (thread != this->GetOwner()) {
        LOG(FATAL, RUNTIME) << "Releasing lock which isn't owned by this thread";
        return false;
    }
    TraceMonitorUnLock();
    this->recursive_counter_--;
    if (this->recursive_counter_ == 0) {
        if (!this->SetOwner(thread, nullptr)) {
            LOG(FATAL, RUNTIME) << "Set monitor owner failed in Release";
        }
        // Signal the only waiter (the other one will be signaled after the next release)
        MTManagedThread *waiter = nullptr;
        Monitor *waiting_mon = nullptr;
        if (!this->to_wakeup_.Empty()) {
            // NB! Current list implementation leaves this pointer valid after PopFront, change this
            // if List implementation is changed.
            waiter = &(this->to_wakeup_.Front());
            waiting_mon = waiter->GetWaitingMonitor();
            this->to_wakeup_.PopFront();
        }
        thread->RemoveMonitor(this);
        // Signal waiter after mutex unlock so that signalled thread doesn't get stuck on lock_
        // NOTE(review): Signal() below actually executes BEFORE lock_.Unlock(), contradicting the
        // comment above; the woken thread may briefly contend on lock_ — confirm intended order.
        if (waiter != nullptr && waiting_mon == this) {
            waiter->Signal();
            LOG(DEBUG, RUNTIME) << "Send the notifing signal to " << waiter->GetId();
        }
        lock_.Unlock();
    }
    LOG(DEBUG, RUNTIME) << "The fat monitor was successfully released";
    return true;
}
689
/**
 * Inflates the object's lock into a heavy monitor owned by `thread`.
 *
 * @tparam for_other_thread true when inflating on behalf of a (suspended) light-lock
 *         owner — in that case inflation is abandoned unless the object is still
 *         light-locked by exactly that thread
 * @param obj object whose mark word should be switched to a monitor id
 * @param thread thread that becomes the owner of the new monitor
 * @return true if the mark word was successfully replaced with the new monitor id
 */
template <bool for_other_thread>
bool Monitor::Inflate(ObjectHeader *obj, MTManagedThread *thread)
{
    ASSERT(obj != nullptr);
    Monitor *monitor = nullptr;
    MarkWord old_mark = obj->AtomicGetMark();
    MarkWord new_mark = old_mark;
    MarkWord::ObjectState state = old_mark.GetState();
    bool ret = false;

    // Dont inflate if someone already inflated the lock.
    if (state == MarkWord::STATE_HEAVY_LOCKED) {
        return false;
    }
    // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
    if constexpr (for_other_thread) {  // NOLINT(bugprone-suspicious-semicolon)
        // Dont inflate if monitor got unlocked or acquired by other thread.
        if (state != MarkWord::STATE_LIGHT_LOCKED || old_mark.GetThreadId() != thread->GetInternalId()) {
            return false;
        }
    }

    // Create the monitor and pre-own it, so it is never observable unlocked.
    auto *monitor_pool = thread->GetMonitorPool();
    monitor = monitor_pool->CreateMonitor(obj);
    if (monitor == nullptr) {
        LOG(FATAL, RUNTIME) << "Couldn't create new monitor. Out of memory?";
        return false;
    }
    monitor->InitWithOwner(thread, obj);

    switch (state) {
        case MarkWord::STATE_LIGHT_LOCKED:
            if (old_mark.GetThreadId() != thread->GetInternalId()) {
                // Light lock is held by a different thread: roll back and bail out.
                monitor->ReleaseOnFailedInflate(thread);
                monitor_pool->FreeMonitor(monitor->GetId());
                return false;
            }
            // Carry the recursion depth over from the light lock.
            monitor->recursive_counter_ = old_mark.GetLockCount();
            break;
        case MarkWord::STATE_HASHED:
            // Preserve the identity hash previously stored in the mark word.
            monitor->SetHashCode(old_mark.GetHash());
            /* fallthrough */
            [[fallthrough]];
        case MarkWord::STATE_UNLOCKED:
            // NOLINTNEXTLINE(readability-braces-around-statements, hicpp-braces-around-statements)
            if constexpr (for_other_thread) {  // NOLINT(bugprone-suspicious-semicolon)
                // We did check above, has to be unreachable
                UNREACHABLE();
            } else {  // NOLINT(readability-misleading-indentation)
                break;
            }
        case MarkWord::STATE_HEAVY_LOCKED:
            // Has to be unreachable
            UNREACHABLE();
        case MarkWord::STATE_GC:
            LOG(FATAL, RUNTIME) << "Trying to inflate object in GC state";
            return false;
        default:
            LOG(FATAL, RUNTIME) << "Undefined object state";
            return false;
    }
    new_mark = old_mark.DecodeFromMonitor(monitor->GetId());
    ret = obj->AtomicSetMark(old_mark, new_mark);
    if (!ret) {
        // Means, someone changed the mark
        monitor->recursive_counter_ = 1;
        monitor->ReleaseOnFailedInflate(thread);
        monitor_pool->FreeMonitor(monitor->GetId());
    } else {
        // Unlike normal Acquire, AddMonitor should be done not in InitWithOwner but after successful inflation to avoid
        // data race
        thread->AddMonitor(monitor);
    }
    return ret;
}
765
766 bool Monitor::Deflate(ObjectHeader *obj)
767 {
768 Monitor *monitor = nullptr;
769 MarkWord old_mark = obj->AtomicGetMark();
770 MarkWord::ObjectState state = old_mark.GetState();
771 bool ret = false;
772
773 if (state != MarkWord::STATE_HEAVY_LOCKED) {
774 LOG(DEBUG, RUNTIME) << "Trying to deflate non-heavy locked object";
775 return false;
776 }
777
778 auto *monitor_pool = MTManagedThread::GetCurrent()->GetMonitorPool();
779 monitor = monitor_pool->LookupMonitor(old_mark.GetMonitorId());
780 if (monitor == nullptr) {
781 LOG(DEBUG, RUNTIME) << "Monitor was already destroyed by someone else.";
782 return false;
783 }
784
785 ret = monitor->DeflateInternal();
786 if (ret) {
787 monitor_pool->FreeMonitor(monitor->GetId());
788 }
789 return ret;
790 }
791
/**
 * Deflates this monitor back to an unlocked (or hashed) mark word.
 * Only succeeds when the monitor is unowned, uncontended and has no waiters.
 *
 * @return true if the object's mark word no longer references this monitor
 */
bool Monitor::DeflateInternal()
{
    if (GetOwner() != nullptr) {
        LOG(DEBUG, RUNTIME) << "Trying to deflate monitor which already has owner";
        return false;
    }
    // Atomic with relaxed order reason: memory access in monitor
    if (waiters_counter_.load(std::memory_order_relaxed) > 0) {
        LOG(DEBUG, RUNTIME) << "Trying to deflate monitor which is trying to be acquired by other threads";
        return false;
    }
    // Holding lock_ keeps concurrent Acquire out while the mark word is rewritten.
    if (!lock_.TryLock()) {
        LOG(DEBUG, RUNTIME) << "Couldn't TryLock monitor for deflation";
        return false;
    }
    ASSERT(obj_ != nullptr);
    ASSERT(recursive_counter_ == 0);
    ASSERT(waiters_.Empty());
    ASSERT(to_wakeup_.Empty());
    ASSERT(GetOwner() == static_cast<MTManagedThread *>(nullptr));
    MarkWord old_mark = obj_->AtomicGetMark();
    MarkWord new_mark = old_mark;
    if (HasHashCode()) {
        // Keep the identity hash by encoding it back into the mark word.
        new_mark = old_mark.DecodeFromHash(GetHashCode());
        LOG(DEBUG, RUNTIME) << "Deflating monitor to hash";
    } else {
        new_mark = old_mark.DecodeFromUnlocked();
        LOG(DEBUG, RUNTIME) << "Deflating monitor to unlocked";
    }

    // Warning: AtomicSetMark is weak, retry
    while (!obj_->AtomicSetMark<false>(old_mark, new_mark)) {
        MarkWord cur_mark = obj_->AtomicGetMark();
        if (old_mark.GetValue() != cur_mark.GetValue()) {
            // Mark word changed concurrently: recompute the replacement value.
            old_mark = cur_mark;
            new_mark = HasHashCode() ? old_mark.DecodeFromHash(GetHashCode()) : old_mark.DecodeFromUnlocked();
        }
    }
    lock_.Unlock();
    return true;
}
833
834 uint8_t Monitor::HoldsLock(ObjectHeader *obj)
835 {
836 MarkWord mark = obj->AtomicGetMark();
837 MarkWord::ObjectState state = mark.GetState();
838 MTManagedThread *thread = MTManagedThread::GetCurrent();
839
840 switch (state) {
841 case MarkWord::STATE_HEAVY_LOCKED: {
842 Monitor *monitor = thread->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
843 // asm has no boolean type
844 return (monitor->GetOwner() == thread) ? 1 : 0;
845 }
846 case MarkWord::STATE_LIGHT_LOCKED:
847 return (mark.GetThreadId() == thread->GetInternalId()) ? 1 : 0;
848 case MarkWord::STATE_UNLOCKED:
849 case MarkWord::STATE_HASHED:
850 case MarkWord::STATE_GC:
851 return 0;
852 default:
853 LOG(FATAL, RUNTIME) << "Undefined object state";
854 return 0;
855 }
856 }
857
858 uint32_t Monitor::GetLockOwnerOsThreadID(ObjectHeader *obj)
859 {
860 if (obj == nullptr) {
861 return MTManagedThread::NON_INITIALIZED_THREAD_ID;
862 }
863 MarkWord mark = obj->AtomicGetMark();
864 MarkWord::ObjectState state = mark.GetState();
865
866 switch (state) {
867 case MarkWord::STATE_HEAVY_LOCKED: {
868 Monitor *monitor = MTManagedThread::GetCurrent()->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
869 MTManagedThread *owner = monitor->GetOwner();
870 if (owner == nullptr) {
871 return MTManagedThread::NON_INITIALIZED_THREAD_ID;
872 }
873 return owner->GetId();
874 }
875 case MarkWord::STATE_LIGHT_LOCKED: {
876 return mark.GetThreadId();
877 }
878 case MarkWord::STATE_UNLOCKED:
879 case MarkWord::STATE_HASHED:
880 case MarkWord::STATE_GC:
881 return 0;
882 default:
883 LOG(FATAL, RUNTIME) << "Undefined object state";
884 return 0;
885 }
886 }
887
888 Monitor *Monitor::GetMonitorFromObject(ObjectHeader *obj)
889 {
890 if (obj != nullptr) {
891 MarkWord mark = obj->AtomicGetMark();
892 MarkWord::ObjectState state = mark.GetState();
893 switch (state) {
894 case MarkWord::STATE_HEAVY_LOCKED:
895 return MTManagedThread::GetCurrent()->GetMonitorPool()->LookupMonitor(mark.GetMonitorId());
896 case MarkWord::STATE_LIGHT_LOCKED:
897 return nullptr;
898 default:
899 // Shouldn't happen, return nullptr
900 LOG(WARNING, RUNTIME) << "obj:" << obj << " not locked by heavy or light locked";
901 }
902 }
903 return nullptr;
904 }
905
/// Opens a trace point ("Locking 0x..." or "Waiting on 0x...") for the object
/// when tracing is enabled; paired with TraceMonitorUnLock().
inline void Monitor::TraceMonitorLock(ObjectHeader *obj, bool is_wait)
{
    if (UNLIKELY(panda::trace::IsEnabled())) {
        // Use stack memory to avoid "Too many allocations" error.
        constexpr int BUF_SIZE = 32;
        std::array<char, BUF_SIZE> buf = {};
        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
        int ret = snprintf_s(buf.data(), BUF_SIZE, BUF_SIZE - 1,
                             (is_wait ? "Waiting on 0x%" PRIxPTR : "Locking 0x%" PRIxPTR), ToUintPtr(obj));
        if (ret < 0) {
            // A failed format here is a programming error, not a runtime condition.
            UNREACHABLE();
        }
        trace::BeginTracePoint(buf.data());
    }
}
921
/// Closes the trace point opened by TraceMonitorLock (no-op when tracing is disabled).
inline void Monitor::TraceMonitorUnLock()
{
    if (UNLIKELY(panda::trace::IsEnabled())) {
        trace::EndTracePoint();
    }
}
928
929 uint32_t Monitor::GetHashCode()
930 {
931 while (!HasHashCode()) {
932 uint32_t expected = 0;
933 uint32_t newHash = ObjectHeader::GenerateHashCode();
934 if (hash_code_.compare_exchange_weak(expected, newHash)) {
935 return newHash;
936 }
937 }
938 ASSERT(HasHashCode());
939 // Atomic with relaxed order reason: memory access in monitor
940 return hash_code_.load(std::memory_order_relaxed);
941 }
942
/// Returns true once a hash code has been stored in this monitor.
/// A zero value means "not generated yet" — presumably GenerateHashCode never
/// returns 0; TODO confirm.
bool Monitor::HasHashCode() const
{
    // Atomic with relaxed order reason: memory access in monitor
    return hash_code_.load(std::memory_order_relaxed) != 0;
}
948
949 void Monitor::SetHashCode(uint32_t hash)
950 {
951 ASSERT(GetOwner() == MTManagedThread::GetCurrent());
952 if (!HasHashCode()) {
953 // Atomic with relaxed order reason: memory access in monitor
954 hash_code_.store(hash, std::memory_order_relaxed);
955 } else {
956 LOG(FATAL, RUNTIME) << "Attempt to rewrite hash in monitor";
957 }
958 }
959
960 } // namespace panda
961