/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_THREAD_INL_H_
#define PANDA_RUNTIME_THREAD_INL_H_

#include "runtime/handle_base.h"
#include "runtime/global_handle_storage-inl.h"
#include "runtime/handle_storage-inl.h"
#include "runtime/include/thread.h"
#include "runtime/include/panda_vm.h"

namespace panda {

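// Specializations of the ManagedThread handle-scope accessors for coretypes::TaggedType
// (tagged values used by dynamic languages). PushHandleScope/PopHandleScope maintain a
// per-thread stack of scopes; GetTopScope returns the innermost scope or nullptr.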
template <>
inline void ManagedThread::PushHandleScope<coretypes::TaggedType>(HandleScope<coretypes::TaggedType> *handle_scope)
{
    tagged_handle_scopes_.push_back(handle_scope);
}

template <>
inline HandleScope<coretypes::TaggedType> *ManagedThread::PopHandleScope<coretypes::TaggedType>()
{
    HandleScope<coretypes::TaggedType> *scope = tagged_handle_scopes_.back();
    tagged_handle_scopes_.pop_back();
    return scope;
}

template <>
inline HandleScope<coretypes::TaggedType> *ManagedThread::GetTopScope<coretypes::TaggedType>() const
{
    if (tagged_handle_scopes_.empty()) {
        return nullptr;
    }
    return tagged_handle_scopes_.back();
}

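// Accessors for the per-thread tagged-handle storage and the global handle storage
// (handles that are not tied to a particular scope).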
template <>
inline HandleStorage<coretypes::TaggedType> *ManagedThread::GetHandleStorage<coretypes::TaggedType>() const
{
    return tagged_handle_storage_;
}

template <>
inline GlobalHandleStorage<coretypes::TaggedType> *ManagedThread::GetGlobalHandleStorage<coretypes::TaggedType>() const
{
    return tagged_global_handle_storage_;
}

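// The same scope and storage accessors, specialized for raw ObjectHeader * handles.
// Note: there is no GlobalHandleStorage specialization for ObjectHeader *.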
template <>
inline void ManagedThread::PushHandleScope<ObjectHeader *>(HandleScope<ObjectHeader *> *handle_scope)
{
    object_header_handle_scopes_.push_back(handle_scope);
}

template <>
inline HandleScope<ObjectHeader *> *ManagedThread::PopHandleScope<ObjectHeader *>()
{
    HandleScope<ObjectHeader *> *scope = object_header_handle_scopes_.back();
    object_header_handle_scopes_.pop_back();
    return scope;
}

template <>
inline HandleScope<ObjectHeader *> *ManagedThread::GetTopScope<ObjectHeader *>() const
{
    if (object_header_handle_scopes_.empty()) {
        return nullptr;
    }
    return object_header_handle_scopes_.back();
}

template <>
inline HandleStorage<ObjectHeader *> *ManagedThread::GetHandleStorage<ObjectHeader *>() const
{
    return object_header_handle_storage_;
}

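// Checks the native and/or interpreter-frame stack limits selected by the template
// parameters. On overflow it throws StackOverflowException (with the check temporarily
// disabled so the throw itself can use the reserved stack space) and returns false.
//
// Call-site sketch (hypothetical, assuming both checks are wanted):
//     if (UNLIKELY(!thread->StackOverflowCheck<true, true>())) {
//         return;  // the exception has already been thrown on this thread
//     }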
template <bool check_native_stack, bool check_iframe_stack>
ALWAYS_INLINE inline bool ManagedThread::StackOverflowCheck()
{
    if (!StackOverflowCheckResult<check_native_stack, check_iframe_stack>()) {
        // We are about to throw an exception that will use the reserved stack space, so disable the check
        DisableStackOverflowCheck();
        panda::ThrowStackOverflowException(this);
        // After the exception has been thrown, restore the overflow check
        EnableStackOverflowCheck();
        return false;
    }
    return true;
}

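// Monitor bookkeeping for multi-threaded managed threads: monitor_count_ tracks how many
// monitors this thread currently holds, while the Monitor objects themselves live in the
// per-VM MonitorPool.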
ALWAYS_INLINE inline MonitorPool *MTManagedThread::GetMonitorPool()
{
    return GetVM()->GetMonitorPool();
}

ALWAYS_INLINE inline int32_t MTManagedThread::GetMonitorCount()
{
    // Atomic with relaxed order reason: data race with monitor_count_ with no synchronization or ordering constraints
    // imposed on other reads or writes
    return monitor_count_.load(std::memory_order_relaxed);
}

ALWAYS_INLINE inline void MTManagedThread::AddMonitor(Monitor *monitor)
{
    // Atomic with relaxed order reason: data race with monitor_count_ with no synchronization or ordering constraints
    // imposed on other reads or writes
    monitor_count_.fetch_add(1, std::memory_order_relaxed);
    LOG(DEBUG, RUNTIME) << "Adding monitor " << monitor->GetId();
}

ALWAYS_INLINE inline void MTManagedThread::RemoveMonitor(Monitor *monitor)
{
    // Atomic with relaxed order reason: data race with monitor_count_ with no synchronization or ordering constraints
    // imposed on other reads or writes
    monitor_count_.fetch_sub(1, std::memory_order_relaxed);
    LOG(DEBUG, RUNTIME) << "Removing monitor " << monitor->GetId();
}

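// Releases, via the MonitorPool, every monitor this thread still holds (for example when
// the thread is being destroyed); the counter is expected to be zero afterwards.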
ALWAYS_INLINE inline void MTManagedThread::ReleaseMonitors()
{
    if (GetMonitorCount() > 0) {
        GetMonitorPool()->ReleaseMonitors(this);
    }
    ASSERT(GetMonitorCount() == 0);
}

}  // namespace panda

#endif  // PANDA_RUNTIME_THREAD_INL_H_