/**
 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RUNTIME_THREAD_INL_H_
#define PANDA_RUNTIME_THREAD_INL_H_

#include "runtime/handle_base.h"
#include "runtime/global_handle_storage-inl.h"
#include "runtime/handle_storage-inl.h"
#include "runtime/include/thread.h"
#include "runtime/include/panda_vm.h"

namespace ark {

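// Inline definitions for the ManagedThread handle-scope and handle-storage accessors
// (specialized for coretypes::TaggedType and ObjectHeader *) and for the
// MTManagedThread stack-overflow check and monitor bookkeeping helpers.
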
template <>
inline void ManagedThread::PushHandleScope<coretypes::TaggedType>(HandleScope<coretypes::TaggedType> *handleScope)
{
    taggedHandleScopes_.push_back(handleScope);
}

template <>
inline HandleScope<coretypes::TaggedType> *ManagedThread::PopHandleScope<coretypes::TaggedType>()
{
    HandleScope<coretypes::TaggedType> *scope = taggedHandleScopes_.back();
    taggedHandleScopes_.pop_back();
    return scope;
}

template <>
inline HandleScope<coretypes::TaggedType> *ManagedThread::GetTopScope<coretypes::TaggedType>() const
{
    if (taggedHandleScopes_.empty()) {
        return nullptr;
    }
    return taggedHandleScopes_.back();
}

template <>
inline HandleStorage<coretypes::TaggedType> *ManagedThread::GetHandleStorage<coretypes::TaggedType>() const
{
    return taggedHandleStorage_;
}

template <>
inline GlobalHandleStorage<coretypes::TaggedType> *ManagedThread::GetGlobalHandleStorage<coretypes::TaggedType>() const
{
    return taggedGlobalHandleStorage_;
}

template <>
inline void ManagedThread::PushHandleScope<ObjectHeader *>(HandleScope<ObjectHeader *> *handleScope)
{
    objectHeaderHandleScopes_.push_back(handleScope);
}

template <>
inline HandleScope<ObjectHeader *> *ManagedThread::PopHandleScope<ObjectHeader *>()
{
    HandleScope<ObjectHeader *> *scope = objectHeaderHandleScopes_.back();
    objectHeaderHandleScopes_.pop_back();
    return scope;
}

template <>
inline HandleScope<ObjectHeader *> *ManagedThread::GetTopScope<ObjectHeader *>() const
{
    if (objectHeaderHandleScopes_.empty()) {
        return nullptr;
    }
    return objectHeaderHandleScopes_.back();
}

template <>
inline HandleStorage<ObjectHeader *> *ManagedThread::GetHandleStorage<ObjectHeader *>() const
{
    return objectHeaderHandleStorage_;
}

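// Illustrative push/pop pairing (sketch only, not part of this header; `thread`
// and `scope` are hypothetical caller-owned values):
//
//     thread->PushHandleScope<ObjectHeader *>(scope);           // scope becomes the top scope
//     ASSERT(thread->GetTopScope<ObjectHeader *>() == scope);
//     ...
//     thread->PopHandleScope<ObjectHeader *>();                 // previous scope is the top again
//
// PopHandleScope() assumes at least one scope has been pushed; only GetTopScope()
// tolerates an empty scope stack (it returns nullptr).
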
template <bool CHECK_NATIVE_STACK, bool CHECK_IFRAME_STACK>
ALWAYS_INLINE inline bool ManagedThread::StackOverflowCheck()
{
    if (!StackOverflowCheckResult<CHECK_NATIVE_STACK, CHECK_IFRAME_STACK>()) {
        // We are about to throw an exception that will use the reserved stack space, so disable the check
        DisableStackOverflowCheck();
        ark::ThrowStackOverflowException(this);
        // The exception has been thrown, so restore the overflow check
        EnableStackOverflowCheck();
        return false;
    }
    return true;
}

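// Typical call pattern (illustrative sketch; the calling context is hypothetical):
//
//     if (!thread->StackOverflowCheck<true, true>()) {
//         // The stack-overflow exception has already been thrown on this thread; unwind.
//         return;
//     }
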
ALWAYS_INLINE inline MonitorPool *MTManagedThread::GetMonitorPool()
{
    return GetVM()->GetMonitorPool();
}

ALWAYS_INLINE inline int32_t MTManagedThread::GetMonitorCount()
{
    // Atomic with relaxed order reason: data race on monitorCount_ with no synchronization or ordering constraints
    // imposed on other reads or writes
    return monitorCount_.load(std::memory_order_relaxed);
}

ALWAYS_INLINE inline void MTManagedThread::AddMonitor(Monitor *monitor)
{
    // Atomic with relaxed order reason: data race on monitorCount_ with no synchronization or ordering constraints
    // imposed on other reads or writes
    monitorCount_.fetch_add(1, std::memory_order_relaxed);
    LOG(DEBUG, RUNTIME) << "Adding monitor " << monitor->GetId();
}

ALWAYS_INLINE inline void MTManagedThread::RemoveMonitor(Monitor *monitor)
{
    // Atomic with relaxed order reason: data race on monitorCount_ with no synchronization or ordering constraints
    // imposed on other reads or writes
    monitorCount_.fetch_sub(1, std::memory_order_relaxed);
    LOG(DEBUG, RUNTIME) << "Removing monitor " << monitor->GetId();
}

ALWAYS_INLINE inline void MTManagedThread::ReleaseMonitors()
{
    if (GetMonitorCount() > 0) {
        GetMonitorPool()->ReleaseMonitors(this);
    }
    ASSERT(GetMonitorCount() == 0);
}

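// Illustrative bookkeeping pairing (sketch only; the acquire/release sites are
// hypothetical callers elsewhere in the runtime):
//
//     thread->AddMonitor(monitor);     // when the thread acquires `monitor`
//     ...
//     thread->RemoveMonitor(monitor);  // when the thread releases it
//
// ReleaseMonitors() asks the MonitorPool to release whatever is still counted as
// held, and the ASSERT above expects the count to be back at zero afterwards.
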
}  // namespace ark

#endif  // PANDA_RUNTIME_THREAD_INL_H_