/*
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "libpandabase/utils/logger.h"

#include "verification/jobs/thread_pool.h"

namespace panda::verifier {
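// Per-worker initialization hook: gives the native thread a readable name of the form
// "verifier#<N>" so individual verifier workers can be told apart in logs and debuggers.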
bool Processor::Init()
{
    PandaString thread_name {"verifier#"};
    thread_name += NumToStr(types_.GetThreadNum());
    panda::os::thread::SetThreadName(panda::os::thread::GetNativeHandle(), thread_name.c_str());
    LOG(DEBUG, VERIFIER) << "Thread ID " << panda::os::thread::GetCurrentThreadId() << " is named " << thread_name;
    return true;
}

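// Verifies the method carried by a single task: skips methods that are already verified,
// looks the method up in the verifier cache, runs the checks and stores the verdict via
// Method::SetVerified(). Always returns true, since the return value is ignored by the
// thread pool worker (see the comment on the empty-task case below).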
bool Processor::Process(Task task)
{
    if (task.IsEmpty()) {
        // The return value of this method is currently ignored in panda::ThreadPool::WorkerEntry.
        // If this changes, all return statements should be reviewed.
        return true;
    }

    auto &method = task.GetMethod();
    if (method.IsVerified()) {
        LOG(DEBUG, VERIFIER) << "Method '" << method.GetFullName(true) << "' is already verified";
        return true;
    }

    auto method_name = method.GetFullName();

    auto opt_cache = ThreadPool::GetCache();
    if (!opt_cache.HasRef()) {
        LOG(INFO, VERIFIER) << "Attempt to verify " << method.GetFullName(true)
                            << " after the thread pool started shutdown, ignoring";
        return true;
    }
    auto &cache = opt_cache.Get();
    auto opt_cached_method = cache.FastAPI().GetMethod(method.GetClass()->GetSourceLang(), method.GetUniqId(), true);
    if (!opt_cached_method.HasRef()) {
        LOG(ERROR, VERIFIER) << "Method '" << method.GetFullName(true)
                             << "' not found in verifier cache, cannot create a job.";
        method.SetVerified(false);
        return true;
    }
    const auto &method_options = Runtime::GetCurrent()->GetVerificationOptions().Debug.GetMethodOptions();
    const auto &verif_method_options = method_options[method_name];
    LOG(DEBUG, VERIFIER) << "Verification config for '" << method_name << "': " << verif_method_options.GetName();
    LOG(INFO, VERIFIER) << "Verifier thread #" << types_.GetThreadNum() << " started verification of method '"
                        << method.GetFullName(true) << "'";

    Job job {method, opt_cached_method.Get(), verif_method_options};
    bool result = job.DoChecks(cache, types_);
    method.SetVerified(result);
    LOG(INFO, VERIFIER) << "Verification result for method " << method.GetFullName(true)
                        << (result ? ": OK" : ": FAIL");

    return true;
}

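// Per-worker teardown hook: optionally dumps this worker's type system when the
// corresponding debug option is enabled.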
bool Processor::Destroy()
{
    bool show_subtyping =
        Runtime::GetCurrentSync([](auto &instance) { return instance.GetVerificationOptions().Debug.Show.TypeSystem; });
    if (show_subtyping) {
        LOG(DEBUG, VERIFIER) << "Typesystem of verifier thread #" << types_.GetThreadNum();
        types_.DisplayTypeSystem([](const auto &str) { LOG(DEBUG, VERIFIER) << str; });
    }
    return true;
}

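// Schedules a method for background verification; returns false if the pool has already
// been shut down or the underlying thread pool rejected the task.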
bool ThreadPool::Enqueue(Method *method)
{
    Data *data = GetData();
    if (data == nullptr) {
        LOG(DEBUG, VERIFIER) << "Attempted to enqueue a method after shutdown";
        return false;
    }

    bool enqueued = data->thread_pool.PutTask(Task {*method});
    return enqueued;
}

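// Creates the shared verifier thread pool at most once; concurrent initializers race on
// a compare-exchange and the loser releases its redundant Data instance.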
void ThreadPool::Initialize(mem::InternalAllocatorPtr allocator, size_t num_threads)
{
    ASSERT(allocator != nullptr);

    // Atomic with release order reason: data race with shutdown_; writes before this store must become
    // visible to the corresponding acquire load
    shutdown_.store(false, std::memory_order_release);
    if (GetData() != nullptr) {
        return;
    }

    // Atomic with release order reason: data race with next_thread_num_; writes before this store must become
    // visible to the corresponding acquire load
    Processor::next_thread_num_.store(0, std::memory_order_release);
    allocator_ = allocator;
    Data *data = allocator->New<Data>(allocator, num_threads);
    if (data == nullptr) {
        LOG(ERROR, VERIFIER) << "Insufficient memory to initialize verifier thread pool";
        return;
    }
    Data *expected = nullptr;
    if (data_.compare_exchange_strong(expected, data, std::memory_order_seq_cst)) {
        LOG(INFO, VERIFIER) << "Initialized verifier thread pool";
    } else {
        // already initialized by another thread
        allocator->Delete(data);
    }
}

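// Shuts the pool down exactly once: the first caller to flip shutdown_ stops the worker
// threads and releases the shared Data; later calls are no-ops.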
void ThreadPool::Destroy()
{
    // Atomic with seq_cst order reason: seq_cst kept from the initial pass of adding memory order annotations
    if (shutdown_.exchange(true, std::memory_order_seq_cst)) {
        // the value was already true, so shutdown has already started elsewhere
        return;
    }

    Data *data = GetData(true);
    if (data == nullptr) {
        return;
    }

    data->thread_pool.Shutdown(true);

    // Atomic with seq_cst order reason: seq_cst kept from the initial pass of adding memory order annotations
    Data *data1 = data_.exchange(nullptr, std::memory_order_seq_cst);
    if (data1 != nullptr) {
        // this should happen in at most one thread even if Destroy is called from many
        allocator_->Delete(data1);
        LOG(INFO, VERIFIER) << "Destroyed verifier thread pool";
    }
}

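// Wakes up all threads waiting on the pool's condition variable, e.g. waiters that block
// until a method's verification status has been updated.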
void ThreadPool::SignalMethodVerified()
{
    Data *data = GetData();
    if (data != nullptr) {
        panda::os::memory::LockHolder lck {data->lock};
        data->cond_var.SignalAll();
    }
}

}  // namespace panda::verifier