// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/parked-scope.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/js-function.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/trace-event.h"

namespace v8 {
namespace internal {

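// Background task that processes a single optimization job: it takes the next
// job from the dispatcher's input queue and compiles it on a worker thread.
// ref_count_ tracks outstanding tasks so the dispatcher can wait for them.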
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
 public:
  explicit CompileTask(Isolate* isolate,
                       OptimizingCompileDispatcher* dispatcher)
      : CancelableTask(isolate),
        isolate_(isolate),
        worker_thread_runtime_call_stats_(
            isolate->counters()->worker_thread_runtime_call_stats()),
        dispatcher_(dispatcher) {
    ++dispatcher_->ref_count_;
  }

  CompileTask(const CompileTask&) = delete;
  CompileTask& operator=(const CompileTask&) = delete;

  ~CompileTask() override = default;

 private:
  // v8::Task overrides.
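  // Takes one job from the input queue, compiles it (optionally after the
  // configured recompilation delay), and notifies ref_count_zero_ when the
  // last outstanding task finishes.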
  void RunInternal() override {
    LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
    DCHECK(local_isolate.heap()->IsParked());

    {
      RCS_SCOPE(&local_isolate,
                RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);

      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.OptimizeBackground");

      if (dispatcher_->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher_->recompilation_delay_));
      }

      dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate),
                               &local_isolate);
    }
    {
      base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
      if (--dispatcher_->ref_count_ == 0) {
        dispatcher_->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;
  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
  OptimizingCompileDispatcher* dispatcher_;
};

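// The dispatcher may only be destroyed once every background task has
// finished and the input queue is empty.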
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
  DCHECK_EQ(0, ref_count_);
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

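// Pops the next job from the front of the circular input queue, or returns
// nullptr if the queue is empty. Guarded by input_queue_mutex_ since it is
// called from background threads.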
TurbofanCompilationJob* OptimizingCompileDispatcher::NextInput(
    LocalIsolate* local_isolate) {
  base::MutexGuard access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return nullptr;
  TurbofanCompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  return job;
}

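// Runs the given job on the background thread, moves it to the output queue,
// and, if finalization is enabled, requests an install-code interrupt so the
// main thread picks the result up.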
void OptimizingCompileDispatcher::CompileNext(TurbofanCompilationJob* job,
                                              LocalIsolate* local_isolate) {
  if (!job) return;

  // The function may have already been optimized by OSR. Simply continue.
  CompilationJob::Status status =
      job->ExecuteJob(local_isolate->runtime_call_stats(), local_isolate);
  USE(status);  // Prevent an unused-variable error.

  {
    // Use a mutex to make sure that functions marked for install
    // are always also queued.
    base::MutexGuard access_output_queue_(&output_queue_mutex_);
    output_queue_.push(job);
  }

  if (finalize()) isolate_->stack_guard()->RequestInstallCode();
}

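// Disposes every finished job in the output queue without installing it,
// optionally restoring the affected function's code.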
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    std::unique_ptr<TurbofanCompilationJob> job;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job.reset(output_queue_.front());
      output_queue_.pop();
    }

    Compiler::DisposeTurbofanCompilationJob(job.get(), restore_function_code);
  }
}

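// Drops every job still waiting in the input queue, disposing each one with
// restore_function_code set to true.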
void OptimizingCompileDispatcher::FlushInputQueue() {
  base::MutexGuard access_input_queue_(&input_queue_mutex_);
  while (input_queue_length_ > 0) {
    std::unique_ptr<TurbofanCompilationJob> job(
        input_queue_[InputQueueIndex(0)]);
    DCHECK_NOT_NULL(job);
    input_queue_shift_ = InputQueueIndex(1);
    input_queue_length_--;
    Compiler::DisposeTurbofanCompilationJob(job.get(), true);
  }
}

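// Blocks until all outstanding background compile tasks have finished. In
// debug builds, also checks that the input queue has been drained.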
void OptimizingCompileDispatcher::AwaitCompileTasks() {
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
  }

#ifdef DEBUG
  base::MutexGuard access_input_queue(&input_queue_mutex_);
  CHECK_EQ(input_queue_length_, 0);
#endif  // DEBUG
}

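// Discards all pending work: clears the input queue, optionally waits for
// running background tasks, and then clears the output queue.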
void OptimizingCompileDispatcher::FlushQueues(
    BlockingBehavior blocking_behavior, bool restore_function_code) {
  FlushInputQueue();
  if (blocking_behavior == BlockingBehavior::kBlock) {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
  }
  FlushOutputQueue(restore_function_code);
}

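// Flushes both queues while restoring function code, tracing the flush if
// requested.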
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  HandleScope handle_scope(isolate_);
  FlushQueues(blocking_behavior, true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues. (mode: %s)\n",
           (blocking_behavior == BlockingBehavior::kBlock) ? "blocking"
                                                           : "non blocking");
  }
}

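// Blocks until background work is done and discards any remaining jobs
// without restoring function code.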
void OptimizingCompileDispatcher::Stop() {
  HandleScope handle_scope(isolate_);
  FlushQueues(BlockingBehavior::kBlock, false);
  // At this point the optimizing compiler thread's event loop has stopped.
  // There is no need for a mutex when reading input_queue_length_.
  DCHECK_EQ(input_queue_length_, 0);
}

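// Main-thread finalization: installs the code produced by each job in the
// output queue, unless the function has already been given code of the
// requested kind by a racing task.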
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    std::unique_ptr<TurbofanCompilationJob> job;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job.reset(output_queue_.front());
      output_queue_.pop();
    }
    OptimizedCompilationInfo* info = job->compilation_info();
    Handle<JSFunction> function(*info->closure(), isolate_);

    // If another racing task has already finished compiling and installing the
    // requested code kind on the function, throw out the current job.
    if (!info->is_osr() && function->HasAvailableCodeKind(info->code_kind())) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      Compiler::DisposeTurbofanCompilationJob(job.get(), false);
      continue;
    }

    Compiler::FinalizeTurbofanCompilationJob(job.get(), isolate_);
  }
}

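// Returns whether any compilation work is still queued or in flight. Main
// thread only.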
bool OptimizingCompileDispatcher::HasJobs() {
  DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
  // Note: This relies on {output_queue_} being mutated by a background thread
  // only when {ref_count_} is not zero. Also, {ref_count_} is never
  // incremented by a background thread.
  return ref_count_ != 0 || !output_queue_.empty();
}

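// Appends a job to the input queue and schedules a CompileTask on a worker
// thread to process it.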
void OptimizingCompileDispatcher::QueueForOptimization(
    TurbofanCompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::MutexGuard access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  V8::GetCurrentPlatform()->CallOnWorkerThread(
      std::make_unique<CompileTask>(isolate_, this));
}

}  // namespace internal
}  // namespace v8