// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

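// Deletes |job|. If |restore_function_code| is set, the function's code is
// first reset to the shared (unoptimized) code, so the closure does not keep
// pointing at optimized code that will never be installed.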
void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->info()->closure();
    function->ReplaceCode(function->shared()->code());
    // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
    // but we probably shouldn't call ReplaceCode either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureLiterals(function);
  }
  delete job;
}

}  // namespace


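// Background task that performs the concurrent optimization phase for one
// queued job. Each live task holds a reference on the dispatcher
// (ref_count_), which Flush() and Stop() use to wait for in-flight tasks.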
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
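    // The optimization phase runs on a worker thread and must not touch the
    // JavaScript heap: these scopes turn any accidental allocation, handle
    // creation, or handle dereference into a debug-mode check failure.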
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0("v8", "V8.RecompileConcurrent");

      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher->recompilation_delay_));
      }

      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};


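// The dispatcher may only be destroyed once no background task is in
// flight and the input queue has been fully drained.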
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

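// Takes the next job off the front of the circular input queue. If
// |check_if_flushing| is set and the dispatcher is in FLUSH mode, the job
// is disposed of (restoring the function's code) and NULL is returned.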
CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  CompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return NULL;
    }
  }
  return job;
}

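// Runs the thread-safe optimization phase of |job| and pushes the result
// onto the output queue for later installation on the main thread.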
void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->OptimizeGraph();
  USE(status);  // Prevent an unused-variable error.

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}


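// Disposes of every job waiting in the output queue, optionally restoring
// each function's original code.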
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    CompilationJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}


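// Discards all pending jobs: switches to FLUSH mode so background tasks
// dispose of their input, waits for in-flight tasks to finish, then empties
// the output queue while restoring the affected functions' code.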
void OptimizingCompileDispatcher::Flush() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


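// Shuts the dispatcher down. With a nonzero recompilation delay the
// remaining input is compiled and installed synchronously; otherwise any
// pending output is simply discarded.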
void OptimizingCompileDispatcher::Stop() {
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}


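// Called on the main thread (via the stack guard's install-code request) to
// finalize and install the code of every job in the output queue. Jobs whose
// function was already optimized in the meantime (e.g. by OSR) are discarded.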
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    CompilationJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (function->IsOptimized()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeCompilationJob(job);
    }
  }
}

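// Enqueues |job| and kicks off a background compile task, unless
// --block-concurrent-recompilation is on, in which case the task is held
// back until Unblock() is called.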
void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
  }
}


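// Posts one background compile task for each job that was held back by
// --block-concurrent-recompilation.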
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
    blocked_jobs_--;
  }
}


}  // namespace internal
}  // namespace v8