// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/compiler-dispatcher.h"

#include "include/v8-platform.h"
#include "include/v8.h"
#include "src/base/platform/time.h"
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler-dispatcher/unoptimized-compile-job.h"
#include "src/flags.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {

namespace {

enum class ExceptionHandling { kSwallow, kThrow };

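// Advances |job| by exactly one state transition on the main thread. If the
// step fails, the pending exception is swallowed or left for the caller,
// depending on |exception_handling|. Returns whether the job is now failed.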
bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
                            ExceptionHandling exception_handling) {
  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherForgroundStep");
  switch (job->status()) {
    case CompilerDispatcherJob::Status::kInitial:
      job->PrepareOnMainThread(isolate);
      break;
    case CompilerDispatcherJob::Status::kPrepared:
      job->Compile(false);
      break;
    case CompilerDispatcherJob::Status::kCompiled:
      job->FinalizeOnMainThread(isolate);
      break;
    case CompilerDispatcherJob::Status::kHasErrorsToReport:
      job->ReportErrorsOnMainThread(isolate);
      break;
    case CompilerDispatcherJob::Status::kFailed:
    case CompilerDispatcherJob::Status::kDone:
      UNREACHABLE();
  }

  DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
  if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
    isolate->clear_pending_exception();
  }
  return job->IsFailed();
}

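// Runs the only job step that may execute off the main thread: compiling an
// already-prepared job.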
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
  DCHECK(job->NextStepCanRunOnAnyThread());
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherBackgroundStep");
  switch (job->status()) {
    case CompilerDispatcherJob::Status::kPrepared:
      job->Compile(true);
      break;
    default:
      UNREACHABLE();
  }
}

// Theoretically we get 50ms of idle time max, however it's unlikely that
// we'll get all of it, so try to be conservative.
const double kMaxIdleTimeToExpectInMs = 40;

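// Task that aborts all jobs, without blocking, in response to a memory
// pressure notification.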
class MemoryPressureTask : public CancelableTask {
 public:
  MemoryPressureTask(CancelableTaskManager* task_manager,
                     CompilerDispatcher* dispatcher);
  ~MemoryPressureTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
};

MemoryPressureTask::MemoryPressureTask(CancelableTaskManager* task_manager,
                                       CompilerDispatcher* dispatcher)
    : CancelableTask(task_manager), dispatcher_(dispatcher) {}

MemoryPressureTask::~MemoryPressureTask() {}

void MemoryPressureTask::RunInternal() {
  dispatcher_->AbortAll(BlockingBehavior::kDontBlock);
}

}  // namespace

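// Task that cleans up jobs that are not running on a background thread once
// an asynchronous abort has been requested.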
class CompilerDispatcher::AbortTask : public CancelableTask {
 public:
  AbortTask(CancelableTaskManager* task_manager,
            CompilerDispatcher* dispatcher);
  ~AbortTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(AbortTask);
};

CompilerDispatcher::AbortTask::AbortTask(CancelableTaskManager* task_manager,
                                         CompilerDispatcher* dispatcher)
    : CancelableTask(task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::AbortTask::~AbortTask() {}

void CompilerDispatcher::AbortTask::RunInternal() {
  dispatcher_->AbortInactiveJobs();
}

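// Task that performs background steps of pending jobs on a worker thread.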
class CompilerDispatcher::WorkerTask : public CancelableTask {
 public:
  WorkerTask(CancelableTaskManager* task_manager,
             CompilerDispatcher* dispatcher);
  ~WorkerTask() override;

  // CancelableTask implementation.
  void RunInternal() override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(WorkerTask);
};

CompilerDispatcher::WorkerTask::WorkerTask(CancelableTaskManager* task_manager,
                                           CompilerDispatcher* dispatcher)
    : CancelableTask(task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::WorkerTask::~WorkerTask() {}

void CompilerDispatcher::WorkerTask::RunInternal() {
  dispatcher_->DoBackgroundWork();
}

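// Task that performs main-thread steps of jobs during the embedder's idle
// time.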
class CompilerDispatcher::IdleTask : public CancelableIdleTask {
 public:
  IdleTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher);
  ~IdleTask() override;

  // CancelableIdleTask implementation.
  void RunInternal(double deadline_in_seconds) override;

 private:
  CompilerDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(IdleTask);
};

CompilerDispatcher::IdleTask::IdleTask(CancelableTaskManager* task_manager,
                                       CompilerDispatcher* dispatcher)
    : CancelableIdleTask(task_manager), dispatcher_(dispatcher) {}

CompilerDispatcher::IdleTask::~IdleTask() {}

void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
  dispatcher_->DoIdleWork(deadline_in_seconds);
}

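// A minimal usage sketch (hypothetical embedder-side code; assumes
// |dispatcher| is the isolate's CompilerDispatcher and |shared| a
// Handle<SharedFunctionInfo>):
//
//   if (dispatcher->Enqueue(shared)) {
//     // Parsing and compilation proceed via worker and idle tasks.
//   }
//   ...
//   if (dispatcher->IsEnqueued(shared)) {
//     dispatcher->FinishNow(shared);  // Block until the job is done.
//   }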
CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
                                       size_t max_stack_size)
    : isolate_(isolate),
      platform_(platform),
      max_stack_size_(max_stack_size),
      trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
      tracer_(new CompilerDispatcherTracer(isolate_)),
      task_manager_(new CancelableTaskManager()),
      next_job_id_(0),
      shared_to_unoptimized_job_id_(isolate->heap()),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      abort_(false),
      idle_task_scheduled_(false),
      num_worker_tasks_(0),
      main_thread_blocking_on_job_(nullptr),
      block_for_testing_(false),
      semaphore_for_testing_(0) {
  if (trace_compiler_dispatcher_ && !IsEnabled()) {
    PrintF("CompilerDispatcher: dispatcher is disabled\n");
  }
}

CompilerDispatcher::~CompilerDispatcher() {
  // To avoid crashing in unit tests due to unfinished jobs.
  AbortAll(BlockingBehavior::kBlock);
  task_manager_->CancelAndWait();
}

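// Returns whether new jobs can be accepted at all: the dispatcher must be
// enabled, there must be no memory pressure, and no abort may be in progress.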
bool CompilerDispatcher::CanEnqueue() {
  if (!IsEnabled()) return false;

  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    return false;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (abort_) return false;
  }

  return true;
}

bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
  if (!CanEnqueue()) return false;

  // We only handle functions (no eval / top-level code / native) that are
  // attached to a script.
  if (!function->script()->IsScript() || function->is_toplevel() ||
      function->native()) {
    return false;
  }

  return true;
}

CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
    std::unique_ptr<CompilerDispatcherJob> job) {
  DCHECK(!job->IsFinished());
  JobMap::const_iterator it = InsertJob(std::move(job));
  ConsiderJobForBackgroundProcessing(it->second.get());
  ScheduleIdleTaskIfNeeded();
  return it->first;
}

CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep(
    std::unique_ptr<CompilerDispatcherJob> job) {
  DCHECK(!job->IsFinished());
  JobMap::const_iterator it = InsertJob(std::move(job));
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: stepping ");
    it->second->ShortPrintOnMainThread();
    PrintF("\n");
  }
  DoNextStepOnMainThread(isolate_, it->second.get(),
                         ExceptionHandling::kSwallow);
  ConsiderJobForBackgroundProcessing(it->second.get());
  // Capture the id before RemoveIfFinished, which may invalidate |it|.
  JobId id = it->first;
  RemoveIfFinished(it);
  ScheduleIdleTaskIfNeeded();
  return id;
}

bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueue");
  if (!CanEnqueue(function)) return false;
  if (IsEnqueued(function)) return true;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: enqueuing ");
    function->ShortPrint();
    PrintF(" for parse and compile\n");
  }

  std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
      isolate_, tracer_.get(), function, max_stack_size_));
  Enqueue(std::move(job));
  return true;
}

bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherEnqueueAndStep");
  if (!CanEnqueue(function)) return false;
  if (IsEnqueued(function)) return true;

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: enqueuing ");
    function->ShortPrint();
    PrintF(" for parse and compile\n");
  }

  std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
      isolate_, tracer_.get(), function, max_stack_size_));
  EnqueueAndStep(std::move(job));
  return true;
}

bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }

bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
  if (jobs_.empty()) return false;
  return GetJobFor(function) != jobs_.end();
}

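// If |job| is currently running on a background thread, blocks until that
// step completes; otherwise just removes the job from the pending background
// set.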
void CompilerDispatcher::WaitForJobIfRunningOnBackground(
    CompilerDispatcherJob* job) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherWaitForBackgroundJob");
  RuntimeCallTimerScope runtimeTimer(
      isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);

  base::LockGuard<base::Mutex> lock(&mutex_);
  if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
    pending_background_jobs_.erase(job);
    return;
  }
  DCHECK_NULL(main_thread_blocking_on_job_);
  main_thread_blocking_on_job_ = job;
  while (main_thread_blocking_on_job_ != nullptr) {
    main_thread_blocking_signal_.Wait(&mutex_);
  }
  DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
  DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
}

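// Runs |job| to completion on the main thread, first waiting for any
// in-flight background step. Returns whether the job succeeded.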
bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) {
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: finishing ");
    job->ShortPrintOnMainThread();
    PrintF(" now\n");
  }
  WaitForJobIfRunningOnBackground(job);
  while (!job->IsFinished()) {
    DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
  }
  return !job->IsFailed();
}

bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherFinishNow");
  JobMap::const_iterator job = GetJobFor(function);
  CHECK(job != jobs_.end());
  bool result = FinishNow(job->second.get());
  RemoveIfFinished(job);
  return result;
}

void CompilerDispatcher::FinishAllNow() {
  // First finish all jobs that are not running in the background.
  for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
    CompilerDispatcherJob* job = it->second.get();
    bool is_running_in_background;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      is_running_in_background =
          running_background_jobs_.find(job) != running_background_jobs_.end();
      pending_background_jobs_.erase(job);
    }
    if (!is_running_in_background) {
      while (!job->IsFinished()) {
        DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
      }
      it = RemoveIfFinished(it);
    } else {
      ++it;
    }
  }
  // Then, potentially wait for jobs that were running in the background.
  for (auto it = jobs_.cbegin(); it != jobs_.cend();
       it = RemoveIfFinished(it)) {
    FinishNow(it->second.get());
  }
}

void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
  bool background_tasks_running =
      task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
  if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
    for (auto& it : jobs_) {
      WaitForJobIfRunningOnBackground(it.second.get());
      if (trace_compiler_dispatcher_) {
        PrintF("CompilerDispatcher: aborted ");
        it.second->ShortPrintOnMainThread();
        PrintF("\n");
      }
      it.second->ResetOnMainThread(isolate_);
    }
    jobs_.clear();
    shared_to_unoptimized_job_id_.Clear();
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      DCHECK(pending_background_jobs_.empty());
      DCHECK(running_background_jobs_.empty());
      abort_ = false;
    }
    return;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    abort_ = true;
    pending_background_jobs_.clear();
  }
  AbortInactiveJobs();

  // All running background jobs might already have scheduled idle tasks
  // instead of abort tasks. Schedule a single abort task here to make sure
  // they get processed as soon as possible, and not only when we next get
  // idle time.
  ScheduleAbortTask();
}

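// Aborts all jobs that are not currently running on a background thread.
// Called from AbortTask and from idle time while |abort_| is set.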
void CompilerDispatcher::AbortInactiveJobs() {
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    // Since we schedule two abort tasks per async abort, we might end up
    // here with nothing left to do.
    if (!abort_) return;
  }
  for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
    auto job = it;
    ++it;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (running_background_jobs_.find(job->second.get()) !=
          running_background_jobs_.end()) {
        continue;
      }
    }
    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: aborted ");
      job->second->ShortPrintOnMainThread();
      PrintF("\n");
    }
    it = RemoveJob(job);
  }
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (num_worker_tasks_ == 0) abort_ = false;
  }
}

void CompilerDispatcher::MemoryPressureNotification(
    v8::MemoryPressureLevel level, bool is_isolate_locked) {
  MemoryPressureLevel previous = memory_pressure_level_.Value();
  memory_pressure_level_.SetValue(level);
  // If we're already under pressure, we haven't accepted new tasks meanwhile
  // and can just return. If we're no longer under pressure, we're also done.
  if (previous != MemoryPressureLevel::kNone ||
      level == MemoryPressureLevel::kNone) {
    return;
  }
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received memory pressure notification\n");
  }
  if (is_isolate_locked) {
    AbortAll(BlockingBehavior::kDontBlock);
  } else {
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (abort_) return;
      // By going into abort mode here, and clearing the
      // pending_background_jobs_, we at least keep existing background jobs
      // from picking up more work before the MemoryPressureTask gets executed.
      abort_ = true;
      pending_background_jobs_.clear();
    }
    platform_->CallOnForegroundThread(
        reinterpret_cast<v8::Isolate*>(isolate_),
        new MemoryPressureTask(task_manager_.get(), this));
  }
}

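// Looks up the job for |shared| via the SharedFunctionInfo-to-job-id mapping;
// returns jobs_.end() if there is none.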
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
    Handle<SharedFunctionInfo> shared) const {
  JobId* job_id_ptr = shared_to_unoptimized_job_id_.Find(shared);
  JobMap::const_iterator job = jobs_.end();
  if (job_id_ptr) {
    job = jobs_.find(*job_id_ptr);
    DCHECK(job == jobs_.end() ||
           job->second->AsUnoptimizedCompileJob()->IsAssociatedWith(shared));
  }
  return job;
}

void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  if (!platform_->IdleTasksEnabled(v8_isolate)) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (idle_task_scheduled_) return;
    idle_task_scheduled_ = true;
  }
  platform_->CallIdleOnForegroundThread(
      v8_isolate, new IdleTask(task_manager_.get(), this));
}

void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
  if (jobs_.empty()) return;
  ScheduleIdleTaskFromAnyThread();
}

void CompilerDispatcher::ScheduleAbortTask() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  platform_->CallOnForegroundThread(v8_isolate,
                                    new AbortTask(task_manager_.get(), this));
}

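// If |job|'s next step can run on any thread, queues it for background
// processing and makes sure enough worker tasks are scheduled.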
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
    CompilerDispatcherJob* job) {
  if (!job->NextStepCanRunOnAnyThread()) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    pending_background_jobs_.insert(job);
  }
  ScheduleMoreWorkerTasksIfNeeded();
}

void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (pending_background_jobs_.empty()) return;
    if (platform_->NumberOfWorkerThreads() <= num_worker_tasks_) {
      return;
    }
    ++num_worker_tasks_;
  }
  platform_->CallOnWorkerThread(
      base::make_unique<WorkerTask>(task_manager_.get(), this));
}

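// Worker-thread loop: repeatedly takes a job from the pending set, runs its
// background step, and schedules an idle task for the mandatory main-thread
// follow-up. Also wakes up the main thread if it is blocking on the job, and
// re-schedules an abort task if an abort raced with the last running job.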
void CompilerDispatcher::DoBackgroundWork() {
  for (;;) {
    CompilerDispatcherJob* job = nullptr;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (!pending_background_jobs_.empty()) {
        auto it = pending_background_jobs_.begin();
        job = *it;
        pending_background_jobs_.erase(it);
        running_background_jobs_.insert(job);
      }
    }
    if (job == nullptr) break;

    if (V8_UNLIKELY(block_for_testing_.Value())) {
      block_for_testing_.SetValue(false);
      semaphore_for_testing_.Wait();
    }

    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: doing background work\n");
    }

    DoNextStepOnBackgroundThread(job);
    // Unconditionally schedule an idle task, as all background steps have to
    // be followed by a main thread step.
    ScheduleIdleTaskFromAnyThread();

    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      running_background_jobs_.erase(job);

      if (main_thread_blocking_on_job_ == job) {
        main_thread_blocking_on_job_ = nullptr;
        main_thread_blocking_signal_.NotifyOne();
      }
    }
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    --num_worker_tasks_;

    if (running_background_jobs_.empty() && abort_) {
      // This is the last background job that finished. The abort task
      // scheduled by AbortAll might already have run, so schedule another
      // one to be on the safe side.
      ScheduleAbortTask();
    }
  }
  // Don't touch |this| anymore after this point, as it might have been
  // deleted.
}

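// Idle-time handler: if an abort is pending, cleans up inactive jobs;
// otherwise steps as many jobs as fit into the remaining idle deadline and
// asks for another idle callback if useful work remains.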
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
  bool aborted = false;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    idle_task_scheduled_ = false;
    aborted = abort_;
  }

  if (aborted) {
    AbortInactiveJobs();
    return;
  }

  // Number of jobs that are unlikely to make progress during any idle
  // callback due to their estimated duration.
  size_t too_long_jobs = 0;

  // Iterate over all available jobs & remaining time. For each job, decide
  // whether to 1) skip it (if it would take too long), 2) erase it (if it's
  // finished), or 3) make progress on it.
  double idle_time_in_seconds =
      deadline_in_seconds - platform_->MonotonicallyIncreasingTime();

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
           idle_time_in_seconds *
               static_cast<double>(base::Time::kMillisecondsPerSecond));
  }
  for (auto job = jobs_.cbegin();
       job != jobs_.cend() && idle_time_in_seconds > 0.0;
       idle_time_in_seconds =
           deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
    // Don't work on jobs that are being worked on by background tasks.
    // Similarly, remove jobs we work on from the set of available background
    // jobs.
    std::unique_ptr<base::LockGuard<base::Mutex>> lock(
        new base::LockGuard<base::Mutex>(&mutex_));
    if (running_background_jobs_.find(job->second.get()) !=
        running_background_jobs_.end()) {
      ++job;
      continue;
    }
    auto it = pending_background_jobs_.find(job->second.get());
    double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
    if (idle_time_in_seconds <
        (estimate_in_ms /
         static_cast<double>(base::Time::kMillisecondsPerSecond))) {
      // There's not enough time left for this job. Check whether it would
      // even have fit into a maximally long idle task, to decide whether it
      // is worth asking for another idle callback.
      if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
      if (it == pending_background_jobs_.end()) {
        lock.reset();
        ConsiderJobForBackgroundProcessing(job->second.get());
      }
      ++job;
    } else if (job->second->IsFinished()) {
      DCHECK(it == pending_background_jobs_.end());
      lock.reset();
      job = RemoveJob(job);
      continue;
    } else {
      // Do one step, and keep processing the job (as we don't advance the
      // iterator).
      if (it != pending_background_jobs_.end()) {
        pending_background_jobs_.erase(it);
      }
      lock.reset();
      DoNextStepOnMainThread(isolate_, job->second.get(),
                             ExceptionHandling::kSwallow);
    }
  }
  if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
}

CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
    JobMap::const_iterator job) {
  if (!job->second->IsFinished()) {
    return job;
  }

  if (trace_compiler_dispatcher_) {
    bool result = !job->second->IsFailed();
    PrintF("CompilerDispatcher: finished working on ");
    job->second->ShortPrintOnMainThread();
    PrintF(": %s\n", result ? "success" : "failure");
    tracer_->DumpStatistics();
  }

  return RemoveJob(job);
}

CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
    std::unique_ptr<CompilerDispatcherJob> job) {
  bool added;
  JobMap::const_iterator it;
  std::tie(it, added) =
      jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
  DCHECK(added);

  JobId id = it->first;
  CompilerDispatcherJob* inserted_job = it->second.get();

  // Map the unoptimized job's SharedFunctionInfo to its job id.
  if (inserted_job->type() ==
      CompilerDispatcherJob::Type::kUnoptimizedCompile) {
    Handle<SharedFunctionInfo> shared =
        inserted_job->AsUnoptimizedCompileJob()->shared();
    if (!shared.is_null()) {
      shared_to_unoptimized_job_id_.Set(shared, id);
    }
  }

  return it;
}

CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
    CompilerDispatcher::JobMap::const_iterator it) {
  CompilerDispatcherJob* job = it->second.get();
  job->ResetOnMainThread(isolate_);

  // Remove the unoptimized job's SharedFunctionInfo from the job id mapping.
  if (job->type() == CompilerDispatcherJob::Type::kUnoptimizedCompile) {
    Handle<SharedFunctionInfo> shared =
        job->AsUnoptimizedCompileJob()->shared();
    if (!shared.is_null()) {
      JobId deleted_id;
      shared_to_unoptimized_job_id_.Delete(shared, &deleted_id);
      DCHECK_EQ(it->first, deleted_id);
    }
  }

  it = jobs_.erase(it);
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (num_worker_tasks_ == 0) abort_ = false;
  }
  return it;
}

}  // namespace internal
}  // namespace v8