// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"

#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>

#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/builtins/profile-data-reader.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
#include "src/common/high-allocation-throughput-scope.h"
#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-condition-duplicator.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-optimizer.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-unrolling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-observer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/disassembler.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/parse-info.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/wasm-escape-analysis.h"
#include "src/compiler/wasm-inlining.h"
#include "src/compiler/wasm-loop-peeling.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
namespace compiler {

static constexpr char kCodegenZoneName[] = "codegen-zone";
static constexpr char kGraphZoneName[] = "graph-zone";
static constexpr char kInstructionZoneName[] = "instruction-zone";
static constexpr char kMachineGraphVerifierZoneName[] =
    "machine-graph-verifier-zone";
static constexpr char kPipelineCompilationJobZoneName[] =
    "pipeline-compilation-job-zone";
static constexpr char kRegisterAllocationZoneName[] =
    "register-allocation-zone";
static constexpr char kRegisterAllocatorVerifierZoneName[] =
    "register-allocator-verifier-zone";

namespace {

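// Walks the context chain of {closure} towards the native context; if a
// module context is found first, returns it together with its distance from
// the function's own context, otherwise returns nothing.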
Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
  Context current = closure->context();
  size_t distance = 0;
  while (!current.IsNativeContext()) {
    if (current.IsModuleContext()) {
      return Just(
          OuterContext(handle(current, current.GetIsolate()), distance));
    }
    current = current.previous();
    distance++;
  }
  return Nothing<OuterContext>();
}

}  // anonymous namespace

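// PipelineData owns all state shared between the pipeline phases: the
// compilation info, the heap broker, and a set of zones (graph, instruction,
// codegen, register allocation) that are torn down individually once the
// corresponding phases no longer need them.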
class PipelineData {
 public:
  // For main entry point.
  PipelineData(ZoneStats* zone_stats, Isolate* isolate,
               OptimizedCompilationInfo* info,
               PipelineStatistics* pipeline_statistics)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        broker_(new JSHeapBroker(isolate_, info_->zone(),
                                 info_->trace_heap_broker(),
                                 info->code_kind())),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {
    PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
    graph_ = graph_zone_->New<Graph>(graph_zone_);
    source_positions_ = graph_zone_->New<SourcePositionTable>(graph_);
    node_origins_ = info->trace_turbo_json()
                        ? graph_zone_->New<NodeOriginTable>(graph_)
                        : nullptr;
    simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
    machine_ = graph_zone_->New<MachineOperatorBuilder>(
        graph_zone_, MachineType::PointerRepresentation(),
        InstructionSelector::SupportedMachineOperatorFlags(),
        InstructionSelector::AlignmentRequirements());
    common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
    javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
    jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
                                         simplified_, machine_);
    observe_node_manager_ =
        info->node_observer()
            ? graph_zone_->New<ObserveNodeManager>(graph_zone_)
            : nullptr;
    dependencies_ =
        info_->zone()->New<CompilationDependencies>(broker_, info_->zone());
  }

#if V8_ENABLE_WEBASSEMBLY
  // For WebAssembly compile entry point.
  PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
               OptimizedCompilationInfo* info, MachineGraph* mcgraph,
               PipelineStatistics* pipeline_statistics,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins,
               const AssemblerOptions& assembler_options)
      : isolate_(nullptr),
        wasm_engine_(wasm_engine),
        allocator_(wasm_engine->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(mcgraph->graph()),
        source_positions_(source_positions),
        node_origins_(node_origins),
        machine_(mcgraph->machine()),
        common_(mcgraph->common()),
        mcgraph_(mcgraph),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(assembler_options) {
    simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
    javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
    jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_, javascript_,
                                         simplified_, machine_);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  // For CodeStubAssembler and machine graph testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, AccountingAllocator* allocator, Graph* graph,
               JSGraph* jsgraph, Schedule* schedule,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
               const AssemblerOptions& assembler_options,
               const ProfileDataFromFile* profile_data)
      : isolate_(isolate),
#if V8_ENABLE_WEBASSEMBLY
        // TODO(clemensb): Remove this field, use GetWasmEngine directly
        // instead.
        wasm_engine_(wasm::GetWasmEngine()),
#endif  // V8_ENABLE_WEBASSEMBLY
        allocator_(allocator),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(graph),
        source_positions_(source_positions),
        node_origins_(node_origins),
        schedule_(schedule),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        jump_optimization_info_(jump_opt),
        assembler_options_(assembler_options),
        profile_data_(profile_data) {
    if (jsgraph) {
      jsgraph_ = jsgraph;
      simplified_ = jsgraph->simplified();
      machine_ = jsgraph->machine();
      common_ = jsgraph->common();
      javascript_ = jsgraph->javascript();
    } else {
      simplified_ = graph_zone_->New<SimplifiedOperatorBuilder>(graph_zone_);
      machine_ = graph_zone_->New<MachineOperatorBuilder>(
          graph_zone_, MachineType::PointerRepresentation(),
          InstructionSelector::SupportedMachineOperatorFlags(),
          InstructionSelector::AlignmentRequirements());
      common_ = graph_zone_->New<CommonOperatorBuilder>(graph_zone_);
      javascript_ = graph_zone_->New<JSOperatorBuilder>(graph_zone_);
      jsgraph_ = graph_zone_->New<JSGraph>(isolate_, graph_, common_,
                                           javascript_, simplified_, machine_);
    }
  }

  // For register allocation testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, InstructionSequence* sequence)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
        instruction_zone_scope_(zone_stats_, kInstructionZoneName),
        instruction_zone_(sequence->zone()),
        sequence_(sequence),
        codegen_zone_scope_(zone_stats_, kCodegenZoneName),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_,
                                        kRegisterAllocationZoneName),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {}

  ~PipelineData() {
    // Must happen before zones are destroyed.
    delete code_generator_;
    code_generator_ = nullptr;
    DeleteTyper();
    DeleteRegisterAllocationZone();
    DeleteInstructionZone();
    DeleteCodegenZone();
    DeleteGraphZone();
  }

  PipelineData(const PipelineData&) = delete;
  PipelineData& operator=(const PipelineData&) = delete;

  Isolate* isolate() const { return isolate_; }
  AccountingAllocator* allocator() const { return allocator_; }
  OptimizedCompilationInfo* info() const { return info_; }
  ZoneStats* zone_stats() const { return zone_stats_; }
  CompilationDependencies* dependencies() const { return dependencies_; }
  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }
  bool compilation_failed() const { return compilation_failed_; }
  void set_compilation_failed() { compilation_failed_ = true; }

  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

  MaybeHandle<Code> code() { return code_; }
  void set_code(MaybeHandle<Code> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

  CodeGenerator* code_generator() const { return code_generator_; }

  // RawMachineAssembler generally produces graphs which cannot be verified.
  bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; }

  Zone* graph_zone() const { return graph_zone_; }
  Graph* graph() const { return graph_; }
  SourcePositionTable* source_positions() const { return source_positions_; }
  NodeOriginTable* node_origins() const { return node_origins_; }
  MachineOperatorBuilder* machine() const { return machine_; }
  CommonOperatorBuilder* common() const { return common_; }
  JSOperatorBuilder* javascript() const { return javascript_; }
  JSGraph* jsgraph() const { return jsgraph_; }
  MachineGraph* mcgraph() const { return mcgraph_; }
  Handle<NativeContext> native_context() const {
    return handle(info()->native_context(), isolate());
  }
  Handle<JSGlobalObject> global_object() const {
    return handle(info()->global_object(), isolate());
  }

  JSHeapBroker* broker() const { return broker_; }
  std::unique_ptr<JSHeapBroker> ReleaseBroker() {
    std::unique_ptr<JSHeapBroker> broker(broker_);
    broker_ = nullptr;
    return broker;
  }

  Schedule* schedule() const { return schedule_; }
  void set_schedule(Schedule* schedule) {
    DCHECK(!schedule_);
    schedule_ = schedule;
  }
  void reset_schedule() { schedule_ = nullptr; }

  ObserveNodeManager* observe_node_manager() const {
    return observe_node_manager_;
  }

  Zone* instruction_zone() const { return instruction_zone_; }
  Zone* codegen_zone() const { return codegen_zone_; }
  InstructionSequence* sequence() const { return sequence_; }
  Frame* frame() const { return frame_; }

  Zone* register_allocation_zone() const { return register_allocation_zone_; }

  RegisterAllocationData* register_allocation_data() const {
    return register_allocation_data_;
  }
  TopTierRegisterAllocationData* top_tier_register_allocation_data() const {
    return TopTierRegisterAllocationData::cast(register_allocation_data_);
  }
  MidTierRegisterAllocationData* mid_tier_register_allocator_data() const {
    return MidTierRegisterAllocationData::cast(register_allocation_data_);
  }

  std::string const& source_position_output() const {
    return source_position_output_;
  }
  void set_source_position_output(std::string const& source_position_output) {
    source_position_output_ = source_position_output;
  }

  JumpOptimizationInfo* jump_optimization_info() const {
    return jump_optimization_info_;
  }

  const AssemblerOptions& assembler_options() const {
    return assembler_options_;
  }

  void ChooseSpecializationContext() {
    if (info()->function_context_specializing()) {
      DCHECK(info()->has_context());
      specialization_context_ =
          Just(OuterContext(handle(info()->context(), isolate()), 0));
    } else {
      specialization_context_ = GetModuleContext(info()->closure());
    }
  }

  Maybe<OuterContext> specialization_context() const {
    return specialization_context_;
  }

  size_t* address_of_max_unoptimized_frame_height() {
    return &max_unoptimized_frame_height_;
  }
  size_t max_unoptimized_frame_height() const {
    return max_unoptimized_frame_height_;
  }
  size_t* address_of_max_pushed_argument_count() {
    return &max_pushed_argument_count_;
  }
  size_t max_pushed_argument_count() const {
    return max_pushed_argument_count_;
  }

  CodeTracer* GetCodeTracer() const {
#if V8_ENABLE_WEBASSEMBLY
    if (wasm_engine_) return wasm_engine_->GetCodeTracer();
#endif  // V8_ENABLE_WEBASSEMBLY
    return isolate_->GetCodeTracer();
  }

  Typer* CreateTyper() {
    DCHECK_NULL(typer_);
    typer_ =
        new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
    return typer_;
  }

  void AddTyperFlag(Typer::Flag flag) {
    DCHECK_NULL(typer_);
    typer_flags_ |= flag;
  }

  void DeleteTyper() {
    delete typer_;
    typer_ = nullptr;
  }

  void DeleteGraphZone() {
    if (graph_zone_ == nullptr) return;
    graph_zone_scope_.Destroy();
    graph_zone_ = nullptr;
    graph_ = nullptr;
    source_positions_ = nullptr;
    node_origins_ = nullptr;
    simplified_ = nullptr;
    machine_ = nullptr;
    common_ = nullptr;
    javascript_ = nullptr;
    jsgraph_ = nullptr;
    mcgraph_ = nullptr;
    schedule_ = nullptr;
  }

  void DeleteInstructionZone() {
    if (instruction_zone_ == nullptr) return;
    instruction_zone_scope_.Destroy();
    instruction_zone_ = nullptr;
    sequence_ = nullptr;
  }

  void DeleteCodegenZone() {
    if (codegen_zone_ == nullptr) return;
    codegen_zone_scope_.Destroy();
    codegen_zone_ = nullptr;
    dependencies_ = nullptr;
    delete broker_;
    broker_ = nullptr;
    frame_ = nullptr;
  }

  void DeleteRegisterAllocationZone() {
    if (register_allocation_zone_ == nullptr) return;
    register_allocation_zone_scope_.Destroy();
    register_allocation_zone_ = nullptr;
    register_allocation_data_ = nullptr;
  }

  void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
    DCHECK_NULL(sequence_);
    InstructionBlocks* instruction_blocks =
        InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                  schedule());
    sequence_ = instruction_zone()->New<InstructionSequence>(
        isolate(), instruction_zone(), instruction_blocks);
    if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
      sequence_->instruction_blocks()[0]->mark_needs_frame();
    } else {
      DCHECK(call_descriptor->CalleeSavedFPRegisters().is_empty());
    }
  }

  void InitializeFrameData(CallDescriptor* call_descriptor) {
    DCHECK_NULL(frame_);
    int fixed_frame_size = 0;
    if (call_descriptor != nullptr) {
      fixed_frame_size =
          call_descriptor->CalculateFixedFrameSize(info()->code_kind());
    }
    frame_ = codegen_zone()->New<Frame>(fixed_frame_size);
    if (osr_helper_.has_value()) osr_helper()->SetupFrame(frame());
  }

  void InitializeTopTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor,
      RegisterAllocationFlags flags) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<TopTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(), flags,
            &info()->tick_counter(), debug_name());
  }

  void InitializeMidTierRegisterAllocationData(
      const RegisterConfiguration* config, CallDescriptor* call_descriptor) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ =
        register_allocation_zone()->New<MidTierRegisterAllocationData>(
            config, register_allocation_zone(), frame(), sequence(),
            &info()->tick_counter(), debug_name());
  }

  void InitializeOsrHelper() {
    DCHECK(!osr_helper_.has_value());
    osr_helper_.emplace(info());
  }

  void set_start_source_position(int position) {
    DCHECK_EQ(start_source_position_, kNoSourcePosition);
    start_source_position_ = position;
  }

  void InitializeCodeGenerator(Linkage* linkage) {
    DCHECK_NULL(code_generator_);
    code_generator_ = new CodeGenerator(
        codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
        osr_helper_, start_source_position_, jump_optimization_info_,
        assembler_options(), info_->builtin(), max_unoptimized_frame_height(),
        max_pushed_argument_count(),
        FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
  }

  void BeginPhaseKind(const char* phase_kind_name) {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
    }
  }

  void EndPhaseKind() {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->EndPhaseKind();
    }
  }

  const char* debug_name() const { return debug_name_.get(); }

  const ProfileDataFromFile* profile_data() const { return profile_data_; }
  void set_profile_data(const ProfileDataFromFile* profile_data) {
    profile_data_ = profile_data;
  }

  // RuntimeCallStats that is only available during job execution but not
  // finalization.
  // TODO(delphick): Currently even during execution this can be nullptr, due
  // to JSToWasmWrapperCompilationUnit::Execute. Once a table can be extracted
  // there, this method can DCHECK that it is never nullptr.
  RuntimeCallStats* runtime_call_stats() const { return runtime_call_stats_; }
  void set_runtime_call_stats(RuntimeCallStats* stats) {
    runtime_call_stats_ = stats;
  }

  // Used to skip the "wasm-inlining" phase when there are no JS-to-Wasm calls.
  bool has_js_wasm_calls() const { return has_js_wasm_calls_; }
  void set_has_js_wasm_calls(bool has_js_wasm_calls) {
    has_js_wasm_calls_ = has_js_wasm_calls;
  }

 private:
  Isolate* const isolate_;
#if V8_ENABLE_WEBASSEMBLY
  wasm::WasmEngine* const wasm_engine_ = nullptr;
#endif  // V8_ENABLE_WEBASSEMBLY
  AccountingAllocator* const allocator_;
  OptimizedCompilationInfo* const info_;
  std::unique_ptr<char[]> debug_name_;
  bool may_have_unverifiable_graph_ = true;
  ZoneStats* const zone_stats_;
  PipelineStatistics* pipeline_statistics_ = nullptr;
  bool compilation_failed_ = false;
  bool verify_graph_ = false;
  int start_source_position_ = kNoSourcePosition;
  base::Optional<OsrHelper> osr_helper_;
  MaybeHandle<Code> code_;
  CodeGenerator* code_generator_ = nullptr;
  Typer* typer_ = nullptr;
  Typer::Flags typer_flags_ = Typer::kNoFlags;

  // All objects in the following group of fields are allocated in graph_zone_.
  // They are all set to nullptr when the graph_zone_ is destroyed.
  ZoneStats::Scope graph_zone_scope_;
  Zone* graph_zone_ = nullptr;
  Graph* graph_ = nullptr;
  SourcePositionTable* source_positions_ = nullptr;
  NodeOriginTable* node_origins_ = nullptr;
  SimplifiedOperatorBuilder* simplified_ = nullptr;
  MachineOperatorBuilder* machine_ = nullptr;
  CommonOperatorBuilder* common_ = nullptr;
  JSOperatorBuilder* javascript_ = nullptr;
  JSGraph* jsgraph_ = nullptr;
  MachineGraph* mcgraph_ = nullptr;
  Schedule* schedule_ = nullptr;
  ObserveNodeManager* observe_node_manager_ = nullptr;

  // All objects in the following group of fields are allocated in
  // instruction_zone_. They are all set to nullptr when the instruction_zone_
  // is destroyed.
  ZoneStats::Scope instruction_zone_scope_;
  Zone* instruction_zone_;
  InstructionSequence* sequence_ = nullptr;

  // All objects in the following group of fields are allocated in
  // codegen_zone_. They are all set to nullptr when the codegen_zone_
  // is destroyed.
  ZoneStats::Scope codegen_zone_scope_;
  Zone* codegen_zone_;
  CompilationDependencies* dependencies_ = nullptr;
  JSHeapBroker* broker_ = nullptr;
  Frame* frame_ = nullptr;

  // All objects in the following group of fields are allocated in
  // register_allocation_zone_. They are all set to nullptr when the zone is
  // destroyed.
  ZoneStats::Scope register_allocation_zone_scope_;
  Zone* register_allocation_zone_;
  RegisterAllocationData* register_allocation_data_ = nullptr;

  // Source position output for --trace-turbo.
  std::string source_position_output_;

  JumpOptimizationInfo* jump_optimization_info_ = nullptr;
  AssemblerOptions assembler_options_;
  Maybe<OuterContext> specialization_context_ = Nothing<OuterContext>();

  // The maximal combined height of all inlined frames in their unoptimized
  // state, and the maximal number of arguments pushed during function calls.
  // Calculated during instruction selection, applied during code generation.
  size_t max_unoptimized_frame_height_ = 0;
  size_t max_pushed_argument_count_ = 0;

  RuntimeCallStats* runtime_call_stats_ = nullptr;
  const ProfileDataFromFile* profile_data_ = nullptr;

  bool has_js_wasm_calls_ = false;
};

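// PipelineImpl wires the individual phases together over a shared
// PipelineData and exposes the coarse-grained steps (graph creation,
// optimization, instruction selection, code assembly, finalization) that the
// compilation jobs below drive in order.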
class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases.
  template <typename Phase, typename... Args>
  void Run(Args&&... args);

  // Step A.1. Initialize the heap broker.
  void InitializeHeapBroker();

  // Step A.2. Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // Step B. Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Substep B.1. Produce a scheduled graph.
  void ComputeScheduledGraph();

  // Substep B.2. Select instructions from a scheduled graph.
  bool SelectInstructions(Linkage* linkage);

  // Step C. Run the code assembly pass.
  void AssembleCode(Linkage* linkage);

  // Step D. Run the code finalization pass.
  MaybeHandle<Code> FinalizeCode(bool retire_broker = true);

  // Step E. Install any code dependencies.
  bool CommitDependencies(Handle<Code> code);

  void VerifyGeneratedCodeIsIdempotent();
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  bool SelectInstructionsAndAssemble(CallDescriptor* call_descriptor);
  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegistersForTopTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);
  void AllocateRegistersForMidTier(const RegisterConfiguration* config,
                                   CallDescriptor* call_descriptor,
                                   bool run_verifier);

  OptimizedCompilationInfo* info() const;
  Isolate* isolate() const;
  CodeGenerator* code_generator() const;

  ObserveNodeManager* observe_node_manager() const;

 private:
  PipelineData* const data_;
};

namespace {

class SourcePositionWrapper final : public Reducer {
 public:
  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
      : reducer_(reducer), table_(table) {}
  ~SourcePositionWrapper() final = default;
  SourcePositionWrapper(const SourcePositionWrapper&) = delete;
  SourcePositionWrapper& operator=(const SourcePositionWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    SourcePosition const pos = table_->GetSourcePosition(node);
    SourcePositionTable::Scope position(table_, pos);
    return reducer_->Reduce(node, nullptr);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  SourcePositionTable* const table_;
};

class NodeOriginsWrapper final : public Reducer {
 public:
  NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
      : reducer_(reducer), table_(table) {}
  ~NodeOriginsWrapper() final = default;
  NodeOriginsWrapper(const NodeOriginsWrapper&) = delete;
  NodeOriginsWrapper& operator=(const NodeOriginsWrapper&) = delete;

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    NodeOriginTable::Scope position(table_, reducer_name(), node);
    return reducer_->Reduce(node, nullptr);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  NodeOriginTable* const table_;
};

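// Activates the per-phase bookkeeping for one pipeline phase: a PhaseScope
// for pipeline statistics, a temporary zone, a node-origin phase scope and,
// when runtime call stats are enabled, a runtime call timer.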
class V8_NODISCARD PipelineRunScope {
 public:
#ifdef V8_RUNTIME_CALL_STATS
  PipelineRunScope(
      PipelineData* data, const char* phase_name,
      RuntimeCallCounterId runtime_call_counter_id,
      RuntimeCallStats::CounterMode counter_mode = RuntimeCallStats::kExact)
      : phase_scope_(data->pipeline_statistics(), phase_name),
        zone_scope_(data->zone_stats(), phase_name),
        origin_scope_(data->node_origins(), phase_name),
        runtime_call_timer_scope(data->runtime_call_stats(),
                                 runtime_call_counter_id, counter_mode) {
    DCHECK_NOT_NULL(phase_name);
  }
#else   // V8_RUNTIME_CALL_STATS
  PipelineRunScope(PipelineData* data, const char* phase_name)
      : phase_scope_(data->pipeline_statistics(), phase_name),
        zone_scope_(data->zone_stats(), phase_name),
        origin_scope_(data->node_origins(), phase_name) {
    DCHECK_NOT_NULL(phase_name);
  }
#endif  // V8_RUNTIME_CALL_STATS

  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
  NodeOriginTable::PhaseScope origin_scope_;
#ifdef V8_RUNTIME_CALL_STATS
  RuntimeCallTimerScope runtime_call_timer_scope;
#endif  // V8_RUNTIME_CALL_STATS
};

// LocalIsolateScope encapsulates the phase where persistent handles are
// attached to the LocalHeap inside {local_isolate}.
class V8_NODISCARD LocalIsolateScope {
 public:
  explicit LocalIsolateScope(JSHeapBroker* broker,
                             OptimizedCompilationInfo* info,
                             LocalIsolate* local_isolate)
      : broker_(broker), info_(info) {
    broker_->AttachLocalIsolate(info_, local_isolate);
    info_->tick_counter().AttachLocalHeap(local_isolate->heap());
  }

  ~LocalIsolateScope() {
    info_->tick_counter().DetachLocalHeap();
    broker_->DetachLocalIsolate(info_);
  }

 private:
  JSHeapBroker* broker_;
  OptimizedCompilationInfo* info_;
};

void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
                         int source_id, Handle<SharedFunctionInfo> shared) {
  if (!shared->script().IsUndefined(isolate)) {
    Handle<Script> script(Script::cast(shared->script()), isolate);

    if (!script->source().IsUndefined(isolate)) {
      CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
      Object source_name = script->name();
      auto& os = tracing_scope.stream();
      os << "--- FUNCTION SOURCE (";
      if (source_name.IsString()) {
        os << String::cast(source_name).ToCString().get() << ":";
      }
      os << shared->DebugNameCStr().get() << ") id{";
      os << info->optimization_id() << "," << source_id << "} start{";
      os << shared->StartPosition() << "} ---\n";
      {
        DisallowGarbageCollection no_gc;
        int start = shared->StartPosition();
        int len = shared->EndPosition() - start;
        SubStringRange source(String::cast(script->source()), no_gc, start,
                              len);
        for (auto c : source) {
          os << AsReversiblyEscapedUC16(c);
        }
      }

      os << "\n--- END ---\n";
    }
  }
}

// Print information for the given inlining: which function was inlined and
// where the inlining occurred.
void PrintInlinedFunctionInfo(
    OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
    int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
  CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
  auto& os = tracing_scope.stream();
  os << "INLINE (" << h.shared_info->DebugNameCStr().get() << ") id{"
     << info->optimization_id() << "," << source_id << "} AS " << inlining_id
     << " AT ";
  const SourcePosition position = h.position.position;
  if (position.IsKnown()) {
    os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
  } else {
    os << "<?>";
  }
  os << std::endl;
}

// Print the source of all functions that participated in this optimizing
// compilation. For inlined functions, print the source position of their
// inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
                              Isolate* isolate) {
  SourceIdAssigner id_assigner(info->inlined_functions().size());
  PrintFunctionSource(info, isolate, -1, info->shared_info());
  const auto& inlined = info->inlined_functions();
  for (unsigned id = 0; id < inlined.size(); id++) {
    const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
    PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
    PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
  }
}

// Print the code after compiling it.
void PrintCode(Isolate* isolate, Handle<Code> code,
               OptimizedCompilationInfo* info) {
  if (FLAG_print_opt_source && info->IsOptimizing()) {
    PrintParticipatingSource(info, isolate);
  }

#ifdef ENABLE_DISASSEMBLER
  const bool print_code =
      FLAG_print_code ||
      (info->IsOptimizing() && FLAG_print_opt_code &&
       info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
  if (print_code) {
    std::unique_ptr<char[]> debug_name = info->GetDebugName();
    CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
    auto& os = tracing_scope.stream();

    // Print the source code if available.
    const bool print_source = info->IsOptimizing();
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      if (shared->script().IsScript() &&
          !Script::cast(shared->script()).source().IsUndefined(isolate)) {
        os << "--- Raw source ---\n";
        StringCharacterStream stream(
            String::cast(Script::cast(shared->script()).source()),
            shared->StartPosition());
        // fun->end_position() points to the last character in the stream. We
        // need to compensate by adding one to calculate the length.
        int source_len = shared->EndPosition() - shared->StartPosition() + 1;
        for (int i = 0; i < source_len; i++) {
          if (stream.HasMore()) {
            os << AsReversiblyEscapedUC16(stream.GetNext());
          }
        }
        os << "\n\n";
      }
    }
    if (info->IsOptimizing()) {
      os << "--- Optimized code ---\n"
         << "optimization_id = " << info->optimization_id() << "\n";
    } else {
      os << "--- Code ---\n";
    }
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      os << "source_position = " << shared->StartPosition() << "\n";
    }
    code->Disassemble(debug_name.get(), os, isolate);
    os << "--- End code ---\n";
  }
#endif  // ENABLE_DISASSEMBLER
}

void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
                            Schedule* schedule, const char* phase_name) {
  RCS_SCOPE(data->runtime_call_stats(),
            RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
            RuntimeCallStats::kThreadSpecific);
  TRACE_EVENT0(PipelineStatistics::kTraceCategory, "V8.TraceScheduleAndVerify");
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
            << ",\"data\":\"";
    std::stringstream schedule_stream;
    schedule_stream << *schedule;
    std::string schedule_string(schedule_stream.str());
    for (const auto& c : schedule_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\"},\n";
  }
  if (info->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "-- Schedule --------------------------------------\n"
        << *schedule;
  }

  if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
}

void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
                Reducer* reducer) {
  if (data->info()->source_positions()) {
    SourcePositionWrapper* const wrapper =
        data->graph_zone()->New<SourcePositionWrapper>(
            reducer, data->source_positions());
    reducer = wrapper;
  }
  if (data->info()->trace_turbo_json()) {
    NodeOriginsWrapper* const wrapper =
        data->graph_zone()->New<NodeOriginsWrapper>(reducer,
                                                    data->node_origins());
    reducer = wrapper;
  }

  graph_reducer->AddReducer(reducer);
}

PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
                                             OptimizedCompilationInfo* info,
                                             Isolate* isolate,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.turbofan"),
                                     &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics =
        new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.TFInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\" : ";
    JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
                            info->shared_info());
    json_of << ",\n\"phases\":[";
  }

  return pipeline_statistics;
}

#if V8_ENABLE_WEBASSEMBLY
PipelineStatistics* CreatePipelineStatistics(
    wasm::FunctionBody function_body, const wasm::WasmModule* wasm_module,
    OptimizedCompilationInfo* info, ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  bool tracing_enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan"), &tracing_enabled);
  if (tracing_enabled || FLAG_turbo_stats_wasm) {
    pipeline_statistics = new PipelineStatistics(
        info, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("V8.WasmInitializing");
  }

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    std::unique_ptr<char[]> function_name = info->GetDebugName();
    json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
    AccountingAllocator allocator;
    std::ostringstream disassembly;
    std::vector<int> source_positions;
    wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
                           wasm::kPrintLocals, disassembly, &source_positions);
    for (const auto& c : disassembly.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
    bool insert_comma = false;
    for (auto val : source_positions) {
      if (insert_comma) {
        json_of << ", ";
      }
      json_of << val;
      insert_comma = true;
    }
    json_of << "],\n\"phases\":[";
  }

  return pipeline_statistics;
}
#endif  // V8_ENABLE_WEBASSEMBLY

}  // namespace

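// Compilation job for ordinary JavaScript functions. Prepare and finalize run
// on the main thread, while Execute may run on a background thread (see the
// RuntimeCallStats handling below).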
class PipelineCompilationJob final : public TurbofanCompilationJob {
 public:
  PipelineCompilationJob(Isolate* isolate,
                         Handle<SharedFunctionInfo> shared_info,
                         Handle<JSFunction> function, BytecodeOffset osr_offset,
                         JavaScriptFrame* osr_frame, CodeKind code_kind);
  ~PipelineCompilationJob() final;
  PipelineCompilationJob(const PipelineCompilationJob&) = delete;
  PipelineCompilationJob& operator=(const PipelineCompilationJob&) = delete;

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl(RuntimeCallStats* stats,
                        LocalIsolate* local_isolate) final;
  Status FinalizeJobImpl(Isolate* isolate) final;

  // Registers weak objects embedded in the optimized code as dependencies.
  void RegisterWeakObjectsInOptimizedCode(Isolate* isolate,
                                          Handle<NativeContext> context,
                                          Handle<Code> code);

 private:
  Zone zone_;
  ZoneStats zone_stats_;
  OptimizedCompilationInfo compilation_info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;
};

PipelineCompilationJob::PipelineCompilationJob(
    Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
    Handle<JSFunction> function, BytecodeOffset osr_offset,
    JavaScriptFrame* osr_frame, CodeKind code_kind)
    // Note that the OptimizedCompilationInfo is not initialized at the time
    // we pass it to the CompilationJob constructor, but it is not
    // dereferenced there.
    : TurbofanCompilationJob(&compilation_info_,
                             CompilationJob::State::kReadyToPrepare),
      zone_(isolate->allocator(), kPipelineCompilationJobZoneName),
      zone_stats_(isolate->allocator()),
      compilation_info_(&zone_, isolate, shared_info, function, code_kind,
                        osr_offset, osr_frame),
      pipeline_statistics_(CreatePipelineStatistics(
          handle(Script::cast(shared_info->script()), isolate),
          compilation_info(), isolate, &zone_stats_)),
      data_(&zone_stats_, isolate, compilation_info(),
            pipeline_statistics_.get()),
      pipeline_(&data_),
      linkage_(nullptr) {}

PipelineCompilationJob::~PipelineCompilationJob() = default;

namespace {
// Ensure that the RuntimeCallStats table is set on the PipelineData for the
// duration of the job phase and unset immediately afterwards. Each job needs
// to set the correct RuntimeCallStats table depending on whether it is
// running on a background or foreground thread.
class V8_NODISCARD PipelineJobScope {
 public:
  PipelineJobScope(PipelineData* data, RuntimeCallStats* stats) : data_(data) {
    data_->set_runtime_call_stats(stats);
  }

  ~PipelineJobScope() { data_->set_runtime_call_stats(nullptr); }

 private:
  HighAllocationThroughputScope high_throughput_scope_{
      V8::GetCurrentPlatform()};
  PipelineData* data_;
};
}  // namespace

PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of the main thread is available
  // for phases happening during PrepareJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());

  if (compilation_info()->bytecode_array()->length() >
      FLAG_max_optimized_bytecode_size) {
    return AbortOptimization(BailoutReason::kFunctionTooBig);
  }

  if (!FLAG_always_opt) {
    compilation_info()->set_bailout_on_uninitialized();
  }
  if (FLAG_turbo_loop_peeling) {
    compilation_info()->set_loop_peeling();
  }
  if (FLAG_turbo_inlining) {
    compilation_info()->set_inlining();
  }
  if (FLAG_turbo_allocation_folding) {
    compilation_info()->set_allocation_folding();
  }

  // Determine whether to specialize the code for the function's context.
  // We can't do this in the case of OSR, because we want to cache the
  // generated code on the native context keyed on SharedFunctionInfo.
  // TODO(mythria): Check if it is better to key the OSR cache on JSFunction
  // and allow context specialization for OSR code.
  if (compilation_info()->closure()->raw_feedback_cell().map() ==
          ReadOnlyRoots(isolate).one_closure_cell_map() &&
      !compilation_info()->is_osr()) {
    compilation_info()->set_function_context_specializing();
    data_.ChooseSpecializationContext();
  }

  if (compilation_info()->source_positions()) {
    SharedFunctionInfo::EnsureSourcePositionsAvailable(
        isolate, compilation_info()->shared_info());
  }

  data_.set_start_source_position(
      compilation_info()->shared_info()->StartPosition());

  linkage_ = compilation_info()->zone()->New<Linkage>(
      Linkage::ComputeIncoming(compilation_info()->zone(),
                               compilation_info()));

  if (compilation_info()->is_osr()) data_.InitializeOsrHelper();

  // InitializeHeapBroker() and CreateGraph() may already use
  // IsPendingAllocation.
  isolate->heap()->PublishPendingAllocations();

  pipeline_.InitializeHeapBroker();

  // Serialization may have allocated.
  isolate->heap()->PublishPendingAllocations();

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
    RuntimeCallStats* stats, LocalIsolate* local_isolate) {
  // Ensure that the RuntimeCallStats table is only available during execution
  // and not during finalization as that might be on a different thread.
  PipelineJobScope scope(&data_, stats);
  LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
                                        local_isolate);

  if (!pipeline_.CreateGraph()) {
    return AbortOptimization(BailoutReason::kGraphBuildingFailed);
  }

  // We selectively Unpark inside OptimizeGraph.
  if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;

  pipeline_.AssembleCode(linkage_);

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
  // Ensure that the RuntimeCallStats table of the main thread is available
  // for phases happening during FinalizeJob.
  PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
  RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
  MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
      return AbortOptimization(BailoutReason::kCodeGenerationFailed);
    }
    return FAILED;
  }
  if (!pipeline_.CommitDependencies(code)) {
    return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
  }

  compilation_info()->SetCode(code);
  Handle<NativeContext> context(compilation_info()->native_context(), isolate);
  if (CodeKindCanDeoptimize(code->kind())) {
    context->AddOptimizedCode(ToCodeT(*code));
  }
  RegisterWeakObjectsInOptimizedCode(isolate, context, code);
  return SUCCEEDED;
}

void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
    Isolate* isolate, Handle<NativeContext> context, Handle<Code> code) {
  std::vector<Handle<Map>> maps;
  DCHECK(code->is_optimized_code());
  {
    DisallowGarbageCollection no_gc;
    PtrComprCageBase cage_base(isolate);
    int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
    for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
      DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
      HeapObject target_object = it.rinfo()->target_object(cage_base);
      if (code->IsWeakObjectInOptimizedCode(target_object)) {
        if (target_object.IsMap(cage_base)) {
          maps.push_back(handle(Map::cast(target_object), isolate));
        }
      }
    }
  }
  for (Handle<Map> map : maps) {
    isolate->heap()->AddRetainedMap(context, map);
  }
  code->set_can_have_weak_objects(true);
}

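// Runs a single phase under a PipelineRunScope, which provides the
// phase-local temporary zone and the tracing/statistics scopes. Phases are
// invoked as, e.g., Run<GraphBuilderPhase>(); any extra arguments are
// forwarded to the phase's Run method.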
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
#ifdef V8_RUNTIME_CALL_STATS
  PipelineRunScope scope(this->data_, Phase::phase_name(),
                         Phase::kRuntimeCallCounterId, Phase::kCounterMode);
#else
  PipelineRunScope scope(this->data_, Phase::phase_name());
#endif
  Phase phase;
  phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}

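// Each phase struct declares its name and (when runtime call stats are
// enabled) its counter id and counter mode via the macros below; for example,
// DECL_PIPELINE_PHASE_CONSTANTS(Inlining) yields phase_name() ==
// "V8.TFInlining" and kRuntimeCallCounterId ==
// RuntimeCallCounterId::kOptimizeInlining.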
#ifdef V8_RUNTIME_CALL_STATS
#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode)        \
  static const char* phase_name() { return "V8.TF" #Name; }     \
  static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
      RuntimeCallCounterId::kOptimize##Name;                    \
  static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;
#else  // V8_RUNTIME_CALL_STATS
#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode) \
  static const char* phase_name() { return "V8.TF" #Name; }
#endif  // V8_RUNTIME_CALL_STATS

#define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
  DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kThreadSpecific)

#define DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Name) \
  DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kExact)

struct GraphBuilderPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(BytecodeGraphBuilder)

  void Run(PipelineData* data, Zone* temp_zone) {
    BytecodeGraphBuilderFlags flags;
    if (data->info()->analyze_environment_liveness()) {
      flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
    }
    if (data->info()->bailout_on_uninitialized()) {
      flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
    }

    JSFunctionRef closure = MakeRef(data->broker(), data->info()->closure());
    CallFrequency frequency(1.0f);
    BuildGraphFromBytecode(
        data->broker(), temp_zone, closure.shared(),
        closure.raw_feedback_cell(data->dependencies()),
        data->info()->osr_offset(), data->jsgraph(), frequency,
        data->source_positions(), SourcePosition::kNotInlined,
        data->info()->code_kind(), flags, &data->info()->tick_counter(),
        ObserveNodeInfo{data->observe_node_manager(),
                        data->info()->node_observer()});
  }
};

struct InliningPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(Inlining)

  void Run(PipelineData* data, Zone* temp_zone) {
    OptimizedCompilationInfo* info = data->info();
    GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
                               data->broker(), data->jsgraph()->Dead(),
                               data->observe_node_manager());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(
        &graph_reducer, data->graph(), data->broker(), data->common(),
        data->machine(), temp_zone, BranchSemantics::kJS);
    JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
    if (data->info()->bailout_on_uninitialized()) {
      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
    }
    if (data->info()->inline_js_wasm_calls() && data->info()->inlining()) {
      call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
    }
    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
                               temp_zone, call_reducer_flags);
    JSContextSpecialization context_specialization(
        &graph_reducer, data->jsgraph(), data->broker(),
        data->specialization_context(),
        data->info()->function_context_specializing()
            ? data->info()->closure()
            : MaybeHandle<JSFunction>());
    JSNativeContextSpecialization::Flags flags =
        JSNativeContextSpecialization::kNoFlags;
    if (data->info()->bailout_on_uninitialized()) {
      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
    }
    // Passing the OptimizedCompilationInfo's shared zone here as
    // JSNativeContextSpecialization allocates out-of-heap objects
    // that need to live until code generation.
    JSNativeContextSpecialization native_context_specialization(
        &graph_reducer, data->jsgraph(), data->broker(), flags,
        data->dependencies(), temp_zone, info->zone());
    JSInliningHeuristic inlining(
        &graph_reducer, temp_zone, data->info(), data->jsgraph(),
        data->broker(), data->source_positions(), JSInliningHeuristic::kJSOnly);

    JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
                                           data->broker());
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &native_context_specialization);
    AddReducer(data, &graph_reducer, &context_specialization);
    AddReducer(data, &graph_reducer, &intrinsic_lowering);
    AddReducer(data, &graph_reducer, &call_reducer);
    if (data->info()->inlining()) {
      AddReducer(data, &graph_reducer, &inlining);
    }
    graph_reducer.ReduceGraph();
    info->set_inlined_bytecode_size(inlining.total_inlined_bytecode_size());

1390     // Skip the "wasm-inlining" phase if there are no Wasm function calls.
1391 if (call_reducer.has_wasm_calls()) {
1392 data->set_has_js_wasm_calls(true);
1393 }
1394 }
1395 };
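
// Note on the reducer set above: GraphReducer drives all registered reducers
// to a joint fixpoint, so a call inlined by JSInliningHeuristic is
// immediately re-exposed to JSCallReducer and the other reducers. A
// simplified sketch of the driver loop (not the real implementation):
//
//   while (Node* node = pop_from_revisit_stack()) {
//     for (Reducer* r : reducers) {
//       Reduction reduction = r->Reduce(node);
//       if (reduction.Changed()) {
//         push_to_revisit_stack(node);  // revisit node (and, on
//         break;                        // replacement, its uses)
//       }
//     }
//   }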
1396
1397 #if V8_ENABLE_WEBASSEMBLY
1398 struct JSWasmInliningPhase {
1399 DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
1400 void Run(PipelineData* data, Zone* temp_zone) {
1401 DCHECK(data->has_js_wasm_calls());
1402
1403 OptimizedCompilationInfo* info = data->info();
1404 GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
1405 data->broker(), data->jsgraph()->Dead());
1406 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1407 data->common(), temp_zone);
1408 CommonOperatorReducer common_reducer(
1409 &graph_reducer, data->graph(), data->broker(), data->common(),
1410 data->machine(), temp_zone, BranchSemantics::kMachine);
1411 JSInliningHeuristic inlining(&graph_reducer, temp_zone, data->info(),
1412 data->jsgraph(), data->broker(),
1413 data->source_positions(),
1414 JSInliningHeuristic::kWasmOnly);
1415 AddReducer(data, &graph_reducer, &dead_code_elimination);
1416 AddReducer(data, &graph_reducer, &common_reducer);
1417 AddReducer(data, &graph_reducer, &inlining);
1418 graph_reducer.ReduceGraph();
1419 }
1420 };
1421 #endif // V8_ENABLE_WEBASSEMBLY
1422
1423 struct EarlyGraphTrimmingPhase {
1424 DECL_PIPELINE_PHASE_CONSTANTS(EarlyGraphTrimming)
1425
1426 void Run(PipelineData* data, Zone* temp_zone) {
1427 GraphTrimmer trimmer(temp_zone, data->graph());
1428 NodeVector roots(temp_zone);
1429 data->jsgraph()->GetCachedNodes(&roots);
1430 UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
1431 trimmer.TrimGraph(roots.begin(), roots.end());
1432 }
1433 };
1434
1435 struct TyperPhase {
1436 DECL_PIPELINE_PHASE_CONSTANTS(Typer)
1437
1438 void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
1439 NodeVector roots(temp_zone);
1440 data->jsgraph()->GetCachedNodes(&roots);
1441
1442 // Make sure we always type True and False. Needed for escape analysis.
1443 roots.push_back(data->jsgraph()->TrueConstant());
1444 roots.push_back(data->jsgraph()->FalseConstant());
1445
1446 LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
1447 data->common(), temp_zone);
1448 if (FLAG_turbo_loop_variable) induction_vars.Run();
1449
1450 // The typer inspects heap objects, so we need to unpark the local heap.
1451 UnparkedScopeIfNeeded scope(data->broker());
1452 typer->Run(roots, &induction_vars);
1453 }
1454 };
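
// The induction variable analysis above narrows loop phis before typing.
// Illustrative example (not actual typer output): for a loop like
//
//   for (let i = 0; i < n; i++) { ... }
//
// LoopVariableOptimizer can bound the phi for |i| by its start value and its
// limit, giving |i| a range type rather than plain Number and enabling
// smaller representations downstream.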
1455
1456 struct UntyperPhase {
1457 DECL_PIPELINE_PHASE_CONSTANTS(Untyper)
1458
1459 void Run(PipelineData* data, Zone* temp_zone) {
1460 class RemoveTypeReducer final : public Reducer {
1461 public:
1462 const char* reducer_name() const override { return "RemoveTypeReducer"; }
1463 Reduction Reduce(Node* node) final {
1464 if (NodeProperties::IsTyped(node)) {
1465 NodeProperties::RemoveType(node);
1466 return Changed(node);
1467 }
1468 return NoChange();
1469 }
1470 };
1471
1472 NodeVector roots(temp_zone);
1473 data->jsgraph()->GetCachedNodes(&roots);
1474 for (Node* node : roots) {
1475 NodeProperties::RemoveType(node);
1476 }
1477
1478 GraphReducer graph_reducer(
1479 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1480 data->jsgraph()->Dead(), data->observe_node_manager());
1481 RemoveTypeReducer remove_type_reducer;
1482 AddReducer(data, &graph_reducer, &remove_type_reducer);
1483 graph_reducer.ReduceGraph();
1484 }
1485 };
1486
1487 struct HeapBrokerInitializationPhase {
1488 DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(HeapBrokerInitialization)
1489
1490 void Run(PipelineData* data, Zone* temp_zone) {
1491 data->broker()->InitializeAndStartSerializing();
1492 }
1493 };
1494
1495 struct TypedLoweringPhase {
1496 DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
1497
1498 void Run(PipelineData* data, Zone* temp_zone) {
1499 GraphReducer graph_reducer(
1500 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1501 data->jsgraph()->Dead(), data->observe_node_manager());
1502 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1503 data->common(), temp_zone);
1504 JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
1505 data->jsgraph(), data->broker(),
1506 temp_zone);
1507 JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
1508 data->broker(), temp_zone);
1509 ConstantFoldingReducer constant_folding_reducer(
1510 &graph_reducer, data->jsgraph(), data->broker());
1511 TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1512 data->jsgraph(), data->broker());
1513 SimplifiedOperatorReducer simple_reducer(
1514 &graph_reducer, data->jsgraph(), data->broker(), BranchSemantics::kJS);
1515 CheckpointElimination checkpoint_elimination(&graph_reducer);
1516 CommonOperatorReducer common_reducer(
1517 &graph_reducer, data->graph(), data->broker(), data->common(),
1518 data->machine(), temp_zone, BranchSemantics::kJS);
1519 AddReducer(data, &graph_reducer, &dead_code_elimination);
1520
1521 AddReducer(data, &graph_reducer, &create_lowering);
1522 AddReducer(data, &graph_reducer, &constant_folding_reducer);
1523 AddReducer(data, &graph_reducer, &typed_lowering);
1524 AddReducer(data, &graph_reducer, &typed_optimization);
1525 AddReducer(data, &graph_reducer, &simple_reducer);
1526 AddReducer(data, &graph_reducer, &checkpoint_elimination);
1527 AddReducer(data, &graph_reducer, &common_reducer);
1528
1529 // ConstantFoldingReducer, JSCreateLowering, JSTypedLowering, and
1530 // TypedOptimization access the heap.
1531 UnparkedScopeIfNeeded scope(data->broker());
1532
1533 graph_reducer.ReduceGraph();
1534 }
1535 };
1536
1537
1538 struct EscapeAnalysisPhase {
1539 DECL_PIPELINE_PHASE_CONSTANTS(EscapeAnalysis)
1540
1541 void Run(PipelineData* data, Zone* temp_zone) {
1542 EscapeAnalysis escape_analysis(data->jsgraph(),
1543 &data->info()->tick_counter(), temp_zone);
1544 escape_analysis.ReduceGraph();
1545
1546 GraphReducer reducer(temp_zone, data->graph(),
1547 &data->info()->tick_counter(), data->broker(),
1548 data->jsgraph()->Dead(), data->observe_node_manager());
1549 EscapeAnalysisReducer escape_reducer(
1550 &reducer, data->jsgraph(), data->broker(),
1551 escape_analysis.analysis_result(), temp_zone);
1552
1553 AddReducer(data, &reducer, &escape_reducer);
1554
1555 // EscapeAnalysisReducer accesses the heap.
1556 UnparkedScopeIfNeeded scope(data->broker());
1557
1558 reducer.ReduceGraph();
1559 // TODO(turbofan): Turn this into a debug mode check once we have
1560 // confidence.
1561 escape_reducer.VerifyReplacement();
1562 }
1563 };
1564
1565 struct TypeAssertionsPhase {
1566 DECL_PIPELINE_PHASE_CONSTANTS(TypeAssertions)
1567
1568 void Run(PipelineData* data, Zone* temp_zone) {
1569 GraphReducer graph_reducer(
1570 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1571 data->jsgraph()->Dead(), data->observe_node_manager());
1572 AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
1573 temp_zone);
1574 AddReducer(data, &graph_reducer, &type_assertions);
1575 graph_reducer.ReduceGraph();
1576 }
1577 };
1578
1579 struct SimplifiedLoweringPhase {
1580 DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
1581
1582 void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1583 SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
1584 data->source_positions(), data->node_origins(),
1585 &data->info()->tick_counter(), linkage,
1586 data->info(), data->observe_node_manager());
1587
1588 // RepresentationChanger accesses the heap.
1589 UnparkedScopeIfNeeded scope(data->broker());
1590
1591 lowering.LowerAllNodes();
1592 }
1593 };
1594
1595 struct LoopPeelingPhase {
1596 DECL_PIPELINE_PHASE_CONSTANTS(LoopPeeling)
1597
1598 void Run(PipelineData* data, Zone* temp_zone) {
1599 GraphTrimmer trimmer(temp_zone, data->graph());
1600 NodeVector roots(temp_zone);
1601 data->jsgraph()->GetCachedNodes(&roots);
1602 {
1603 UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
1604 trimmer.TrimGraph(roots.begin(), roots.end());
1605 }
1606
1607 LoopTree* loop_tree = LoopFinder::BuildLoopTree(
1608 data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
1609     // We call the typer inside PeelInnerLoopsOfTree, which inspects heap
1610     // objects, so we need to unpark the local heap.
1611 UnparkedScopeIfNeeded scope(data->broker());
1612 LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
1613 data->source_positions(), data->node_origins())
1614 .PeelInnerLoopsOfTree();
1615 }
1616 };
1617
1618 #if V8_ENABLE_WEBASSEMBLY
1619 struct WasmInliningPhase {
1620 DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
1621
1622 void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
1623 uint32_t function_index, const wasm::WireBytesStorage* wire_bytes,
1624 std::vector<compiler::WasmLoopInfo>* loop_info) {
1625 if (!WasmInliner::graph_size_allows_inlining(data->graph()->NodeCount())) {
1626 return;
1627 }
1628 GraphReducer graph_reducer(
1629 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1630 data->jsgraph()->Dead(), data->observe_node_manager());
1631 DeadCodeElimination dead(&graph_reducer, data->graph(), data->common(),
1632 temp_zone);
1633 std::unique_ptr<char[]> debug_name = data->info()->GetDebugName();
1634 WasmInliner inliner(&graph_reducer, env, function_index,
1635 data->source_positions(), data->node_origins(),
1636 data->mcgraph(), wire_bytes, loop_info,
1637 debug_name.get());
1638 AddReducer(data, &graph_reducer, &dead);
1639 AddReducer(data, &graph_reducer, &inliner);
1640 graph_reducer.ReduceGraph();
1641 }
1642 };
1643
1644 namespace {
1645 void EliminateLoopExits(std::vector<compiler::WasmLoopInfo>* loop_infos) {
1646 for (WasmLoopInfo& loop_info : *loop_infos) {
1647 std::unordered_set<Node*> loop_exits;
1648 // We collect exits into a set first because we are not allowed to mutate
1649 // them while iterating uses().
1650 for (Node* use : loop_info.header->uses()) {
1651 if (use->opcode() == IrOpcode::kLoopExit) {
1652 loop_exits.insert(use);
1653 }
1654 }
1655 for (Node* use : loop_exits) {
1656 LoopPeeler::EliminateLoopExit(use);
1657 }
1658 }
1659 }
1660 } // namespace
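
// The collect-then-mutate pattern in EliminateLoopExits is the standard way
// to avoid invalidating a use list mid-walk. Sketch of the hazard it avoids
// (illustrative only):
//
//   for (Node* use : loop_info.header->uses()) {
//     LoopPeeler::EliminateLoopExit(use);  // would unlink |use| and
//   }                                      // invalidate the iterator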
1661
1662 struct WasmLoopUnrollingPhase {
1663 DECL_PIPELINE_PHASE_CONSTANTS(WasmLoopUnrolling)
1664
1665 void Run(PipelineData* data, Zone* temp_zone,
1666 std::vector<compiler::WasmLoopInfo>* loop_infos) {
1667 for (WasmLoopInfo& loop_info : *loop_infos) {
1668 if (loop_info.can_be_innermost) {
1669 ZoneUnorderedSet<Node*>* loop =
1670 LoopFinder::FindSmallInnermostLoopFromHeader(
1671 loop_info.header, temp_zone,
1672                 // Stop discovering the loop once its size exceeds the
1673                 // maximum unrollable size for its depth.
1674 maximum_unrollable_size(loop_info.nesting_depth), true);
1675 if (loop == nullptr) continue;
1676 UnrollLoop(loop_info.header, loop, loop_info.nesting_depth,
1677 data->graph(), data->common(), temp_zone,
1678 data->source_positions(), data->node_origins());
1679 }
1680 }
1681
1682 EliminateLoopExits(loop_infos);
1683 }
1684 };
1685
1686 struct WasmLoopPeelingPhase {
1687 DECL_PIPELINE_PHASE_CONSTANTS(WasmLoopPeeling)
1688
1689 void Run(PipelineData* data, Zone* temp_zone,
1690 std::vector<compiler::WasmLoopInfo>* loop_infos) {
1691 for (WasmLoopInfo& loop_info : *loop_infos) {
1692 if (loop_info.can_be_innermost) {
1693 ZoneUnorderedSet<Node*>* loop =
1694 LoopFinder::FindSmallInnermostLoopFromHeader(
1695 loop_info.header, temp_zone, std::numeric_limits<size_t>::max(),
1696 false);
1697 if (loop == nullptr) continue;
1698 PeelWasmLoop(loop_info.header, loop, data->graph(), data->common(),
1699 temp_zone, data->source_positions(), data->node_origins());
1700 }
1701 }
1702 // If we are going to unroll later, keep loop exits.
1703 if (!FLAG_wasm_loop_unrolling) EliminateLoopExits(loop_infos);
1704 }
1705 };
1706 #endif // V8_ENABLE_WEBASSEMBLY
1707
1708 struct LoopExitEliminationPhase {
1709 DECL_PIPELINE_PHASE_CONSTANTS(LoopExitElimination)
1710
1711 void Run(PipelineData* data, Zone* temp_zone) {
1712 LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
1713 }
1714 };
1715
1716 struct GenericLoweringPhase {
1717 DECL_PIPELINE_PHASE_CONSTANTS(GenericLowering)
1718
1719 void Run(PipelineData* data, Zone* temp_zone) {
1720 GraphReducer graph_reducer(
1721 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1722 data->jsgraph()->Dead(), data->observe_node_manager());
1723 JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
1724 data->broker());
1725 AddReducer(data, &graph_reducer, &generic_lowering);
1726
1727     // JSGenericLowering accesses the heap due to ObjectRef's type checks.
1728 UnparkedScopeIfNeeded scope(data->broker());
1729
1730 graph_reducer.ReduceGraph();
1731 }
1732 };
1733
1734 struct EarlyOptimizationPhase {
1735 DECL_PIPELINE_PHASE_CONSTANTS(EarlyOptimization)
1736
1737 void Run(PipelineData* data, Zone* temp_zone) {
1738 GraphReducer graph_reducer(
1739 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1740 data->jsgraph()->Dead(), data->observe_node_manager());
1741 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1742 data->common(), temp_zone);
1743 SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
1744 data->broker(),
1745 BranchSemantics::kMachine);
1746 RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1747 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1748 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
1749 CommonOperatorReducer common_reducer(
1750 &graph_reducer, data->graph(), data->broker(), data->common(),
1751 data->machine(), temp_zone, BranchSemantics::kMachine);
1752 AddReducer(data, &graph_reducer, &dead_code_elimination);
1753 AddReducer(data, &graph_reducer, &simple_reducer);
1754 AddReducer(data, &graph_reducer, &redundancy_elimination);
1755 AddReducer(data, &graph_reducer, &machine_reducer);
1756 AddReducer(data, &graph_reducer, &common_reducer);
1757 AddReducer(data, &graph_reducer, &value_numbering);
1758 graph_reducer.ReduceGraph();
1759 }
1760 };
1761
1762 struct ControlFlowOptimizationPhase {
1763 DECL_PIPELINE_PHASE_CONSTANTS(ControlFlowOptimization)
1764
1765 void Run(PipelineData* data, Zone* temp_zone) {
1766 ControlFlowOptimizer optimizer(data->graph(), data->common(),
1767 data->machine(),
1768 &data->info()->tick_counter(), temp_zone);
1769 optimizer.Optimize();
1770 }
1771 };
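
// The control flow optimizer mainly recognizes chains of branches comparing
// the same value against constants and rewrites them into a single Switch.
// Illustrative shape (not actual node output):
//
//   Branch(x == 1) -> Branch(x == 2) -> ...   =>   Switch(x): 1, 2, default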
1772
1773 struct EffectControlLinearizationPhase {
1774 DECL_PIPELINE_PHASE_CONSTANTS(EffectLinearization)
1775
1776 void Run(PipelineData* data, Zone* temp_zone) {
1777 {
1778 // Branch cloning in the effect control linearizer requires the graphs to
1779 // be trimmed, so trim now before scheduling.
1780 GraphTrimmer trimmer(temp_zone, data->graph());
1781 NodeVector roots(temp_zone);
1782 data->jsgraph()->GetCachedNodes(&roots);
1783 {
1784 UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
1785 trimmer.TrimGraph(roots.begin(), roots.end());
1786 }
1787
1788 // Schedule the graph without node splitting so that we can
1789 // fix the effect and control flow for nodes with low-level side
1790 // effects (such as changing representation to tagged or
1791       // 'floating' allocation regions).
1792 Schedule* schedule = Scheduler::ComputeSchedule(
1793 temp_zone, data->graph(), Scheduler::kTempSchedule,
1794 &data->info()->tick_counter(), data->profile_data());
1795 TraceScheduleAndVerify(data->info(), data, schedule,
1796 "effect linearization schedule");
1797
1798 // Post-pass for wiring the control/effects
1799 // - connect allocating representation changes into the control&effect
1800 // chains and lower them,
1801 // - get rid of the region markers,
1802 // - introduce effect phis and rewire effects to get SSA again.
1803 LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
1804 data->source_positions(), data->node_origins(),
1805 data->broker());
1806 }
1807 {
1808 // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
1809 // run {DeadCodeElimination} to prune these parts of the graph.
1810       // Also, the following store-store elimination phase greatly benefits from
1811       // running common operator reduction and dead code elimination just before
1812       // it, to eliminate conditional deopts with a constant condition.
1813 GraphReducer graph_reducer(temp_zone, data->graph(),
1814 &data->info()->tick_counter(), data->broker(),
1815 data->jsgraph()->Dead(),
1816 data->observe_node_manager());
1817 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1818 data->common(), temp_zone);
1819 CommonOperatorReducer common_reducer(
1820 &graph_reducer, data->graph(), data->broker(), data->common(),
1821 data->machine(), temp_zone, BranchSemantics::kMachine);
1822 AddReducer(data, &graph_reducer, &dead_code_elimination);
1823 AddReducer(data, &graph_reducer, &common_reducer);
1824 graph_reducer.ReduceGraph();
1825 }
1826 }
1827 };
1828
1829 struct StoreStoreEliminationPhase {
1830 DECL_PIPELINE_PHASE_CONSTANTS(StoreStoreElimination)
1831
1832 void Run(PipelineData* data, Zone* temp_zone) {
1833 GraphTrimmer trimmer(temp_zone, data->graph());
1834 NodeVector roots(temp_zone);
1835 data->jsgraph()->GetCachedNodes(&roots);
1836 {
1837 UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
1838 trimmer.TrimGraph(roots.begin(), roots.end());
1839 }
1840
1841 StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
1842 temp_zone);
1843 }
1844 };
1845
1846 struct LoadEliminationPhase {
1847 DECL_PIPELINE_PHASE_CONSTANTS(LoadElimination)
1848
1849 void Run(PipelineData* data, Zone* temp_zone) {
1850 GraphReducer graph_reducer(
1851 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1852 data->jsgraph()->Dead(), data->observe_node_manager());
1853 BranchElimination branch_condition_elimination(
1854 &graph_reducer, data->jsgraph(), temp_zone, data->source_positions(),
1855 BranchElimination::kEARLY);
1856 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1857 data->common(), temp_zone);
1858 RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1859 LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
1860 temp_zone);
1861 CheckpointElimination checkpoint_elimination(&graph_reducer);
1862 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1863 CommonOperatorReducer common_reducer(
1864 &graph_reducer, data->graph(), data->broker(), data->common(),
1865 data->machine(), temp_zone, BranchSemantics::kJS);
1866 TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1867 data->jsgraph(), data->broker());
1868 ConstantFoldingReducer constant_folding_reducer(
1869 &graph_reducer, data->jsgraph(), data->broker());
1870 TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
1871 data->broker());
1872
1873 AddReducer(data, &graph_reducer, &branch_condition_elimination);
1874 AddReducer(data, &graph_reducer, &dead_code_elimination);
1875 AddReducer(data, &graph_reducer, &redundancy_elimination);
1876 AddReducer(data, &graph_reducer, &load_elimination);
1877 AddReducer(data, &graph_reducer, &type_narrowing_reducer);
1878 AddReducer(data, &graph_reducer, &constant_folding_reducer);
1879 AddReducer(data, &graph_reducer, &typed_optimization);
1880 AddReducer(data, &graph_reducer, &checkpoint_elimination);
1881 AddReducer(data, &graph_reducer, &common_reducer);
1882 AddReducer(data, &graph_reducer, &value_numbering);
1883
1884 // ConstantFoldingReducer and TypedOptimization access the heap.
1885 UnparkedScopeIfNeeded scope(data->broker());
1886
1887 graph_reducer.ReduceGraph();
1888 }
1889 };
1890
1891 struct MemoryOptimizationPhase {
1892 DECL_PIPELINE_PHASE_CONSTANTS(MemoryOptimization)
1893
1894 void Run(PipelineData* data, Zone* temp_zone) {
1895 // The memory optimizer requires the graphs to be trimmed, so trim now.
1896 GraphTrimmer trimmer(temp_zone, data->graph());
1897 NodeVector roots(temp_zone);
1898 data->jsgraph()->GetCachedNodes(&roots);
1899 {
1900 UnparkedScopeIfNeeded scope(data->broker(), FLAG_trace_turbo_trimming);
1901 trimmer.TrimGraph(roots.begin(), roots.end());
1902 }
1903
1904 // Optimize allocations and load/store operations.
1905 MemoryOptimizer optimizer(
1906 data->jsgraph(), temp_zone,
1907 data->info()->allocation_folding()
1908 ? MemoryLowering::AllocationFolding::kDoAllocationFolding
1909 : MemoryLowering::AllocationFolding::kDontAllocationFolding,
1910 data->debug_name(), &data->info()->tick_counter());
1911 optimizer.Optimize();
1912 }
1913 };
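
// When allocation folding is enabled above, consecutive allocations in the
// same allocation group are merged into one bump-pointer reservation, with
// the trailing allocations becoming inner pointers. Illustrative sketch
// (simplified, not actual node names):
//
//   a = Allocate(16)          a = Allocate(16 + 16)   // one limit check
//   b = Allocate(16)    =>    b = a + 16              // folded inner object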
1914
1915 struct LateOptimizationPhase {
1916 DECL_PIPELINE_PHASE_CONSTANTS(LateOptimization)
1917
1918 void Run(PipelineData* data, Zone* temp_zone) {
1919 GraphReducer graph_reducer(
1920 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1921 data->jsgraph()->Dead(), data->observe_node_manager());
1922 BranchElimination branch_condition_elimination(
1923 &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
1924 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1925 data->common(), temp_zone);
1926 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1927 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
1928 CommonOperatorReducer common_reducer(
1929 &graph_reducer, data->graph(), data->broker(), data->common(),
1930 data->machine(), temp_zone, BranchSemantics::kMachine);
1931 JSGraphAssembler graph_assembler(data->jsgraph(), temp_zone);
1932 SelectLowering select_lowering(&graph_assembler, data->graph());
1933 AddReducer(data, &graph_reducer, &branch_condition_elimination);
1934 AddReducer(data, &graph_reducer, &dead_code_elimination);
1935 AddReducer(data, &graph_reducer, &machine_reducer);
1936 AddReducer(data, &graph_reducer, &common_reducer);
1937 AddReducer(data, &graph_reducer, &select_lowering);
1938 AddReducer(data, &graph_reducer, &value_numbering);
1939 graph_reducer.ReduceGraph();
1940 }
1941 };
1942
1943 struct MachineOperatorOptimizationPhase {
1944 DECL_PIPELINE_PHASE_CONSTANTS(MachineOperatorOptimization)
1945
1946 void Run(PipelineData* data, Zone* temp_zone) {
1947 GraphReducer graph_reducer(
1948 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1949 data->jsgraph()->Dead(), data->observe_node_manager());
1950 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1951 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
1952
1953 AddReducer(data, &graph_reducer, &machine_reducer);
1954 AddReducer(data, &graph_reducer, &value_numbering);
1955 graph_reducer.ReduceGraph();
1956 }
1957 };
1958
1959 struct WasmBaseOptimizationPhase {
1960 DECL_PIPELINE_PHASE_CONSTANTS(WasmBaseOptimization)
1961
1962 void Run(PipelineData* data, Zone* temp_zone) {
1963 GraphReducer graph_reducer(
1964 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
1965 data->mcgraph()->Dead(), data->observe_node_manager());
1966 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1967 AddReducer(data, &graph_reducer, &value_numbering);
1968 graph_reducer.ReduceGraph();
1969 }
1970 };
1971
1972 struct DecompressionOptimizationPhase {
1973 DECL_PIPELINE_PHASE_CONSTANTS(DecompressionOptimization)
1974
1975 void Run(PipelineData* data, Zone* temp_zone) {
1976 if (COMPRESS_POINTERS_BOOL) {
1977 DecompressionOptimizer decompression_optimizer(
1978 temp_zone, data->graph(), data->common(), data->machine());
1979 decompression_optimizer.Reduce();
1980 }
1981 }
1982 };
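
// With pointer compression, tagged fields store 32-bit offsets from the cage
// base. The optimizer above keeps values compressed when no use requires the
// full pointer. Illustrative sketch (assumed node shapes, not real output):
//
//   x = Decompress(Load)                    x = Load  // stays 32-bit
//   Branch(TaggedEqual(x, y))       =>      Branch(Word32Equal(x, y'))
//
// i.e. equality of two compressed values can be decided on the compressed
// representation.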
1983
1984 struct BranchConditionDuplicationPhase {
1985 DECL_PIPELINE_PHASE_CONSTANTS(BranchConditionDuplication)
1986
1987 void Run(PipelineData* data, Zone* temp_zone) {
1988 BranchConditionDuplicator compare_zero_branch_optimizer(temp_zone,
1989 data->graph());
1990 compare_zero_branch_optimizer.Reduce();
1991 }
1992 };
1993
1994 #if V8_ENABLE_WEBASSEMBLY
1995 struct WasmOptimizationPhase {
1996 DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)
1997
1998 void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
1999 // Run optimizations in two rounds: First one around load elimination and
2000 // then one around branch elimination. This is because those two
2001     // optimizations sometimes exhibit quadratic complexity when run together.
2002 // We only need load elimination for managed objects.
2003 if (FLAG_experimental_wasm_gc || FLAG_wasm_inlining) {
2004 GraphReducer graph_reducer(temp_zone, data->graph(),
2005 &data->info()->tick_counter(), data->broker(),
2006 data->jsgraph()->Dead(),
2007 data->observe_node_manager());
2008 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
2009 allow_signalling_nan);
2010 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2011 data->common(), temp_zone);
2012 CommonOperatorReducer common_reducer(
2013 &graph_reducer, data->graph(), data->broker(), data->common(),
2014 data->machine(), temp_zone, BranchSemantics::kMachine);
2015 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2016 CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
2017 temp_zone);
2018 WasmEscapeAnalysis escape(&graph_reducer, data->mcgraph());
2019 AddReducer(data, &graph_reducer, &machine_reducer);
2020 AddReducer(data, &graph_reducer, &dead_code_elimination);
2021 AddReducer(data, &graph_reducer, &common_reducer);
2022 AddReducer(data, &graph_reducer, &value_numbering);
2023 if (FLAG_experimental_wasm_gc) {
2024 AddReducer(data, &graph_reducer, &load_elimination);
2025 AddReducer(data, &graph_reducer, &escape);
2026 }
2027 graph_reducer.ReduceGraph();
2028 }
2029 {
2030 GraphReducer graph_reducer(temp_zone, data->graph(),
2031 &data->info()->tick_counter(), data->broker(),
2032 data->jsgraph()->Dead(),
2033 data->observe_node_manager());
2034 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
2035 allow_signalling_nan);
2036 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2037 data->common(), temp_zone);
2038 CommonOperatorReducer common_reducer(
2039 &graph_reducer, data->graph(), data->broker(), data->common(),
2040 data->machine(), temp_zone, BranchSemantics::kMachine);
2041 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2042 BranchElimination branch_condition_elimination(
2043 &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
2044 AddReducer(data, &graph_reducer, &machine_reducer);
2045 AddReducer(data, &graph_reducer, &dead_code_elimination);
2046 AddReducer(data, &graph_reducer, &common_reducer);
2047 AddReducer(data, &graph_reducer, &value_numbering);
2048 AddReducer(data, &graph_reducer, &branch_condition_elimination);
2049 graph_reducer.ReduceGraph();
2050 }
2051 }
2052 };
2053 #endif // V8_ENABLE_WEBASSEMBLY
2054
2055 struct CsaEarlyOptimizationPhase {
2056 DECL_PIPELINE_PHASE_CONSTANTS(CSAEarlyOptimization)
2057
2058 void Run(PipelineData* data, Zone* temp_zone) {
2059 // Run optimizations in two rounds: First one around load elimination and
2060 // then one around branch elimination. This is because those two
2061     // optimizations sometimes exhibit quadratic complexity when run together.
2062 {
2063 GraphReducer graph_reducer(temp_zone, data->graph(),
2064 &data->info()->tick_counter(), data->broker(),
2065 data->jsgraph()->Dead(),
2066 data->observe_node_manager());
2067 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
2068 true);
2069 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2070 data->common(), temp_zone);
2071 CommonOperatorReducer common_reducer(
2072 &graph_reducer, data->graph(), data->broker(), data->common(),
2073 data->machine(), temp_zone, BranchSemantics::kMachine);
2074 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2075 CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
2076 temp_zone);
2077 AddReducer(data, &graph_reducer, &machine_reducer);
2078 AddReducer(data, &graph_reducer, &dead_code_elimination);
2079 AddReducer(data, &graph_reducer, &common_reducer);
2080 AddReducer(data, &graph_reducer, &value_numbering);
2081 AddReducer(data, &graph_reducer, &load_elimination);
2082 graph_reducer.ReduceGraph();
2083 }
2084 {
2085 GraphReducer graph_reducer(temp_zone, data->graph(),
2086 &data->info()->tick_counter(), data->broker(),
2087 data->jsgraph()->Dead(),
2088 data->observe_node_manager());
2089 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
2090 true);
2091 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2092 data->common(), temp_zone);
2093 CommonOperatorReducer common_reducer(
2094 &graph_reducer, data->graph(), data->broker(), data->common(),
2095 data->machine(), temp_zone, BranchSemantics::kMachine);
2096 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2097 BranchElimination branch_condition_elimination(
2098 &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
2099 AddReducer(data, &graph_reducer, &machine_reducer);
2100 AddReducer(data, &graph_reducer, &dead_code_elimination);
2101 AddReducer(data, &graph_reducer, &common_reducer);
2102 AddReducer(data, &graph_reducer, &value_numbering);
2103 AddReducer(data, &graph_reducer, &branch_condition_elimination);
2104 graph_reducer.ReduceGraph();
2105 }
2106 }
2107 };
2108
2109 struct CsaOptimizationPhase {
2110 DECL_PIPELINE_PHASE_CONSTANTS(CSAOptimization)
2111
2112 void Run(PipelineData* data, Zone* temp_zone, bool allow_signalling_nan) {
2113 GraphReducer graph_reducer(
2114 temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
2115 data->jsgraph()->Dead(), data->observe_node_manager());
2116 BranchElimination branch_condition_elimination(
2117 &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
2118 DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
2119 data->common(), temp_zone);
2120 MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
2121 allow_signalling_nan);
2122 CommonOperatorReducer common_reducer(
2123 &graph_reducer, data->graph(), data->broker(), data->common(),
2124 data->machine(), temp_zone, BranchSemantics::kMachine);
2125 ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
2126 AddReducer(data, &graph_reducer, &branch_condition_elimination);
2127 AddReducer(data, &graph_reducer, &dead_code_elimination);
2128 AddReducer(data, &graph_reducer, &machine_reducer);
2129 AddReducer(data, &graph_reducer, &common_reducer);
2130 AddReducer(data, &graph_reducer, &value_numbering);
2131 graph_reducer.ReduceGraph();
2132 }
2133 };
2134
2135 struct ComputeSchedulePhase {
2136 DECL_PIPELINE_PHASE_CONSTANTS(Scheduling)
2137
2138 void Run(PipelineData* data, Zone* temp_zone) {
2139 Schedule* schedule = Scheduler::ComputeSchedule(
2140 temp_zone, data->graph(),
2141 data->info()->splitting() ? Scheduler::kSplitNodes
2142 : Scheduler::kNoFlags,
2143 &data->info()->tick_counter(), data->profile_data());
2144 data->set_schedule(schedule);
2145 }
2146 };
2147
2148 struct InstructionRangesAsJSON {
2149 const InstructionSequence* sequence;
2150 const ZoneVector<std::pair<int, int>>* instr_origins;
2151 };
2152
2153 std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
2154 const int max = static_cast<int>(s.sequence->LastInstructionIndex());
2155
2156 out << ", \"nodeIdToInstructionRange\": {";
2157 bool need_comma = false;
2158 for (size_t i = 0; i < s.instr_origins->size(); ++i) {
2159 std::pair<int, int> offset = (*s.instr_origins)[i];
2160 if (offset.first == -1) continue;
2161 const int first = max - offset.first + 1;
2162 const int second = max - offset.second + 1;
2163 if (need_comma) out << ", ";
2164 out << "\"" << i << "\": [" << first << ", " << second << "]";
2165 need_comma = true;
2166 }
2167 out << "}";
2168 out << ", \"blockIdtoInstructionRange\": {";
2169 need_comma = false;
2170 for (auto block : s.sequence->instruction_blocks()) {
2171 if (need_comma) out << ", ";
2172 out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
2173 << block->code_end() << "]";
2174 need_comma = true;
2175 }
2176 out << "}";
2177 return out;
2178 }
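
// Shape of the JSON fragment appended by the operator above (values are
// illustrative; note that the lower-case "t" in "blockIdtoInstructionRange"
// is the key actually emitted):
//
//   , "nodeIdToInstructionRange": {"14": [3, 5], "16": [5, 6]}
//   , "blockIdtoInstructionRange": {"0": [0, 4], "1": [4, 9]}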
2179
2180 struct InstructionSelectionPhase {
2181 DECL_PIPELINE_PHASE_CONSTANTS(SelectInstructions)
2182
2183 void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
2184 InstructionSelector selector(
2185 temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
2186 data->schedule(), data->source_positions(), data->frame(),
2187 data->info()->switch_jump_table()
2188 ? InstructionSelector::kEnableSwitchJumpTable
2189 : InstructionSelector::kDisableSwitchJumpTable,
2190 &data->info()->tick_counter(), data->broker(),
2191 data->address_of_max_unoptimized_frame_height(),
2192 data->address_of_max_pushed_argument_count(),
2193 data->info()->source_positions()
2194 ? InstructionSelector::kAllSourcePositions
2195 : InstructionSelector::kCallSourcePositions,
2196 InstructionSelector::SupportedFeatures(),
2197 FLAG_turbo_instruction_scheduling
2198 ? InstructionSelector::kEnableScheduling
2199 : InstructionSelector::kDisableScheduling,
2200 data->assembler_options().enable_root_relative_access
2201 ? InstructionSelector::kEnableRootsRelativeAddressing
2202 : InstructionSelector::kDisableRootsRelativeAddressing,
2203 data->info()->trace_turbo_json()
2204 ? InstructionSelector::kEnableTraceTurboJson
2205 : InstructionSelector::kDisableTraceTurboJson);
2206 if (!selector.SelectInstructions()) {
2207 data->set_compilation_failed();
2208 }
2209 if (data->info()->trace_turbo_json()) {
2210 TurboJsonFile json_of(data->info(), std::ios_base::app);
2211 json_of << "{\"name\":\"" << phase_name()
2212 << "\",\"type\":\"instructions\""
2213 << InstructionRangesAsJSON{data->sequence(),
2214 &selector.instr_origins()}
2215 << "},\n";
2216 }
2217 }
2218 };
2219
2220
2221 struct MeetRegisterConstraintsPhase {
2222 DECL_PIPELINE_PHASE_CONSTANTS(MeetRegisterConstraints)
2223 void Run(PipelineData* data, Zone* temp_zone) {
2224 ConstraintBuilder builder(data->top_tier_register_allocation_data());
2225 builder.MeetRegisterConstraints();
2226 }
2227 };
2228
2229
2230 struct ResolvePhisPhase {
2231 DECL_PIPELINE_PHASE_CONSTANTS(ResolvePhis)
2232
2233 void Run(PipelineData* data, Zone* temp_zone) {
2234 ConstraintBuilder builder(data->top_tier_register_allocation_data());
2235 builder.ResolvePhis();
2236 }
2237 };
2238
2239
2240 struct BuildLiveRangesPhase {
2241 DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRanges)
2242
2243 void Run(PipelineData* data, Zone* temp_zone) {
2244 LiveRangeBuilder builder(data->top_tier_register_allocation_data(),
2245 temp_zone);
2246 builder.BuildLiveRanges();
2247 }
2248 };
2249
2250 struct BuildBundlesPhase {
2251 DECL_PIPELINE_PHASE_CONSTANTS(BuildLiveRangeBundles)
2252
2253 void Run(PipelineData* data, Zone* temp_zone) {
2254 BundleBuilder builder(data->top_tier_register_allocation_data());
2255 builder.BuildBundles();
2256 }
2257 };
2258
2259 template <typename RegAllocator>
2260 struct AllocateGeneralRegistersPhase {
2261 DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)
2262
2263 void Run(PipelineData* data, Zone* temp_zone) {
2264 RegAllocator allocator(data->top_tier_register_allocation_data(),
2265 RegisterKind::kGeneral, temp_zone);
2266 allocator.AllocateRegisters();
2267 }
2268 };
2269
2270 template <typename RegAllocator>
2271 struct AllocateFPRegistersPhase {
2272 DECL_PIPELINE_PHASE_CONSTANTS(AllocateFPRegisters)
2273
2274 void Run(PipelineData* data, Zone* temp_zone) {
2275 RegAllocator allocator(data->top_tier_register_allocation_data(),
2276 RegisterKind::kDouble, temp_zone);
2277 allocator.AllocateRegisters();
2278 }
2279 };
2280
2281 template <typename RegAllocator>
2282 struct AllocateSimd128RegistersPhase {
2283 DECL_PIPELINE_PHASE_CONSTANTS(AllocateSIMD128Registers)
2284
2285 void Run(PipelineData* data, Zone* temp_zone) {
2286 RegAllocator allocator(data->top_tier_register_allocation_data(),
2287 RegisterKind::kSimd128, temp_zone);
2288 allocator.AllocateRegisters();
2289 }
2290 };
2291
2292 struct DecideSpillingModePhase {
2293 DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
2294
2295 void Run(PipelineData* data, Zone* temp_zone) {
2296 OperandAssigner assigner(data->top_tier_register_allocation_data());
2297 assigner.DecideSpillingMode();
2298 }
2299 };
2300
2301 struct AssignSpillSlotsPhase {
2302 DECL_PIPELINE_PHASE_CONSTANTS(AssignSpillSlots)
2303
2304 void Run(PipelineData* data, Zone* temp_zone) {
2305 OperandAssigner assigner(data->top_tier_register_allocation_data());
2306 assigner.AssignSpillSlots();
2307 }
2308 };
2309
2310
2311 struct CommitAssignmentPhase {
2312 DECL_PIPELINE_PHASE_CONSTANTS(CommitAssignment)
2313
2314 void Run(PipelineData* data, Zone* temp_zone) {
2315 OperandAssigner assigner(data->top_tier_register_allocation_data());
2316 assigner.CommitAssignment();
2317 }
2318 };
2319
2320
2321 struct PopulateReferenceMapsPhase {
2322 DECL_PIPELINE_PHASE_CONSTANTS(PopulatePointerMaps)
2323
2324 void Run(PipelineData* data, Zone* temp_zone) {
2325 ReferenceMapPopulator populator(data->top_tier_register_allocation_data());
2326 populator.PopulateReferenceMaps();
2327 }
2328 };
2329
2330
2331 struct ConnectRangesPhase {
2332 DECL_PIPELINE_PHASE_CONSTANTS(ConnectRanges)
2333
2334 void Run(PipelineData* data, Zone* temp_zone) {
2335 LiveRangeConnector connector(data->top_tier_register_allocation_data());
2336 connector.ConnectRanges(temp_zone);
2337 }
2338 };
2339
2340
2341 struct ResolveControlFlowPhase {
2342 DECL_PIPELINE_PHASE_CONSTANTS(ResolveControlFlow)
2343
2344 void Run(PipelineData* data, Zone* temp_zone) {
2345 LiveRangeConnector connector(data->top_tier_register_allocation_data());
2346 connector.ResolveControlFlow(temp_zone);
2347 }
2348 };
2349
2350 struct MidTierRegisterOutputDefinitionPhase {
2351 DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterOutputDefinition)
2352
2353 void Run(PipelineData* data, Zone* temp_zone) {
2354 DefineOutputs(data->mid_tier_register_allocator_data());
2355 }
2356 };
2357
2358 struct MidTierRegisterAllocatorPhase {
2359 DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
2360
2361 void Run(PipelineData* data, Zone* temp_zone) {
2362 AllocateRegisters(data->mid_tier_register_allocator_data());
2363 }
2364 };
2365
2366 struct MidTierSpillSlotAllocatorPhase {
2367 DECL_PIPELINE_PHASE_CONSTANTS(MidTierSpillSlotAllocator)
2368
2369 void Run(PipelineData* data, Zone* temp_zone) {
2370 AllocateSpillSlots(data->mid_tier_register_allocator_data());
2371 }
2372 };
2373
2374 struct MidTierPopulateReferenceMapsPhase {
2375 DECL_PIPELINE_PHASE_CONSTANTS(MidTierPopulateReferenceMaps)
2376
2377 void Run(PipelineData* data, Zone* temp_zone) {
2378 PopulateReferenceMaps(data->mid_tier_register_allocator_data());
2379 }
2380 };
2381
2382 struct OptimizeMovesPhase {
2383 DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)
2384
2385 void Run(PipelineData* data, Zone* temp_zone) {
2386 MoveOptimizer move_optimizer(temp_zone, data->sequence());
2387 move_optimizer.Run();
2388 }
2389 };
2390
2391 struct FrameElisionPhase {
2392 DECL_PIPELINE_PHASE_CONSTANTS(FrameElision)
2393
2394 void Run(PipelineData* data, Zone* temp_zone) {
2395 FrameElider(data->sequence()).Run();
2396 }
2397 };
2398
2399 struct JumpThreadingPhase {
2400 DECL_PIPELINE_PHASE_CONSTANTS(JumpThreading)
2401
2402 void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
2403 ZoneVector<RpoNumber> result(temp_zone);
2404 if (JumpThreading::ComputeForwarding(temp_zone, &result, data->sequence(),
2405 frame_at_start)) {
2406 JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
2407 }
2408 }
2409 };
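
// Jump threading forwards jumps whose target block is empty except for
// another jump. Illustrative sketch (block numbers made up):
//
//   B3: jmp B7          B3: jmp B9
//   B7: jmp B9    =>    B7: jmp B9   // now unreachable, removed later
//
// ComputeForwarding builds the RpoNumber forwarding map and reports whether
// anything changed; ApplyForwarding then rewrites the jump targets.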
2410
2411 struct AssembleCodePhase {
2412 DECL_PIPELINE_PHASE_CONSTANTS(AssembleCode)
2413
2414 void Run(PipelineData* data, Zone* temp_zone) {
2415 data->code_generator()->AssembleCode();
2416 }
2417 };
2418
2419 struct FinalizeCodePhase {
2420 DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(FinalizeCode)
2421
2422 void Run(PipelineData* data, Zone* temp_zone) {
2423 data->set_code(data->code_generator()->FinalizeCode());
2424 }
2425 };
2426
2427
2428 struct PrintGraphPhase {
2429 DECL_PIPELINE_PHASE_CONSTANTS(PrintGraph)
2430
2431 void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
2432 OptimizedCompilationInfo* info = data->info();
2433 Graph* graph = data->graph();
2434
2435 if (info->trace_turbo_json()) { // Print JSON.
2436 UnparkedScopeIfNeeded scope(data->broker());
2437 AllowHandleDereference allow_deref;
2438
2439 TurboJsonFile json_of(info, std::ios_base::app);
2440 json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
2441 << AsJSON(*graph, data->source_positions(), data->node_origins())
2442 << "},\n";
2443 }
2444
2445 if (info->trace_turbo_scheduled()) {
2446 AccountingAllocator allocator;
2447 Schedule* schedule = data->schedule();
2448 if (schedule == nullptr) {
2449 schedule = Scheduler::ComputeSchedule(
2450 temp_zone, data->graph(), Scheduler::kNoFlags,
2451 &info->tick_counter(), data->profile_data());
2452 }
2453
2454 UnparkedScopeIfNeeded scope(data->broker());
2455 AllowHandleDereference allow_deref;
2456 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2457 tracing_scope.stream()
2458 << "-- Graph after " << phase << " -- " << std::endl
2459 << AsScheduledGraph(schedule);
2460 } else if (info->trace_turbo_graph()) { // Simple textual RPO.
2461 UnparkedScopeIfNeeded scope(data->broker());
2462 AllowHandleDereference allow_deref;
2463 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2464 tracing_scope.stream()
2465 << "-- Graph after " << phase << " -- " << std::endl
2466 << AsRPO(*graph);
2467 }
2468 }
2469 };
2470
2471
2472 struct VerifyGraphPhase {
2473 DECL_PIPELINE_PHASE_CONSTANTS(VerifyGraph)
2474
2475 void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
2476 bool values_only = false) {
2477 Verifier::CodeType code_type;
2478 switch (data->info()->code_kind()) {
2479 case CodeKind::WASM_FUNCTION:
2480 case CodeKind::WASM_TO_CAPI_FUNCTION:
2481 case CodeKind::WASM_TO_JS_FUNCTION:
2482 case CodeKind::JS_TO_WASM_FUNCTION:
2483 case CodeKind::C_WASM_ENTRY:
2484 code_type = Verifier::kWasm;
2485 break;
2486 default:
2487 code_type = Verifier::kDefault;
2488 }
2489 Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
2490 values_only ? Verifier::kValuesOnly : Verifier::kAll,
2491 code_type);
2492 }
2493 };
2494
2495 #undef DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS
2496 #undef DECL_PIPELINE_PHASE_CONSTANTS
2497 #undef DECL_PIPELINE_PHASE_CONSTANTS_HELPER
2498
2499 #if V8_ENABLE_WEBASSEMBLY
2500 class WasmHeapStubCompilationJob final : public TurbofanCompilationJob {
2501 public:
2502 WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
2503 std::unique_ptr<Zone> zone, Graph* graph,
2504 CodeKind kind, std::unique_ptr<char[]> debug_name,
2505 const AssemblerOptions& options,
2506 SourcePositionTable* source_positions)
2507 // Note that the OptimizedCompilationInfo is not initialized at the time
2508 // we pass it to the CompilationJob constructor, but it is not
2509 // dereferenced there.
2510 : TurbofanCompilationJob(&info_, CompilationJob::State::kReadyToExecute),
2511 debug_name_(std::move(debug_name)),
2512 info_(base::CStrVector(debug_name_.get()), graph->zone(), kind),
2513 call_descriptor_(call_descriptor),
2514 zone_stats_(zone->allocator()),
2515 zone_(std::move(zone)),
2516 graph_(graph),
2517 data_(&zone_stats_, &info_, isolate, wasm::GetWasmEngine()->allocator(),
2518 graph_, nullptr, nullptr, source_positions,
2519 zone_->New<NodeOriginTable>(graph_), nullptr, options, nullptr),
2520 pipeline_(&data_) {}
2521
2522 WasmHeapStubCompilationJob(const WasmHeapStubCompilationJob&) = delete;
2523 WasmHeapStubCompilationJob& operator=(const WasmHeapStubCompilationJob&) =
2524 delete;
2525
2526 protected:
2527 Status PrepareJobImpl(Isolate* isolate) final;
2528 Status ExecuteJobImpl(RuntimeCallStats* stats,
2529 LocalIsolate* local_isolate) final;
2530 Status FinalizeJobImpl(Isolate* isolate) final;
2531
2532 private:
2533 std::unique_ptr<char[]> debug_name_;
2534 OptimizedCompilationInfo info_;
2535 CallDescriptor* call_descriptor_;
2536 ZoneStats zone_stats_;
2537 std::unique_ptr<Zone> zone_;
2538 Graph* graph_;
2539 PipelineData data_;
2540 PipelineImpl pipeline_;
2541 };
2542
2543 // static
2544 std::unique_ptr<TurbofanCompilationJob> Pipeline::NewWasmHeapStubCompilationJob(
2545 Isolate* isolate, CallDescriptor* call_descriptor,
2546 std::unique_ptr<Zone> zone, Graph* graph, CodeKind kind,
2547 std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
2548 SourcePositionTable* source_positions) {
2549 return std::make_unique<WasmHeapStubCompilationJob>(
2550 isolate, call_descriptor, std::move(zone), graph, kind,
2551 std::move(debug_name), options, source_positions);
2552 }
2553
2554 CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
2555 Isolate* isolate) {
2556 UNREACHABLE();
2557 }
2558
2559 CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
2560 RuntimeCallStats* stats, LocalIsolate* local_isolate) {
2561 std::unique_ptr<PipelineStatistics> pipeline_statistics;
2562 if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
2563 pipeline_statistics.reset(new PipelineStatistics(
2564 &info_, wasm::GetWasmEngine()->GetOrCreateTurboStatistics(),
2565 &zone_stats_));
2566 pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
2567 }
2568 if (info_.trace_turbo_json() || info_.trace_turbo_graph()) {
2569 CodeTracer::StreamScope tracing_scope(data_.GetCodeTracer());
2570 tracing_scope.stream()
2571 << "---------------------------------------------------\n"
2572 << "Begin compiling method " << info_.GetDebugName().get()
2573 << " using TurboFan" << std::endl;
2574 }
2575 if (info_.trace_turbo_graph()) { // Simple textual RPO.
2576 StdoutStream{} << "-- wasm stub " << CodeKindToString(info_.code_kind())
2577 << " graph -- " << std::endl
2578 << AsRPO(*data_.graph());
2579 }
2580
2581 if (info_.trace_turbo_json()) {
2582 TurboJsonFile json_of(&info_, std::ios_base::trunc);
2583 json_of << "{\"function\":\"" << info_.GetDebugName().get()
2584 << "\", \"source\":\"\",\n\"phases\":[";
2585 }
2586 pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
2587 pipeline_.Run<MemoryOptimizationPhase>();
2588 pipeline_.ComputeScheduledGraph();
2589 if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
2590 return CompilationJob::SUCCEEDED;
2591 }
2592 return CompilationJob::FAILED;
2593 }
2594
2595 CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
2596 Isolate* isolate) {
2597 Handle<Code> code;
2598 if (!pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code)) {
2599 V8::FatalProcessOutOfMemory(isolate,
2600 "WasmHeapStubCompilationJob::FinalizeJobImpl");
2601 }
2602 if (pipeline_.CommitDependencies(code)) {
2603 info_.SetCode(code);
2604 #ifdef ENABLE_DISASSEMBLER
2605 if (FLAG_print_opt_code) {
2606 CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
2607 code->Disassemble(compilation_info()->GetDebugName().get(),
2608 tracing_scope.stream(), isolate);
2609 }
2610 #endif
2611 PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
2612 Handle<AbstractCode>::cast(code),
2613 compilation_info()->GetDebugName().get()));
2614 return SUCCEEDED;
2615 }
2616 return FAILED;
2617 }
2618 #endif // V8_ENABLE_WEBASSEMBLY
2619
2620 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
2621 if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
2622 Run<PrintGraphPhase>(phase);
2623 }
2624 if (FLAG_turbo_verify) {
2625 Run<VerifyGraphPhase>(untyped);
2626 }
2627 }
2628
2629 void PipelineImpl::InitializeHeapBroker() {
2630 PipelineData* data = data_;
2631
2632 data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
2633
2634 if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
2635 CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
2636 tracing_scope.stream()
2637 << "---------------------------------------------------\n"
2638 << "Begin compiling method " << info()->GetDebugName().get()
2639 << " using TurboFan" << std::endl;
2640 }
2641 if (info()->trace_turbo_json()) {
2642 TurboCfgFile tcf(isolate());
2643 tcf << AsC1VCompilation(info());
2644 }
2645
2646 data->source_positions()->AddDecorator();
2647 if (data->info()->trace_turbo_json()) {
2648 data->node_origins()->AddDecorator();
2649 }
2650
2651 data->broker()->SetTargetNativeContextRef(data->native_context());
2652 Run<HeapBrokerInitializationPhase>();
2653 data->broker()->StopSerializing();
2654 data->EndPhaseKind();
2655 }
2656
2657 bool PipelineImpl::CreateGraph() {
2658 PipelineData* data = this->data_;
2659 UnparkedScopeIfNeeded unparked_scope(data->broker());
2660
2661 data->BeginPhaseKind("V8.TFGraphCreation");
2662
2663 Run<GraphBuilderPhase>();
2664 RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
2665
2666 // Perform function context specialization and inlining (if enabled).
2667 Run<InliningPhase>();
2668 RunPrintAndVerify(InliningPhase::phase_name(), true);
2669
2670 // Determine the Typer operation flags.
2671 {
2672 SharedFunctionInfoRef shared_info =
2673 MakeRef(data->broker(), info()->shared_info());
2674 if (is_sloppy(shared_info.language_mode()) &&
2675 shared_info.IsUserJavaScript()) {
2676 // Sloppy mode functions always have an Object for this.
2677 data->AddTyperFlag(Typer::kThisIsReceiver);
2678 }
2679 if (IsClassConstructor(shared_info.kind())) {
2680 // Class constructors cannot be [[Call]]ed.
2681 data->AddTyperFlag(Typer::kNewTargetIsReceiver);
2682 }
2683 }
2684
2685 data->EndPhaseKind();
2686
2687 return true;
2688 }
2689
bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
  PipelineData* data = this->data_;

  data->BeginPhaseKind("V8.TFLowering");

  // Trim the graph before typing to ensure all nodes are typed.
  Run<EarlyGraphTrimmingPhase>();
  RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);

  // Type the graph and keep the Typer running such that new nodes get
  // automatically typed when they are created.
  Run<TyperPhase>(data->CreateTyper());
  RunPrintAndVerify(TyperPhase::phase_name());

  Run<TypedLoweringPhase>();
  RunPrintAndVerify(TypedLoweringPhase::phase_name());

  if (data->info()->loop_peeling()) {
    Run<LoopPeelingPhase>();
    RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
  } else {
    Run<LoopExitEliminationPhase>();
    RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
  }

  if (FLAG_turbo_load_elimination) {
    Run<LoadEliminationPhase>();
    RunPrintAndVerify(LoadEliminationPhase::phase_name());
  }
  data->DeleteTyper();

  if (FLAG_turbo_escape) {
    Run<EscapeAnalysisPhase>();
    if (data->compilation_failed()) {
      info()->AbortOptimization(
          BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
      data->EndPhaseKind();
      return false;
    }
    RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
  }

  if (FLAG_assert_types) {
    Run<TypeAssertionsPhase>();
    RunPrintAndVerify(TypeAssertionsPhase::phase_name());
  }

  // Perform simplified lowering. This has to run w/o the Typer decorator,
  // because we cannot compute meaningful types anyway, and the computed types
  // might even conflict with the representation/truncation logic.
  Run<SimplifiedLoweringPhase>(linkage);
  RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);

#if V8_ENABLE_WEBASSEMBLY
  if (data->has_js_wasm_calls()) {
    DCHECK(data->info()->inline_js_wasm_calls());
    Run<JSWasmInliningPhase>();
    RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  // From now on it is invalid to look at types on the nodes, because the types
  // on the nodes might not make sense after representation selection due to
  // the way we handle truncations; if we wanted to look at types afterwards we
  // would essentially need to re-type (large portions of) the graph.

  // In order to catch bugs related to type access after this point, we now
  // remove the types from the nodes (currently only in Debug builds).
#ifdef DEBUG
  Run<UntyperPhase>();
  RunPrintAndVerify(UntyperPhase::phase_name(), true);
#endif

  // Run generic lowering pass.
  Run<GenericLoweringPhase>();
  RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);

  data->BeginPhaseKind("V8.TFBlockBuilding");

  data->InitializeFrameData(linkage->GetIncomingDescriptor());

  // Run early optimization pass.
  Run<EarlyOptimizationPhase>();
  RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);

  Run<EffectControlLinearizationPhase>();
  RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);

  if (FLAG_turbo_store_elimination) {
    Run<StoreStoreEliminationPhase>();
    RunPrintAndVerify(StoreStoreEliminationPhase::phase_name(), true);
  }

  // Optimize control flow.
  if (FLAG_turbo_cf_optimization) {
    Run<ControlFlowOptimizationPhase>();
    RunPrintAndVerify(ControlFlowOptimizationPhase::phase_name(), true);
  }

  Run<LateOptimizationPhase>();
  RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);

  // Optimize memory access and allocation operations.
  Run<MemoryOptimizationPhase>();
  RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);

  // Run value numbering and machine operator reducer to optimize load/store
  // address computation (in particular, reuse the address computation whenever
  // possible).
  Run<MachineOperatorOptimizationPhase>();
  RunPrintAndVerify(MachineOperatorOptimizationPhase::phase_name(), true);

  Run<DecompressionOptimizationPhase>();
  RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(), true);

  Run<BranchConditionDuplicationPhase>();
  RunPrintAndVerify(BranchConditionDuplicationPhase::phase_name(), true);

  data->source_positions()->RemoveDecorator();
  if (data->info()->trace_turbo_json()) {
    data->node_origins()->RemoveDecorator();
  }

  ComputeScheduledGraph();

  return SelectInstructions(linkage);
}

namespace {

// Compute a hash of the given graph, in a way that should provide the same
// result in multiple runs of mksnapshot, meaning the hash cannot depend on any
// external pointer values or uncompressed heap constants. This hash can be
// used to reject profiling data if the builtin's current code doesn't match
// the version that was profiled. Hash collisions are not catastrophic; in the
// worst case, we just defer some blocks that ideally shouldn't be deferred.
// The result value is in the valid Smi range.
int HashGraphForPGO(Graph* graph) {
  AccountingAllocator allocator;
  Zone local_zone(&allocator, ZONE_NAME);

  constexpr NodeId kUnassigned = static_cast<NodeId>(-1);

  constexpr byte kUnvisited = 0;
  constexpr byte kOnStack = 1;
  constexpr byte kVisited = 2;

  // Do a depth-first post-order traversal of the graph. For every node, hash:
  //
  //   - the node's traversal number
  //   - the opcode
  //   - the number of inputs
  //   - each input node's traversal number
  //
  // What's a traversal number? We can't use node IDs because they're not
  // stable build-to-build, so we assign a new number for each node as it is
  // visited.

  ZoneVector<byte> state(graph->NodeCount(), kUnvisited, &local_zone);
  ZoneVector<NodeId> traversal_numbers(graph->NodeCount(), kUnassigned,
                                       &local_zone);
  ZoneStack<Node*> stack(&local_zone);

  NodeId visited_count = 0;
  size_t hash = 0;

  stack.push(graph->end());
  state[graph->end()->id()] = kOnStack;
  traversal_numbers[graph->end()->id()] = visited_count++;
  while (!stack.empty()) {
    Node* n = stack.top();
    bool pop = true;
    for (Node* const i : n->inputs()) {
      if (state[i->id()] == kUnvisited) {
        state[i->id()] = kOnStack;
        traversal_numbers[i->id()] = visited_count++;
        stack.push(i);
        pop = false;
        break;
      }
    }
    if (pop) {
      state[n->id()] = kVisited;
      stack.pop();
      hash = base::hash_combine(hash, traversal_numbers[n->id()], n->opcode(),
                                n->InputCount());
      for (Node* const i : n->inputs()) {
        DCHECK(traversal_numbers[i->id()] != kUnassigned);
        hash = base::hash_combine(hash, traversal_numbers[i->id()]);
      }
    }
  }
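  // Clamp the accumulated hash into the valid Smi range promised above; the
  // raw size_t hash is truncated to an int first.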
  return Smi(IntToSmi(static_cast<int>(hash))).value();
}

}  // namespace

MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
    Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
    JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
    const char* debug_name, Builtin builtin, const AssemblerOptions& options,
    const ProfileDataFromFile* profile_data) {
  OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
                                kind);
  info.set_builtin(builtin);

  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  NodeOriginTable node_origins(graph);
  JumpOptimizationInfo jump_opt;
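  // Far-jump rewriting is only applied when generating the snapshot
  // (serializer enabled), requires --turbo-rewrite-far-jumps, and is skipped
  // under --turbo-profiling.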
  bool should_optimize_jumps = isolate->serializer_enabled() &&
                               FLAG_turbo_rewrite_far_jumps &&
                               !FLAG_turbo_profiling;
  PipelineData data(&zone_stats, &info, isolate, isolate->allocator(), graph,
                    jsgraph, nullptr, source_positions, &node_origins,
                    should_optimize_jumps ? &jump_opt : nullptr, options,
                    profile_data);
  PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
  RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
  data.set_verify_graph(FLAG_verify_csa);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("V8.TFStubCodegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json() || info.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling " << debug_name << " using TurboFan" << std::endl;
    if (info.trace_turbo_json()) {
      TurboJsonFile json_of(&info, std::ios_base::trunc);
      json_of << "{\"function\" : ";
      JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
                              Handle<Script>(), isolate,
                              Handle<SharedFunctionInfo>());
      json_of << ",\n\"phases\":[";
    }
    pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
  }

  pipeline.Run<CsaEarlyOptimizationPhase>();
  pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);

  // Optimize memory access and allocation operations.
  pipeline.Run<MemoryOptimizationPhase>();
  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);

  pipeline.Run<CsaOptimizationPhase>(true);
  pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);

  pipeline.Run<DecompressionOptimizationPhase>();
  pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
                             true);

  pipeline.Run<BranchConditionDuplicationPhase>();
  pipeline.RunPrintAndVerify(BranchConditionDuplicationPhase::phase_name(),
                             true);

  pipeline.Run<VerifyGraphPhase>(true);

  int graph_hash_before_scheduling = 0;
  if (FLAG_turbo_profiling || profile_data != nullptr) {
    graph_hash_before_scheduling = HashGraphForPGO(data.graph());
  }

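  // If the profile was collected from a different version of this builtin
  // (graph hash mismatch), drop it rather than apply stale data.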
  if (profile_data != nullptr &&
      profile_data->hash() != graph_hash_before_scheduling) {
    PrintF("Rejected profile data for %s due to function change\n", debug_name);
    profile_data = nullptr;
    data.set_profile_data(profile_data);
  }

  pipeline.ComputeScheduledGraph();
  DCHECK_NOT_NULL(data.schedule());

  // First run code generation on a copy of the pipeline, in order to be able
  // to repeat it for jump optimization. The first run has to happen on a
  // temporary pipeline to avoid deletion of zones on the main pipeline.
  PipelineData second_data(&zone_stats, &info, isolate, isolate->allocator(),
                           data.graph(), data.jsgraph(), data.schedule(),
                           data.source_positions(), data.node_origins(),
                           data.jump_optimization_info(), options,
                           profile_data);
  PipelineJobScope second_scope(&second_data,
                                isolate->counters()->runtime_call_stats());
  second_data.set_verify_graph(FLAG_verify_csa);
  PipelineImpl second_pipeline(&second_data);
  second_pipeline.SelectInstructionsAndAssemble(call_descriptor);

  if (FLAG_turbo_profiling) {
    info.profiler_data()->SetHash(graph_hash_before_scheduling);
  }

  if (jump_opt.is_optimizable()) {
    jump_opt.set_optimizing();
    return pipeline.GenerateCode(call_descriptor);
  } else {
    return second_pipeline.FinalizeCode();
  }
}

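// Helper for printing the code generator's block-start offsets as a JSON
// "blockIdToOffset" map in Turbolizer traces.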
struct BlockStartsAsJSON {
  const ZoneVector<int>* block_starts;
};

std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
  out << ", \"blockIdToOffset\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.block_starts->size(); ++i) {
    if (need_comma) out << ", ";
    int offset = (*s.block_starts)[i];
    out << "\"" << i << "\":" << offset;
    need_comma = true;
  }
  out << "},";
  return out;
}

#if V8_ENABLE_WEBASSEMBLY
// static
wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
    CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
    const char* debug_name, const AssemblerOptions& options,
    SourcePositionTable* source_positions) {
  Graph* graph = mcgraph->graph();
  OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
                                kind);
  // Construct a pipeline for scheduling and code generation.
  wasm::WasmEngine* wasm_engine = wasm::GetWasmEngine();
  ZoneStats zone_stats(wasm_engine->allocator());
  NodeOriginTable* node_positions = graph->zone()->New<NodeOriginTable>(graph);
  PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
                    source_positions, node_positions, options);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json() || info.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling method " << info.GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  if (info.trace_turbo_graph()) {  // Simple textual RPO.
    StdoutStream{} << "-- wasm stub " << CodeKindToString(kind) << " graph -- "
                   << std::endl
                   << AsRPO(*graph);
  }

  if (info.trace_turbo_json()) {
    TurboJsonFile json_of(&info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);

  pipeline.Run<MemoryOptimizationPhase>();
  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);

  pipeline.ComputeScheduledGraph();

  Linkage linkage(call_descriptor);
  CHECK(pipeline.SelectInstructions(&linkage));
  pipeline.AssembleCode(&linkage);

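  // Package the raw code and its metadata from the code generator into a
  // WasmCompilationResult.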
  CodeGenerator* code_generator = pipeline.code_generator();
  wasm::WasmCompilationResult result;
  code_generator->tasm()->GetCode(
      nullptr, &result.code_desc, code_generator->safepoint_table_builder(),
      static_cast<int>(code_generator->handler_table_offset()));
  result.instr_buffer = code_generator->tasm()->ReleaseBuffer();
  result.source_positions = code_generator->GetSourcePositionTable();
  result.protected_instructions_data =
      code_generator->GetProtectedInstructionsData();
  result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
  result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
  result.result_tier = wasm::ExecutionTier::kTurbofan;
  if (kind == CodeKind::WASM_TO_JS_FUNCTION) {
    result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
  }

  DCHECK(result.succeeded());

  if (info.trace_turbo_json()) {
    TurboJsonFile json_of(&info, std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&code_generator->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, disassembler_stream, result.code_desc.buffer,
        result.code_desc.buffer + result.code_desc.safepoint_table_offset,
        CodeReference(&result.code_desc));
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  if (info.trace_turbo_json() || info.trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << info.GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  return result;
}

// static
void Pipeline::GenerateCodeForWasmFunction(
    OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
    const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
    CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
    NodeOriginTable* node_origins, wasm::FunctionBody function_body,
    const wasm::WasmModule* module, int function_index,
    std::vector<compiler::WasmLoopInfo>* loop_info) {
  auto* wasm_engine = wasm::GetWasmEngine();
  base::TimeTicks start_time;
  if (V8_UNLIKELY(FLAG_trace_wasm_compilation_times)) {
    start_time = base::TimeTicks::Now();
  }
  ZoneStats zone_stats(wasm_engine->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(function_body, module, info, &zone_stats));
  PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
                    pipeline_statistics.get(), source_positions, node_origins,
                    WasmAssemblerOptions());

  PipelineImpl pipeline(&data);

  if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Begin compiling method " << data.info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);

  data.BeginPhaseKind("V8.WasmOptimization");
  if (FLAG_wasm_inlining) {
    pipeline.Run<WasmInliningPhase>(env, function_index, wire_bytes_storage,
                                    loop_info);
    pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
  }
  if (FLAG_wasm_loop_peeling) {
    pipeline.Run<WasmLoopPeelingPhase>(loop_info);
    pipeline.RunPrintAndVerify(WasmLoopPeelingPhase::phase_name(), true);
  }
  if (FLAG_wasm_loop_unrolling) {
    pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
    pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
  }
  const bool is_asm_js = is_asmjs_module(module);

  if (FLAG_wasm_opt || is_asm_js) {
    pipeline.Run<WasmOptimizationPhase>(is_asm_js);
    pipeline.RunPrintAndVerify(WasmOptimizationPhase::phase_name(), true);
  } else {
    pipeline.Run<WasmBaseOptimizationPhase>();
    pipeline.RunPrintAndVerify(WasmBaseOptimizationPhase::phase_name(), true);
  }

  pipeline.Run<MemoryOptimizationPhase>();
  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);

  if (FLAG_turbo_splitting && !is_asm_js) {
    data.info()->set_splitting();
  }

  if (data.node_origins()) {
    data.node_origins()->RemoveDecorator();
  }

  data.BeginPhaseKind("V8.InstructionSelection");
  pipeline.ComputeScheduledGraph();

  Linkage linkage(call_descriptor);
  if (!pipeline.SelectInstructions(&linkage)) return;
  pipeline.AssembleCode(&linkage);

  auto result = std::make_unique<wasm::WasmCompilationResult>();
  CodeGenerator* code_generator = pipeline.code_generator();
  code_generator->tasm()->GetCode(
      nullptr, &result->code_desc, code_generator->safepoint_table_builder(),
      static_cast<int>(code_generator->handler_table_offset()));

  result->instr_buffer = code_generator->tasm()->ReleaseBuffer();
  result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
  result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
  result->source_positions = code_generator->GetSourcePositionTable();
  result->protected_instructions_data =
      code_generator->GetProtectedInstructionsData();
  result->result_tier = wasm::ExecutionTier::kTurbofan;

  if (data.info()->trace_turbo_json()) {
    TurboJsonFile json_of(data.info(), std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&code_generator->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, disassembler_stream, result->code_desc.buffer,
        result->code_desc.buffer + result->code_desc.safepoint_table_offset,
        CodeReference(&result->code_desc));
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  if (data.info()->trace_turbo_json() || data.info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data.GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << data.info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }

  if (V8_UNLIKELY(FLAG_trace_wasm_compilation_times)) {
    base::TimeDelta time = base::TimeTicks::Now() - start_time;
    int codesize = result->code_desc.body_size();
    StdoutStream{} << "Compiled function "
                   << reinterpret_cast<const void*>(module) << "#"
                   << function_index << " using TurboFan, took "
                   << time.InMilliseconds() << " ms and "
                   << zone_stats.GetMaxAllocatedBytes() << " / "
                   << zone_stats.GetTotalAllocatedBytes()
                   << " max/total bytes; bodysize "
                   << function_body.end - function_body.start << " codesize "
                   << codesize << " name " << data.info()->GetDebugName().get()
                   << std::endl;
  }

  DCHECK(result->succeeded());
  info->SetWasmCompilationResult(std::move(result));
}
#endif  // V8_ENABLE_WEBASSEMBLY

// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    std::unique_ptr<JSHeapBroker>* out_broker) {
  ZoneStats zone_stats(isolate->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
                               &zone_stats));

  PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
  PipelineImpl pipeline(&data);

  Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));

  {
    CompilationHandleScope compilation_scope(isolate, info);
    CanonicalHandleScopeForTurbofan canonical(isolate, info);
    info->ReopenHandlesInNewHandleScope(isolate);
    pipeline.InitializeHeapBroker();
  }

  {
    LocalIsolateScope local_isolate_scope(data.broker(), info,
                                          isolate->main_thread_local_isolate());
    if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
    // We selectively Unpark inside OptimizeGraph.
    if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();

    pipeline.AssembleCode(&linkage);
  }

  const bool will_retire_broker = out_broker == nullptr;
  if (!will_retire_broker) {
    // If the broker is going to be kept alive, pass the persistent and the
    // canonical handles containers back to the JSHeapBroker since it will
    // outlive the OptimizedCompilationInfo.
    data.broker()->SetPersistentAndCopyCanonicalHandlesForTesting(
        info->DetachPersistentHandles(), info->DetachCanonicalHandles());
  }

  Handle<Code> code;
  if (pipeline.FinalizeCode(will_retire_broker).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    if (!will_retire_broker) *out_broker = data.ReleaseBroker();
    return code;
  }
  return MaybeHandle<Code>();
}

// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    CallDescriptor* call_descriptor, Graph* graph,
    const AssemblerOptions& options, Schedule* schedule) {
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  NodeOriginTable* node_positions = info->zone()->New<NodeOriginTable>(graph);
  PipelineData data(&zone_stats, info, isolate, isolate->allocator(), graph,
                    nullptr, schedule, nullptr, node_positions, nullptr,
                    options, nullptr);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("V8.TFTestCodegen");
  }

  PipelineImpl pipeline(&data);

  if (info->trace_turbo_json()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info->GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }
  // TODO(rossberg): Should this really be untyped?
  pipeline.RunPrintAndVerify("V8.TFMachineCode", true);

  // Ensure we have a schedule.
  if (data.schedule() == nullptr) {
    pipeline.ComputeScheduledGraph();
  }

  Handle<Code> code;
  if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    return code;
  }
  return MaybeHandle<Code>();
}

// static
std::unique_ptr<TurbofanCompilationJob> Pipeline::NewCompilationJob(
    Isolate* isolate, Handle<JSFunction> function, CodeKind code_kind,
    bool has_script, BytecodeOffset osr_offset, JavaScriptFrame* osr_frame) {
  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
  return std::make_unique<PipelineCompilationJob>(
      isolate, shared, function, osr_offset, osr_frame, code_kind);
}

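// Test-only entry point that runs just the register-allocation part of the
// pipeline over a pre-built InstructionSequence and reports whether
// allocation succeeded.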
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
                                           InstructionSequence* sequence,
                                           bool use_mid_tier_register_allocator,
                                           bool run_verifier) {
  OptimizedCompilationInfo info(base::ArrayVector("testing"), sequence->zone(),
                                CodeKind::FOR_TESTING);
  ZoneStats zone_stats(sequence->isolate()->allocator());
  PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
  data.InitializeFrameData(nullptr);

  if (info.trace_turbo_json()) {
    TurboJsonFile json_of(&info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  PipelineImpl pipeline(&data);
  if (use_mid_tier_register_allocator) {
    pipeline.AllocateRegistersForMidTier(config, nullptr, run_verifier);
  } else {
    pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
  }

  return !data.compilation_failed();
}

void PipelineImpl::ComputeScheduledGraph() {
  PipelineData* data = this->data_;

  // We should only schedule the graph if it is not scheduled yet.
  DCHECK_NULL(data->schedule());

  Run<ComputeSchedulePhase>();
  TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
}

bool PipelineImpl::SelectInstructions(Linkage* linkage) {
  auto call_descriptor = linkage->GetIncomingDescriptor();
  PipelineData* data = this->data_;

  // We should have a scheduled graph.
  DCHECK_NOT_NULL(data->graph());
  DCHECK_NOT_NULL(data->schedule());

  if (FLAG_turbo_profiling) {
    UnparkedScopeIfNeeded unparked_scope(data->broker());
    data->info()->set_profiler_data(BasicBlockInstrumentor::Instrument(
        info(), data->graph(), data->schedule(), data->isolate()));
  }

  bool verify_stub_graph =
      data->verify_graph() ||
      (FLAG_turbo_verify_machine_graph != nullptr &&
       (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
        !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())));
  // Jump optimization runs instruction selection twice, but the instruction
  // selector mutates nodes like swapping the inputs of a load, which can
  // violate the machine graph verification rules. So we skip the second
  // verification on a graph that was already verified before.
  auto jump_opt = data->jump_optimization_info();
  if (jump_opt && jump_opt->is_optimizing()) {
    verify_stub_graph = false;
  }
  if (verify_stub_graph) {
    if (FLAG_trace_verify_csa) {
      UnparkedScopeIfNeeded scope(data->broker());
      AllowHandleDereference allow_deref;
      CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
      tracing_scope.stream()
          << "--------------------------------------------------\n"
          << "--- Verifying " << data->debug_name()
          << " generated by TurboFan\n"
          << "--------------------------------------------------\n"
          << *data->schedule()
          << "--------------------------------------------------\n"
          << "--- End of " << data->debug_name() << " generated by TurboFan\n"
          << "--------------------------------------------------\n";
    }
    // TODO(jgruber): The parameter is called is_stub but actually contains
    // something different. Update either the name or its contents.
    bool is_stub = !data->info()->IsOptimizing();
#if V8_ENABLE_WEBASSEMBLY
    if (data->info()->IsWasm()) is_stub = false;
#endif  // V8_ENABLE_WEBASSEMBLY
    Zone temp_zone(data->allocator(), kMachineGraphVerifierZoneName);
    MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage, is_stub,
                              data->debug_name(), &temp_zone);
  }

  data->InitializeInstructionSequence(call_descriptor);

  // Depending on which code path led us to this function, the frame may or
  // may not have been initialized. If it hasn't yet, initialize it now.
  if (!data->frame()) {
    data->InitializeFrameData(call_descriptor);
  }
  // Select and schedule instructions covering the scheduled graph.
  Run<InstructionSelectionPhase>(linkage);
  if (data->compilation_failed()) {
    info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
    data->EndPhaseKind();
    return false;
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboCfgFile tcf(isolate());
    tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
                 data->sequence());
  }

  if (info()->trace_turbo_json()) {
    std::ostringstream source_position_output;
    // Output source position information before the graph is deleted.
    if (data_->source_positions() != nullptr) {
      data_->source_positions()->PrintJson(source_position_output);
    } else {
      source_position_output << "{}";
    }
    source_position_output << ",\n\"NodeOrigins\" : ";
    data_->node_origins()->PrintJson(source_position_output);
    data_->set_source_position_output(source_position_output.str());
  }

  data->DeleteGraphZone();

  data->BeginPhaseKind("V8.TFRegisterAllocation");

  bool run_verifier = FLAG_turbo_verify_allocation;

  // Allocate registers.

  // This limit is chosen somewhat arbitrarily, by looking at a few bigger
  // WebAssembly programs, and choosing the limit such that functions that take
  // >100ms in register allocation are switched to mid-tier.
  static int kTopTierVirtualRegistersLimit = 8192;

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  std::unique_ptr<const RegisterConfiguration> restricted_config;
  // The mid-tier register allocator keeps values in stack slots for too long;
  // this is incompatible with left-trimming, so we cannot enable it for JS
  // functions.
  bool use_mid_tier_register_allocator =
      data->info()->code_kind() == CodeKind::WASM_FUNCTION &&
      (FLAG_turbo_force_mid_tier_regalloc ||
       (FLAG_turbo_use_mid_tier_regalloc_for_huge_functions &&
        data->sequence()->VirtualRegisterCount() >
            kTopTierVirtualRegistersLimit));

  if (call_descriptor->HasRestrictedAllocatableRegisters()) {
    RegList registers = call_descriptor->AllocatableRegisters();
    DCHECK_LT(0, registers.Count());
    restricted_config.reset(
        RegisterConfiguration::RestrictGeneralRegisters(registers));
    config = restricted_config.get();
    use_mid_tier_register_allocator = false;
  }
  if (use_mid_tier_register_allocator) {
    AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
  } else {
    AllocateRegistersForTopTier(config, call_descriptor, run_verifier);
  }

  // Verify the instruction sequence has the same hash in two stages.
  VerifyGeneratedCodeIsIdempotent();

  Run<FrameElisionPhase>();
  if (data->compilation_failed()) {
    info()->AbortOptimization(
        BailoutReason::kNotEnoughVirtualRegistersRegalloc);
    data->EndPhaseKind();
    return false;
  }

  // TODO(mtrofin): move this off to the register allocator.
  bool generate_frame_at_start =
      data_->sequence()->instruction_blocks().front()->must_construct_frame();
  // Optimize jumps.
  if (FLAG_turbo_jt) {
    Run<JumpThreadingPhase>(generate_frame_at_start);
  }

  data->EndPhaseKind();

  return true;
}

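// Hashes the shape of the instruction sequence (block and virtual register
// counts, opcodes, operand counts, and representations). The collecting pass
// of jump optimization records this hash; the optimizing pass checks that the
// second code generation run produced the same hash.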
void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
  PipelineData* data = this->data_;
  JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
  if (jump_opt == nullptr) return;

  InstructionSequence* code = data->sequence();
  int instruction_blocks = code->InstructionBlockCount();
  int virtual_registers = code->VirtualRegisterCount();
  size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
  for (auto instr : *code) {
    hash_code = base::hash_combine(hash_code, instr->opcode(),
                                   instr->InputCount(), instr->OutputCount());
  }
  for (int i = 0; i < virtual_registers; i++) {
    hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
  }
  if (jump_opt->is_collecting()) {
    jump_opt->set_hash_code(hash_code);
  } else {
    CHECK_EQ(hash_code, jump_opt->hash_code());
  }
}

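// Helper for printing per-instruction PC offsets (gap, arch instruction, and
// condition) as JSON for Turbolizer.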
struct InstructionStartsAsJSON {
  const ZoneVector<TurbolizerInstructionStartInfo>* instr_starts;
};

std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
  out << ", \"instructionOffsetToPCOffset\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.instr_starts->size(); ++i) {
    if (need_comma) out << ", ";
    const TurbolizerInstructionStartInfo& info = (*s.instr_starts)[i];
    out << "\"" << i << "\": {";
    out << "\"gap\": " << info.gap_pc_offset;
    out << ", \"arch\": " << info.arch_instr_pc_offset;
    out << ", \"condition\": " << info.condition_pc_offset;
    out << "}";
    need_comma = true;
  }
  out << "}";
  return out;
}

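// Helper for printing the code generator's section offsets (deopt checks,
// out-of-line code, pools, jump tables, ...) as JSON for Turbolizer.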
struct TurbolizerCodeOffsetsInfoAsJSON {
  const TurbolizerCodeOffsetsInfo* offsets_info;
};

std::ostream& operator<<(std::ostream& out,
                         const TurbolizerCodeOffsetsInfoAsJSON& s) {
  out << ", \"codeOffsetsInfo\": {";
  out << "\"codeStartRegisterCheck\": "
      << s.offsets_info->code_start_register_check << ", ";
  out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
  out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
  out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
  out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
      << ", ";
  out << "\"pools\": " << s.offsets_info->pools << ", ";
  out << "\"jumpTables\": " << s.offsets_info->jump_tables;
  out << "}";
  return out;
}

void PipelineImpl::AssembleCode(Linkage* linkage) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFCodeGeneration");
  data->InitializeCodeGenerator(linkage);

  UnparkedScopeIfNeeded unparked_scope(data->broker());

  Run<AssembleCodePhase>();
  if (data->info()->trace_turbo_json()) {
    TurboJsonFile json_of(data->info(), std::ios_base::app);
    json_of << "{\"name\":\"code generation\""
            << ", \"type\":\"instructions\""
            << InstructionStartsAsJSON{&data->code_generator()->instr_starts()}
            << TurbolizerCodeOffsetsInfoAsJSON{
                   &data->code_generator()->offsets_info()};
    json_of << "},\n";
  }
  data->DeleteInstructionZone();
  data->EndPhaseKind();
}

MaybeHandle<Code> PipelineImpl::FinalizeCode(bool retire_broker) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("V8.TFFinalizeCode");
  if (data->broker() && retire_broker) {
    data->broker()->Retire();
  }
  Run<FinalizeCodePhase>();

  MaybeHandle<Code> maybe_code = data->code();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    return maybe_code;
  }

  info()->SetCode(code);
  PrintCode(isolate(), code, info());

  if (info()->trace_turbo_json()) {
    TurboJsonFile json_of(info(), std::ios_base::app);

    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&data->code_generator()->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembly_stream;
    code->Disassemble(nullptr, disassembly_stream, isolate());
    std::string disassembly_string(disassembly_stream.str());
    for (const auto& c : disassembly_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n],\n";
    json_of << "\"nodePositions\":";
    json_of << data->source_position_output() << ",\n";
    JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
    json_of << "\n}";
  }
  if (info()->trace_turbo_json() || info()->trace_turbo_graph()) {
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream()
        << "---------------------------------------------------\n"
        << "Finished compiling method " << info()->GetDebugName().get()
        << " using TurboFan" << std::endl;
  }
  data->EndPhaseKind();
  return code;
}

bool PipelineImpl::SelectInstructionsAndAssemble(
    CallDescriptor* call_descriptor) {
  Linkage linkage(call_descriptor);

  // Perform instruction selection and register allocation.
  if (!SelectInstructions(&linkage)) return false;

  // Generate the final machine code.
  AssembleCode(&linkage);
  return true;
}

MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
  if (!SelectInstructionsAndAssemble(call_descriptor)) {
    return MaybeHandle<Code>();
  }
  return FinalizeCode();
}

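// Commits the compilation dependencies collected for this code object;
// returns false if a dependency was invalidated in the meantime and the code
// must be discarded.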
bool PipelineImpl::CommitDependencies(Handle<Code> code) {
  return data_->dependencies() == nullptr ||
         data_->dependencies()->Commit(code);
}

namespace {

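// Dumps the instruction sequence, plus the register allocation data when
// tracing JSON, to the Turbolizer file and/or the code tracer.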
void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
                   const char* phase_name) {
  if (info->trace_turbo_json()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\""
            << ",\"blocks\":" << InstructionSequenceAsJSON{data->sequence()}
            << ",\"register_allocation\":{"
            << RegisterAllocationDataAsJSON{*(data->register_allocation_data()),
                                            *(data->sequence())}
            << "}},\n";
  }
  if (info->trace_turbo_graph()) {
    UnparkedScopeIfNeeded scope(data->broker());
    AllowHandleDereference allow_deref;
    CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
    tracing_scope.stream() << "----- Instruction sequence " << phase_name
                           << " -----\n"
                           << *data->sequence();
  }
}

}  // namespace

void PipelineImpl::AllocateRegistersForTopTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = this->data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data_->sequence()->ValidateEdgeSplitForm();
  data_->sequence()->ValidateDeferredBlockEntryPaths();
  data_->sequence()->ValidateDeferredBlockExitPaths();
#endif

  RegisterAllocationFlags flags;
  if (data->info()->trace_turbo_allocation()) {
    flags |= RegisterAllocationFlag::kTraceAllocation;
  }
  data->InitializeTopTierRegisterAllocationData(config, call_descriptor, flags);

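  // Build the liveness information the linear-scan allocator operates on.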
  Run<MeetRegisterConstraintsPhase>();
  Run<ResolvePhisPhase>();
  Run<BuildLiveRangesPhase>();
  Run<BuildBundlesPhase>();

  TraceSequence(info(), data, "before register allocation");
  if (verifier != nullptr) {
    CHECK(!data->top_tier_register_allocation_data()
               ->ExistsUseWithoutDefinition());
    CHECK(data->top_tier_register_allocation_data()
              ->RangesDefinedInDeferredStayInDeferred());
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "PreAllocation", data->top_tier_register_allocation_data());
  }

  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();

  if (data->sequence()->HasFPVirtualRegisters()) {
    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
  }

  if (data->sequence()->HasSimd128VirtualRegisters() &&
      (kFPAliasing == AliasingKind::kIndependent)) {
    Run<AllocateSimd128RegistersPhase<LinearScanAllocator>>();
  }

  Run<DecideSpillingModePhase>();
  Run<AssignSpillSlotsPhase>();
  Run<CommitAssignmentPhase>();

  // TODO(chromium:725559): remove this check once we understand the cause of
  // the bug. We keep just the check at the end of the allocation.
  if (verifier != nullptr) {
    verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
  }

  Run<ConnectRangesPhase>();

  Run<ResolveControlFlowPhase>();

  Run<PopulateReferenceMapsPhase>();

  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData(
        "CodeGen", data->top_tier_register_allocation_data());
  }

  data->DeleteRegisterAllocationZone();
}

void PipelineImpl::AllocateRegistersForMidTier(
    const RegisterConfiguration* config, CallDescriptor* call_descriptor,
    bool run_verifier) {
  PipelineData* data = data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(
        new Zone(data->allocator(), kRegisterAllocatorVerifierZoneName));
    verifier = verifier_zone->New<RegisterAllocatorVerifier>(
        verifier_zone.get(), config, data->sequence(), data->frame());
  }

#ifdef DEBUG
  data->sequence()->ValidateEdgeSplitForm();
  data->sequence()->ValidateDeferredBlockEntryPaths();
  data->sequence()->ValidateDeferredBlockExitPaths();
#endif
  data->InitializeMidTierRegisterAllocationData(config, call_descriptor);

  TraceSequence(info(), data, "before register allocation");

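  // The mid-tier allocator is a fixed sequence of phases: define outputs,
  // allocate registers, allocate spill slots, then record reference maps for
  // the GC.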
  Run<MidTierRegisterOutputDefinitionPhase>();

  Run<MidTierRegisterAllocatorPhase>();

  Run<MidTierSpillSlotAllocatorPhase>();

  Run<MidTierPopulateReferenceMapsPhase>();

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  data->DeleteRegisterAllocationZone();
}

OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }

Isolate* PipelineImpl::isolate() const { return data_->isolate(); }

CodeGenerator* PipelineImpl::code_generator() const {
  return data_->code_generator();
}

ObserveNodeManager* PipelineImpl::observe_node_manager() const {
  return data_->observe_node_manager();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8