// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"

#include <fstream>  // NOLINT(readability/streams)
#include <iostream>
#include <memory>
#include <sstream>

#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/bootstrapper.h"
#include "src/code-tracer.h"
#include "src/compiler.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/live-range-separator.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/register-allocator-verifier.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
#include "src/disassembler.h"
#include "src/isolate-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/optimized-compilation-info.h"
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/utils.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {

namespace trap_handler {
struct ProtectedInstructionData;
}  // namespace trap_handler

namespace compiler {

// Turbofan can only handle 2^16 control inputs. Since each control flow split
// requires at least two bytes (jump and offset), we limit the bytecode size
// to 128K bytes.
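// (128 * 1024 bytes) / (2 bytes per split) = 65536 = 2^16 splits.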
const int kMaxBytecodeSizeForTurbofan = 128 * 1024;

class PipelineData {
 public:
  // For main entry point.
  PipelineData(ZoneStats* zone_stats, Isolate* isolate,
               OptimizedCompilationInfo* info,
               PipelineStatistics* pipeline_statistics)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        may_have_unverifiable_graph_(false),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        graph_zone_(graph_zone_scope_.zone()),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, ZONE_NAME),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {
    PhaseScope scope(pipeline_statistics, "init pipeline data");
    graph_ = new (graph_zone_) Graph(graph_zone_);
    source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
    node_origins_ = info->trace_turbo_json_enabled()
                        ? new (graph_zone_) NodeOriginTable(graph_)
                        : nullptr;
    simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
    machine_ = new (graph_zone_) MachineOperatorBuilder(
        graph_zone_, MachineType::PointerRepresentation(),
        InstructionSelector::SupportedMachineOperatorFlags(),
        InstructionSelector::AlignmentRequirements());
    common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
    javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
    jsgraph_ = new (graph_zone_)
        JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
    js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_, codegen_zone_);
    dependencies_ =
        new (codegen_zone_) CompilationDependencies(isolate_, codegen_zone_);
  }
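  // Note: js_heap_broker_ and dependencies_ are allocated in codegen_zone_
  // rather than graph_zone_, so they survive DeleteGraphZone() and are only
  // reset by DeleteCodegenZone().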

  // For WebAssembly compile entry point.
  PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
               OptimizedCompilationInfo* info, MachineGraph* mcgraph,
               PipelineStatistics* pipeline_statistics,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins,
               int wasm_function_index,
               const AssemblerOptions& assembler_options)
      : isolate_(nullptr),
        wasm_engine_(wasm_engine),
        allocator_(wasm_engine->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        wasm_function_index_(wasm_function_index),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        graph_zone_(graph_zone_scope_.zone()),
        graph_(mcgraph->graph()),
        source_positions_(source_positions),
        node_origins_(node_origins),
        machine_(mcgraph->machine()),
        common_(mcgraph->common()),
        mcgraph_(mcgraph),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, ZONE_NAME),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(assembler_options) {}

  // For machine graph testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, Graph* graph, Schedule* schedule,
               SourcePositionTable* source_positions,
               NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
               const AssemblerOptions& assembler_options)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        graph_(graph),
        source_positions_(source_positions),
        node_origins_(node_origins),
        schedule_(schedule),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(instruction_zone_scope_.zone()),
        codegen_zone_scope_(zone_stats_, ZONE_NAME),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        jump_optimization_info_(jump_opt),
        assembler_options_(assembler_options) {}

  // For register allocation testing entry point.
  PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
               Isolate* isolate, InstructionSequence* sequence)
      : isolate_(isolate),
        allocator_(isolate->allocator()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(sequence->zone()),
        sequence_(sequence),
        codegen_zone_scope_(zone_stats_, ZONE_NAME),
        codegen_zone_(codegen_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        assembler_options_(AssemblerOptions::Default(isolate)) {}

  ~PipelineData() {
    delete code_generator_;  // Must happen before zones are destroyed.
    code_generator_ = nullptr;
    DeleteRegisterAllocationZone();
    DeleteInstructionZone();
    DeleteCodegenZone();
    DeleteGraphZone();
  }

  Isolate* isolate() const { return isolate_; }
  AccountingAllocator* allocator() const { return allocator_; }
  OptimizedCompilationInfo* info() const { return info_; }
  ZoneStats* zone_stats() const { return zone_stats_; }
  CompilationDependencies* dependencies() const { return dependencies_; }
  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
  OsrHelper* osr_helper() { return &(*osr_helper_); }
  bool compilation_failed() const { return compilation_failed_; }
  void set_compilation_failed() { compilation_failed_ = true; }

  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

  MaybeHandle<Code> code() { return code_; }
  void set_code(MaybeHandle<Code> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

  CodeGenerator* code_generator() const { return code_generator_; }

  // RawMachineAssembler generally produces graphs which cannot be verified.
  bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; }

  Zone* graph_zone() const { return graph_zone_; }
  Graph* graph() const { return graph_; }
  SourcePositionTable* source_positions() const { return source_positions_; }
  NodeOriginTable* node_origins() const { return node_origins_; }
  MachineOperatorBuilder* machine() const { return machine_; }
  CommonOperatorBuilder* common() const { return common_; }
  JSOperatorBuilder* javascript() const { return javascript_; }
  JSGraph* jsgraph() const { return jsgraph_; }
  MachineGraph* mcgraph() const { return mcgraph_; }
  Handle<Context> native_context() const {
    return handle(info()->native_context(), isolate());
  }
  Handle<JSGlobalObject> global_object() const {
    return handle(info()->global_object(), isolate());
  }

  JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }

  Schedule* schedule() const { return schedule_; }
  void set_schedule(Schedule* schedule) {
    DCHECK(!schedule_);
    schedule_ = schedule;
  }
  void reset_schedule() { schedule_ = nullptr; }

  Zone* instruction_zone() const { return instruction_zone_; }
  Zone* codegen_zone() const { return codegen_zone_; }
  InstructionSequence* sequence() const { return sequence_; }
  Frame* frame() const { return frame_; }

  Zone* register_allocation_zone() const { return register_allocation_zone_; }
  RegisterAllocationData* register_allocation_data() const {
    return register_allocation_data_;
  }

  BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
  void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
    profiler_data_ = profiler_data;
  }

  std::string const& source_position_output() const {
    return source_position_output_;
  }
  void set_source_position_output(std::string const& source_position_output) {
    source_position_output_ = source_position_output;
  }

  JumpOptimizationInfo* jump_optimization_info() const {
    return jump_optimization_info_;
  }

  const AssemblerOptions& assembler_options() const {
    return assembler_options_;
  }

  CodeTracer* GetCodeTracer() const {
    return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
                                   : wasm_engine_->GetCodeTracer();
  }

  void DeleteGraphZone() {
    if (graph_zone_ == nullptr) return;
    graph_zone_scope_.Destroy();
    graph_zone_ = nullptr;
    graph_ = nullptr;
    source_positions_ = nullptr;
    node_origins_ = nullptr;
    simplified_ = nullptr;
    machine_ = nullptr;
    common_ = nullptr;
    javascript_ = nullptr;
    jsgraph_ = nullptr;
    mcgraph_ = nullptr;
    schedule_ = nullptr;
  }

  void DeleteInstructionZone() {
    if (instruction_zone_ == nullptr) return;
    instruction_zone_scope_.Destroy();
    instruction_zone_ = nullptr;
    sequence_ = nullptr;
  }

  void DeleteCodegenZone() {
    if (codegen_zone_ == nullptr) return;
    codegen_zone_scope_.Destroy();
    codegen_zone_ = nullptr;
    dependencies_ = nullptr;
    js_heap_broker_ = nullptr;
    frame_ = nullptr;
  }

  void DeleteRegisterAllocationZone() {
    if (register_allocation_zone_ == nullptr) return;
    register_allocation_zone_scope_.Destroy();
    register_allocation_zone_ = nullptr;
    register_allocation_data_ = nullptr;
  }

  void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
    DCHECK_NULL(sequence_);
    InstructionBlocks* instruction_blocks =
        InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                  schedule());
    sequence_ = new (instruction_zone())
        InstructionSequence(isolate(), instruction_zone(), instruction_blocks);
    if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
      sequence_->instruction_blocks()[0]->mark_needs_frame();
    } else {
      DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
      DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters());
    }
  }

  void InitializeFrameData(CallDescriptor* call_descriptor) {
    DCHECK_NULL(frame_);
    int fixed_frame_size = 0;
    if (call_descriptor != nullptr) {
      fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
    }
    frame_ = new (codegen_zone()) Frame(fixed_frame_size);
  }

  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
                                        CallDescriptor* call_descriptor) {
    DCHECK_NULL(register_allocation_data_);
    register_allocation_data_ = new (register_allocation_zone())
        RegisterAllocationData(config, register_allocation_zone(), frame(),
                               sequence(), debug_name());
  }

  void InitializeOsrHelper() {
    DCHECK(!osr_helper_.has_value());
    osr_helper_.emplace(info());
  }

  void set_start_source_position(int position) {
    DCHECK_EQ(start_source_position_, kNoSourcePosition);
    start_source_position_ = position;
  }

  void InitializeCodeGenerator(Linkage* linkage) {
    DCHECK_NULL(code_generator_);

    code_generator_ = new CodeGenerator(
        codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
        osr_helper_, start_source_position_, jump_optimization_info_,
        info()->GetPoisoningMitigationLevel(), assembler_options_,
        info_->builtin_index());
  }

  void BeginPhaseKind(const char* phase_kind_name) {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
    }
  }

  void EndPhaseKind() {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->EndPhaseKind();
    }
  }

  const char* debug_name() const { return debug_name_.get(); }

  int wasm_function_index() const { return wasm_function_index_; }

 private:
  Isolate* const isolate_;
  wasm::WasmEngine* const wasm_engine_ = nullptr;
  AccountingAllocator* const allocator_;
  OptimizedCompilationInfo* const info_;
  std::unique_ptr<char[]> debug_name_;
  int wasm_function_index_ = -1;
  bool may_have_unverifiable_graph_ = true;
  ZoneStats* const zone_stats_;
  PipelineStatistics* pipeline_statistics_ = nullptr;
  bool compilation_failed_ = false;
  bool verify_graph_ = false;
  int start_source_position_ = kNoSourcePosition;
  base::Optional<OsrHelper> osr_helper_;
  MaybeHandle<Code> code_;
  CodeGenerator* code_generator_ = nullptr;

  // All objects in the following group of fields are allocated in graph_zone_.
  // They are all set to nullptr when the graph_zone_ is destroyed.
  ZoneStats::Scope graph_zone_scope_;
  Zone* graph_zone_ = nullptr;
  Graph* graph_ = nullptr;
  SourcePositionTable* source_positions_ = nullptr;
  NodeOriginTable* node_origins_ = nullptr;
  SimplifiedOperatorBuilder* simplified_ = nullptr;
  MachineOperatorBuilder* machine_ = nullptr;
  CommonOperatorBuilder* common_ = nullptr;
  JSOperatorBuilder* javascript_ = nullptr;
  JSGraph* jsgraph_ = nullptr;
  MachineGraph* mcgraph_ = nullptr;
  Schedule* schedule_ = nullptr;

  // All objects in the following group of fields are allocated in
  // instruction_zone_. They are all set to nullptr when the instruction_zone_
  // is destroyed.
  ZoneStats::Scope instruction_zone_scope_;
  Zone* instruction_zone_;
  InstructionSequence* sequence_ = nullptr;

  // All objects in the following group of fields are allocated in
  // codegen_zone_. They are all set to nullptr when the codegen_zone_
  // is destroyed.
  ZoneStats::Scope codegen_zone_scope_;
  Zone* codegen_zone_;
  CompilationDependencies* dependencies_ = nullptr;
  JSHeapBroker* js_heap_broker_ = nullptr;
  Frame* frame_ = nullptr;

  // All objects in the following group of fields are allocated in
  // register_allocation_zone_. They are all set to nullptr when the zone is
  // destroyed.
  ZoneStats::Scope register_allocation_zone_scope_;
  Zone* register_allocation_zone_;
  RegisterAllocationData* register_allocation_data_ = nullptr;

  // Basic block profiling support.
  BasicBlockProfiler::Data* profiler_data_ = nullptr;

  // Source position output for --trace-turbo.
  std::string source_position_output_;

  JumpOptimizationInfo* jump_optimization_info_ = nullptr;
  AssemblerOptions assembler_options_;

  DISALLOW_COPY_AND_ASSIGN(PipelineData);
};

class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases.
  template <typename Phase>
  void Run();
  template <typename Phase, typename Arg0>
  void Run(Arg0 arg_0);
  template <typename Phase, typename Arg0, typename Arg1>
  void Run(Arg0 arg_0, Arg1 arg_1);

  // Step A. Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // B. Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Substep B.1. Produce a scheduled graph.
  void ComputeScheduledGraph();

  // Substep B.2. Select instructions from a scheduled graph.
  bool SelectInstructions(Linkage* linkage);

  // Step C. Run the code assembly pass.
  void AssembleCode(Linkage* linkage);

  // Step D. Run the code finalization pass.
  MaybeHandle<Code> FinalizeCode();

  // Step E. Install any code dependencies.
  bool CommitDependencies(Handle<Code> code);

  void VerifyGeneratedCodeIsIdempotent();
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegisters(const RegisterConfiguration* config,
                         CallDescriptor* call_descriptor, bool run_verifier);

  OptimizedCompilationInfo* info() const;
  Isolate* isolate() const;
  CodeGenerator* code_generator() const;

 private:
  PipelineData* const data_;
};

namespace {

void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
                         int source_id, Handle<SharedFunctionInfo> shared) {
  if (!shared->script()->IsUndefined(isolate)) {
    Handle<Script> script(Script::cast(shared->script()), isolate);

    if (!script->source()->IsUndefined(isolate)) {
      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
      Object* source_name = script->name();
      OFStream os(tracing_scope.file());
      os << "--- FUNCTION SOURCE (";
      if (source_name->IsString()) {
        os << String::cast(source_name)->ToCString().get() << ":";
      }
      os << shared->DebugName()->ToCString().get() << ") id{";
      os << info->optimization_id() << "," << source_id << "} start{";
      os << shared->StartPosition() << "} ---\n";
      {
        DisallowHeapAllocation no_allocation;
        int start = shared->StartPosition();
        int len = shared->EndPosition() - start;
        String::SubStringRange source(String::cast(script->source()), start,
                                      len);
        for (const auto& c : source) {
          os << AsReversiblyEscapedUC16(c);
        }
      }

      os << "\n--- END ---\n";
    }
  }
}

// Print information for the given inlining: which function was inlined and
// where the inlining occurred.
void PrintInlinedFunctionInfo(
    OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
    int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
  CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
  OFStream os(tracing_scope.file());
  os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
     << info->optimization_id() << "," << source_id << "} AS " << inlining_id
     << " AT ";
  const SourcePosition position = h.position.position;
  if (position.IsKnown()) {
    os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
  } else {
    os << "<?>";
  }
  os << std::endl;
}

// Print the source of all functions that participated in this optimizing
// compilation. For inlined functions print source position of their inlining.
void PrintParticipatingSource(OptimizedCompilationInfo* info,
                              Isolate* isolate) {
  AllowDeferredHandleDereference allow_deference_for_print_code;

  SourceIdAssigner id_assigner(info->inlined_functions().size());
  PrintFunctionSource(info, isolate, -1, info->shared_info());
  const auto& inlined = info->inlined_functions();
  for (unsigned id = 0; id < inlined.size(); id++) {
    const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
    PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
    PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
  }
}

// Print the code after compiling it.
void PrintCode(Isolate* isolate, Handle<Code> code,
               OptimizedCompilationInfo* info) {
  if (FLAG_print_opt_source && info->IsOptimizing()) {
    PrintParticipatingSource(info, isolate);
  }

#ifdef ENABLE_DISASSEMBLER
  AllowDeferredHandleDereference allow_deference_for_print_code;
  bool print_code =
      isolate->bootstrapper()->IsActive()
          ? FLAG_print_builtin_code && info->shared_info()->PassesFilter(
                                           FLAG_print_builtin_code_filter)
          : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
             (info->IsOptimizing() && FLAG_print_opt_code &&
              info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
  if (print_code) {
    std::unique_ptr<char[]> debug_name = info->GetDebugName();
    CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
    OFStream os(tracing_scope.file());

    // Print the source code if available.
    bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      if (shared->script()->IsScript() &&
          !Script::cast(shared->script())->source()->IsUndefined(isolate)) {
        os << "--- Raw source ---\n";
        StringCharacterStream stream(
            String::cast(Script::cast(shared->script())->source()),
            shared->StartPosition());
        // fun->end_position() points to the last character in the stream. We
        // need to compensate by adding one to calculate the length.
        int source_len = shared->EndPosition() - shared->StartPosition() + 1;
        for (int i = 0; i < source_len; i++) {
          if (stream.HasMore()) {
            os << AsReversiblyEscapedUC16(stream.GetNext());
          }
        }
        os << "\n\n";
      }
    }
    if (info->IsOptimizing()) {
      os << "--- Optimized code ---\n"
         << "optimization_id = " << info->optimization_id() << "\n";
    } else {
      os << "--- Code ---\n";
    }
    if (print_source) {
      Handle<SharedFunctionInfo> shared = info->shared_info();
      os << "source_position = " << shared->StartPosition() << "\n";
    }
    code->Disassemble(debug_name.get(), os);
    os << "--- End code ---\n";
  }
#endif  // ENABLE_DISASSEMBLER
}

struct TurboCfgFile : public std::ofstream {
  explicit TurboCfgFile(Isolate* isolate)
      : std::ofstream(isolate->GetTurboCfgFileName().c_str(),
                      std::ios_base::app) {}
};

void TraceSchedule(OptimizedCompilationInfo* info, PipelineData* data,
                   Schedule* schedule, const char* phase_name) {
  if (info->trace_turbo_json_enabled()) {
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
            << ",\"data\":\"";
    std::stringstream schedule_stream;
    schedule_stream << *schedule;
    std::string schedule_string(schedule_stream.str());
    for (const auto& c : schedule_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\"},\n";
  }
  if (info->trace_turbo_graph_enabled() || FLAG_trace_turbo_scheduler) {
    AllowHandleDereference allow_deref;
    CodeTracer::Scope tracing_scope(data->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "-- Schedule --------------------------------------\n" << *schedule;
  }
}

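// Wraps a Reducer so that each Reduce() call runs inside a
// SourcePositionTable::Scope: nodes created by the wrapped reducer are
// attributed to the source position of the node being reduced.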
class SourcePositionWrapper final : public Reducer {
 public:
  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
      : reducer_(reducer), table_(table) {}
  ~SourcePositionWrapper() final {}

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    SourcePosition const pos = table_->GetSourcePosition(node);
    SourcePositionTable::Scope position(table_, pos);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  SourcePositionTable* const table_;

  DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
};

class NodeOriginsWrapper final : public Reducer {
 public:
  NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
      : reducer_(reducer), table_(table) {}
  ~NodeOriginsWrapper() final {}

  const char* reducer_name() const override { return reducer_->reducer_name(); }

  Reduction Reduce(Node* node) final {
    NodeOriginTable::Scope position(table_, reducer_name(), node);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  NodeOriginTable* const table_;

  DISALLOW_COPY_AND_ASSIGN(NodeOriginsWrapper);
};

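// Registers |reducer| with |graph_reducer|, optionally wrapping it first:
// when source positions are enabled it is wrapped in a SourcePositionWrapper,
// and when JSON tracing is enabled it is additionally wrapped in a
// NodeOriginsWrapper (so the origins wrapper is outermost when both apply).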
void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
                Reducer* reducer) {
  if (data->info()->is_source_positions_enabled()) {
    void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
    SourcePositionWrapper* const wrapper =
        new (buffer) SourcePositionWrapper(reducer, data->source_positions());
    reducer = wrapper;
  }
  if (data->info()->trace_turbo_json_enabled()) {
    void* const buffer = data->graph_zone()->New(sizeof(NodeOriginsWrapper));
    NodeOriginsWrapper* const wrapper =
        new (buffer) NodeOriginsWrapper(reducer, data->node_origins());
    reducer = wrapper;
  }

  graph_reducer->AddReducer(reducer);
}

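// Scope object for running a single pipeline phase: it opens a PhaseScope for
// statistics (when a phase name is given), a temporary zone that lives only
// for the duration of the phase, and a NodeOriginTable::PhaseScope so node
// origins recorded during the phase are attributed to it.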
class PipelineRunScope {
 public:
  PipelineRunScope(PipelineData* data, const char* phase_name)
      : phase_scope_(
            phase_name == nullptr ? nullptr : data->pipeline_statistics(),
            phase_name),
        zone_scope_(data->zone_stats(), ZONE_NAME),
        origin_scope_(data->node_origins(), phase_name) {}

  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
  NodeOriginTable::PhaseScope origin_scope_;
};

PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
                                             OptimizedCompilationInfo* info,
                                             Isolate* isolate,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics =
        new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("initializing");
  }

  if (info->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\" : ";
    JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
                            info->shared_info());
    json_of << ",\n\"phases\":[";
  }

  return pipeline_statistics;
}

PipelineStatistics* CreatePipelineStatistics(wasm::WasmEngine* wasm_engine,
                                             wasm::FunctionBody function_body,
                                             wasm::WasmModule* wasm_module,
                                             OptimizedCompilationInfo* info,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  if (FLAG_turbo_stats_wasm) {
    pipeline_statistics = new PipelineStatistics(
        info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
    pipeline_statistics->BeginPhaseKind("initializing");
  }

  if (info->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    std::unique_ptr<char[]> function_name = info->GetDebugName();
    json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
    AccountingAllocator allocator;
    std::ostringstream disassembly;
    std::vector<int> source_positions;
    wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
                           wasm::kPrintLocals, disassembly, &source_positions);
    for (const auto& c : disassembly.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
    bool insert_comma = false;
    for (auto val : source_positions) {
      if (insert_comma) {
        json_of << ", ";
      }
      json_of << val;
      insert_comma = true;
    }
    json_of << "],\n\"phases\":[";
  }

  return pipeline_statistics;
}

}  // namespace

class PipelineCompilationJob final : public OptimizedCompilationJob {
 public:
  PipelineCompilationJob(Isolate* isolate,
                         Handle<SharedFunctionInfo> shared_info,
                         Handle<JSFunction> function)
      // Note that the OptimizedCompilationInfo is not initialized at the time
      // we pass it to the CompilationJob constructor, but it is not
      // dereferenced there.
      : OptimizedCompilationJob(
            function->GetIsolate()->stack_guard()->real_climit(),
            &compilation_info_, "TurboFan"),
        zone_(function->GetIsolate()->allocator(), ZONE_NAME),
        zone_stats_(function->GetIsolate()->allocator()),
        compilation_info_(&zone_, function->GetIsolate(), shared_info,
                          function),
        pipeline_statistics_(CreatePipelineStatistics(
            handle(Script::cast(shared_info->script()), isolate),
            compilation_info(), function->GetIsolate(), &zone_stats_)),
        data_(&zone_stats_, function->GetIsolate(), compilation_info(),
              pipeline_statistics_.get()),
        pipeline_(&data_),
        linkage_(nullptr) {}

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl(Isolate* isolate) final;

  // Registers weak objects embedded in the optimized code with the heap.
  void RegisterWeakObjectsInOptimizedCode(Handle<Code> code, Isolate* isolate);

 private:
  Zone zone_;
  ZoneStats zone_stats_;
  OptimizedCompilationInfo compilation_info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;

  DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};

PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  if (compilation_info()->shared_info()->GetBytecodeArray()->length() >
      kMaxBytecodeSizeForTurbofan) {
    return AbortOptimization(BailoutReason::kFunctionTooBig);
  }

  if (!FLAG_always_opt) {
    compilation_info()->MarkAsBailoutOnUninitialized();
  }
  if (FLAG_turbo_loop_peeling) {
    compilation_info()->MarkAsLoopPeelingEnabled();
  }
  if (FLAG_turbo_inlining) {
    compilation_info()->MarkAsInliningEnabled();
  }
  if (FLAG_inline_accessors) {
    compilation_info()->MarkAsAccessorInliningEnabled();
  }

  // Compute and set poisoning level.
  PoisoningMitigationLevel load_poisoning =
      PoisoningMitigationLevel::kDontPoison;
  if (FLAG_branch_load_poisoning) {
    load_poisoning = PoisoningMitigationLevel::kPoisonAll;
  } else if (FLAG_untrusted_code_mitigations) {
    load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
  }
  compilation_info()->SetPoisoningMitigationLevel(load_poisoning);

  if (FLAG_turbo_allocation_folding) {
    compilation_info()->MarkAsAllocationFoldingEnabled();
  }

  if (compilation_info()->closure()->feedback_cell()->map() ==
      ReadOnlyRoots(isolate).one_closure_cell_map()) {
    compilation_info()->MarkAsFunctionContextSpecializing();
  }

  data_.set_start_source_position(
      compilation_info()->shared_info()->StartPosition());

  linkage_ = new (compilation_info()->zone()) Linkage(
      Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));

  if (!pipeline_.CreateGraph()) {
    if (isolate->has_pending_exception()) return FAILED;  // Stack overflowed.
    return AbortOptimization(BailoutReason::kGraphBuildingFailed);
  }

  if (compilation_info()->is_osr()) data_.InitializeOsrHelper();

  // Make sure that we have generated the maximal number of deopt entries.
  // This is in order to avoid triggering the generation of deopt entries later
  // during code assembly.
  Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
  if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
  pipeline_.AssembleCode(linkage_);
  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
  MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
      return AbortOptimization(BailoutReason::kCodeGenerationFailed);
    }
    return FAILED;
  }
  if (!pipeline_.CommitDependencies(code)) {
    return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
  }

  compilation_info()->SetCode(code);
  compilation_info()->context()->native_context()->AddOptimizedCode(*code);
  RegisterWeakObjectsInOptimizedCode(code, isolate);
  return SUCCEEDED;
}

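// Note: of the weak objects embedded in the code, only Maps receive special
// treatment here (Heap::AddRetainedMap); afterwards the code object is simply
// flagged as possibly containing weak objects.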
void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
    Handle<Code> code, Isolate* isolate) {
  DCHECK(code->is_optimized_code());
  std::vector<Handle<Map>> maps;
  {
    DisallowHeapAllocation no_gc;
    int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
    for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
      RelocInfo::Mode mode = it.rinfo()->rmode();
      if (mode == RelocInfo::EMBEDDED_OBJECT &&
          code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
        Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
                                  isolate);
        if (object->IsMap()) {
          maps.push_back(Handle<Map>::cast(object));
        }
      }
    }
  }
  for (Handle<Map> map : maps) {
    isolate->heap()->AddRetainedMap(map);
  }
  code->set_can_have_weak_objects(true);
}

// The stack limit used during compilation limits the recursion depth in,
// e.g., AST walking. No such recursion happens in WASM compilations.
constexpr uintptr_t kNoStackLimit = 0;

class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
 public:
  explicit PipelineWasmCompilationJob(
      OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
      MachineGraph* mcgraph, CallDescriptor* call_descriptor,
      SourcePositionTable* source_positions, NodeOriginTable* node_origins,
      wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
      wasm::NativeModule* native_module, int function_index, bool asmjs_origin)
      : OptimizedCompilationJob(kNoStackLimit, info, "TurboFan",
                                State::kReadyToExecute),
        zone_stats_(wasm_engine->allocator()),
        pipeline_statistics_(CreatePipelineStatistics(
            wasm_engine, function_body, wasm_module, info, &zone_stats_)),
        data_(&zone_stats_, wasm_engine, info, mcgraph,
              pipeline_statistics_.get(), source_positions, node_origins,
              function_index, WasmAssemblerOptions()),
        pipeline_(&data_),
        linkage_(call_descriptor),
        native_module_(native_module),
        asmjs_origin_(asmjs_origin) {}

 protected:
  Status PrepareJobImpl(Isolate* isolate) final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl(Isolate* isolate) final;

 private:
  ZoneStats zone_stats_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage linkage_;
  wasm::NativeModule* native_module_;
  bool asmjs_origin_;
};

PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  UNREACHABLE();  // Prepare should always be skipped for WasmCompilationJob.
  return SUCCEEDED;
}

PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::ExecuteJobImpl() {
  pipeline_.RunPrintAndVerify("Machine", true);

  PipelineData* data = &data_;
  data->BeginPhaseKind("wasm optimization");
  if (FLAG_wasm_opt || asmjs_origin_) {
    PipelineRunScope scope(data, "wasm full optimization");
    GraphReducer graph_reducer(scope.zone(), data->graph(),
                               data->mcgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), scope.zone());
    ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
    MachineOperatorReducer machine_reducer(data->mcgraph(), asmjs_origin_);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->js_heap_broker(), data->common(),
                                         data->machine(), scope.zone());
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  } else {
    PipelineRunScope scope(data, "wasm base optimization");
    GraphReducer graph_reducer(scope.zone(), data->graph(),
                               data->mcgraph()->Dead());
    ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
  pipeline_.RunPrintAndVerify("wasm optimization", true);

  if (data_.node_origins()) {
    data_.node_origins()->RemoveDecorator();
  }

  pipeline_.ComputeScheduledGraph();
  if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
  pipeline_.AssembleCode(&linkage_);

  CodeGenerator* code_generator = pipeline_.code_generator();
  CodeDesc code_desc;
  code_generator->tasm()->GetCode(nullptr, &code_desc);

  wasm::WasmCode* code = native_module_->AddCode(
      data_.wasm_function_index(), code_desc,
      code_generator->frame()->GetTotalFrameSlotCount(),
      code_generator->GetSafepointTableOffset(),
      code_generator->GetHandlerTableOffset(),
      code_generator->GetProtectedInstructions(),
      code_generator->GetSourcePositionTable(), wasm::WasmCode::kTurbofan);

  if (data_.info()->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(data_.info(), std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, &disassembler_stream, code->instructions().start(),
        code->instructions().start() + code->safepoint_table_offset(),
        CodeReference(code));
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  compilation_info()->SetCode(code);

  return SUCCEEDED;
}

PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
    Isolate* isolate) {
  UNREACHABLE();  // Finalize should always be skipped for WasmCompilationJob.
  return SUCCEEDED;
}

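// Each phase runs inside a PipelineRunScope: statistics are recorded under
// Phase::phase_name(), and the phase gets a temporary zone that is destroyed
// as soon as Run() returns.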
template <typename Phase>
void PipelineImpl::Run() {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone());
}

template <typename Phase, typename Arg0>
void PipelineImpl::Run(Arg0 arg_0) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), arg_0);
}

template <typename Phase, typename Arg0, typename Arg1>
void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), arg_0, arg_1);
}

struct GraphBuilderPhase {
  static const char* phase_name() { return "bytecode graph builder"; }

  void Run(PipelineData* data, Zone* temp_zone) {
    JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
    if (data->info()->is_bailout_on_uninitialized()) {
      flags |= JSTypeHintLowering::kBailoutOnUninitialized;
    }
    CallFrequency frequency = CallFrequency(1.0f);
    BytecodeGraphBuilder graph_builder(
        temp_zone, data->info()->shared_info(),
        handle(data->info()->closure()->feedback_vector(), data->isolate()),
        data->info()->osr_offset(), data->jsgraph(), frequency,
        data->source_positions(), data->native_context(),
        SourcePosition::kNotInlined, flags, true,
        data->info()->is_analyze_environment_liveness());
    graph_builder.CreateGraph();
  }
};

namespace {

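// Walks the context chain outward from |closure| until the native context is
// reached; if a module context is found on the way, returns it together with
// its distance from the closure's own context.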
GetModuleContext(Handle<JSFunction> closure)1159 Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
1160   Context* current = closure->context();
1161   size_t distance = 0;
1162   while (!current->IsNativeContext()) {
1163     if (current->IsModuleContext()) {
1164       return Just(
1165           OuterContext(handle(current, current->GetIsolate()), distance));
1166     }
1167     current = current->previous();
1168     distance++;
1169   }
1170   return Nothing<OuterContext>();
1171 }
1172 
ChooseSpecializationContext(Isolate * isolate,OptimizedCompilationInfo * info)1173 Maybe<OuterContext> ChooseSpecializationContext(
1174     Isolate* isolate, OptimizedCompilationInfo* info) {
1175   if (info->is_function_context_specializing()) {
1176     DCHECK(info->has_context());
1177     return Just(OuterContext(handle(info->context(), isolate), 0));
1178   }
1179   return GetModuleContext(info->closure());
1180 }
1181 
1182 }  // anonymous namespace
1183 
1184 struct InliningPhase {
phase_namev8::internal::compiler::InliningPhase1185   static const char* phase_name() { return "inlining"; }
1186 
Runv8::internal::compiler::InliningPhase1187   void Run(PipelineData* data, Zone* temp_zone) {
1188     Isolate* isolate = data->isolate();
1189     GraphReducer graph_reducer(temp_zone, data->graph(),
1190                                data->jsgraph()->Dead());
1191     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1192                                               data->common(), temp_zone);
1193     CheckpointElimination checkpoint_elimination(&graph_reducer);
1194     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1195                                          data->js_heap_broker(), data->common(),
1196                                          data->machine(), temp_zone);
1197     JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
1198                                data->js_heap_broker(),
1199                                data->info()->is_bailout_on_uninitialized()
1200                                    ? JSCallReducer::kBailoutOnUninitialized
1201                                    : JSCallReducer::kNoFlags,
1202                                data->native_context(), data->dependencies());
1203     JSContextSpecialization context_specialization(
1204         &graph_reducer, data->jsgraph(), data->js_heap_broker(),
1205         ChooseSpecializationContext(isolate, data->info()),
1206         data->info()->is_function_context_specializing()
1207             ? data->info()->closure()
1208             : MaybeHandle<JSFunction>());
1209     JSNativeContextSpecialization::Flags flags =
1210         JSNativeContextSpecialization::kNoFlags;
1211     if (data->info()->is_accessor_inlining_enabled()) {
1212       flags |= JSNativeContextSpecialization::kAccessorInliningEnabled;
1213     }
1214     if (data->info()->is_bailout_on_uninitialized()) {
1215       flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
1216     }
1217     JSNativeContextSpecialization native_context_specialization(
1218         &graph_reducer, data->jsgraph(), data->js_heap_broker(), flags,
1219         data->native_context(), data->dependencies(), temp_zone);
1220     JSInliningHeuristic inlining(
1221         &graph_reducer, data->info()->is_inlining_enabled()
1222                             ? JSInliningHeuristic::kGeneralInlining
1223                             : JSInliningHeuristic::kRestrictedInlining,
1224         temp_zone, data->info(), data->jsgraph(), data->source_positions());
1225     JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph());
1226     AddReducer(data, &graph_reducer, &dead_code_elimination);
1227     AddReducer(data, &graph_reducer, &checkpoint_elimination);
1228     AddReducer(data, &graph_reducer, &common_reducer);
1229     AddReducer(data, &graph_reducer, &native_context_specialization);
1230     AddReducer(data, &graph_reducer, &context_specialization);
1231     AddReducer(data, &graph_reducer, &intrinsic_lowering);
1232     AddReducer(data, &graph_reducer, &call_reducer);
1233     AddReducer(data, &graph_reducer, &inlining);
1234     graph_reducer.ReduceGraph();
1235   }
1236 };
1237 
1238 
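// Types every node reachable from the graph roots. When --turbo-loop-variable
// is enabled, induction-variable analysis runs first so that loop phis can be
// typed more precisely.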
1239 struct TyperPhase {
1240   static const char* phase_name() { return "typer"; }
1241 
1242   void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
1243     NodeVector roots(temp_zone);
1244     data->jsgraph()->GetCachedNodes(&roots);
1245     LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
1246                                          data->common(), temp_zone);
1247     if (FLAG_turbo_loop_variable) induction_vars.Run();
1248     typer->Run(roots, &induction_vars);
1249   }
1250 };
1251 
1252 struct UntyperPhase {
1253   static const char* phase_name() { return "untyper"; }
1254 
1255   void Run(PipelineData* data, Zone* temp_zone) {
1256     class RemoveTypeReducer final : public Reducer {
1257      public:
1258       const char* reducer_name() const override { return "RemoveTypeReducer"; }
1259       Reduction Reduce(Node* node) final {
1260         if (NodeProperties::IsTyped(node)) {
1261           NodeProperties::RemoveType(node);
1262           return Changed(node);
1263         }
1264         return NoChange();
1265       }
1266     };
1267 
1268     NodeVector roots(temp_zone);
1269     data->jsgraph()->GetCachedNodes(&roots);
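     // Cached JSGraph nodes may not be reachable from the graph end and would
     // be missed by the reducer below, so strip their types explicitly first.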
1270     for (Node* node : roots) {
1271       NodeProperties::RemoveType(node);
1272     }
1273 
1274     GraphReducer graph_reducer(temp_zone, data->graph(),
1275                                data->jsgraph()->Dead());
1276     RemoveTypeReducer remove_type_reducer;
1277     AddReducer(data, &graph_reducer, &remove_type_reducer);
1278     graph_reducer.ReduceGraph();
1279   }
1280 };
1281 
1282 struct CopyMetadataForConcurrentCompilePhase {
1283   static const char* phase_name() {
1284     return "copy metadata for concurrent compile";
1285   }
1286 
1287   void Run(PipelineData* data, Zone* temp_zone) {
1288     GraphReducer graph_reducer(temp_zone, data->graph(),
1289                                data->jsgraph()->Dead());
1290     JSHeapCopyReducer heap_copy_reducer(data->js_heap_broker());
1291     AddReducer(data, &graph_reducer, &heap_copy_reducer);
1292     graph_reducer.ReduceGraph();
1293     data->js_heap_broker()->StopSerializing();
1294   }
1295 };
1296 
1297 struct TypedLoweringPhase {
1298   static const char* phase_name() { return "typed lowering"; }
1299 
1300   void Run(PipelineData* data, Zone* temp_zone) {
1301     GraphReducer graph_reducer(temp_zone, data->graph(),
1302                                data->jsgraph()->Dead());
1303     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1304                                               data->common(), temp_zone);
1305     JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
1306                                      data->jsgraph(), data->js_heap_broker(),
1307                                      data->native_context(), temp_zone);
1308     JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
1309                                    data->js_heap_broker(), temp_zone);
1310     ConstantFoldingReducer constant_folding_reducer(
1311         &graph_reducer, data->jsgraph(), data->js_heap_broker());
1312     TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
1313                                          data->jsgraph(),
1314                                          data->js_heap_broker());
1315     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
1316                                              data->js_heap_broker());
1317     CheckpointElimination checkpoint_elimination(&graph_reducer);
1318     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1319                                          data->js_heap_broker(), data->common(),
1320                                          data->machine(), temp_zone);
1321     AddReducer(data, &graph_reducer, &dead_code_elimination);
1322     AddReducer(data, &graph_reducer, &create_lowering);
1323     AddReducer(data, &graph_reducer, &constant_folding_reducer);
1324     AddReducer(data, &graph_reducer, &typed_optimization);
1325     AddReducer(data, &graph_reducer, &typed_lowering);
1326     AddReducer(data, &graph_reducer, &simple_reducer);
1327     AddReducer(data, &graph_reducer, &checkpoint_elimination);
1328     AddReducer(data, &graph_reducer, &common_reducer);
1329     graph_reducer.ReduceGraph();
1330   }
1331 };
1332 
1333 
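// Performs escape analysis and scalar replacement: object allocations that
// provably do not escape are replaced by their field values.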
1334 struct EscapeAnalysisPhase {
1335   static const char* phase_name() { return "escape analysis"; }
1336 
1337   void Run(PipelineData* data, Zone* temp_zone) {
1338     EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
1339     escape_analysis.ReduceGraph();
1340     GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead());
1341     EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
1342                                          escape_analysis.analysis_result(),
1343                                          temp_zone);
1344     AddReducer(data, &reducer, &escape_reducer);
1345     reducer.ReduceGraph();
1346     // TODO(tebbi): Turn this into a debug mode check once we have confidence.
1347     escape_reducer.VerifyReplacement();
1348   }
1349 };
1350 
1351 struct SimplifiedLoweringPhase {
1352   static const char* phase_name() { return "simplified lowering"; }
1353 
1354   void Run(PipelineData* data, Zone* temp_zone) {
1355     SimplifiedLowering lowering(data->jsgraph(), data->js_heap_broker(),
1356                                 temp_zone, data->source_positions(),
1357                                 data->node_origins(),
1358                                 data->info()->GetPoisoningMitigationLevel());
1359     lowering.LowerAllNodes();
1360   }
1361 };
1362 
1363 struct LoopPeelingPhase {
1364   static const char* phase_name() { return "loop peeling"; }
1365 
1366   void Run(PipelineData* data, Zone* temp_zone) {
1367     GraphTrimmer trimmer(temp_zone, data->graph());
1368     NodeVector roots(temp_zone);
1369     data->jsgraph()->GetCachedNodes(&roots);
1370     trimmer.TrimGraph(roots.begin(), roots.end());
1371 
1372     LoopTree* loop_tree =
1373         LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
1374     LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
1375                data->source_positions(), data->node_origins())
1376         .PeelInnerLoopsOfTree();
1377   }
1378 };
1379 
1380 struct LoopExitEliminationPhase {
1381   static const char* phase_name() { return "loop exit elimination"; }
1382 
1383   void Run(PipelineData* data, Zone* temp_zone) {
1384     LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
1385   }
1386 };
1387 
1388 struct ConcurrentOptimizationPrepPhase {
1389   static const char* phase_name() { return "concurrency preparation"; }
1390 
1391   void Run(PipelineData* data, Zone* temp_zone) {
1392     // Make sure we cache these code stubs.
1393     data->jsgraph()->CEntryStubConstant(1);
1394     data->jsgraph()->CEntryStubConstant(2);
1395 
1396     // TODO(turbofan): Remove this line once the Array constructor code
1397     // is a proper builtin and no longer a CodeStub.
1398     data->jsgraph()->ArrayConstructorStubConstant();
1399 
1400     // This is needed for escape analysis.
1401     NodeProperties::SetType(
1402         data->jsgraph()->FalseConstant(),
1403         Type::HeapConstant(data->js_heap_broker(),
1404                            data->isolate()->factory()->false_value(),
1405                            data->jsgraph()->zone()));
1406     NodeProperties::SetType(
1407         data->jsgraph()->TrueConstant(),
1408         Type::HeapConstant(data->js_heap_broker(),
1409                            data->isolate()->factory()->true_value(),
1410                            data->jsgraph()->zone()));
1411   }
1412 };
1413 
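// Lowers the remaining JS-level operators to calls into builtins or the
// runtime; after this phase the graph should contain no JS operators.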
1414 struct GenericLoweringPhase {
1415   static const char* phase_name() { return "generic lowering"; }
1416 
1417   void Run(PipelineData* data, Zone* temp_zone) {
1418     GraphReducer graph_reducer(temp_zone, data->graph(),
1419                                data->jsgraph()->Dead());
1420     JSGenericLowering generic_lowering(data->jsgraph());
1421     AddReducer(data, &graph_reducer, &generic_lowering);
1422     graph_reducer.ReduceGraph();
1423   }
1424 };
1425 
1426 struct EarlyOptimizationPhase {
1427   static const char* phase_name() { return "early optimization"; }
1428 
1429   void Run(PipelineData* data, Zone* temp_zone) {
1430     GraphReducer graph_reducer(temp_zone, data->graph(),
1431                                data->jsgraph()->Dead());
1432     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1433                                               data->common(), temp_zone);
1434     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
1435                                              data->js_heap_broker());
1436     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1437     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1438     MachineOperatorReducer machine_reducer(data->jsgraph());
1439     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1440                                          data->js_heap_broker(), data->common(),
1441                                          data->machine(), temp_zone);
1442     AddReducer(data, &graph_reducer, &dead_code_elimination);
1443     AddReducer(data, &graph_reducer, &simple_reducer);
1444     AddReducer(data, &graph_reducer, &redundancy_elimination);
1445     AddReducer(data, &graph_reducer, &machine_reducer);
1446     AddReducer(data, &graph_reducer, &common_reducer);
1447     AddReducer(data, &graph_reducer, &value_numbering);
1448     graph_reducer.ReduceGraph();
1449   }
1450 };
1451 
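// Rewrites suitable chains of branches over the same value into switch
// constructs where that is profitable.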
1452 struct ControlFlowOptimizationPhase {
1453   static const char* phase_name() { return "control flow optimization"; }
1454 
1455   void Run(PipelineData* data, Zone* temp_zone) {
1456     ControlFlowOptimizer optimizer(data->graph(), data->common(),
1457                                    data->machine(), temp_zone);
1458     optimizer.Optimize();
1459   }
1460 };
1461 
1462 struct EffectControlLinearizationPhase {
1463   static const char* phase_name() { return "effect linearization"; }
1464 
1465   void Run(PipelineData* data, Zone* temp_zone) {
1466     {
1467       // The scheduler requires the graphs to be trimmed, so trim now.
1468       // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
1469       // graphs.
1470       GraphTrimmer trimmer(temp_zone, data->graph());
1471       NodeVector roots(temp_zone);
1472       data->jsgraph()->GetCachedNodes(&roots);
1473       trimmer.TrimGraph(roots.begin(), roots.end());
1474 
1475       // Schedule the graph without node splitting so that we can
1476       // fix the effect and control flow for nodes with low-level side
1477       // effects (such as changing representation to tagged or
1478       // 'floating' allocation regions.)
1479       Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
1480                                                       Scheduler::kTempSchedule);
1481       if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
1482       TraceSchedule(data->info(), data, schedule,
1483                     "effect linearization schedule");
1484 
1485       EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
1486           (data->info()->GetPoisoningMitigationLevel() !=
1487            PoisoningMitigationLevel::kDontPoison)
1488               ? EffectControlLinearizer::kMaskArrayIndex
1489               : EffectControlLinearizer::kDoNotMaskArrayIndex;
1490       // Post-pass for wiring the control/effects
1491       // - connect allocating representation changes into the control&effect
1492       //   chains and lower them,
1493       // - get rid of the region markers,
1494       // - introduce effect phis and rewire effects to get SSA again.
1495       EffectControlLinearizer linearizer(
1496           data->jsgraph(), schedule, temp_zone, data->source_positions(),
1497           data->node_origins(), mask_array_index);
1498       linearizer.Run();
1499     }
1500     {
1501       // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
1502       // run {DeadCodeElimination} to prune these parts of the graph.
1503       // Also, the following store-store elimination phase greatly benefits from
1504       // doing a common operator reducer and dead code elimination just before
1505       // it, to eliminate conditional deopts with a constant condition.
1506       GraphReducer graph_reducer(temp_zone, data->graph(),
1507                                  data->jsgraph()->Dead());
1508       DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1509                                                 data->common(), temp_zone);
1510       CommonOperatorReducer common_reducer(
1511           &graph_reducer, data->graph(), data->js_heap_broker(), data->common(),
1512           data->machine(), temp_zone);
1513       AddReducer(data, &graph_reducer, &dead_code_elimination);
1514       AddReducer(data, &graph_reducer, &common_reducer);
1515       graph_reducer.ReduceGraph();
1516     }
1517   }
1518 };
1519 
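// Removes stores that are provably overwritten before they can be observed.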
1520 struct StoreStoreEliminationPhase {
1521   static const char* phase_name() { return "store-store elimination"; }
1522 
1523   void Run(PipelineData* data, Zone* temp_zone) {
1524     GraphTrimmer trimmer(temp_zone, data->graph());
1525     NodeVector roots(temp_zone);
1526     data->jsgraph()->GetCachedNodes(&roots);
1527     trimmer.TrimGraph(roots.begin(), roots.end());
1528 
1529     StoreStoreElimination::Run(data->jsgraph(), temp_zone);
1530   }
1531 };
1532 
1533 struct LoadEliminationPhase {
1534   static const char* phase_name() { return "load elimination"; }
1535 
1536   void Run(PipelineData* data, Zone* temp_zone) {
1537     GraphReducer graph_reducer(temp_zone, data->graph(),
1538                                data->jsgraph()->Dead());
1539     BranchElimination branch_condition_elimination(&graph_reducer,
1540                                                    data->jsgraph(), temp_zone);
1541     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1542                                               data->common(), temp_zone);
1543     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1544     LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
1545                                      temp_zone);
1546     CheckpointElimination checkpoint_elimination(&graph_reducer);
1547     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1548     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1549                                          data->js_heap_broker(), data->common(),
1550                                          data->machine(), temp_zone);
1551     ConstantFoldingReducer constant_folding_reducer(
1552         &graph_reducer, data->jsgraph(), data->js_heap_broker());
1553     TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
1554                                                 data->js_heap_broker());
1555     AddReducer(data, &graph_reducer, &branch_condition_elimination);
1556     AddReducer(data, &graph_reducer, &dead_code_elimination);
1557     AddReducer(data, &graph_reducer, &redundancy_elimination);
1558     AddReducer(data, &graph_reducer, &load_elimination);
1559     AddReducer(data, &graph_reducer, &type_narrowing_reducer);
1560     AddReducer(data, &graph_reducer, &constant_folding_reducer);
1561     AddReducer(data, &graph_reducer, &checkpoint_elimination);
1562     AddReducer(data, &graph_reducer, &common_reducer);
1563     AddReducer(data, &graph_reducer, &value_numbering);
1564     graph_reducer.ReduceGraph();
1565   }
1566 };
1567 
1568 struct MemoryOptimizationPhase {
1569   static const char* phase_name() { return "memory optimization"; }
1570 
1571   void Run(PipelineData* data, Zone* temp_zone) {
1572     // The memory optimizer requires the graphs to be trimmed, so trim now.
1573     GraphTrimmer trimmer(temp_zone, data->graph());
1574     NodeVector roots(temp_zone);
1575     data->jsgraph()->GetCachedNodes(&roots);
1576     trimmer.TrimGraph(roots.begin(), roots.end());
1577 
1578     // Optimize allocations and load/store operations.
1579     MemoryOptimizer optimizer(
1580         data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
1581         data->info()->is_allocation_folding_enabled()
1582             ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
1583             : MemoryOptimizer::AllocationFolding::kDontAllocationFolding);
1584     optimizer.Optimize();
1585   }
1586 };
1587 
1588 struct LateOptimizationPhase {
1589   static const char* phase_name() { return "late optimization"; }
1590 
1591   void Run(PipelineData* data, Zone* temp_zone) {
1592     GraphReducer graph_reducer(temp_zone, data->graph(),
1593                                data->jsgraph()->Dead());
1594     BranchElimination branch_condition_elimination(&graph_reducer,
1595                                                    data->jsgraph(), temp_zone);
1596     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1597                                               data->common(), temp_zone);
1598     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1599     MachineOperatorReducer machine_reducer(data->jsgraph());
1600     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1601                                          data->js_heap_broker(), data->common(),
1602                                          data->machine(), temp_zone);
1603     SelectLowering select_lowering(data->jsgraph()->graph(),
1604                                    data->jsgraph()->common());
1605     AddReducer(data, &graph_reducer, &branch_condition_elimination);
1606     AddReducer(data, &graph_reducer, &dead_code_elimination);
1607     AddReducer(data, &graph_reducer, &machine_reducer);
1608     AddReducer(data, &graph_reducer, &common_reducer);
1609     AddReducer(data, &graph_reducer, &select_lowering);
1610     AddReducer(data, &graph_reducer, &value_numbering);
1611     graph_reducer.ReduceGraph();
1612   }
1613 };
1614 
1615 struct EarlyGraphTrimmingPhase {
1616   static const char* phase_name() { return "early trimming"; }
1617   void Run(PipelineData* data, Zone* temp_zone) {
1618     GraphTrimmer trimmer(temp_zone, data->graph());
1619     NodeVector roots(temp_zone);
1620     data->jsgraph()->GetCachedNodes(&roots);
1621     trimmer.TrimGraph(roots.begin(), roots.end());
1622   }
1623 };
1624 
1625 
1626 struct LateGraphTrimmingPhase {
1627   static const char* phase_name() { return "late graph trimming"; }
1628   void Run(PipelineData* data, Zone* temp_zone) {
1629     GraphTrimmer trimmer(temp_zone, data->graph());
1630     NodeVector roots(temp_zone);
1631     if (data->jsgraph()) {
1632       data->jsgraph()->GetCachedNodes(&roots);
1633     }
1634     trimmer.TrimGraph(roots.begin(), roots.end());
1635   }
1636 };
1637 
1638 
1639 struct ComputeSchedulePhase {
1640   static const char* phase_name() { return "scheduling"; }
1641 
1642   void Run(PipelineData* data, Zone* temp_zone) {
1643     Schedule* schedule = Scheduler::ComputeSchedule(
1644         temp_zone, data->graph(), data->info()->is_splitting_enabled()
1645                                       ? Scheduler::kSplitNodes
1646                                       : Scheduler::kNoFlags);
1647     if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
1648     data->set_schedule(schedule);
1649   }
1650 };
1651 
1652 struct InstructionRangesAsJSON {
1653   const InstructionSequence* sequence;
1654   const ZoneVector<std::pair<int, int>>* instr_origins;
1655 };
1656 
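// The instruction selector emits instructions in reverse order, so the
// recorded origin offsets count from the end of the sequence; translate them
// back into forward instruction indices for the Turbolizer JSON output.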
1657 std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
1658   const int max = static_cast<int>(s.sequence->LastInstructionIndex());
1659 
1660   out << ", \"nodeIdToInstructionRange\": {";
1661   bool need_comma = false;
1662   for (size_t i = 0; i < s.instr_origins->size(); ++i) {
1663     std::pair<int, int> offset = (*s.instr_origins)[i];
1664     if (offset.first == -1) continue;
1665     const int first = max - offset.first + 1;
1666     const int second = max - offset.second + 1;
1667     if (need_comma) out << ", ";
1668     out << "\"" << i << "\": [" << first << ", " << second << "]";
1669     need_comma = true;
1670   }
1671   out << "}";
1672   out << ", \"blockIdtoInstructionRange\": {";
1673   need_comma = false;
1674   for (auto block : s.sequence->instruction_blocks()) {
1675     if (need_comma) out << ", ";
1676     out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
1677         << block->code_end() << "]";
1678     need_comma = true;
1679   }
1680   out << "}";
1681   return out;
1682 }
1683 
1684 struct InstructionSelectionPhase {
1685   static const char* phase_name() { return "select instructions"; }
1686 
1687   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1688     InstructionSelector selector(
1689         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
1690         data->schedule(), data->source_positions(), data->frame(),
1691         data->info()->switch_jump_table_enabled()
1692             ? InstructionSelector::kEnableSwitchJumpTable
1693             : InstructionSelector::kDisableSwitchJumpTable,
1694         data->info()->is_source_positions_enabled()
1695             ? InstructionSelector::kAllSourcePositions
1696             : InstructionSelector::kCallSourcePositions,
1697         InstructionSelector::SupportedFeatures(),
1698         FLAG_turbo_instruction_scheduling
1699             ? InstructionSelector::kEnableScheduling
1700             : InstructionSelector::kDisableScheduling,
1701         !data->isolate() || data->isolate()->serializer_enabled()
1702             ? InstructionSelector::kDisableRootsRelativeAddressing
1703             : InstructionSelector::kEnableRootsRelativeAddressing,
1704         data->info()->GetPoisoningMitigationLevel(),
1705         data->info()->trace_turbo_json_enabled()
1706             ? InstructionSelector::kEnableTraceTurboJson
1707             : InstructionSelector::kDisableTraceTurboJson);
1708     if (!selector.SelectInstructions()) {
1709       data->set_compilation_failed();
1710     }
1711     if (data->info()->trace_turbo_json_enabled()) {
1712       TurboJsonFile json_of(data->info(), std::ios_base::app);
1713       json_of << "{\"name\":\"" << phase_name()
1714               << "\",\"type\":\"instructions\""
1715               << InstructionRangesAsJSON{data->sequence(),
1716                                          &selector.instr_origins()}
1717               << "},\n";
1718     }
1719   }
1720 };
1721 
1722 
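// The register allocation phases below run in sequence over the shared
// RegisterAllocationData: build constraints, resolve phis, construct and
// splinter live ranges, allocate general-purpose and floating-point
// registers, assign spill slots, and finally reconnect ranges and moves
// across block boundaries.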
1723 struct MeetRegisterConstraintsPhase {
1724   static const char* phase_name() { return "meet register constraints"; }
1725 
1726   void Run(PipelineData* data, Zone* temp_zone) {
1727     ConstraintBuilder builder(data->register_allocation_data());
1728     builder.MeetRegisterConstraints();
1729   }
1730 };
1731 
1732 
1733 struct ResolvePhisPhase {
1734   static const char* phase_name() { return "resolve phis"; }
1735 
1736   void Run(PipelineData* data, Zone* temp_zone) {
1737     ConstraintBuilder builder(data->register_allocation_data());
1738     builder.ResolvePhis();
1739   }
1740 };
1741 
1742 
1743 struct BuildLiveRangesPhase {
1744   static const char* phase_name() { return "build live ranges"; }
1745 
1746   void Run(PipelineData* data, Zone* temp_zone) {
1747     LiveRangeBuilder builder(data->register_allocation_data(), temp_zone);
1748     builder.BuildLiveRanges();
1749   }
1750 };
1751 
1752 
1753 struct SplinterLiveRangesPhase {
1754   static const char* phase_name() { return "splinter live ranges"; }
1755 
1756   void Run(PipelineData* data, Zone* temp_zone) {
1757     LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
1758                                              temp_zone);
1759     live_range_splinterer.Splinter();
1760   }
1761 };
1762 
1763 
1764 template <typename RegAllocator>
1765 struct AllocateGeneralRegistersPhase {
1766   static const char* phase_name() { return "allocate general registers"; }
1767 
1768   void Run(PipelineData* data, Zone* temp_zone) {
1769     RegAllocator allocator(data->register_allocation_data(), GENERAL_REGISTERS,
1770                            temp_zone);
1771     allocator.AllocateRegisters();
1772   }
1773 };
1774 
1775 template <typename RegAllocator>
1776 struct AllocateFPRegistersPhase {
1777   static const char* phase_name() { return "allocate f.p. registers"; }
1778 
1779   void Run(PipelineData* data, Zone* temp_zone) {
1780     RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
1781                            temp_zone);
1782     allocator.AllocateRegisters();
1783   }
1784 };
1785 
1786 
1787 struct MergeSplintersPhase {
1788   static const char* phase_name() { return "merge splintered ranges"; }
1789   void Run(PipelineData* pipeline_data, Zone* temp_zone) {
1790     RegisterAllocationData* data = pipeline_data->register_allocation_data();
1791     LiveRangeMerger live_range_merger(data, temp_zone);
1792     live_range_merger.Merge();
1793   }
1794 };
1795 
1796 
1797 struct LocateSpillSlotsPhase {
1798   static const char* phase_name() { return "locate spill slots"; }
1799 
1800   void Run(PipelineData* data, Zone* temp_zone) {
1801     SpillSlotLocator locator(data->register_allocation_data());
1802     locator.LocateSpillSlots();
1803   }
1804 };
1805 
1806 
1807 struct AssignSpillSlotsPhase {
1808   static const char* phase_name() { return "assign spill slots"; }
1809 
1810   void Run(PipelineData* data, Zone* temp_zone) {
1811     OperandAssigner assigner(data->register_allocation_data());
1812     assigner.AssignSpillSlots();
1813   }
1814 };
1815 
1816 
1817 struct CommitAssignmentPhase {
1818   static const char* phase_name() { return "commit assignment"; }
1819 
1820   void Run(PipelineData* data, Zone* temp_zone) {
1821     OperandAssigner assigner(data->register_allocation_data());
1822     assigner.CommitAssignment();
1823   }
1824 };
1825 
1826 
1827 struct PopulateReferenceMapsPhase {
1828   static const char* phase_name() { return "populate pointer maps"; }
1829 
1830   void Run(PipelineData* data, Zone* temp_zone) {
1831     ReferenceMapPopulator populator(data->register_allocation_data());
1832     populator.PopulateReferenceMaps();
1833   }
1834 };
1835 
1836 
1837 struct ConnectRangesPhase {
1838   static const char* phase_name() { return "connect ranges"; }
1839 
1840   void Run(PipelineData* data, Zone* temp_zone) {
1841     LiveRangeConnector connector(data->register_allocation_data());
1842     connector.ConnectRanges(temp_zone);
1843   }
1844 };
1845 
1846 
1847 struct ResolveControlFlowPhase {
1848   static const char* phase_name() { return "resolve control flow"; }
1849 
1850   void Run(PipelineData* data, Zone* temp_zone) {
1851     LiveRangeConnector connector(data->register_allocation_data());
1852     connector.ResolveControlFlow(temp_zone);
1853   }
1854 };
1855 
1856 
1857 struct OptimizeMovesPhase {
1858   static const char* phase_name() { return "optimize moves"; }
1859 
1860   void Run(PipelineData* data, Zone* temp_zone) {
1861     MoveOptimizer move_optimizer(temp_zone, data->sequence());
1862     move_optimizer.Run();
1863   }
1864 };
1865 
1866 
1867 struct FrameElisionPhase {
1868   static const char* phase_name() { return "frame elision"; }
1869 
1870   void Run(PipelineData* data, Zone* temp_zone) {
1871     FrameElider(data->sequence()).Run();
1872   }
1873 };
1874 
1875 
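// Computes a forwarding table for jumps whose target blocks do nothing but
// jump again, and applies it so such chains collapse into a single jump.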
1876 struct JumpThreadingPhase {
1877   static const char* phase_name() { return "jump threading"; }
1878 
1879   void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
1880     ZoneVector<RpoNumber> result(temp_zone);
1881     if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
1882                                          frame_at_start)) {
1883       JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
1884     }
1885   }
1886 };
1887 
1888 struct AssembleCodePhase {
1889   static const char* phase_name() { return "assemble code"; }
1890 
1891   void Run(PipelineData* data, Zone* temp_zone) {
1892     data->code_generator()->AssembleCode();
1893   }
1894 };
1895 
1896 struct FinalizeCodePhase {
1897   static const char* phase_name() { return "finalize code"; }
1898 
1899   void Run(PipelineData* data, Zone* temp_zone) {
1900     data->set_code(data->code_generator()->FinalizeCode());
1901   }
1902 };
1903 
1904 
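// phase_name() is nullptr so this helper does not appear as a pipeline phase
// of its own; the label to print is passed in via the |phase| argument.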
1905 struct PrintGraphPhase {
1906   static const char* phase_name() { return nullptr; }
1907 
1908   void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
1909     OptimizedCompilationInfo* info = data->info();
1910     Graph* graph = data->graph();
1911 
1912     if (info->trace_turbo_json_enabled()) {  // Print JSON.
1913       AllowHandleDereference allow_deref;
1914 
1915       TurboJsonFile json_of(info, std::ios_base::app);
1916       json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
1917               << AsJSON(*graph, data->source_positions(), data->node_origins())
1918               << "},\n";
1919     }
1920 
1921     if (info->trace_turbo_scheduled_enabled()) {
1922       AccountingAllocator allocator;
1923       Schedule* schedule = data->schedule();
1924       if (schedule == nullptr) {
1925         schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
1926                                               Scheduler::kNoFlags);
1927       }
1928 
1929       AllowHandleDereference allow_deref;
1930       CodeTracer::Scope tracing_scope(data->GetCodeTracer());
1931       OFStream os(tracing_scope.file());
1932       os << "-- Graph after " << phase << " -- " << std::endl;
1933       os << AsScheduledGraph(schedule);
1934     } else if (info->trace_turbo_graph_enabled()) {  // Simple textual RPO.
1935       AllowHandleDereference allow_deref;
1936       CodeTracer::Scope tracing_scope(data->GetCodeTracer());
1937       OFStream os(tracing_scope.file());
1938       os << "-- Graph after " << phase << " -- " << std::endl;
1939       os << AsRPO(*graph);
1940     }
1941   }
1942 };
1943 
1944 
1945 struct VerifyGraphPhase {
1946   static const char* phase_name() { return nullptr; }
1947 
1948   void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
1949            bool values_only = false) {
1950     Verifier::CodeType code_type;
1951     switch (data->info()->code_kind()) {
1952       case Code::WASM_FUNCTION:
1953       case Code::WASM_TO_JS_FUNCTION:
1954       case Code::JS_TO_WASM_FUNCTION:
1955       case Code::WASM_INTERPRETER_ENTRY:
1956       case Code::C_WASM_ENTRY:
1957         code_type = Verifier::kWasm;
1958         break;
1959       default:
1960         code_type = Verifier::kDefault;
1961     }
1962     Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
1963                   values_only ? Verifier::kValuesOnly : Verifier::kAll,
1964                   code_type);
1965   }
1966 };
1967 
1968 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
1969   if (info()->trace_turbo_json_enabled() ||
1970       info()->trace_turbo_graph_enabled()) {
1971     Run<PrintGraphPhase>(phase);
1972   }
1973   if (FLAG_turbo_verify) {
1974     Run<VerifyGraphPhase>(untyped);
1975   }
1976 }
1977 
1978 bool PipelineImpl::CreateGraph() {
1979   PipelineData* data = this->data_;
1980 
1981   data->BeginPhaseKind("graph creation");
1982 
1983   if (info()->trace_turbo_json_enabled() ||
1984       info()->trace_turbo_graph_enabled()) {
1985     CodeTracer::Scope tracing_scope(data->GetCodeTracer());
1986     OFStream os(tracing_scope.file());
1987     os << "---------------------------------------------------\n"
1988        << "Begin compiling method " << info()->GetDebugName().get()
1989        << " using Turbofan" << std::endl;
1990   }
1991   if (info()->trace_turbo_json_enabled()) {
1992     TurboCfgFile tcf(isolate());
1993     tcf << AsC1VCompilation(info());
1994   }
1995 
1996   data->source_positions()->AddDecorator();
1997   if (data->info()->trace_turbo_json_enabled()) {
1998     data->node_origins()->AddDecorator();
1999   }
2000 
2001   Run<GraphBuilderPhase>();
2002   RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);
2003 
2004   // Perform function context specialization and inlining (if enabled).
2005   Run<InliningPhase>();
2006   RunPrintAndVerify(InliningPhase::phase_name(), true);
2007 
2008   // Remove dead->live edges from the graph.
2009   Run<EarlyGraphTrimmingPhase>();
2010   RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
2011 
2012   // Run the type-sensitive lowerings and optimizations on the graph.
2013   {
2014     // Determine the Typer operation flags.
2015     Typer::Flags flags = Typer::kNoFlags;
2016     if (is_sloppy(info()->shared_info()->language_mode()) &&
2017         info()->shared_info()->IsUserJavaScript()) {
2018       // Sloppy mode functions always have an Object for this.
2019       flags |= Typer::kThisIsReceiver;
2020     }
2021     if (IsClassConstructor(info()->shared_info()->kind())) {
2022       // Class constructors cannot be [[Call]]ed.
2023       flags |= Typer::kNewTargetIsReceiver;
2024     }
2025 
2026     // Type the graph and keep the Typer running on newly created nodes within
2027     // this scope; the Typer is automatically unlinked from the Graph once we
2028     // leave this scope below.
2029     Typer typer(isolate(), data->js_heap_broker(), flags, data->graph());
2030     Run<TyperPhase>(&typer);
2031     RunPrintAndVerify(TyperPhase::phase_name());
2032 
2033     // Do some hacky things to prepare for the optimization phase.
2034     // (caching handles, etc.).
2035     Run<ConcurrentOptimizationPrepPhase>();
2036 
2037     if (FLAG_concurrent_compiler_frontend) {
2038       data->js_heap_broker()->SerializeStandardObjects();
2039       Run<CopyMetadataForConcurrentCompilePhase>();
2040     }
2041 
2042     // Lower JSOperators where we can determine types.
2043     Run<TypedLoweringPhase>();
2044     RunPrintAndVerify(TypedLoweringPhase::phase_name());
2045   }
2046 
2047   data->EndPhaseKind();
2048 
2049   return true;
2050 }
2051 
2052 bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
2053   PipelineData* data = this->data_;
2054 
2055   data->BeginPhaseKind("lowering");
2056 
2057   if (data->info()->is_loop_peeling_enabled()) {
2058     Run<LoopPeelingPhase>();
2059     RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
2060   } else {
2061     Run<LoopExitEliminationPhase>();
2062     RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
2063   }
2064 
2065   if (FLAG_turbo_load_elimination) {
2066     Run<LoadEliminationPhase>();
2067     RunPrintAndVerify(LoadEliminationPhase::phase_name());
2068   }
2069 
2070   if (FLAG_turbo_escape) {
2071     Run<EscapeAnalysisPhase>();
2072     if (data->compilation_failed()) {
2073       info()->AbortOptimization(
2074           BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
2075       data->EndPhaseKind();
2076       return false;
2077     }
2078     RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
2079   }
2080 
2081   // Perform simplified lowering. This has to run w/o the Typer decorator,
2082   // because we cannot compute meaningful types anyways, and the computed types
2083   // might even conflict with the representation/truncation logic.
2084   Run<SimplifiedLoweringPhase>();
2085   RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);
2086 
2087   // From now on it is invalid to look at types on the nodes, because the types
2088   // on the nodes might not make sense after representation selection due to the
2089   // way we handle truncations; if we'd want to look at types afterwards we'd
2090   // essentially need to re-type (large portions of) the graph.
2091 
2092   // In order to catch bugs related to type access after this point, we now
2093   // remove the types from the nodes (currently only in Debug builds).
2094 #ifdef DEBUG
2095   Run<UntyperPhase>();
2096   RunPrintAndVerify(UntyperPhase::phase_name(), true);
2097 #endif
2098 
2099   // Run generic lowering pass.
2100   Run<GenericLoweringPhase>();
2101   RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);
2102 
2103   data->BeginPhaseKind("block building");
2104 
2105   // Run early optimization pass.
2106   Run<EarlyOptimizationPhase>();
2107   RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);
2108 
2109   Run<EffectControlLinearizationPhase>();
2110   RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);
2111 
2112   if (FLAG_turbo_store_elimination) {
2113     Run<StoreStoreEliminationPhase>();
2114     RunPrintAndVerify(StoreStoreEliminationPhase::phase_name(), true);
2115   }
2116 
2117   // Optimize control flow.
2118   if (FLAG_turbo_cf_optimization) {
2119     Run<ControlFlowOptimizationPhase>();
2120     RunPrintAndVerify(ControlFlowOptimizationPhase::phase_name(), true);
2121   }
2122 
2123   // Optimize memory access and allocation operations.
2124   Run<MemoryOptimizationPhase>();
2125   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
2126   RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
2127 
2128   // Lower changes that have been inserted before.
2129   Run<LateOptimizationPhase>();
2130   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
2131   RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);
2132 
2133   data->source_positions()->RemoveDecorator();
2134   if (data->info()->trace_turbo_json_enabled()) {
2135     data->node_origins()->RemoveDecorator();
2136   }
2137 
2138   ComputeScheduledGraph();
2139 
2140   return SelectInstructions(linkage);
2141 }
2142 
2143 MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
2144     Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
2145     Schedule* schedule, Code::Kind kind, const char* debug_name,
2146     uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
2147     PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options) {
2148   OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
2149   info.set_builtin_index(builtin_index);
2150   info.set_stub_key(stub_key);
2151 
2152   if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
2153     info.SetPoisoningMitigationLevel(poisoning_level);
2154   }
2155 
2156   // Construct a pipeline for scheduling and code generation.
2157   ZoneStats zone_stats(isolate->allocator());
2158   SourcePositionTable source_positions(graph);
2159   NodeOriginTable node_origins(graph);
2160   PipelineData data(&zone_stats, &info, isolate, graph, schedule,
2161                     &source_positions, &node_origins, jump_opt, options);
2162   data.set_verify_graph(FLAG_verify_csa);
2163   std::unique_ptr<PipelineStatistics> pipeline_statistics;
2164   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
2165     pipeline_statistics.reset(new PipelineStatistics(
2166         &info, isolate->GetTurboStatistics(), &zone_stats));
2167     pipeline_statistics->BeginPhaseKind("stub codegen");
2168   }
2169 
2170   PipelineImpl pipeline(&data);
2171   DCHECK_NOT_NULL(data.schedule());
2172 
2173   if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
2174     CodeTracer::Scope tracing_scope(data.GetCodeTracer());
2175     OFStream os(tracing_scope.file());
2176     os << "---------------------------------------------------\n"
2177        << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
2178     if (info.trace_turbo_json_enabled()) {
2179       TurboJsonFile json_of(&info, std::ios_base::trunc);
2180       json_of << "{\"function\" : ";
2181       JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
2182                               Handle<Script>(), isolate,
2183                               Handle<SharedFunctionInfo>());
2184       json_of << ",\n\"phases\":[";
2185     }
2186     pipeline.Run<PrintGraphPhase>("Machine");
2187   }
2188 
2189   TraceSchedule(data.info(), &data, data.schedule(), "schedule");
2190 
2191   pipeline.Run<VerifyGraphPhase>(false, true);
2192   return pipeline.GenerateCode(call_descriptor);
2193 }
2194 
2195 // static
2196 MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
2197     OptimizedCompilationInfo* info, Isolate* isolate) {
2198   ZoneStats zone_stats(isolate->allocator());
2199   std::unique_ptr<PipelineStatistics> pipeline_statistics(
2200       CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
2201                                &zone_stats));
2202   PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
2203   PipelineImpl pipeline(&data);
2204 
2205   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
2206   Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);
2207 
2208   if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
2209   if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
2210   pipeline.AssembleCode(&linkage);
2211   Handle<Code> code;
2212   if (pipeline.FinalizeCode().ToHandle(&code) &&
2213       pipeline.CommitDependencies(code)) {
2214     return code;
2215   }
2216   return MaybeHandle<Code>();
2217 }
2218 
2219 // static
2220 MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
2221     OptimizedCompilationInfo* info, Isolate* isolate,
2222     CallDescriptor* call_descriptor, Graph* graph,
2223     const AssemblerOptions& options, Schedule* schedule,
2224     SourcePositionTable* source_positions) {
2225   // Construct a pipeline for scheduling and code generation.
2226   ZoneStats zone_stats(isolate->allocator());
2227   // TODO(wasm): Refactor code generation to check for non-existing source
2228   // table, then remove this conditional allocation.
2229   if (!source_positions)
2230     source_positions = new (info->zone()) SourcePositionTable(graph);
2231   NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
2232   PipelineData data(&zone_stats, info, isolate, graph, schedule,
2233                     source_positions, node_positions, nullptr, options);
2234   std::unique_ptr<PipelineStatistics> pipeline_statistics;
2235   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
2236     pipeline_statistics.reset(new PipelineStatistics(
2237         info, isolate->GetTurboStatistics(), &zone_stats));
2238     pipeline_statistics->BeginPhaseKind("test codegen");
2239   }
2240 
2241   PipelineImpl pipeline(&data);
2242 
2243   if (info->trace_turbo_json_enabled()) {
2244     TurboJsonFile json_of(info, std::ios_base::trunc);
2245     json_of << "{\"function\":\"" << info->GetDebugName().get()
2246             << "\", \"source\":\"\",\n\"phases\":[";
2247   }
2248   // TODO(rossberg): Should this really be untyped?
2249   pipeline.RunPrintAndVerify("machine", true);
2250 
2251   // Ensure we have a schedule.
2252   if (data.schedule() == nullptr) {
2253     pipeline.ComputeScheduledGraph();
2254   }
2255 
2256   Handle<Code> code;
2257   if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
2258       pipeline.CommitDependencies(code)) {
2259     return code;
2260   }
2261   return MaybeHandle<Code>();
2262 }
2263 
2264 // static
2265 OptimizedCompilationJob* Pipeline::NewCompilationJob(
2266     Isolate* isolate, Handle<JSFunction> function, bool has_script) {
2267   Handle<SharedFunctionInfo> shared =
2268       handle(function->shared(), function->GetIsolate());
2269   return new PipelineCompilationJob(isolate, shared, function);
2270 }
2271 
2272 // static
2273 OptimizedCompilationJob* Pipeline::NewWasmCompilationJob(
2274     OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
2275     MachineGraph* mcgraph, CallDescriptor* call_descriptor,
2276     SourcePositionTable* source_positions, NodeOriginTable* node_origins,
2277     wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
2278     wasm::NativeModule* native_module, int function_index,
2279     wasm::ModuleOrigin asmjs_origin) {
2280   return new PipelineWasmCompilationJob(
2281       info, wasm_engine, mcgraph, call_descriptor, source_positions,
2282       node_origins, function_body, wasm_module, native_module, function_index,
2283       asmjs_origin);
2284 }
2285 
2286 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
2287                                            InstructionSequence* sequence,
2288                                            bool run_verifier) {
2289   OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
2290                                 Code::STUB);
2291   ZoneStats zone_stats(sequence->isolate()->allocator());
2292   PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
2293   data.InitializeFrameData(nullptr);
2294   PipelineImpl pipeline(&data);
2295   pipeline.AllocateRegisters(config, nullptr, run_verifier);
2296   return !data.compilation_failed();
2297 }
2298 
2299 void PipelineImpl::ComputeScheduledGraph() {
2300   PipelineData* data = this->data_;
2301 
2302   // We should only schedule the graph if it is not scheduled yet.
2303   DCHECK_NULL(data->schedule());
2304 
2305   Run<LateGraphTrimmingPhase>();
2306   RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);
2307 
2308   Run<ComputeSchedulePhase>();
2309   TraceSchedule(data->info(), data, data->schedule(), "schedule");
2310 }
2311 
2312 bool PipelineImpl::SelectInstructions(Linkage* linkage) {
2313   auto call_descriptor = linkage->GetIncomingDescriptor();
2314   PipelineData* data = this->data_;
2315 
2316   // We should have a scheduled graph.
2317   DCHECK_NOT_NULL(data->graph());
2318   DCHECK_NOT_NULL(data->schedule());
2319 
2320   if (FLAG_turbo_profiling) {
2321     data->set_profiler_data(BasicBlockInstrumentor::Instrument(
2322         info(), data->graph(), data->schedule(), data->isolate()));
2323   }
2324 
2325   bool verify_stub_graph = data->verify_graph();
2326   // Jump optimization runs instruction selection twice, but the instruction
2327   // selector mutates nodes like swapping the inputs of a load, which can
2328   // violate the machine graph verification rules. So we skip the second
2329   // verification on a graph that already verified before.
2330   auto jump_opt = data->jump_optimization_info();
2331   if (jump_opt && jump_opt->is_optimizing()) {
2332     verify_stub_graph = false;
2333   }
2334   if (verify_stub_graph ||
2335       (FLAG_turbo_verify_machine_graph != nullptr &&
2336        (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
2337         !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
2338     if (FLAG_trace_verify_csa) {
2339       AllowHandleDereference allow_deref;
2340       CodeTracer::Scope tracing_scope(data->GetCodeTracer());
2341       OFStream os(tracing_scope.file());
2342       os << "--------------------------------------------------\n"
2343          << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
2344          << "--------------------------------------------------\n"
2345          << *data->schedule()
2346          << "--------------------------------------------------\n"
2347          << "--- End of " << data->debug_name() << " generated by TurboFan\n"
2348          << "--------------------------------------------------\n";
2349     }
2350     Zone temp_zone(data->allocator(), ZONE_NAME);
2351     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
2352                               data->info()->IsStub(), data->debug_name(),
2353                               &temp_zone);
2354   }
2355 
2356   data->InitializeInstructionSequence(call_descriptor);
2357 
2358   data->InitializeFrameData(call_descriptor);
2359   // Select and schedule instructions covering the scheduled graph.
2360   Run<InstructionSelectionPhase>(linkage);
2361   if (data->compilation_failed()) {
2362     info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
2363     data->EndPhaseKind();
2364     return false;
2365   }
2366 
2367   if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
2368     AllowHandleDereference allow_deref;
2369     TurboCfgFile tcf(isolate());
2370     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
2371                  data->sequence());
2372   }
2373 
2374   if (info()->trace_turbo_json_enabled()) {
2375     std::ostringstream source_position_output;
2376     // Output source position information before the graph is deleted.
2377     data_->source_positions()->PrintJson(source_position_output);
2378     source_position_output << ",\n\"NodeOrigins\" : ";
2379     data_->node_origins()->PrintJson(source_position_output);
2380     data_->set_source_position_output(source_position_output.str());
2381   }
2382 
2383   data->DeleteGraphZone();
2384 
2385   data->BeginPhaseKind("register allocation");
2386 
2387   bool run_verifier = FLAG_turbo_verify_allocation;
2388 
2389   // Allocate registers.
  if (call_descriptor->HasRestrictedAllocatableRegisters()) {
    RegList registers = call_descriptor->AllocatableRegisters();
    DCHECK_LT(0, NumRegs(registers));
    std::unique_ptr<const RegisterConfiguration> config;
    config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
    AllocateRegisters(config.get(), call_descriptor, run_verifier);
  } else if (data->info()->GetPoisoningMitigationLevel() !=
             PoisoningMitigationLevel::kDontPoison) {
    AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
                      run_verifier);
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
  } else if (data_->assembler_options().isolate_independent_code) {
    // TODO(v8:6666): Extend support to user code. Ensure that
    // it is mutually exclusive with the Poisoning configuration above; and that
    // it cooperates with restricted allocatable registers above.
    static_assert(kRootRegister == kSpeculationPoisonRegister,
                  "The following checks assume root equals poison register");
    CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_branch_load_poisoning);
    CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
    AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
                      call_descriptor, run_verifier);
#endif  // V8_TARGET_ARCH_IA32 && V8_EMBEDDED_BUILTINS
  } else {
    AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
                      run_verifier);
  }

  // Verify that the instruction sequence hashes to the same value in both
  // stages of jump optimization (collection and optimization).
  VerifyGeneratedCodeIsIdempotent();

  Run<FrameElisionPhase>();
  if (data->compilation_failed()) {
    info()->AbortOptimization(
        BailoutReason::kNotEnoughVirtualRegistersRegalloc);
    data->EndPhaseKind();
    return false;
  }

  // TODO(mtrofin): move this off to the register allocator.
  bool generate_frame_at_start =
      data_->sequence()->instruction_blocks().front()->must_construct_frame();
  // Optimize jumps.
  if (FLAG_turbo_jt) {
    Run<JumpThreadingPhase>(generate_frame_at_start);
  }

  data->EndPhaseKind();

  return true;
}

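// Jump optimization runs the code-generation pipeline twice: a first pass
// collects jump-site information, and a second pass uses it to emit tighter
// code. That is only sound if both passes see the same instruction sequence,
// so we hash the sequence (block count, virtual registers, opcodes, operand
// counts, and representations) and check that the hash matches across the
// two runs.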
void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
  PipelineData* data = this->data_;
  JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
  if (jump_opt == nullptr) return;

  InstructionSequence* code = data->sequence();
  int instruction_blocks = code->InstructionBlockCount();
  int virtual_registers = code->VirtualRegisterCount();
  size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
  for (auto instr : *code) {
    hash_code = base::hash_combine(hash_code, instr->opcode(),
                                   instr->InputCount(), instr->OutputCount());
  }
  for (int i = 0; i < virtual_registers; i++) {
    hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
  }
  if (jump_opt->is_collecting()) {
    jump_opt->set_hash_code(hash_code);
  } else {
    CHECK_EQ(hash_code, jump_opt->hash_code());
  }
}

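// Helper for emitting the instruction-index -> code-offset mapping as JSON
// into the --trace-turbo output consumed by Turbolizer.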
struct InstructionStartsAsJSON {
  const ZoneVector<int>* instr_starts;
};

std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
  out << ", \"instructionOffsetToPCOffset\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.instr_starts->size(); ++i) {
    if (need_comma) out << ", ";
    int offset = (*s.instr_starts)[i];
    out << "\"" << i << "\":" << offset;
    need_comma = true;
  }
  out << "}";
  return out;
}

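// Runs the code assembly phase, records the per-instruction code offsets in
// the JSON trace if requested, and then releases the instruction zone, which
// is no longer needed once machine code has been emitted.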
void PipelineImpl::AssembleCode(Linkage* linkage) {
  PipelineData* data = this->data_;
  data->BeginPhaseKind("code generation");
  data->InitializeCodeGenerator(linkage);
  Run<AssembleCodePhase>();
  if (data->info()->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(data->info(), std::ios_base::app);
    json_of << "{\"name\":\"code generation\""
            << ", \"type\":\"instructions\""
            << InstructionStartsAsJSON{&data->code_generator()->instr_starts()};
    json_of << "},\n";
  }
  data->DeleteInstructionZone();
}

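// Helper for emitting the basic-block-id -> code-offset mapping as JSON,
// mirroring InstructionStartsAsJSON above.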
struct BlockStartsAsJSON {
  const ZoneVector<int>* block_starts;
};

std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
  out << ", \"blockIdToOffset\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.block_starts->size(); ++i) {
    if (need_comma) out << ", ";
    int offset = (*s.block_starts)[i];
    out << "\"" << i << "\":" << offset;
    need_comma = true;
  }
  out << "},";
  return out;
}

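// Produces the final Code object and, when tracing is enabled, appends the
// disassembly, block offsets, and source-position data to the JSON trace.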
MaybeHandle<Code> PipelineImpl::FinalizeCode() {
  PipelineData* data = this->data_;
  Run<FinalizeCodePhase>();

  MaybeHandle<Code> maybe_code = data->code();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    return maybe_code;
  }

  if (data->profiler_data()) {
#ifdef ENABLE_DISASSEMBLER
    std::ostringstream os;
    code->Disassemble(nullptr, os);
    data->profiler_data()->SetCode(&os);
#endif  // ENABLE_DISASSEMBLER
  }

  info()->SetCode(code);
  PrintCode(isolate(), code, info());

  if (info()->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(info(), std::ios_base::app);

    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&data->code_generator()->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembly_stream;
    code->Disassemble(nullptr, disassembly_stream);
    std::string disassembly_string(disassembly_stream.str());
    for (const auto& c : disassembly_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n],\n";
    json_of << "\"nodePositions\":";
    json_of << data->source_position_output() << ",\n";
    JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
    json_of << "\n}";
  }
  if (info()->trace_turbo_json_enabled() ||
      info()->trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Finished compiling method " << info()->GetDebugName().get()
       << " using TurboFan" << std::endl;
  }
  return code;
}

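// Drives the back half of the pipeline for a given call descriptor:
// instruction selection with register allocation, assembly, and finalization.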
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
  Linkage linkage(call_descriptor);

  // Perform instruction selection and register allocation.
  if (!SelectInstructions(&linkage)) return MaybeHandle<Code>();

  // Generate the final machine code.
  AssembleCode(&linkage);
  return FinalizeCode();
}

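// Commits the compilation dependencies recorded during optimization; returns
// false if a dependency has been invalidated in the meantime, in which case
// the caller must not install the generated code.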
bool PipelineImpl::CommitDependencies(Handle<Code> code) {
  return data_->dependencies() == nullptr ||
         data_->dependencies()->Commit(code);
}

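// Runs the register allocation pipeline over the instruction sequence:
// liveness analysis, linear-scan allocation of general and floating-point
// registers, spill-slot assignment, and the rewrites that make the
// assignment explicit in the code. An optional verifier checks the result.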
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
                                     CallDescriptor* call_descriptor,
                                     bool run_verifier) {
  PipelineData* data = this->data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    verifier_zone.reset(new Zone(data->allocator(), ZONE_NAME));
    verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
        verifier_zone.get(), config, data->sequence());
  }

#ifdef DEBUG
  data_->sequence()->ValidateEdgeSplitForm();
  data_->sequence()->ValidateDeferredBlockEntryPaths();
  data_->sequence()->ValidateDeferredBlockExitPaths();
#endif

  data->InitializeRegisterAllocationData(config, call_descriptor);
  if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
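
  // Prepare for allocation: impose the instructions' register constraints,
  // translate phis into parallel moves, and compute live ranges.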
  Run<MeetRegisterConstraintsPhase>();
  Run<ResolvePhisPhase>();
  Run<BuildLiveRangesPhase>();
  if (info()->trace_turbo_graph_enabled()) {
    AllowHandleDereference allow_deref;
    CodeTracer::Scope tracing_scope(data->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "----- Instruction sequence before register allocation -----\n"
       << PrintableInstructionSequence({config, data->sequence()});
  }
  if (verifier != nullptr) {
    CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
    CHECK(data->register_allocation_data()
              ->RangesDefinedInDeferredStayInDeferred());
  }

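  // Optionally split ("splinter") the parts of live ranges that lie in
  // deferred blocks into separate ranges, so the hot path is allocated
  // without being constrained by cold code; the splinters are merged back
  // after allocation.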
  if (FLAG_turbo_preprocess_ranges) {
    Run<SplinterLiveRangesPhase>();
  }

  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
  Run<AllocateFPRegistersPhase<LinearScanAllocator>>();

  if (FLAG_turbo_preprocess_ranges) {
    Run<MergeSplintersPhase>();
  }

  Run<AssignSpillSlotsPhase>();

  Run<CommitAssignmentPhase>();

  // TODO(chromium:725559): remove this check once the cause of the bug is
  // understood, keeping only the check at the end of the allocation.
  if (verifier != nullptr) {
    verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
  }

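  // Make the allocation explicit in the code: record GC reference maps,
  // connect split live ranges with moves, and resolve control flow at block
  // boundaries.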
  Run<PopulateReferenceMapsPhase>();
  Run<ConnectRangesPhase>();
  Run<ResolveControlFlowPhase>();
  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }

  Run<LocateSpillSlotsPhase>();

  if (info()->trace_turbo_graph_enabled()) {
    AllowHandleDereference allow_deref;
    CodeTracer::Scope tracing_scope(data->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "----- Instruction sequence after register allocation -----\n"
       << PrintableInstructionSequence({config, data->sequence()});
  }

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData("CodeGen",
                                       data->register_allocation_data());
  }

  data->DeleteRegisterAllocationZone();
}

OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }

Isolate* PipelineImpl::isolate() const { return data_->isolate(); }

CodeGenerator* PipelineImpl::code_generator() const {
  return data_->code_generator();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8