// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"

#include <fstream>  // NOLINT(readability/streams)
#include <memory>
#include <sstream>

#include "src/base/adapters.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/live-range-separator.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/register-allocator-verifier.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-stats.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/trap-handler/trap-handler.h"
#include "src/type-info.h"
#include "src/utils.h"

namespace v8 {
namespace internal {
namespace compiler {

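// PipelineData holds the state shared by the pipeline phases: the
// CompilationInfo, the graph and its operator builders, and three
// allocation zones (graph, instruction, register allocation) that are
// destroyed individually once the corresponding phases are done with them.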
class PipelineData {
 public:
  // For main entry point.
  PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
               PipelineStatistics* pipeline_statistics)
      : isolate_(info->isolate()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        outer_zone_(info_->zone()),
        zone_stats_(zone_stats),
        pipeline_statistics_(pipeline_statistics),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        graph_zone_(graph_zone_scope_.zone()),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(instruction_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
    PhaseScope scope(pipeline_statistics, "init pipeline data");
    graph_ = new (graph_zone_) Graph(graph_zone_);
    source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
    simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
    machine_ = new (graph_zone_) MachineOperatorBuilder(
        graph_zone_, MachineType::PointerRepresentation(),
        InstructionSelector::SupportedMachineOperatorFlags(),
        InstructionSelector::AlignmentRequirements());
    common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
    javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
    jsgraph_ = new (graph_zone_)
        JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
    is_asm_ = info->shared_info()->asm_function();
  }

  // For WASM compile entry point.
  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
               SourcePositionTable* source_positions,
               ZoneVector<trap_handler::ProtectedInstructionData>*
                   protected_instructions)
      : isolate_(info->isolate()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        graph_(jsgraph->graph()),
        source_positions_(source_positions),
        machine_(jsgraph->machine()),
        common_(jsgraph->common()),
        javascript_(jsgraph->javascript()),
        jsgraph_(jsgraph),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(instruction_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()),
        protected_instructions_(protected_instructions) {
    is_asm_ =
        info->has_shared_info() ? info->shared_info()->asm_function() : false;
  }

  // For machine graph testing entry point.
  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
               Schedule* schedule, SourcePositionTable* source_positions)
      : isolate_(info->isolate()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        graph_(graph),
        source_positions_(source_positions),
        schedule_(schedule),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(instruction_zone_scope_.zone()),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
    is_asm_ = false;
  }

  // For register allocation testing entry point.
  PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
               InstructionSequence* sequence)
      : isolate_(info->isolate()),
        info_(info),
        debug_name_(info_->GetDebugName()),
        zone_stats_(zone_stats),
        graph_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_scope_(zone_stats_, ZONE_NAME),
        instruction_zone_(sequence->zone()),
        sequence_(sequence),
        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
    is_asm_ =
        info->has_shared_info() ? info->shared_info()->asm_function() : false;
  }

  ~PipelineData() {
    DeleteRegisterAllocationZone();
    DeleteInstructionZone();
    DeleteGraphZone();
  }

  Isolate* isolate() const { return isolate_; }
  CompilationInfo* info() const { return info_; }
  ZoneStats* zone_stats() const { return zone_stats_; }
  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
  bool compilation_failed() const { return compilation_failed_; }
  void set_compilation_failed() { compilation_failed_ = true; }

  bool is_asm() const { return is_asm_; }
  bool verify_graph() const { return verify_graph_; }
  void set_verify_graph(bool value) { verify_graph_ = value; }

  Handle<Code> code() { return code_; }
  void set_code(Handle<Code> code) {
    DCHECK(code_.is_null());
    code_ = code;
  }

  // RawMachineAssembler generally produces graphs which cannot be verified.
  bool MayHaveUnverifiableGraph() const { return outer_zone_ == nullptr; }

  Zone* graph_zone() const { return graph_zone_; }
  Graph* graph() const { return graph_; }
  SourcePositionTable* source_positions() const { return source_positions_; }
  MachineOperatorBuilder* machine() const { return machine_; }
  CommonOperatorBuilder* common() const { return common_; }
  JSOperatorBuilder* javascript() const { return javascript_; }
  JSGraph* jsgraph() const { return jsgraph_; }
  Handle<Context> native_context() const {
    return handle(info()->native_context(), isolate());
  }
  Handle<JSGlobalObject> global_object() const {
    return handle(info()->global_object(), isolate());
  }

  LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
  void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
    DCHECK(!loop_assignment_);
    loop_assignment_ = loop_assignment;
  }

  Schedule* schedule() const { return schedule_; }
  void set_schedule(Schedule* schedule) {
    DCHECK(!schedule_);
    schedule_ = schedule;
  }
  void reset_schedule() { schedule_ = nullptr; }

  Zone* instruction_zone() const { return instruction_zone_; }
  InstructionSequence* sequence() const { return sequence_; }
  Frame* frame() const { return frame_; }

  Zone* register_allocation_zone() const { return register_allocation_zone_; }
  RegisterAllocationData* register_allocation_data() const {
    return register_allocation_data_;
  }

  BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
  void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
    profiler_data_ = profiler_data;
  }

  std::string const& source_position_output() const {
    return source_position_output_;
  }
  void set_source_position_output(std::string const& source_position_output) {
    source_position_output_ = source_position_output;
  }

  ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions()
      const {
    return protected_instructions_;
  }

  void DeleteGraphZone() {
    if (graph_zone_ == nullptr) return;
    graph_zone_scope_.Destroy();
    graph_zone_ = nullptr;
    graph_ = nullptr;
    source_positions_ = nullptr;
    loop_assignment_ = nullptr;
    simplified_ = nullptr;
    machine_ = nullptr;
    common_ = nullptr;
    javascript_ = nullptr;
    jsgraph_ = nullptr;
    schedule_ = nullptr;
  }

  void DeleteInstructionZone() {
    if (instruction_zone_ == nullptr) return;
    instruction_zone_scope_.Destroy();
    instruction_zone_ = nullptr;
    sequence_ = nullptr;
    frame_ = nullptr;
  }

  void DeleteRegisterAllocationZone() {
    if (register_allocation_zone_ == nullptr) return;
    register_allocation_zone_scope_.Destroy();
    register_allocation_zone_ = nullptr;
    register_allocation_data_ = nullptr;
  }

  void InitializeInstructionSequence(const CallDescriptor* descriptor) {
    DCHECK(sequence_ == nullptr);
    InstructionBlocks* instruction_blocks =
        InstructionSequence::InstructionBlocksFor(instruction_zone(),
                                                  schedule());
    sequence_ = new (instruction_zone()) InstructionSequence(
        info()->isolate(), instruction_zone(), instruction_blocks);
    if (descriptor && descriptor->RequiresFrameAsIncoming()) {
      sequence_->instruction_blocks()[0]->mark_needs_frame();
    } else {
      DCHECK_EQ(0u, descriptor->CalleeSavedFPRegisters());
      DCHECK_EQ(0u, descriptor->CalleeSavedRegisters());
    }
  }

  void InitializeFrameData(CallDescriptor* descriptor) {
    DCHECK(frame_ == nullptr);
    int fixed_frame_size = 0;
    if (descriptor != nullptr) {
      fixed_frame_size = descriptor->CalculateFixedFrameSize();
    }
    frame_ = new (instruction_zone()) Frame(fixed_frame_size);
  }

  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
                                        CallDescriptor* descriptor) {
    DCHECK(register_allocation_data_ == nullptr);
    register_allocation_data_ = new (register_allocation_zone())
        RegisterAllocationData(config, register_allocation_zone(), frame(),
                               sequence(), debug_name());
  }

  void BeginPhaseKind(const char* phase_kind_name) {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->BeginPhaseKind(phase_kind_name);
    }
  }

  void EndPhaseKind() {
    if (pipeline_statistics() != nullptr) {
      pipeline_statistics()->EndPhaseKind();
    }
  }

  const char* debug_name() const { return debug_name_.get(); }

 private:
  Isolate* const isolate_;
  CompilationInfo* const info_;
  std::unique_ptr<char[]> debug_name_;
  Zone* outer_zone_ = nullptr;
  ZoneStats* const zone_stats_;
  PipelineStatistics* pipeline_statistics_ = nullptr;
  bool compilation_failed_ = false;
  bool verify_graph_ = false;
  bool is_asm_ = false;
  Handle<Code> code_ = Handle<Code>::null();

  // All objects in the following group of fields are allocated in graph_zone_.
  // They are all set to nullptr when the graph_zone_ is destroyed.
  ZoneStats::Scope graph_zone_scope_;
  Zone* graph_zone_ = nullptr;
  Graph* graph_ = nullptr;
  SourcePositionTable* source_positions_ = nullptr;
  LoopAssignmentAnalysis* loop_assignment_ = nullptr;
  SimplifiedOperatorBuilder* simplified_ = nullptr;
  MachineOperatorBuilder* machine_ = nullptr;
  CommonOperatorBuilder* common_ = nullptr;
  JSOperatorBuilder* javascript_ = nullptr;
  JSGraph* jsgraph_ = nullptr;
  Schedule* schedule_ = nullptr;

  // All objects in the following group of fields are allocated in
  // instruction_zone_.  They are all set to nullptr when the
  // instruction_zone_ is destroyed.
  ZoneStats::Scope instruction_zone_scope_;
  Zone* instruction_zone_;
  InstructionSequence* sequence_ = nullptr;
  Frame* frame_ = nullptr;

  // All objects in the following group of fields are allocated in
  // register_allocation_zone_.  They are all set to nullptr when the zone is
  // destroyed.
  ZoneStats::Scope register_allocation_zone_scope_;
  Zone* register_allocation_zone_;
  RegisterAllocationData* register_allocation_data_ = nullptr;

  // Basic block profiling support.
  BasicBlockProfiler::Data* profiler_data_ = nullptr;

  // Source position output for --trace-turbo.
  std::string source_position_output_;

  ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
      nullptr;

  DISALLOW_COPY_AND_ASSIGN(PipelineData);
};

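// PipelineImpl is the driver that runs the phases over a PipelineData,
// split into graph creation, (potentially concurrent) graph optimization,
// and final code generation.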
class PipelineImpl final {
 public:
  explicit PipelineImpl(PipelineData* data) : data_(data) {}

  // Helpers for executing pipeline phases.
  template <typename Phase>
  void Run();
  template <typename Phase, typename Arg0>
  void Run(Arg0 arg_0);
  template <typename Phase, typename Arg0, typename Arg1>
  void Run(Arg0 arg_0, Arg1 arg_1);

  // Run the graph creation and initial optimization passes.
  bool CreateGraph();

  // Run the concurrent optimization passes.
  bool OptimizeGraph(Linkage* linkage);

  // Perform the actual code generation and return handle to a code object.
  Handle<Code> GenerateCode(Linkage* linkage);

  bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
  void RunPrintAndVerify(const char* phase, bool untyped = false);
  Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
  void AllocateRegisters(const RegisterConfiguration* config,
                         CallDescriptor* descriptor, bool run_verifier);

  CompilationInfo* info() const;
  Isolate* isolate() const;

  PipelineData* const data_;
};

namespace {

struct TurboCfgFile : public std::ofstream {
  explicit TurboCfgFile(Isolate* isolate)
      : std::ofstream(isolate->GetTurboCfgFileName().c_str(),
                      std::ios_base::app) {}
};

struct TurboJsonFile : public std::ofstream {
  TurboJsonFile(CompilationInfo* info, std::ios_base::openmode mode)
      : std::ofstream(GetVisualizerLogFileName(info, nullptr, "json").get(),
                      mode) {}
};

void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
  if (FLAG_trace_turbo) {
    AllowHandleDereference allow_deref;
    TurboJsonFile json_of(info, std::ios_base::app);
    json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
    std::stringstream schedule_stream;
    schedule_stream << *schedule;
    std::string schedule_string(schedule_stream.str());
    for (const auto& c : schedule_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
    json_of << "\"},\n";
  }
  if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
    AllowHandleDereference allow_deref;
    CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "-- Schedule --------------------------------------\n" << *schedule;
  }
}

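// SourcePositionWrapper forwards each reduction to the wrapped reducer, but
// first sets the current source position from the node being reduced, so
// that any nodes the wrapped reducer creates inherit that position.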
class SourcePositionWrapper final : public Reducer {
 public:
  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
      : reducer_(reducer), table_(table) {}
  ~SourcePositionWrapper() final {}

  Reduction Reduce(Node* node) final {
    SourcePosition const pos = table_->GetSourcePosition(node);
    SourcePositionTable::Scope position(table_, pos);
    return reducer_->Reduce(node);
  }

  void Finalize() final { reducer_->Finalize(); }

 private:
  Reducer* const reducer_;
  SourcePositionTable* const table_;

  DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
};


class JSGraphReducer final : public GraphReducer {
 public:
  JSGraphReducer(JSGraph* jsgraph, Zone* zone)
      : GraphReducer(zone, jsgraph->graph(), jsgraph->Dead()) {}
  ~JSGraphReducer() final {}
};


void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
                Reducer* reducer) {
  if (data->info()->is_source_positions_enabled()) {
    void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
    SourcePositionWrapper* const wrapper =
        new (buffer) SourcePositionWrapper(reducer, data->source_positions());
    graph_reducer->AddReducer(wrapper);
  } else {
    graph_reducer->AddReducer(reducer);
  }
}

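// PipelineRunScope provides a per-phase temporary zone and, when statistics
// collection is enabled, times the phase via PhaseScope.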
class PipelineRunScope {
 public:
  PipelineRunScope(PipelineData* data, const char* phase_name)
      : phase_scope_(
            phase_name == nullptr ? nullptr : data->pipeline_statistics(),
            phase_name),
        zone_scope_(data->zone_stats(), ZONE_NAME) {}

  Zone* zone() { return zone_scope_.zone(); }

 private:
  PhaseScope phase_scope_;
  ZoneStats::Scope zone_scope_;
};

PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
                                             ZoneStats* zone_stats) {
  PipelineStatistics* pipeline_statistics = nullptr;

  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics = new PipelineStatistics(info, zone_stats);
    pipeline_statistics->BeginPhaseKind("initializing");
  }

  if (FLAG_trace_turbo) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    Handle<Script> script = info->script();
    std::unique_ptr<char[]> function_name = info->GetDebugName();
    int pos = info->shared_info()->start_position();
    json_of << "{\"function\":\"" << function_name.get()
            << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
    Isolate* isolate = info->isolate();
    if (!script->IsUndefined(isolate) &&
        !script->source()->IsUndefined(isolate)) {
      DisallowHeapAllocation no_allocation;
      int start = info->shared_info()->start_position();
      int len = info->shared_info()->end_position() - start;
      String::SubStringRange source(String::cast(script->source()), start, len);
      for (const auto& c : source) {
        json_of << AsEscapedUC16ForJSON(c);
      }
    }
    json_of << "\",\n\"phases\":[";
  }

  return pipeline_statistics;
}

}  // namespace

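// PipelineCompilationJob runs the TurboFan pipeline for ordinary JavaScript
// functions: graph building happens in PrepareJobImpl, optimization in
// ExecuteJobImpl (which may run on a background thread), and code
// generation in FinalizeJobImpl.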
class PipelineCompilationJob final : public CompilationJob {
 public:
  PipelineCompilationJob(ParseInfo* parse_info, Handle<JSFunction> function)
      // Note that the CompilationInfo is not initialized at the time we pass it
      // to the CompilationJob constructor, but it is not dereferenced there.
      : CompilationJob(parse_info->isolate(), &info_, "TurboFan"),
        parse_info_(parse_info),
        zone_stats_(parse_info->isolate()->allocator()),
        info_(parse_info_.get()->zone(), parse_info_.get(), function),
        pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
        data_(&zone_stats_, info(), pipeline_statistics_.get()),
        pipeline_(&data_),
        linkage_(nullptr) {}

 protected:
  Status PrepareJobImpl() final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl() final;

 private:
  std::unique_ptr<ParseInfo> parse_info_;
  ZoneStats zone_stats_;
  CompilationInfo info_;
  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage* linkage_;

  DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
};

PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
  if (info()->shared_info()->asm_function()) {
    if (info()->osr_frame() && !info()->is_optimizing_from_bytecode()) {
      info()->MarkAsFrameSpecializing();
    }
    info()->MarkAsFunctionContextSpecializing();
  } else {
    if (!FLAG_always_opt) {
      info()->MarkAsBailoutOnUninitialized();
    }
    if (FLAG_turbo_loop_peeling) {
      info()->MarkAsLoopPeelingEnabled();
    }
  }
  if (info()->is_optimizing_from_bytecode() ||
      !info()->shared_info()->asm_function()) {
    info()->MarkAsDeoptimizationEnabled();
    if (FLAG_inline_accessors) {
      info()->MarkAsAccessorInliningEnabled();
    }
    if (info()->closure()->feedback_vector_cell()->map() ==
        isolate()->heap()->one_closure_cell_map()) {
      info()->MarkAsFunctionContextSpecializing();
    }
  }
  if (!info()->is_optimizing_from_bytecode()) {
    if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
  } else if (FLAG_turbo_inlining) {
    info()->MarkAsInliningEnabled();
  }

  linkage_ = new (info()->zone())
      Linkage(Linkage::ComputeIncoming(info()->zone(), info()));

  if (!pipeline_.CreateGraph()) {
    if (isolate()->has_pending_exception()) return FAILED;  // Stack overflowed.
    return AbortOptimization(kGraphBuildingFailed);
  }

  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
  if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
  return SUCCEEDED;
}

PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
  Handle<Code> code = pipeline_.GenerateCode(linkage_);
  if (code.is_null()) {
    if (info()->bailout_reason() == kNoReason) {
      return AbortOptimization(kCodeGenerationFailed);
    }
    return FAILED;
  }
  info()->dependencies()->Commit(code);
  info()->SetCode(code);
  if (info()->is_deoptimization_enabled()) {
    info()->context()->native_context()->AddOptimizedCode(*code);
    RegisterWeakObjectsInOptimizedCode(code);
  }
  return SUCCEEDED;
}

class PipelineWasmCompilationJob final : public CompilationJob {
 public:
  explicit PipelineWasmCompilationJob(
      CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
      SourcePositionTable* source_positions,
      ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
      bool allow_signalling_nan)
      : CompilationJob(info->isolate(), info, "TurboFan",
                       State::kReadyToExecute),
        zone_stats_(info->isolate()->allocator()),
        data_(&zone_stats_, info, jsgraph, source_positions, protected_insts),
        pipeline_(&data_),
        linkage_(descriptor),
        allow_signalling_nan_(allow_signalling_nan) {}

 protected:
  Status PrepareJobImpl() final;
  Status ExecuteJobImpl() final;
  Status FinalizeJobImpl() final;

 private:
  ZoneStats zone_stats_;
  PipelineData data_;
  PipelineImpl pipeline_;
  Linkage linkage_;
  bool allow_signalling_nan_;
};

PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::PrepareJobImpl() {
  UNREACHABLE();  // Prepare should always be skipped for WasmCompilationJob.
  return SUCCEEDED;
}

PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::ExecuteJobImpl() {
  if (FLAG_trace_turbo) {
    TurboJsonFile json_of(info(), std::ios_base::trunc);
    json_of << "{\"function\":\"" << info()->GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  pipeline_.RunPrintAndVerify("Machine", true);
  if (FLAG_wasm_opt) {
    PipelineData* data = &data_;
    PipelineRunScope scope(data, "WASM optimization");
    JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common());
    ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
    MachineOperatorReducer machine_reducer(data->jsgraph(),
                                           allow_signalling_nan_);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->common(), data->machine());
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &value_numbering);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    graph_reducer.ReduceGraph();
    pipeline_.RunPrintAndVerify("Optimized Machine", true);
  }

  if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
  return SUCCEEDED;
}

PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::FinalizeJobImpl() {
  pipeline_.GenerateCode(&linkage_);
  return SUCCEEDED;
}

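// Each phase is a plain struct with a static phase_name() and a Run()
// method; the Run<Phase>() helpers below wrap every invocation in a
// PipelineRunScope so the phase gets its own temporary zone and shows up in
// the pipeline statistics.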
template <typename Phase>
void PipelineImpl::Run() {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone());
}

template <typename Phase, typename Arg0>
void PipelineImpl::Run(Arg0 arg_0) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), arg_0);
}

template <typename Phase, typename Arg0, typename Arg1>
void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
  PipelineRunScope scope(this->data_, Phase::phase_name());
  Phase phase;
  phase.Run(this->data_, scope.zone(), arg_0, arg_1);
}

struct LoopAssignmentAnalysisPhase {
  static const char* phase_name() { return "loop assignment analysis"; }

  void Run(PipelineData* data, Zone* temp_zone) {
    if (!data->info()->is_optimizing_from_bytecode()) {
      AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
      LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
      data->set_loop_assignment(loop_assignment);
    }
  }
};


struct GraphBuilderPhase {
  static const char* phase_name() { return "graph builder"; }

  void Run(PipelineData* data, Zone* temp_zone) {
    bool succeeded = false;

    if (data->info()->is_optimizing_from_bytecode()) {
      // Bytecode graph builder assumes deoptimization is enabled.
755       DCHECK(data->info()->is_deoptimization_enabled());
756       BytecodeGraphBuilder graph_builder(
757           temp_zone, data->info()->shared_info(),
758           handle(data->info()->closure()->feedback_vector()),
759           data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
760           data->source_positions());
761       succeeded = graph_builder.CreateGraph();
762     } else {
763       AstGraphBuilderWithPositions graph_builder(
764           temp_zone, data->info(), data->jsgraph(), 1.0f,
765           data->loop_assignment(), data->source_positions());
766       succeeded = graph_builder.CreateGraph();
767     }
768 
769     if (!succeeded) {
770       data->set_compilation_failed();
771     }
772   }
773 };
774 
775 
776 struct InliningPhase {
phase_namev8::internal::compiler::InliningPhase777   static const char* phase_name() { return "inlining"; }
778 
Runv8::internal::compiler::InliningPhase779   void Run(PipelineData* data, Zone* temp_zone) {
780     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
781     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
782                                               data->common());
783     CheckpointElimination checkpoint_elimination(&graph_reducer);
784     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
785                                          data->common(), data->machine());
786     JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
787     if (data->info()->is_deoptimization_enabled()) {
788       call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
789     }
790     JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
791                                call_reducer_flags, data->native_context(),
792                                data->info()->dependencies());
793     JSContextSpecialization context_specialization(
794         &graph_reducer, data->jsgraph(),
795         data->info()->is_function_context_specializing()
796             ? handle(data->info()->context())
797             : MaybeHandle<Context>());
798     JSFrameSpecialization frame_specialization(
799         &graph_reducer, data->info()->osr_frame(), data->jsgraph());
800     JSNativeContextSpecialization::Flags flags =
801         JSNativeContextSpecialization::kNoFlags;
802     if (data->info()->is_accessor_inlining_enabled()) {
803       flags |= JSNativeContextSpecialization::kAccessorInliningEnabled;
804     }
805     if (data->info()->is_bailout_on_uninitialized()) {
806       flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
807     }
808     if (data->info()->is_deoptimization_enabled()) {
809       flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
810     }
811     JSNativeContextSpecialization native_context_specialization(
812         &graph_reducer, data->jsgraph(), flags, data->native_context(),
813         data->info()->dependencies(), temp_zone);
814     JSInliningHeuristic inlining(
815         &graph_reducer, data->info()->is_inlining_enabled()
816                             ? JSInliningHeuristic::kGeneralInlining
817                             : JSInliningHeuristic::kRestrictedInlining,
818         temp_zone, data->info(), data->jsgraph(), data->source_positions());
819     JSIntrinsicLowering intrinsic_lowering(
820         &graph_reducer, data->jsgraph(),
821         data->info()->is_deoptimization_enabled()
822             ? JSIntrinsicLowering::kDeoptimizationEnabled
823             : JSIntrinsicLowering::kDeoptimizationDisabled);
824     AddReducer(data, &graph_reducer, &dead_code_elimination);
825     AddReducer(data, &graph_reducer, &checkpoint_elimination);
826     AddReducer(data, &graph_reducer, &common_reducer);
827     if (data->info()->is_frame_specializing()) {
828       AddReducer(data, &graph_reducer, &frame_specialization);
829     }
830     AddReducer(data, &graph_reducer, &native_context_specialization);
831     AddReducer(data, &graph_reducer, &context_specialization);
832     AddReducer(data, &graph_reducer, &intrinsic_lowering);
833     AddReducer(data, &graph_reducer, &call_reducer);
834     AddReducer(data, &graph_reducer, &inlining);
835     graph_reducer.ReduceGraph();
836   }
837 };
838 
839 
840 struct TyperPhase {
phase_namev8::internal::compiler::TyperPhase841   static const char* phase_name() { return "typer"; }
842 
Runv8::internal::compiler::TyperPhase843   void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
844     NodeVector roots(temp_zone);
845     data->jsgraph()->GetCachedNodes(&roots);
846     LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
847                                          data->common(), temp_zone);
848     if (FLAG_turbo_loop_variable) induction_vars.Run();
849     typer->Run(roots, &induction_vars);
850   }
851 };
852 
853 struct UntyperPhase {
phase_namev8::internal::compiler::UntyperPhase854   static const char* phase_name() { return "untyper"; }
855 
Runv8::internal::compiler::UntyperPhase856   void Run(PipelineData* data, Zone* temp_zone) {
857     class RemoveTypeReducer final : public Reducer {
858      public:
859       Reduction Reduce(Node* node) final {
860         if (NodeProperties::IsTyped(node)) {
861           NodeProperties::RemoveType(node);
862           return Changed(node);
863         }
864         return NoChange();
865       }
866     };
867 
868     NodeVector roots(temp_zone);
869     data->jsgraph()->GetCachedNodes(&roots);
870     for (Node* node : roots) {
871       NodeProperties::RemoveType(node);
872     }
873 
874     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
875     RemoveTypeReducer remove_type_reducer;
876     AddReducer(data, &graph_reducer, &remove_type_reducer);
877     graph_reducer.ReduceGraph();
878   }
879 };
880 
881 struct OsrDeconstructionPhase {
phase_namev8::internal::compiler::OsrDeconstructionPhase882   static const char* phase_name() { return "OSR deconstruction"; }
883 
Runv8::internal::compiler::OsrDeconstructionPhase884   void Run(PipelineData* data, Zone* temp_zone) {
885     GraphTrimmer trimmer(temp_zone, data->graph());
886     NodeVector roots(temp_zone);
887     data->jsgraph()->GetCachedNodes(&roots);
888     trimmer.TrimGraph(roots.begin(), roots.end());
889 
890     OsrHelper osr_helper(data->info());
891     osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
892   }
893 };
894 
895 
896 struct TypedLoweringPhase {
phase_namev8::internal::compiler::TypedLoweringPhase897   static const char* phase_name() { return "typed lowering"; }
898 
Runv8::internal::compiler::TypedLoweringPhase899   void Run(PipelineData* data, Zone* temp_zone) {
900     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
901     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
902                                               data->common());
903     JSBuiltinReducer builtin_reducer(
904         &graph_reducer, data->jsgraph(),
905         data->info()->is_deoptimization_enabled()
906             ? JSBuiltinReducer::kDeoptimizationEnabled
907             : JSBuiltinReducer::kNoFlags,
908         data->info()->dependencies(), data->native_context());
909     Handle<FeedbackVector> feedback_vector(
910         data->info()->closure()->feedback_vector());
911     JSCreateLowering create_lowering(
912         &graph_reducer, data->info()->dependencies(), data->jsgraph(),
913         feedback_vector, data->native_context(), temp_zone);
914     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
915     if (data->info()->is_deoptimization_enabled()) {
916       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
917     }
918     JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
919                                    typed_lowering_flags, data->jsgraph(),
920                                    temp_zone);
921     TypedOptimization typed_optimization(
922         &graph_reducer, data->info()->dependencies(),
923         data->info()->is_deoptimization_enabled()
924             ? TypedOptimization::kDeoptimizationEnabled
925             : TypedOptimization::kNoFlags,
926         data->jsgraph());
927     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
928     CheckpointElimination checkpoint_elimination(&graph_reducer);
929     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
930                                          data->common(), data->machine());
931     AddReducer(data, &graph_reducer, &dead_code_elimination);
932     AddReducer(data, &graph_reducer, &builtin_reducer);
933     if (data->info()->is_deoptimization_enabled()) {
934       AddReducer(data, &graph_reducer, &create_lowering);
935     }
936     AddReducer(data, &graph_reducer, &typed_optimization);
937     AddReducer(data, &graph_reducer, &typed_lowering);
938     AddReducer(data, &graph_reducer, &simple_reducer);
939     AddReducer(data, &graph_reducer, &checkpoint_elimination);
940     AddReducer(data, &graph_reducer, &common_reducer);
941     graph_reducer.ReduceGraph();
942   }
943 };
944 
945 
946 struct EscapeAnalysisPhase {
phase_namev8::internal::compiler::EscapeAnalysisPhase947   static const char* phase_name() { return "escape analysis"; }
948 
Runv8::internal::compiler::EscapeAnalysisPhase949   void Run(PipelineData* data, Zone* temp_zone) {
950     EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
951                                    temp_zone);
952     if (!escape_analysis.Run()) return;
953     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
954     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
955                                          &escape_analysis, temp_zone);
956     AddReducer(data, &graph_reducer, &escape_reducer);
957     graph_reducer.ReduceGraph();
958     if (escape_reducer.compilation_failed()) {
959       data->set_compilation_failed();
960       return;
961     }
962     escape_reducer.VerifyReplacement();
963   }
964 };
965 
966 struct SimplifiedLoweringPhase {
phase_namev8::internal::compiler::SimplifiedLoweringPhase967   static const char* phase_name() { return "simplified lowering"; }
968 
Runv8::internal::compiler::SimplifiedLoweringPhase969   void Run(PipelineData* data, Zone* temp_zone) {
970     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
971                                 data->source_positions());
972     lowering.LowerAllNodes();
973   }
974 };
975 
976 struct LoopPeelingPhase {
phase_namev8::internal::compiler::LoopPeelingPhase977   static const char* phase_name() { return "loop peeling"; }
978 
Runv8::internal::compiler::LoopPeelingPhase979   void Run(PipelineData* data, Zone* temp_zone) {
980     GraphTrimmer trimmer(temp_zone, data->graph());
981     NodeVector roots(temp_zone);
982     data->jsgraph()->GetCachedNodes(&roots);
983     trimmer.TrimGraph(roots.begin(), roots.end());
984 
985     LoopTree* loop_tree =
986         LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
987     LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
988                                      temp_zone);
989   }
990 };
991 
992 struct LoopExitEliminationPhase {
phase_namev8::internal::compiler::LoopExitEliminationPhase993   static const char* phase_name() { return "loop exit elimination"; }
994 
Runv8::internal::compiler::LoopExitEliminationPhase995   void Run(PipelineData* data, Zone* temp_zone) {
996     LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
997   }
998 };
999 
1000 struct ConcurrentOptimizationPrepPhase {
phase_namev8::internal::compiler::ConcurrentOptimizationPrepPhase1001   static const char* phase_name() {
1002     return "concurrent optimization preparation";
1003   }
1004 
Runv8::internal::compiler::ConcurrentOptimizationPrepPhase1005   void Run(PipelineData* data, Zone* temp_zone) {
1006     // Make sure we cache these code stubs.
1007     data->jsgraph()->CEntryStubConstant(1);
1008     data->jsgraph()->CEntryStubConstant(2);
1009     data->jsgraph()->CEntryStubConstant(3);
1010 
1011     // This is needed for escape analysis.
1012     NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
1013     NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
1014   }
1015 };
1016 
1017 struct GenericLoweringPhase {
phase_namev8::internal::compiler::GenericLoweringPhase1018   static const char* phase_name() { return "generic lowering"; }
1019 
Runv8::internal::compiler::GenericLoweringPhase1020   void Run(PipelineData* data, Zone* temp_zone) {
1021     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
1022     JSGenericLowering generic_lowering(data->jsgraph());
1023     AddReducer(data, &graph_reducer, &generic_lowering);
1024     graph_reducer.ReduceGraph();
1025   }
1026 };
1027 
1028 struct EarlyOptimizationPhase {
phase_namev8::internal::compiler::EarlyOptimizationPhase1029   static const char* phase_name() { return "early optimization"; }
1030 
Runv8::internal::compiler::EarlyOptimizationPhase1031   void Run(PipelineData* data, Zone* temp_zone) {
1032     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
1033     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1034                                               data->common());
1035     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
1036     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1037     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1038     MachineOperatorReducer machine_reducer(data->jsgraph());
1039     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1040                                          data->common(), data->machine());
1041     AddReducer(data, &graph_reducer, &dead_code_elimination);
1042     AddReducer(data, &graph_reducer, &simple_reducer);
1043     AddReducer(data, &graph_reducer, &redundancy_elimination);
1044     AddReducer(data, &graph_reducer, &value_numbering);
1045     AddReducer(data, &graph_reducer, &machine_reducer);
1046     AddReducer(data, &graph_reducer, &common_reducer);
1047     graph_reducer.ReduceGraph();
1048   }
1049 };
1050 
1051 struct ControlFlowOptimizationPhase {
phase_namev8::internal::compiler::ControlFlowOptimizationPhase1052   static const char* phase_name() { return "control flow optimization"; }
1053 
Runv8::internal::compiler::ControlFlowOptimizationPhase1054   void Run(PipelineData* data, Zone* temp_zone) {
1055     ControlFlowOptimizer optimizer(data->graph(), data->common(),
1056                                    data->machine(), temp_zone);
1057     optimizer.Optimize();
1058   }
1059 };
1060 
1061 struct EffectControlLinearizationPhase {
phase_namev8::internal::compiler::EffectControlLinearizationPhase1062   static const char* phase_name() { return "effect linearization"; }
1063 
Runv8::internal::compiler::EffectControlLinearizationPhase1064   void Run(PipelineData* data, Zone* temp_zone) {
1065     // The scheduler requires the graphs to be trimmed, so trim now.
1066     // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
1067     // graphs.
1068     GraphTrimmer trimmer(temp_zone, data->graph());
1069     NodeVector roots(temp_zone);
1070     data->jsgraph()->GetCachedNodes(&roots);
1071     trimmer.TrimGraph(roots.begin(), roots.end());
1072 
1073     // Schedule the graph without node splitting so that we can
1074     // fix the effect and control flow for nodes with low-level side
1075     // effects (such as changing representation to tagged or
1076     // 'floating' allocation regions.)
1077     Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
1078                                                     Scheduler::kNoFlags);
1079     if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
1080     TraceSchedule(data->info(), schedule);
1081 
1082     // Post-pass for wiring the control/effects
1083     // - connect allocating representation changes into the control&effect
1084     //   chains and lower them,
1085     // - get rid of the region markers,
1086     // - introduce effect phis and rewire effects to get SSA again.
1087     EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
1088                                        data->source_positions());
1089     linearizer.Run();
1090   }
1091 };
1092 
1093 // The store-store elimination greatly benefits from doing a common operator
1094 // reducer and dead code elimination just before it, to eliminate conditional
1095 // deopts with a constant condition.
1096 
1097 struct DeadCodeEliminationPhase {
phase_namev8::internal::compiler::DeadCodeEliminationPhase1098   static const char* phase_name() { return "dead code elimination"; }
1099 
Runv8::internal::compiler::DeadCodeEliminationPhase1100   void Run(PipelineData* data, Zone* temp_zone) {
1101     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
1102     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1103                                               data->common());
1104     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1105                                          data->common(), data->machine());
1106     AddReducer(data, &graph_reducer, &dead_code_elimination);
1107     AddReducer(data, &graph_reducer, &common_reducer);
1108     graph_reducer.ReduceGraph();
1109   }
1110 };
1111 
1112 struct StoreStoreEliminationPhase {
phase_namev8::internal::compiler::StoreStoreEliminationPhase1113   static const char* phase_name() { return "store-store elimination"; }
1114 
Runv8::internal::compiler::StoreStoreEliminationPhase1115   void Run(PipelineData* data, Zone* temp_zone) {
1116     GraphTrimmer trimmer(temp_zone, data->graph());
1117     NodeVector roots(temp_zone);
1118     data->jsgraph()->GetCachedNodes(&roots);
1119     trimmer.TrimGraph(roots.begin(), roots.end());
1120 
1121     StoreStoreElimination::Run(data->jsgraph(), temp_zone);
1122   }
1123 };
1124 
1125 struct LoadEliminationPhase {
phase_namev8::internal::compiler::LoadEliminationPhase1126   static const char* phase_name() { return "load elimination"; }
1127 
Runv8::internal::compiler::LoadEliminationPhase1128   void Run(PipelineData* data, Zone* temp_zone) {
1129     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
1130     BranchElimination branch_condition_elimination(&graph_reducer,
1131                                                    data->jsgraph(), temp_zone);
1132     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1133                                               data->common());
1134     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
1135     LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
1136                                      temp_zone);
1137     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1138     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1139                                          data->common(), data->machine());
1140     AddReducer(data, &graph_reducer, &branch_condition_elimination);
1141     AddReducer(data, &graph_reducer, &dead_code_elimination);
1142     AddReducer(data, &graph_reducer, &redundancy_elimination);
1143     AddReducer(data, &graph_reducer, &load_elimination);
1144     AddReducer(data, &graph_reducer, &value_numbering);
1145     AddReducer(data, &graph_reducer, &common_reducer);
1146     graph_reducer.ReduceGraph();
1147   }
1148 };
1149 
1150 struct MemoryOptimizationPhase {
phase_namev8::internal::compiler::MemoryOptimizationPhase1151   static const char* phase_name() { return "memory optimization"; }
1152 
Runv8::internal::compiler::MemoryOptimizationPhase1153   void Run(PipelineData* data, Zone* temp_zone) {
1154     // The memory optimizer requires the graphs to be trimmed, so trim now.
1155     GraphTrimmer trimmer(temp_zone, data->graph());
1156     NodeVector roots(temp_zone);
1157     data->jsgraph()->GetCachedNodes(&roots);
1158     trimmer.TrimGraph(roots.begin(), roots.end());
1159 
1160     // Optimize allocations and load/store operations.
1161     MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
1162     optimizer.Optimize();
1163   }
1164 };
1165 
1166 struct LateOptimizationPhase {
phase_namev8::internal::compiler::LateOptimizationPhase1167   static const char* phase_name() { return "late optimization"; }
1168 
Runv8::internal::compiler::LateOptimizationPhase1169   void Run(PipelineData* data, Zone* temp_zone) {
1170     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
1171     BranchElimination branch_condition_elimination(&graph_reducer,
1172                                                    data->jsgraph(), temp_zone);
1173     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1174                                               data->common());
1175     ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
1176     MachineOperatorReducer machine_reducer(data->jsgraph());
1177     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1178                                          data->common(), data->machine());
1179     SelectLowering select_lowering(data->jsgraph()->graph(),
1180                                    data->jsgraph()->common());
1181     TailCallOptimization tco(data->common(), data->graph());
1182     AddReducer(data, &graph_reducer, &branch_condition_elimination);
1183     AddReducer(data, &graph_reducer, &dead_code_elimination);
1184     AddReducer(data, &graph_reducer, &value_numbering);
1185     AddReducer(data, &graph_reducer, &machine_reducer);
1186     AddReducer(data, &graph_reducer, &common_reducer);
1187     AddReducer(data, &graph_reducer, &select_lowering);
1188     AddReducer(data, &graph_reducer, &tco);
1189     graph_reducer.ReduceGraph();
1190   }
1191 };
1192 
1193 struct EarlyGraphTrimmingPhase {
phase_namev8::internal::compiler::EarlyGraphTrimmingPhase1194   static const char* phase_name() { return "early graph trimming"; }
Runv8::internal::compiler::EarlyGraphTrimmingPhase1195   void Run(PipelineData* data, Zone* temp_zone) {
1196     GraphTrimmer trimmer(temp_zone, data->graph());
1197     NodeVector roots(temp_zone);
1198     data->jsgraph()->GetCachedNodes(&roots);
1199     trimmer.TrimGraph(roots.begin(), roots.end());
1200   }
1201 };
1202 
1203 
1204 struct LateGraphTrimmingPhase {
phase_namev8::internal::compiler::LateGraphTrimmingPhase1205   static const char* phase_name() { return "late graph trimming"; }
Runv8::internal::compiler::LateGraphTrimmingPhase1206   void Run(PipelineData* data, Zone* temp_zone) {
1207     GraphTrimmer trimmer(temp_zone, data->graph());
1208     NodeVector roots(temp_zone);
1209     if (data->jsgraph()) {
1210       data->jsgraph()->GetCachedNodes(&roots);
1211     }
1212     trimmer.TrimGraph(roots.begin(), roots.end());
1213   }
1214 };
1215 
1216 
1217 struct ComputeSchedulePhase {
phase_namev8::internal::compiler::ComputeSchedulePhase1218   static const char* phase_name() { return "scheduling"; }
1219 
Runv8::internal::compiler::ComputeSchedulePhase1220   void Run(PipelineData* data, Zone* temp_zone) {
1221     Schedule* schedule = Scheduler::ComputeSchedule(
1222         temp_zone, data->graph(), data->info()->is_splitting_enabled()
1223                                       ? Scheduler::kSplitNodes
1224                                       : Scheduler::kNoFlags);
1225     if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
1226     data->set_schedule(schedule);
1227   }
1228 };
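// Scheduling converts the unordered "sea of nodes" into a Schedule: a
// control flow graph of basic blocks with every node assigned to a block in
// execution order. A sketch of driving the scheduler directly, mirroring
// the phase above:
//
//   Schedule* schedule =
//       Scheduler::ComputeSchedule(temp_zone, graph, Scheduler::kNoFlags);
//   if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);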
1229 
1230 
1231 struct InstructionSelectionPhase {
1232   static const char* phase_name() { return "select instructions"; }
1233
1234   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1235     InstructionSelector selector(
1236         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
1237         data->schedule(), data->source_positions(), data->frame(),
1238         data->info()->is_source_positions_enabled()
1239             ? InstructionSelector::kAllSourcePositions
1240             : InstructionSelector::kCallSourcePositions,
1241         InstructionSelector::SupportedFeatures(),
1242         FLAG_turbo_instruction_scheduling
1243             ? InstructionSelector::kEnableScheduling
1244             : InstructionSelector::kDisableScheduling,
1245         data->info()->will_serialize()
1246             ? InstructionSelector::kEnableSerialization
1247             : InstructionSelector::kDisableSerialization);
1248     if (!selector.SelectInstructions()) {
1249       data->set_compilation_failed();
1250     }
1251   }
1252 };
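// The three ternary arguments above translate pipeline settings into
// InstructionSelector features: record source positions for all
// instructions or only for calls, enable or disable instruction scheduling
// (--turbo-instruction-scheduling), and keep the generated code
// serializable when it is destined for the snapshot.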
1253 
1254 
1255 struct MeetRegisterConstraintsPhase {
1256   static const char* phase_name() { return "meet register constraints"; }
1257
1258   void Run(PipelineData* data, Zone* temp_zone) {
1259     ConstraintBuilder builder(data->register_allocation_data());
1260     builder.MeetRegisterConstraints();
1261   }
1262 };
1263 
1264 
1265 struct ResolvePhisPhase {
1266   static const char* phase_name() { return "resolve phis"; }
1267
1268   void Run(PipelineData* data, Zone* temp_zone) {
1269     ConstraintBuilder builder(data->register_allocation_data());
1270     builder.ResolvePhis();
1271   }
1272 };
1273 
1274 
1275 struct BuildLiveRangesPhase {
1276   static const char* phase_name() { return "build live ranges"; }
1277
1278   void Run(PipelineData* data, Zone* temp_zone) {
1279     LiveRangeBuilder builder(data->register_allocation_data(), temp_zone);
1280     builder.BuildLiveRanges();
1281   }
1282 };
1283 
1284 
1285 struct SplinterLiveRangesPhase {
1286   static const char* phase_name() { return "splinter live ranges"; }
1287
1288   void Run(PipelineData* data, Zone* temp_zone) {
1289     LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
1290                                              temp_zone);
1291     live_range_splinterer.Splinter();
1292   }
1293 };
1294 
1295 
1296 template <typename RegAllocator>
1297 struct AllocateGeneralRegistersPhase {
1298   static const char* phase_name() { return "allocate general registers"; }
1299
1300   void Run(PipelineData* data, Zone* temp_zone) {
1301     RegAllocator allocator(data->register_allocation_data(), GENERAL_REGISTERS,
1302                            temp_zone);
1303     allocator.AllocateRegisters();
1304   }
1305 };
1306 
1307 template <typename RegAllocator>
1308 struct AllocateFPRegistersPhase {
1309   static const char* phase_name() {
1310     return "allocate floating point registers";
1311   }
1312 
1313   void Run(PipelineData* data, Zone* temp_zone) {
1314     RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
1315                            temp_zone);
1316     allocator.AllocateRegisters();
1317   }
1318 };
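// The two allocation phases are templates over the allocator implementation
// so the same phase scaffolding can drive different register allocators;
// AllocateRegisters() below instantiates them as
//
//   Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
//   Run<AllocateFPRegistersPhase<LinearScanAllocator>>();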
1319 
1320 
1321 struct MergeSplintersPhase {
1322   static const char* phase_name() { return "merge splintered ranges"; }
1323   void Run(PipelineData* pipeline_data, Zone* temp_zone) {
1324     RegisterAllocationData* data = pipeline_data->register_allocation_data();
1325     LiveRangeMerger live_range_merger(data, temp_zone);
1326     live_range_merger.Merge();
1327   }
1328 };
1329 
1330 
1331 struct LocateSpillSlotsPhase {
1332   static const char* phase_name() { return "locate spill slots"; }
1333
1334   void Run(PipelineData* data, Zone* temp_zone) {
1335     SpillSlotLocator locator(data->register_allocation_data());
1336     locator.LocateSpillSlots();
1337   }
1338 };
1339 
1340 
1341 struct AssignSpillSlotsPhase {
1342   static const char* phase_name() { return "assign spill slots"; }
1343
1344   void Run(PipelineData* data, Zone* temp_zone) {
1345     OperandAssigner assigner(data->register_allocation_data());
1346     assigner.AssignSpillSlots();
1347   }
1348 };
1349 
1350 
1351 struct CommitAssignmentPhase {
1352   static const char* phase_name() { return "commit assignment"; }
1353
1354   void Run(PipelineData* data, Zone* temp_zone) {
1355     OperandAssigner assigner(data->register_allocation_data());
1356     assigner.CommitAssignment();
1357   }
1358 };
1359 
1360 
1361 struct PopulateReferenceMapsPhase {
1362   static const char* phase_name() { return "populate pointer maps"; }
1363
1364   void Run(PipelineData* data, Zone* temp_zone) {
1365     ReferenceMapPopulator populator(data->register_allocation_data());
1366     populator.PopulateReferenceMaps();
1367   }
1368 };
1369 
1370 
1371 struct ConnectRangesPhase {
1372   static const char* phase_name() { return "connect ranges"; }
1373
1374   void Run(PipelineData* data, Zone* temp_zone) {
1375     LiveRangeConnector connector(data->register_allocation_data());
1376     connector.ConnectRanges(temp_zone);
1377   }
1378 };
1379 
1380 
1381 struct ResolveControlFlowPhase {
1382   static const char* phase_name() { return "resolve control flow"; }
1383
1384   void Run(PipelineData* data, Zone* temp_zone) {
1385     LiveRangeConnector connector(data->register_allocation_data());
1386     connector.ResolveControlFlow(temp_zone);
1387   }
1388 };
1389 
1390 
1391 struct OptimizeMovesPhase {
1392   static const char* phase_name() { return "optimize moves"; }
1393
1394   void Run(PipelineData* data, Zone* temp_zone) {
1395     MoveOptimizer move_optimizer(temp_zone, data->sequence());
1396     move_optimizer.Run();
1397   }
1398 };
1399 
1400 
1401 struct FrameElisionPhase {
1402   static const char* phase_name() { return "frame elision"; }
1403
1404   void Run(PipelineData* data, Zone* temp_zone) {
1405     FrameElider(data->sequence()).Run();
1406   }
1407 };
1408 
1409 
1410 struct JumpThreadingPhase {
1411   static const char* phase_name() { return "jump threading"; }
1412
1413   void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
1414     ZoneVector<RpoNumber> result(temp_zone);
1415     if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
1416                                          frame_at_start)) {
1417       JumpThreading::ApplyForwarding(result, data->sequence());
1418     }
1419   }
1420 };
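// Jump threading is split into analysis and application: ComputeForwarding
// records, per block, where its final jump can be forwarded to, and returns
// true only if at least one jump can actually be redirected, so
// ApplyForwarding runs only when there is something to rewrite.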
1421 
1422 
1423 struct GenerateCodePhase {
1424   static const char* phase_name() { return "generate code"; }
1425
1426   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
1427     CodeGenerator generator(data->frame(), linkage, data->sequence(),
1428                             data->info());
1429     data->set_code(generator.GenerateCode());
1430   }
1431 };
1432 
1433 
1434 struct PrintGraphPhase {
1435   static const char* phase_name() { return nullptr; }
1436
1437   void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
1438     CompilationInfo* info = data->info();
1439     Graph* graph = data->graph();
1440 
1441     {  // Print JSON.
1442       AllowHandleDereference allow_deref;
1443       TurboJsonFile json_of(info, std::ios_base::app);
1444       json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
1445               << AsJSON(*graph, data->source_positions()) << "},\n";
1446     }
1447 
1448     if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
1449       AllowHandleDereference allow_deref;
1450       CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
1451       OFStream os(tracing_scope.file());
1452       os << "-- Graph after " << phase << " -- " << std::endl;
1453       os << AsRPO(*graph);
1454     }
1455   }
1456 };
1457 
1458 
1459 struct VerifyGraphPhase {
1460   static const char* phase_name() { return nullptr; }
1461
1462   void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
1463            bool values_only = false) {
1464     Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
1465                   values_only ? Verifier::kValuesOnly : Verifier::kAll);
1466   }
1467 };
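// The two helper phases above return nullptr from phase_name(), presumably
// so that printing and verification are not accounted as pipeline phases of
// their own in the statistics; they are bookkeeping around the real phases
// rather than graph transformations.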
1468 
1469 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
1470   if (FLAG_trace_turbo) {
1471     Run<PrintGraphPhase>(phase);
1472   }
1473   if (FLAG_turbo_verify) {
1474     Run<VerifyGraphPhase>(untyped);
1475   }
1476 }
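// RunPrintAndVerify is the tracing and checking seam between phases: under
// --trace-turbo it appends a JSON snapshot of the graph, and under
// --turbo-verify it runs the Verifier. The |untyped| flag tracks the window
// in which node types are valid, as in the calls below:
//
//   RunPrintAndVerify("Typed");                      // types are present
//   RunPrintAndVerify("Simplified lowering", true);  // types no longer valid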
1477 
1478 bool PipelineImpl::CreateGraph() {
1479   PipelineData* data = this->data_;
1480 
1481   data->BeginPhaseKind("graph creation");
1482 
1483   if (FLAG_trace_turbo) {
1484     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1485     OFStream os(tracing_scope.file());
1486     os << "---------------------------------------------------\n"
1487        << "Begin compiling method " << info()->GetDebugName().get()
1488        << " using Turbofan" << std::endl;
1489     TurboCfgFile tcf(isolate());
1490     tcf << AsC1VCompilation(info());
1491   }
1492 
1493   data->source_positions()->AddDecorator();
1494 
1495   if (FLAG_loop_assignment_analysis) {
1496     Run<LoopAssignmentAnalysisPhase>();
1497   }
1498 
1499   Run<GraphBuilderPhase>();
1500   if (data->compilation_failed()) {
1501     data->EndPhaseKind();
1502     return false;
1503   }
1504   RunPrintAndVerify("Initial untyped", true);
1505 
1506   // Perform OSR deconstruction.
1507   if (info()->is_osr()) {
1508     Run<OsrDeconstructionPhase>();
1509 
1510     Run<UntyperPhase>();
1511     RunPrintAndVerify("OSR deconstruction", true);
1512   }
1513 
1514   // Perform function context specialization and inlining (if enabled).
1515   Run<InliningPhase>();
1516   RunPrintAndVerify("Inlined", true);
1517 
1518   // Remove dead->live edges from the graph.
1519   Run<EarlyGraphTrimmingPhase>();
1520   RunPrintAndVerify("Early trimmed", true);
1521 
1522   if (FLAG_print_turbo_replay) {
1523     // Print a replay of the initial graph.
1524     GraphReplayPrinter::PrintReplay(data->graph());
1525   }
1526 
1527   // Run the type-sensitive lowerings and optimizations on the graph.
1528   {
1529     // Determine the Typer operation flags.
1530     Typer::Flags flags = Typer::kNoFlags;
1531     if (is_sloppy(info()->shared_info()->language_mode()) &&
1532         info()->shared_info()->IsUserJavaScript()) {
1533       // Sloppy mode functions always have an Object for this.
1534       flags |= Typer::kThisIsReceiver;
1535     }
1536     if (IsClassConstructor(info()->shared_info()->kind())) {
1537       // Class constructors cannot be [[Call]]ed.
1538       flags |= Typer::kNewTargetIsReceiver;
1539     }
1540 
1541     // Type the graph and keep the Typer running on newly created nodes within
1542     // this scope; the Typer is automatically unlinked from the Graph once we
1543     // leave this scope below.
1544     Typer typer(isolate(), flags, data->graph());
1545     Run<TyperPhase>(&typer);
1546     RunPrintAndVerify("Typed");
1547 
1548     data->BeginPhaseKind("lowering");
1549 
1550     // Lower JSOperators where we can determine types.
1551     Run<TypedLoweringPhase>();
1552     RunPrintAndVerify("Lowered typed");
1553   }
1554 
1555   // Do some hacky things to prepare for the optimization phase.
1556   // (caching handles, etc.).
1557   Run<ConcurrentOptimizationPrepPhase>();
1558 
1559   data->EndPhaseKind();
1560 
1561   return true;
1562 }
1563 
1564 bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
1565   PipelineData* data = this->data_;
1566 
1567   if (data->info()->is_loop_peeling_enabled()) {
1568     Run<LoopPeelingPhase>();
1569     RunPrintAndVerify("Loops peeled", true);
1570   } else {
1571     Run<LoopExitEliminationPhase>();
1572     RunPrintAndVerify("Loop exits eliminated", true);
1573   }
1574 
1575   if (!data->is_asm()) {
1576     if (FLAG_turbo_load_elimination) {
1577       Run<LoadEliminationPhase>();
1578       RunPrintAndVerify("Load eliminated");
1579     }
1580 
1581     if (FLAG_turbo_escape) {
1582       Run<EscapeAnalysisPhase>();
1583       if (data->compilation_failed()) {
1584         info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
1585         data->EndPhaseKind();
1586         return false;
1587       }
1588       RunPrintAndVerify("Escape Analysed");
1589     }
1590   }
1591 
1592   // Perform simplified lowering. This has to run w/o the Typer decorator,
1593   // because we cannot compute meaningful types anyway, and the computed types
1594   // might even conflict with the representation/truncation logic.
1595   Run<SimplifiedLoweringPhase>();
1596   RunPrintAndVerify("Simplified lowering", true);
1597 
1598 #ifdef DEBUG
1599   // From now on it is invalid to look at types on the nodes, because:
1600   //
1601   //  (a) The remaining passes (might) run concurrent to the main thread and
1602   //      therefore must not access the Heap or the Isolate in an uncontrolled
1603   //      way (as done by the type system), and
1604   //  (b) the types on the nodes might not make sense after representation
1605   //      selection due to the way we handle truncations; if we'd want to look
1606   //      at types afterwards we'd essentially need to re-type (large portions
1607   //      of) the graph.
1608   //
1609   // In order to catch bugs related to type access after this point we remove
1610   // the types from the nodes at this point (currently only in Debug builds).
1611   Run<UntyperPhase>();
1612   RunPrintAndVerify("Untyped", true);
1613 #endif
1614 
1615   // Run generic lowering pass.
1616   Run<GenericLoweringPhase>();
1617   RunPrintAndVerify("Generic lowering", true);
1618 
1619   data->BeginPhaseKind("block building");
1620 
1621   // Run early optimization pass.
1622   Run<EarlyOptimizationPhase>();
1623   RunPrintAndVerify("Early optimized", true);
1624 
1625   Run<EffectControlLinearizationPhase>();
1626   RunPrintAndVerify("Effect and control linearized", true);
1627 
1628   Run<DeadCodeEliminationPhase>();
1629   RunPrintAndVerify("Dead code elimination", true);
1630 
1631   if (FLAG_turbo_store_elimination) {
1632     Run<StoreStoreEliminationPhase>();
1633     RunPrintAndVerify("Store-store elimination", true);
1634   }
1635 
1636   // Optimize control flow.
1637   if (FLAG_turbo_cf_optimization) {
1638     Run<ControlFlowOptimizationPhase>();
1639     RunPrintAndVerify("Control flow optimized", true);
1640   }
1641 
1642   // Optimize memory access and allocation operations.
1643   Run<MemoryOptimizationPhase>();
1644   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
1645   RunPrintAndVerify("Memory optimized", true);
1646 
1647   // Lower changes that have been inserted before.
1648   Run<LateOptimizationPhase>();
1649   // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
1650   RunPrintAndVerify("Late optimized", true);
1651 
1652   data->source_positions()->RemoveDecorator();
1653 
1654   return ScheduleAndSelectInstructions(linkage, true);
1655 }
1656 
1657 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
1658                                                CallDescriptor* call_descriptor,
1659                                                Graph* graph, Schedule* schedule,
1660                                                Code::Flags flags,
1661                                                const char* debug_name) {
1662   CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
1663   if (isolate->serializer_enabled()) info.PrepareForSerializing();
1664 
1665   // Construct a pipeline for scheduling and code generation.
1666   ZoneStats zone_stats(isolate->allocator());
1667   SourcePositionTable source_positions(graph);
1668   PipelineData data(&zone_stats, &info, graph, schedule, &source_positions);
1669   data.set_verify_graph(FLAG_verify_csa);
1670   std::unique_ptr<PipelineStatistics> pipeline_statistics;
1671   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
1672     pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
1673     pipeline_statistics->BeginPhaseKind("stub codegen");
1674   }
1675 
1676   PipelineImpl pipeline(&data);
1677   DCHECK_NOT_NULL(data.schedule());
1678 
1679   if (FLAG_trace_turbo) {
1680     {
1681       CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
1682       OFStream os(tracing_scope.file());
1683       os << "---------------------------------------------------\n"
1684          << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
1685     }
1686     {
1687       TurboJsonFile json_of(&info, std::ios_base::trunc);
1688       json_of << "{\"function\":\"" << info.GetDebugName().get()
1689               << "\", \"source\":\"\",\n\"phases\":[";
1690     }
1691     pipeline.Run<PrintGraphPhase>("Machine");
1692   }
1693 
1694   pipeline.Run<VerifyGraphPhase>(false, true);
1695   return pipeline.ScheduleAndGenerateCode(call_descriptor);
1696 }
1697 
1698 // static
1699 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
1700   ZoneStats zone_stats(info->isolate()->allocator());
1701   std::unique_ptr<PipelineStatistics> pipeline_statistics(
1702       CreatePipelineStatistics(info, &zone_stats));
1703   PipelineData data(&zone_stats, info, pipeline_statistics.get());
1704   PipelineImpl pipeline(&data);
1705 
1706   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
1707 
1708   if (!pipeline.CreateGraph()) return Handle<Code>::null();
1709   if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
1710   return pipeline.GenerateCode(&linkage);
1711 }
1712 
1713 // static
1714 Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
1715                                               Graph* graph,
1716                                               Schedule* schedule) {
1717   CallDescriptor* call_descriptor =
1718       Linkage::ComputeIncoming(info->zone(), info);
1719   return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
1720 }
1721 
1722 // static
1723 Handle<Code> Pipeline::GenerateCodeForTesting(
1724     CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
1725     Schedule* schedule, SourcePositionTable* source_positions) {
1726   // Construct a pipeline for scheduling and code generation.
1727   ZoneStats zone_stats(info->isolate()->allocator());
1728   // TODO(wasm): Refactor code generation to check for non-existing source
1729   // table, then remove this conditional allocation.
1730   if (!source_positions)
1731     source_positions = new (info->zone()) SourcePositionTable(graph);
1732   PipelineData data(&zone_stats, info, graph, schedule, source_positions);
1733   std::unique_ptr<PipelineStatistics> pipeline_statistics;
1734   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
1735     pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
1736     pipeline_statistics->BeginPhaseKind("test codegen");
1737   }
1738 
1739   PipelineImpl pipeline(&data);
1740 
1741   if (FLAG_trace_turbo) {
1742     TurboJsonFile json_of(info, std::ios_base::trunc);
1743     json_of << "{\"function\":\"" << info->GetDebugName().get()
1744             << "\", \"source\":\"\",\n\"phases\":[";
1745   }
1746   // TODO(rossberg): Should this really be untyped?
1747   pipeline.RunPrintAndVerify("Machine", true);
1748 
1749   return pipeline.ScheduleAndGenerateCode(call_descriptor);
1750 }
1751 
1752 // static
1753 CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
1754                                             bool has_script) {
1755   Handle<SharedFunctionInfo> shared = handle(function->shared());
1756   ParseInfo* parse_info;
1757   if (!has_script) {
1758     parse_info = ParseInfo::AllocateWithoutScript(shared);
1759   } else {
1760     parse_info = new ParseInfo(shared);
1761   }
1762   return new PipelineCompilationJob(parse_info, function);
1763 }
1764 
1765 // static
1766 CompilationJob* Pipeline::NewWasmCompilationJob(
1767     CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
1768     SourcePositionTable* source_positions,
1769     ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
1770     bool allow_signalling_nan) {
1771   return new PipelineWasmCompilationJob(
1772       info, jsgraph, descriptor, source_positions, protected_instructions,
1773       allow_signalling_nan);
1774 }
1775 
1776 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
1777                                            InstructionSequence* sequence,
1778                                            bool run_verifier) {
1779   CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
1780                        sequence->zone(), Code::ComputeFlags(Code::STUB));
1781   ZoneStats zone_stats(sequence->isolate()->allocator());
1782   PipelineData data(&zone_stats, &info, sequence);
1783   PipelineImpl pipeline(&data);
1784   pipeline.data_->InitializeFrameData(nullptr);
1785   pipeline.AllocateRegisters(config, nullptr, run_verifier);
1786   return !data.compilation_failed();
1787 }
1788 
1789 bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
1790                                                  bool trim_graph) {
1791   CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
1792   PipelineData* data = this->data_;
1793 
1794   DCHECK_NOT_NULL(data->graph());
1795 
1796   if (trim_graph) {
1797     Run<LateGraphTrimmingPhase>();
1798     RunPrintAndVerify("Late trimmed", true);
1799   }
1800   if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
1801   TraceSchedule(data->info(), data->schedule());
1802 
1803   if (FLAG_turbo_profiling) {
1804     data->set_profiler_data(BasicBlockInstrumentor::Instrument(
1805         info(), data->graph(), data->schedule()));
1806   }
1807 
1808   bool verify_stub_graph = data->verify_graph();
1809   if (verify_stub_graph ||
1810       (FLAG_turbo_verify_machine_graph != nullptr &&
1811        (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
1812         !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
1813     if (FLAG_trace_verify_csa) {
1814       AllowHandleDereference allow_deref;
1815       CompilationInfo* info = data->info();
1816       CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
1817       OFStream os(tracing_scope.file());
1818       os << "--------------------------------------------------\n"
1819          << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
1820          << "--------------------------------------------------\n"
1821          << *data->schedule()
1822          << "--------------------------------------------------\n"
1823          << "--- End of " << data->debug_name() << " generated by TurboFan\n"
1824          << "--------------------------------------------------\n";
1825     }
1826     Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
1827     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
1828                               data->info()->IsStub(), data->debug_name(),
1829                               &temp_zone);
1830   }
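// The machine graph verifier can thus be requested from the command line;
// hypothetical invocations (MyStub stands in for an actual debug name):
//
//   --turbo-verify-machine-graph=*       // verify every graph
//   --turbo-verify-machine-graph=MyStub  // verify a single compilation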
1831 
1832   data->InitializeInstructionSequence(call_descriptor);
1833 
1834   data->InitializeFrameData(call_descriptor);
1835   // Select and schedule instructions covering the scheduled graph.
1836   Run<InstructionSelectionPhase>(linkage);
1837   if (data->compilation_failed()) {
1838     info()->AbortOptimization(kCodeGenerationFailed);
1839     data->EndPhaseKind();
1840     return false;
1841   }
1842 
1843   if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
1844     AllowHandleDereference allow_deref;
1845     TurboCfgFile tcf(isolate());
1846     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
1847                  data->sequence());
1848   }
1849 
1850   if (FLAG_trace_turbo) {
1851     std::ostringstream source_position_output;
1852     // Output source position information before the graph is deleted.
1853     data_->source_positions()->Print(source_position_output);
1854     data_->set_source_position_output(source_position_output.str());
1855   }
1856 
1857   data->DeleteGraphZone();
1858 
1859   data->BeginPhaseKind("register allocation");
1860 
1861   bool run_verifier = FLAG_turbo_verify_allocation;
1862 
1863   // Allocate registers.
1864   AllocateRegisters(RegisterConfiguration::Turbofan(), call_descriptor,
1865                     run_verifier);
1866   Run<FrameElisionPhase>();
1867   if (data->compilation_failed()) {
1868     info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
1869     data->EndPhaseKind();
1870     return false;
1871   }
1872 
1873   // TODO(mtrofin): move this off to the register allocator.
1874   bool generate_frame_at_start =
1875       data_->sequence()->instruction_blocks().front()->must_construct_frame();
1876   // Optimize jumps.
1877   if (FLAG_turbo_jt) {
1878     Run<JumpThreadingPhase>(generate_frame_at_start);
1879   }
1880 
1881   data->EndPhaseKind();
1882 
1883   return true;
1884 }
1885 
1886 Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
1887   PipelineData* data = this->data_;
1888 
1889   data->BeginPhaseKind("code generation");
1890 
1891   // Generate final machine code.
1892   Run<GenerateCodePhase>(linkage);
1893 
1894   Handle<Code> code = data->code();
1895   if (data->profiler_data()) {
1896 #if ENABLE_DISASSEMBLER
1897     std::ostringstream os;
1898     code->Disassemble(nullptr, os);
1899     data->profiler_data()->SetCode(&os);
1900 #endif
1901   }
1902 
1903   info()->SetCode(code);
1904   v8::internal::CodeGenerator::PrintCode(code, info());
1905 
1906   if (FLAG_trace_turbo) {
1907     TurboJsonFile json_of(info(), std::ios_base::app);
1908     json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
1909 #if ENABLE_DISASSEMBLER
1910     std::stringstream disassembly_stream;
1911     code->Disassemble(nullptr, disassembly_stream);
1912     std::string disassembly_string(disassembly_stream.str());
1913     for (const auto& c : disassembly_string) {
1914       json_of << AsEscapedUC16ForJSON(c);
1915     }
1916 #endif  // ENABLE_DISASSEMBLER
1917     json_of << "\"}\n],\n";
1918     json_of << "\"nodePositions\":";
1919     json_of << data->source_position_output();
1920     json_of << "}";
1921 
1922     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1923     OFStream os(tracing_scope.file());
1924     os << "---------------------------------------------------\n"
1925        << "Finished compiling method " << info()->GetDebugName().get()
1926        << " using Turbofan" << std::endl;
1927   }
1928 
1929   return code;
1930 }
1931 
1932 Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
1933     CallDescriptor* call_descriptor) {
1934   Linkage linkage(call_descriptor);
1935 
1936   // Schedule the graph, perform instruction selection and register allocation.
1937   if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
1938 
1939   // Generate the final machine code.
1940   return GenerateCode(&linkage);
1941 }
1942 
1943 void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
1944                                      CallDescriptor* descriptor,
1945                                      bool run_verifier) {
1946   PipelineData* data = this->data_;
1947   // Don't track usage for this zone in compiler stats.
1948   std::unique_ptr<Zone> verifier_zone;
1949   RegisterAllocatorVerifier* verifier = nullptr;
1950   if (run_verifier) {
1951     verifier_zone.reset(new Zone(isolate()->allocator(), ZONE_NAME));
1952     verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
1953         verifier_zone.get(), config, data->sequence());
1954   }
1955 
1956 #ifdef DEBUG
1957   data_->sequence()->ValidateEdgeSplitForm();
1958   data_->sequence()->ValidateDeferredBlockEntryPaths();
1959   data_->sequence()->ValidateDeferredBlockExitPaths();
1960 #endif
1961 
1962   data->InitializeRegisterAllocationData(config, descriptor);
1963   if (info()->is_osr()) {
1964     AllowHandleDereference allow_deref;
1965     OsrHelper osr_helper(info());
1966     osr_helper.SetupFrame(data->frame());
1967   }
1968 
1969   Run<MeetRegisterConstraintsPhase>();
1970   Run<ResolvePhisPhase>();
1971   Run<BuildLiveRangesPhase>();
1972   if (FLAG_trace_turbo_graph) {
1973     AllowHandleDereference allow_deref;
1974     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
1975     OFStream os(tracing_scope.file());
1976     os << "----- Instruction sequence before register allocation -----\n"
1977        << PrintableInstructionSequence({config, data->sequence()});
1978   }
1979   if (verifier != nullptr) {
1980     CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
1981     CHECK(data->register_allocation_data()
1982               ->RangesDefinedInDeferredStayInDeferred());
1983   }
1984 
1985   if (FLAG_turbo_preprocess_ranges) {
1986     Run<SplinterLiveRangesPhase>();
1987   }
1988 
1989   Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
1990   Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
1991 
1992   if (FLAG_turbo_preprocess_ranges) {
1993     Run<MergeSplintersPhase>();
1994   }
1995 
1996   Run<AssignSpillSlotsPhase>();
1997 
1998   Run<CommitAssignmentPhase>();
1999   Run<PopulateReferenceMapsPhase>();
2000   Run<ConnectRangesPhase>();
2001   Run<ResolveControlFlowPhase>();
2002   if (FLAG_turbo_move_optimization) {
2003     Run<OptimizeMovesPhase>();
2004   }
2005 
2006   Run<LocateSpillSlotsPhase>();
2007 
2008   if (FLAG_trace_turbo_graph) {
2009     AllowHandleDereference allow_deref;
2010     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
2011     OFStream os(tracing_scope.file());
2012     os << "----- Instruction sequence after register allocation -----\n"
2013        << PrintableInstructionSequence({config, data->sequence()});
2014   }
2015 
2016   if (verifier != nullptr) {
2017     verifier->VerifyAssignment();
2018     verifier->VerifyGapMoves();
2019   }
2020 
2021   if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
2022     TurboCfgFile tcf(data->isolate());
2023     tcf << AsC1VRegisterAllocationData("CodeGen",
2024                                        data->register_allocation_data());
2025   }
2026 
2027   data->DeleteRegisterAllocationZone();
2028 }
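// Taken together, the Run<> calls above form the complete register
// allocation pipeline: register constraints and phi resolution first, then
// live range construction (optionally splintered around deferred code under
// --turbo-preprocess-ranges and merged back afterwards), linear-scan
// allocation of general-purpose and floating point registers, spill slot
// assignment, and finally committing the result back into the instruction
// stream (reference maps, range connection, control flow resolution and
// optional move optimization).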
2029 
2030 CompilationInfo* PipelineImpl::info() const { return data_->info(); }
2031 
2032 Isolate* PipelineImpl::isolate() const { return info()->isolate(); }
2033 
2034 }  // namespace compiler
2035 }  // namespace internal
2036 }  // namespace v8
2037