// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/maglev/maglev-ir.h"

#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register.h"
#include "src/compiler/backend/instruction.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-vreg-allocator.h"

namespace v8 {
namespace internal {
namespace maglev {

const char* ToString(Opcode opcode) {
#define DEF_NAME(Name) #Name,
  static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)};
#undef DEF_NAME
  return names[static_cast<int>(opcode)];
}

#define __ code_gen_state->masm()->

// TODO(v8:7700): Clean up after all code paths are supported.
static bool g_this_field_will_be_unused_once_all_code_paths_are_supported;
#define UNSUPPORTED(REASON)                                                 \
  do {                                                                      \
    std::cerr << "Maglev: Can't compile, unsupported codegen path (" REASON \
                 ")\n";                                                     \
    code_gen_state->set_found_unsupported_code_paths(true);                 \
    g_this_field_will_be_unused_once_all_code_paths_are_supported = true;   \
  } while (false)

namespace {

// ---
// Vreg allocation helpers.
// ---

int GetVirtualRegister(Node* node) {
  return compiler::UnallocatedOperand::cast(node->result().operand())
      .virtual_register();
}

void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
  node->result().SetUnallocated(
      compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
      vreg_state->AllocateVirtualRegister());
}

void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
                   Register reg) {
  node->result().SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER,
                                reg.code(),
                                vreg_state->AllocateVirtualRegister());
}

void DefineSameAsFirst(MaglevVregAllocationState* vreg_state, Node* node) {
  node->result().SetUnallocated(vreg_state->AllocateVirtualRegister(), 0);
}

void UseRegister(Input& input) {
  input.SetUnallocated(compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
                       compiler::UnallocatedOperand::USED_AT_START,
                       GetVirtualRegister(input.node()));
}
void UseAny(Input& input) {
  input.SetUnallocated(
      compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
      compiler::UnallocatedOperand::USED_AT_START,
      GetVirtualRegister(input.node()));
}
void UseFixed(Input& input, Register reg) {
  input.SetUnallocated(compiler::UnallocatedOperand::FIXED_REGISTER, reg.code(),
                       GetVirtualRegister(input.node()));
}
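
// For illustration: a node's AllocateVreg combines these helpers to express
// its register constraints. A hypothetical node (a sketch, not one defined in
// this file) with one register input and a register result would be written
// as
//
//   void MyNode::AllocateVreg(MaglevVregAllocationState* vreg_state,
//                             const ProcessingState& state) {
//     UseRegister(value_input());
//     DefineAsRegister(vreg_state, this);
//   }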

// ---
// Code gen helpers.
// ---

void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
  // TODO(leszeks): Consider special casing the value. (Toon: could possibly
  // be done through Input directly?)
  const compiler::AllocatedOperand& operand =
      compiler::AllocatedOperand::cast(input.operand());

  if (operand.IsRegister()) {
    __ Push(operand.GetRegister());
  } else {
    DCHECK(operand.IsStackSlot());
    __ Push(GetStackSlot(operand));
  }
}

// ---
// Deferred code handling.
// ---

// Base case provides an error.
template <typename T, typename Enable = void>
struct CopyForDeferredHelper {
  template <typename U>
  struct No_Copy_Helper_Implemented_For_Type;
  static void Copy(MaglevCompilationUnit* compilation_unit,
                   No_Copy_Helper_Implemented_For_Type<T>);
};

// Helper for copies by value.
template <typename T, typename Enable = void>
struct CopyForDeferredByValue {
  static T Copy(MaglevCompilationUnit* compilation_unit, T node) {
    return node;
  }
};

// Node pointers are copied by value.
template <typename T>
struct CopyForDeferredHelper<
    T*, typename std::enable_if<std::is_base_of<NodeBase, T>::value>::type>
    : public CopyForDeferredByValue<T*> {};
// Arithmetic values and enums are copied by value.
template <typename T>
struct CopyForDeferredHelper<
    T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
    : public CopyForDeferredByValue<T> {};
template <typename T>
struct CopyForDeferredHelper<
    T, typename std::enable_if<std::is_enum<T>::value>::type>
    : public CopyForDeferredByValue<T> {};
// MaglevCompilationUnits are copied by value.
template <>
struct CopyForDeferredHelper<MaglevCompilationUnit*>
    : public CopyForDeferredByValue<MaglevCompilationUnit*> {};
// Machine registers are copied by value.
template <>
struct CopyForDeferredHelper<Register>
    : public CopyForDeferredByValue<Register> {};
// Bytecode offsets are copied by value.
template <>
struct CopyForDeferredHelper<BytecodeOffset>
    : public CopyForDeferredByValue<BytecodeOffset> {};

// InterpreterFrameState is cloned.
template <>
struct CopyForDeferredHelper<const InterpreterFrameState*> {
  static const InterpreterFrameState* Copy(
      MaglevCompilationUnit* compilation_unit,
      const InterpreterFrameState* frame_state) {
    return compilation_unit->zone()->New<InterpreterFrameState>(
        *compilation_unit, *frame_state);
  }
};
// EagerDeoptInfo pointers are copied by value.
template <>
struct CopyForDeferredHelper<EagerDeoptInfo*>
    : public CopyForDeferredByValue<EagerDeoptInfo*> {};

template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T&& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_unit,
                                        std::forward<T>(value));
}

template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, T& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
}

template <typename T>
T CopyForDeferred(MaglevCompilationUnit* compilation_unit, const T& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_unit, value);
}
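
// These overloads all dispatch to the CopyForDeferredHelper specializations
// above; passing a type without a matching specialization fails to compile
// via the incomplete No_Copy_Helper_Implemented_For_Type in the base case.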

template <typename Function, typename FunctionPointer = Function>
struct FunctionArgumentsTupleHelper
    : FunctionArgumentsTupleHelper<Function,
                                   decltype(&FunctionPointer::operator())> {};

template <typename T, typename C, typename R, typename... A>
struct FunctionArgumentsTupleHelper<T, R (C::*)(A...) const> {
  using FunctionPointer = R (*)(A...);
  using Tuple = std::tuple<A...>;
  static constexpr size_t kSize = sizeof...(A);
};

template <typename T>
struct StripFirstTwoTupleArgs;

template <typename T1, typename T2, typename... T>
struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
  using Stripped = std::tuple<T...>;
};
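
// To illustrate: for a capture-less lambda such as
//
//   [](MaglevCodeGenState* code_gen_state, Label* return_label, Register reg)
//
// FunctionArgumentsTupleHelper recovers the parameter list from its const
// operator(), giving Tuple = std::tuple<MaglevCodeGenState*, Label*,
// Register> and kSize = 3, and StripFirstTwoTupleArgs drops the leading
// (code_gen_state, return_label) pair, leaving std::tuple<Register> as the
// state that DeferredCodeInfoImpl below has to capture.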

template <typename Function>
class DeferredCodeInfoImpl final : public DeferredCodeInfo {
 public:
  using FunctionPointer =
      typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
  using Tuple = typename StripFirstTwoTupleArgs<
      typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped;
  static constexpr size_t kSize = FunctionArgumentsTupleHelper<Function>::kSize;

  template <typename... InArgs>
  explicit DeferredCodeInfoImpl(MaglevCompilationUnit* compilation_unit,
                                FunctionPointer function, InArgs&&... args)
      : function(function),
        args(CopyForDeferred(compilation_unit, std::forward<InArgs>(args))...) {
  }

  DeferredCodeInfoImpl(DeferredCodeInfoImpl&&) = delete;
  DeferredCodeInfoImpl(const DeferredCodeInfoImpl&) = delete;

  void Generate(MaglevCodeGenState* code_gen_state,
                Label* return_label) override {
    DoCall(code_gen_state, return_label, std::make_index_sequence<kSize - 2>{});
  }

 private:
  template <size_t... I>
  auto DoCall(MaglevCodeGenState* code_gen_state, Label* return_label,
              std::index_sequence<I...>) {
    // TODO(leszeks): This could be replaced with std::apply in C++17.
    return function(code_gen_state, return_label, std::get<I>(args)...);
  }

  FunctionPointer function;
  Tuple args;
};

template <typename Function, typename... Args>
void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
                      Function&& deferred_code_gen, Args&&... args) {
  using DeferredCodeInfoT = DeferredCodeInfoImpl<Function>;
  DeferredCodeInfoT* deferred_code =
      code_gen_state->compilation_unit()->zone()->New<DeferredCodeInfoT>(
          code_gen_state->compilation_unit(), deferred_code_gen,
          std::forward<Args>(args)...);

  code_gen_state->PushDeferredCode(deferred_code);
  if (FLAG_code_comments) {
    __ RecordComment("-- Jump to deferred code");
  }
  __ j(cond, &deferred_code->deferred_code_label);
  __ bind(&deferred_code->return_label);
}
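
// Sketch of a typical use (see CheckMaps::GenerateCode below for a real one):
//
//   JumpToDeferredIf(
//       not_equal, code_gen_state,
//       [](MaglevCodeGenState* code_gen_state, Label* return_label,
//          Register reg) {
//         // ... out-of-line slow path ...
//         __ jmp(return_label);
//       },
//       some_register);
//
// The trailing arguments are copied via CopyForDeferred and replayed into the
// lambda when the deferred code is emitted.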

// ---
// Deopt
// ---

void RegisterEagerDeopt(MaglevCodeGenState* code_gen_state,
                        EagerDeoptInfo* deopt_info) {
  if (deopt_info->deopt_entry_label.is_unused()) {
    code_gen_state->PushEagerDeopt(deopt_info);
  }
}

void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
                      EagerDeoptInfo* deopt_info) {
  RegisterEagerDeopt(code_gen_state, deopt_info);
  __ RecordComment("-- Jump to eager deopt");
  __ j(cond, &deopt_info->deopt_entry_label);
}

template <typename NodeT>
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
                      NodeT* node) {
  STATIC_ASSERT(NodeT::kProperties.can_eager_deopt());
  EmitEagerDeoptIf(cond, code_gen_state, node->eager_deopt_info());
}
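
// Note: the deopt info is registered at most once (RegisterEagerDeopt checks
// whether the entry label is still unused), so several conditional jumps can
// share one deopt entry; the label itself is presumably bound later, when the
// collected eager deopts are emitted.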

// ---
// Print
// ---

void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller,
                 const NodeBase* node) {
  if (!node->has_inputs()) return;

  os << " [";
  for (int i = 0; i < node->input_count(); i++) {
    if (i != 0) os << ", ";
    graph_labeller->PrintInput(os, node->input(i));
  }
  os << "]";
}

void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
                 const NodeBase* node) {}

void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller,
                 const ValueNode* node) {
  os << " → " << node->result().operand();
  if (node->has_valid_live_range()) {
    os << ", live range: [" << node->live_range().start << "-"
       << node->live_range().end << "]";
  }
}

void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
                  const NodeBase* node) {}

void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
                  const UnconditionalControlNode* node) {
  os << " b" << graph_labeller->BlockId(node->target());
}

void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
                  const ConditionalControlNode* node) {
  os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
     << graph_labeller->BlockId(node->if_false());
}

template <typename NodeT>
void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
               const NodeT* node) {
  os << node->opcode();
  node->PrintParams(os, graph_labeller);
  PrintInputs(os, graph_labeller, node);
  PrintResult(os, graph_labeller, node);
  PrintTargets(os, graph_labeller, node);
}

}  // namespace

void NodeBase::Print(std::ostream& os,
                     MaglevGraphLabeller* graph_labeller) const {
  switch (opcode()) {
#define V(Name)         \
  case Opcode::k##Name: \
    return PrintImpl(os, graph_labeller, this->Cast<Name>());
    NODE_BASE_LIST(V)
#undef V
  }
  UNREACHABLE();
}

DeoptInfo::DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
                     CheckpointedInterpreterState state)
    : state(state),
      input_locations(zone->NewArray<InputLocation>(
          state.register_frame->size(compilation_unit))) {
  // Default initialise if we're printing the graph, to avoid printing junk
  // values.
  if (FLAG_print_maglev_graph) {
    for (size_t i = 0; i < state.register_frame->size(compilation_unit); ++i) {
      new (&input_locations[i]) InputLocation();
    }
  }
}

// ---
// Nodes
// ---
void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                               const ProcessingState& state) {
  DefineAsRegister(vreg_state, this);
}
void SmiConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
                               const ProcessingState& state) {
  __ Move(ToRegister(result()), Immediate(value()));
}
void SmiConstant::PrintParams(std::ostream& os,
                              MaglevGraphLabeller* graph_labeller) const {
  os << "(" << value() << ")";
}

void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                            const ProcessingState& state) {
  DefineAsRegister(vreg_state, this);
}
void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
                            const ProcessingState& state) {
  __ Move(ToRegister(result()), object_.object());
}
void Constant::PrintParams(std::ostream& os,
                           MaglevGraphLabeller* graph_labeller) const {
  os << "(" << object_ << ")";
}

void InitialValue::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                const ProcessingState& state) {
  // TODO(leszeks): Make this nicer.
  result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT,
                          (StandardFrameConstants::kExpressionsOffset -
                           UnoptimizedFrameConstants::kRegisterFileFromFp) /
                                  kSystemPointerSize +
                              source().index(),
                          vreg_state->AllocateVirtualRegister());
}
void InitialValue::GenerateCode(MaglevCodeGenState* code_gen_state,
                                const ProcessingState& state) {
  // No-op, the value is already in the appropriate slot.
}
void InitialValue::PrintParams(std::ostream& os,
                               MaglevGraphLabeller* graph_labeller) const {
  os << "(" << source().ToString() << ")";
}

void LoadGlobal::AllocateVreg(MaglevVregAllocationState* vreg_state,
                              const ProcessingState& state) {
  UseFixed(context(), kContextRegister);
  DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void LoadGlobal::GenerateCode(MaglevCodeGenState* code_gen_state,
                              const ProcessingState& state) {
  // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.

  DCHECK_EQ(ToRegister(context()), kContextRegister);

  // TODO(jgruber): Detect properly.
  const int ic_kind =
      static_cast<int>(FeedbackSlotKind::kLoadGlobalNotInsideTypeof);

  __ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
              LoadGlobalNoFeedbackDescriptor::kName),
          name().object());
  __ Move(LoadGlobalNoFeedbackDescriptor::GetRegisterParameter(
              LoadGlobalNoFeedbackDescriptor::kICKind),
          Immediate(Smi::FromInt(ic_kind)));

  // TODO(jgruber): Implement full LoadGlobal handling.
  __ CallBuiltin(Builtin::kLoadGlobalIC_NoFeedback);
}
void LoadGlobal::PrintParams(std::ostream& os,
                             MaglevGraphLabeller* graph_labeller) const {
  os << "(" << name() << ")";
}

void RegisterInput::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                 const ProcessingState& state) {
  DefineAsFixed(vreg_state, this, input());
}
void RegisterInput::GenerateCode(MaglevCodeGenState* code_gen_state,
                                 const ProcessingState& state) {
  // Nothing to be done, the value is already in the register.
}
void RegisterInput::PrintParams(std::ostream& os,
                                MaglevGraphLabeller* graph_labeller) const {
  os << "(" << input() << ")";
}

void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                const ProcessingState& state) {
  DefineAsRegister(vreg_state, this);
}
void RootConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
                                const ProcessingState& state) {
  if (!has_valid_live_range()) return;

  Register reg = ToRegister(result());
  __ LoadRoot(reg, index());
}
void RootConstant::PrintParams(std::ostream& os,
                               MaglevGraphLabeller* graph_labeller) const {
  os << "(" << RootsTable::name(index()) << ")";
}

void CheckMaps::AllocateVreg(MaglevVregAllocationState* vreg_state,
                             const ProcessingState& state) {
  UseRegister(actual_map_input());
  set_temporaries_needed(1);
}
void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
                             const ProcessingState& state) {
  Register object = ToRegister(actual_map_input());
  RegList temps = temporaries();
  Register map_tmp = temps.PopFirst();

  __ LoadMap(map_tmp, object);
  __ Cmp(map_tmp, map().object());

  // TODO(leszeks): Encode as a bit on CheckMaps.
  if (map().is_migration_target()) {
    JumpToDeferredIf(
        not_equal, code_gen_state,
        [](MaglevCodeGenState* code_gen_state, Label* return_label,
           Register object, CheckMaps* node, EagerDeoptInfo* deopt_info,
           Register map_tmp) {
          RegisterEagerDeopt(code_gen_state, deopt_info);

          // If the map is not deprecated, deopt straight away.
          __ movl(kScratchRegister,
                  FieldOperand(map_tmp, Map::kBitField3Offset));
          __ testl(kScratchRegister,
                   Immediate(Map::Bits3::IsDeprecatedBit::kMask));
          __ j(zero, &deopt_info->deopt_entry_label);

          // Otherwise, try migrating the object. If the migration returns Smi
          // zero, then it failed and we should deopt.
          __ Push(object);
          __ Move(kContextRegister,
                  code_gen_state->broker()->target_native_context().object());
          // TODO(verwaest): We're making a call, so we need to spill around
          // it.
          __ CallRuntime(Runtime::kTryMigrateInstance);
          __ cmpl(kReturnRegister0, Immediate(0));
          __ j(equal, &deopt_info->deopt_entry_label);

          // The migrated object is returned on success, retry the map check.
          __ Move(object, kReturnRegister0);
          __ LoadMap(map_tmp, object);
          __ Cmp(map_tmp, node->map().object());
          __ j(equal, return_label);
          __ jmp(&deopt_info->deopt_entry_label);
        },
        object, this, eager_deopt_info(), map_tmp);
  } else {
    EmitEagerDeoptIf(not_equal, code_gen_state, this);
  }
}
void CheckMaps::PrintParams(std::ostream& os,
                            MaglevGraphLabeller* graph_labeller) const {
  os << "(" << *map().object() << ")";
}
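
// LoadField and StoreField below reuse the IC system's Smi-encoded handlers
// (see src/ic/handler-configuration.h, included above) to describe the
// property: whether it lives in-object or in the property array, whether it
// is a double field, and its field index.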
void LoadField::AllocateVreg(MaglevVregAllocationState* vreg_state,
                             const ProcessingState& state) {
  UseRegister(object_input());
  DefineAsRegister(vreg_state, this);
}
void LoadField::GenerateCode(MaglevCodeGenState* code_gen_state,
                             const ProcessingState& state) {
  // os << "kField, is in object = "
  //    << LoadHandler::IsInobjectBits::decode(raw_handler)
  //    << ", is double = " << LoadHandler::IsDoubleBits::decode(raw_handler)
  //    << ", field index = " <<
  //    LoadHandler::FieldIndexBits::decode(raw_handler);

  Register object = ToRegister(object_input());
  Register res = ToRegister(result());
  int handler = this->handler();

  if (LoadHandler::IsInobjectBits::decode(handler)) {
    Operand input_field_operand = FieldOperand(
        object, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
    __ DecompressAnyTagged(res, input_field_operand);
  } else {
    Operand property_array_operand =
        FieldOperand(object, JSReceiver::kPropertiesOrHashOffset);
    __ DecompressAnyTagged(res, property_array_operand);

    __ AssertNotSmi(res);

    Operand input_field_operand = FieldOperand(
        res, LoadHandler::FieldIndexBits::decode(handler) * kTaggedSize);
    __ DecompressAnyTagged(res, input_field_operand);
  }

  if (LoadHandler::IsDoubleBits::decode(handler)) {
    // TODO(leszeks): Copy out the value, either as a double or a HeapNumber.
    UNSUPPORTED("LoadField double property");
  }
}
void LoadField::PrintParams(std::ostream& os,
                            MaglevGraphLabeller* graph_labeller) const {
  os << "(" << std::hex << handler() << std::dec << ")";
}

void StoreField::AllocateVreg(MaglevVregAllocationState* vreg_state,
                              const ProcessingState& state) {
  UseRegister(object_input());
  UseRegister(value_input());
}
void StoreField::GenerateCode(MaglevCodeGenState* code_gen_state,
                              const ProcessingState& state) {
  Register object = ToRegister(object_input());
  Register value = ToRegister(value_input());

  if (StoreHandler::IsInobjectBits::decode(this->handler())) {
    Operand operand = FieldOperand(
        object,
        StoreHandler::FieldIndexBits::decode(this->handler()) * kTaggedSize);
    __ StoreTaggedField(operand, value);
  } else {
    // TODO(victorgomes): Out-of-object properties.
    UNSUPPORTED("StoreField out-of-object property");
  }
}

void StoreField::PrintParams(std::ostream& os,
                             MaglevGraphLabeller* graph_labeller) const {
  os << "(" << std::hex << handler() << std::dec << ")";
}

void LoadNamedGeneric::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                    const ProcessingState& state) {
  using D = LoadWithVectorDescriptor;
  UseFixed(context(), kContextRegister);
  UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver));
  DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void LoadNamedGeneric::GenerateCode(MaglevCodeGenState* code_gen_state,
                                    const ProcessingState& state) {
  using D = LoadWithVectorDescriptor;
  DCHECK_EQ(ToRegister(context()), kContextRegister);
  DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver));
  __ Move(D::GetRegisterParameter(D::kName), name().object());
  __ Move(D::GetRegisterParameter(D::kSlot),
          Smi::FromInt(feedback().slot.ToInt()));
  __ Move(D::GetRegisterParameter(D::kVector), feedback().vector);
  __ CallBuiltin(Builtin::kLoadIC);
}
void LoadNamedGeneric::PrintParams(std::ostream& os,
                                   MaglevGraphLabeller* graph_labeller) const {
  os << "(" << name_ << ")";
}

void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
                           const ProcessingState& state) {
  UNREACHABLE();
}
void GapMove::GenerateCode(MaglevCodeGenState* code_gen_state,
                           const ProcessingState& state) {
  if (source().IsAnyRegister()) {
    Register source_reg = ToRegister(source());
    if (target().IsAnyRegister()) {
      __ movq(ToRegister(target()), source_reg);
    } else {
      __ movq(ToMemOperand(target()), source_reg);
    }
  } else {
    MemOperand source_op = ToMemOperand(source());
    if (target().IsAnyRegister()) {
      __ movq(ToRegister(target()), source_op);
    } else {
      __ movq(kScratchRegister, source_op);
      __ movq(ToMemOperand(target()), kScratchRegister);
    }
  }
}
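
// The stack-to-stack case above bounces through kScratchRegister because x64
// movq has no memory-to-memory form.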
void GapMove::PrintParams(std::ostream& os,
                          MaglevGraphLabeller* graph_labeller) const {
  os << "(" << source() << " → " << target() << ")";
}

namespace {

constexpr Builtin BuiltinFor(Operation operation) {
  switch (operation) {
#define CASE(name)         \
  case Operation::k##name: \
    return Builtin::k##name##_WithFeedback;
    OPERATION_LIST(CASE)
#undef CASE
  }
}
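
// For example, BuiltinFor(Operation::kAdd) returns
// Builtin::kAdd_WithFeedback.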

}  // namespace

template <class Derived, Operation kOperation>
void UnaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
    MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
  using D = UnaryOp_WithFeedbackDescriptor;
  UseFixed(operand_input(), D::GetRegisterParameter(D::kValue));
  DefineAsFixed(vreg_state, this, kReturnRegister0);
}

template <class Derived, Operation kOperation>
void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
    MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
  using D = UnaryOp_WithFeedbackDescriptor;
  DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue));
  __ Move(kContextRegister, code_gen_state->native_context().object());
  __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
  __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
  __ CallBuiltin(BuiltinFor(kOperation));
}

template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived, kOperation>::AllocateVreg(
    MaglevVregAllocationState* vreg_state, const ProcessingState& state) {
  using D = BinaryOp_WithFeedbackDescriptor;
  UseFixed(left_input(), D::GetRegisterParameter(D::kLeft));
  UseFixed(right_input(), D::GetRegisterParameter(D::kRight));
  DefineAsFixed(vreg_state, this, kReturnRegister0);
}

template <class Derived, Operation kOperation>
void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode(
    MaglevCodeGenState* code_gen_state, const ProcessingState& state) {
  using D = BinaryOp_WithFeedbackDescriptor;
  DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft));
  DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight));
  __ Move(kContextRegister, code_gen_state->native_context().object());
  __ Move(D::GetRegisterParameter(D::kSlot), Immediate(feedback().index()));
  __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector);
  __ CallBuiltin(BuiltinFor(kOperation));
}

#define DEF_OPERATION(Name)                                      \
  void Name::AllocateVreg(MaglevVregAllocationState* vreg_state, \
                          const ProcessingState& state) {        \
    Base::AllocateVreg(vreg_state, state);                       \
  }                                                              \
  void Name::GenerateCode(MaglevCodeGenState* code_gen_state,    \
                          const ProcessingState& state) {        \
    Base::GenerateCode(code_gen_state, state);                   \
  }
GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION)
#undef DEF_OPERATION

void CheckedSmiUntag::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                   const ProcessingState& state) {
  UseRegister(input());
  DefineSameAsFirst(vreg_state, this);
}

void CheckedSmiUntag::GenerateCode(MaglevCodeGenState* code_gen_state,
                                   const ProcessingState& state) {
  Register value = ToRegister(input());
  // TODO(leszeks): Consider optimizing away this test and using the carry bit
  // of the `sarl` for cases where the deopt uses the value from a different
  // register.
  __ testb(value, Immediate(1));
  EmitEagerDeoptIf(not_zero, code_gen_state, this);
  __ sarl(value, Immediate(1));
}

void CheckedSmiTag::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                 const ProcessingState& state) {
  UseRegister(input());
  DefineSameAsFirst(vreg_state, this);
}

void CheckedSmiTag::GenerateCode(MaglevCodeGenState* code_gen_state,
                                 const ProcessingState& state) {
  Register reg = ToRegister(input());
  __ addl(reg, reg);
  EmitEagerDeoptIf(overflow, code_gen_state, this);
}
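
// Note: on this x64 port a Smi has a zero low tag bit, so CheckedSmiUntag
// deopts on a set low bit and shifts the tag out, while CheckedSmiTag tags by
// doubling the 32-bit value and deopts if that addition overflows.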

void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                 const ProcessingState& state) {
  DefineAsRegister(vreg_state, this);
}
void Int32Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
                                 const ProcessingState& state) {
  __ Move(ToRegister(result()), Immediate(value()));
}
void Int32Constant::PrintParams(std::ostream& os,
                                MaglevGraphLabeller* graph_labeller) const {
  os << "(" << value() << ")";
}

void Int32AddWithOverflow::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                        const ProcessingState& state) {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineSameAsFirst(vreg_state, this);
}

void Int32AddWithOverflow::GenerateCode(MaglevCodeGenState* code_gen_state,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input());
  Register right = ToRegister(right_input());
  __ addl(left, right);
  EmitEagerDeoptIf(overflow, code_gen_state, this);
}

void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state,
                       const ProcessingState& state) {
  // Phi inputs are processed in the post-process pass, once the v-regs of all
  // loop phis' inputs have been allocated.
  result().SetUnallocated(
      compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
      vreg_state->AllocateVirtualRegister());
}
// TODO(verwaest): Remove after switching the register allocator.
void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {
  for (Input& input : *this) {
    UseAny(input);
  }
}
void Phi::GenerateCode(MaglevCodeGenState* code_gen_state,
                       const ProcessingState& state) {}
void Phi::PrintParams(std::ostream& os,
                      MaglevGraphLabeller* graph_labeller) const {
  os << "(" << owner().ToString() << ")";
}

void Call::AllocateVreg(MaglevVregAllocationState* vreg_state,
                        const ProcessingState& state) {
  UseFixed(function(), CallTrampolineDescriptor::GetRegisterParameter(
                           CallTrampolineDescriptor::kFunction));
  UseFixed(context(), kContextRegister);
  for (int i = 0; i < num_args(); i++) {
    UseAny(arg(i));
  }
  DefineAsFixed(vreg_state, this, kReturnRegister0);
}
void Call::GenerateCode(MaglevCodeGenState* code_gen_state,
                        const ProcessingState& state) {
  // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper.

  DCHECK_EQ(ToRegister(function()),
            CallTrampolineDescriptor::GetRegisterParameter(
                CallTrampolineDescriptor::kFunction));
  DCHECK_EQ(ToRegister(context()), kContextRegister);

  for (int i = num_args() - 1; i >= 0; --i) {
    PushInput(code_gen_state, arg(i));
  }

  uint32_t arg_count = num_args();
  __ Move(CallTrampolineDescriptor::GetRegisterParameter(
              CallTrampolineDescriptor::kActualArgumentsCount),
          Immediate(arg_count));

  // TODO(leszeks): This doesn't collect feedback yet; one option would be to
  // pass in the feedback vector by Handle.
  switch (receiver_mode_) {
    case ConvertReceiverMode::kNullOrUndefined:
      __ CallBuiltin(Builtin::kCall_ReceiverIsNullOrUndefined);
      break;
    case ConvertReceiverMode::kNotNullOrUndefined:
      __ CallBuiltin(Builtin::kCall_ReceiverIsNotNullOrUndefined);
      break;
    case ConvertReceiverMode::kAny:
      __ CallBuiltin(Builtin::kCall_ReceiverIsAny);
      break;
  }

  lazy_deopt_info()->deopting_call_return_pc = __ pc_offset_for_safepoint();
  code_gen_state->PushLazyDeopt(lazy_deopt_info());

  SafepointTableBuilder::Safepoint safepoint =
      code_gen_state->safepoint_table_builder()->DefineSafepoint(
          code_gen_state->masm());
  code_gen_state->DefineSafepointStackSlots(safepoint);
}
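
// Recording the call's return pc in the lazy deopt info ties this call site
// to its deopt metadata: if the callee triggers a lazy deopt, the frame can
// presumably be rebuilt from the checkpoint attached to this node.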

// ---
// Control nodes
// ---
void Return::AllocateVreg(MaglevVregAllocationState* vreg_state,
                          const ProcessingState& state) {
  UseFixed(value_input(), kReturnRegister0);
}
void Return::GenerateCode(MaglevCodeGenState* code_gen_state,
                          const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator.
  Register actual_params_size = r8;

  // Compute the size of the actual parameters + receiver (in bytes).
  // TODO(leszeks): Consider making this an input into Return to re-use the
  // incoming argc's register (if it's still valid).
  __ movq(actual_params_size,
          MemOperand(rbp, StandardFrameConstants::kArgCOffset));

  // Leave the frame.
  // TODO(leszeks): Add a new frame maker for Maglev.
  __ LeaveFrame(StackFrame::BASELINE);

  // If the actual argument count is bigger than the formal parameter count,
  // use it to free up the stack arguments.
  Label drop_dynamic_arg_size;
  __ cmpq(actual_params_size, Immediate(code_gen_state->parameter_count()));
  __ j(greater, &drop_dynamic_arg_size);

  // Drop receiver + arguments according to the static formal arguments size.
  __ Ret(code_gen_state->parameter_count() * kSystemPointerSize,
         kScratchRegister);

  __ bind(&drop_dynamic_arg_size);
  // Drop receiver + arguments according to the dynamic arguments size.
  __ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger,
                   TurboAssembler::kCountIncludesReceiver);
  __ Ret();
}

void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
                         const ProcessingState& state) {}
void Deopt::GenerateCode(MaglevCodeGenState* code_gen_state,
                         const ProcessingState& state) {
  EmitEagerDeoptIf(always, code_gen_state, this);
}

void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state,
                        const ProcessingState& state) {}
void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
                        const ProcessingState& state) {
  // Avoid emitting a jump to the next block.
  if (target() != state.next_block()) {
    __ jmp(target()->label());
  }
}

void JumpLoop::AllocateVreg(MaglevVregAllocationState* vreg_state,
                            const ProcessingState& state) {}
void JumpLoop::GenerateCode(MaglevCodeGenState* code_gen_state,
                            const ProcessingState& state) {
  __ jmp(target()->label());
}

void BranchIfTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                const ProcessingState& state) {
  UseRegister(condition_input());
}
void BranchIfTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
                                const ProcessingState& state) {
  Register value = ToRegister(condition_input());

  auto* next_block = state.next_block();

  // We don't have any branch probability information, so arrange the jumps to
  // fall through into whichever block is emitted next where possible.
  if (if_false() == next_block) {
    // Jump over the false block if true, otherwise fall through into it.
    __ JumpIfRoot(value, RootIndex::kTrueValue, if_true()->label());
  } else {
    // Jump to the false block if the value is not true.
    __ JumpIfNotRoot(value, RootIndex::kTrueValue, if_false()->label());
    // Jump to the true block if it's not the next block.
    if (if_true() != next_block) {
      __ jmp(if_true()->label());
    }
  }
}

void BranchIfCompare::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                   const ProcessingState& state) {}
void BranchIfCompare::GenerateCode(MaglevCodeGenState* code_gen_state,
                                   const ProcessingState& state) {
  USE(operation_);
  UNREACHABLE();
}

void BranchIfToBooleanTrue::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                         const ProcessingState& state) {
  UseFixed(condition_input(),
           ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));
}
void BranchIfToBooleanTrue::GenerateCode(MaglevCodeGenState* code_gen_state,
                                         const ProcessingState& state) {
  DCHECK_EQ(ToRegister(condition_input()),
            ToBooleanForBaselineJumpDescriptor::GetRegisterParameter(0));

  // ToBooleanForBaselineJump returns the ToBoolean value in kReturnRegister1
  // and the original value in kInterpreterAccumulatorRegister, so we don't
  // have to worry about the original value getting clobbered.
  __ CallBuiltin(Builtin::kToBooleanForBaselineJump);
  __ SmiCompare(kReturnRegister1, Smi::zero());

  auto* next_block = state.next_block();

  // We don't have any branch probability information, so arrange the jumps to
  // fall through into whichever block is emitted next where possible.
  if (if_false() == next_block) {
    // Jump over the false block if non-zero, otherwise fall through into it.
    __ j(not_equal, if_true()->label());
  } else {
    // Jump to the false block if zero.
    __ j(equal, if_false()->label());
    // Fall through or jump to the true block.
    if (if_true() != next_block) {
      __ jmp(if_true()->label());
    }
  }
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8