1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/effect-control-linearizer.h"
6 
7 #include "src/code-factory.h"
8 #include "src/compiler/access-builder.h"
9 #include "src/compiler/compiler-source-position-table.h"
10 #include "src/compiler/js-graph.h"
11 #include "src/compiler/linkage.h"
12 #include "src/compiler/node-matchers.h"
13 #include "src/compiler/node-properties.h"
14 #include "src/compiler/node.h"
15 #include "src/compiler/schedule.h"
16 #include "src/objects-inl.h"
17 
18 namespace v8 {
19 namespace internal {
20 namespace compiler {
21 
22 EffectControlLinearizer::EffectControlLinearizer(
23     JSGraph* js_graph, Schedule* schedule, Zone* temp_zone,
24     SourcePositionTable* source_positions)
25     : js_graph_(js_graph),
26       schedule_(schedule),
27       temp_zone_(temp_zone),
28       source_positions_(source_positions),
29       graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
30 
31 Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
32 CommonOperatorBuilder* EffectControlLinearizer::common() const {
33   return js_graph_->common();
34 }
35 SimplifiedOperatorBuilder* EffectControlLinearizer::simplified() const {
36   return js_graph_->simplified();
37 }
38 MachineOperatorBuilder* EffectControlLinearizer::machine() const {
39   return js_graph_->machine();
40 }
41 
42 namespace {
43 
44 struct BlockEffectControlData {
45   Node* current_effect = nullptr;       // New effect.
46   Node* current_control = nullptr;      // New control.
47   Node* current_frame_state = nullptr;  // New frame state.
48 };
49 
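// Tracks, for each (predecessor, successor) edge of the schedule, the effect,
// control and frame-state values that leave the predecessor towards that
// successor. Entries are keyed by the RPO numbers of the two blocks.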
50 class BlockEffectControlMap {
51  public:
52   explicit BlockEffectControlMap(Zone* temp_zone) : map_(temp_zone) {}
53 
54   BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) {
55     return map_[std::make_pair(from->rpo_number(), to->rpo_number())];
56   }
57 
58   const BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) const {
59     return map_.at(std::make_pair(from->rpo_number(), to->rpo_number()));
60   }
61 
62  private:
63   typedef std::pair<int32_t, int32_t> Key;
64   typedef ZoneMap<Key, BlockEffectControlData> Map;
65 
66   Map map_;
67 };
68 
69 // Effect phis that need to be updated after the first pass.
70 struct PendingEffectPhi {
71   Node* effect_phi;
72   BasicBlock* block;
73 
74   PendingEffectPhi(Node* effect_phi, BasicBlock* block)
75       : effect_phi(effect_phi), block(block) {}
76 };
77 
78 void UpdateEffectPhi(Node* node, BasicBlock* block,
79                      BlockEffectControlMap* block_effects) {
80   // Update all inputs to an effect phi with the effects from the given
81   // block->effect map.
82   DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
83   DCHECK_EQ(static_cast<size_t>(node->op()->EffectInputCount()),
84             block->PredecessorCount());
85   for (int i = 0; i < node->op()->EffectInputCount(); i++) {
86     Node* input = node->InputAt(i);
87     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
88     const BlockEffectControlData& block_effect =
89         block_effects->For(predecessor, block);
90     if (input != block_effect.current_effect) {
91       node->ReplaceInput(i, block_effect.current_effect);
92     }
93   }
94 }
95 
96 void UpdateBlockControl(BasicBlock* block,
97                         BlockEffectControlMap* block_effects) {
98   Node* control = block->NodeAt(0);
99   DCHECK(NodeProperties::IsControl(control));
100 
101   // Do not rewire the end node.
102   if (control->opcode() == IrOpcode::kEnd) return;
103 
104   // Update all inputs to the given control node with the correct control.
105   DCHECK(control->opcode() == IrOpcode::kMerge ||
106          static_cast<size_t>(control->op()->ControlInputCount()) ==
107              block->PredecessorCount());
108   if (static_cast<size_t>(control->op()->ControlInputCount()) !=
109       block->PredecessorCount()) {
110     return;  // We already re-wired the control inputs of this node.
111   }
112   for (int i = 0; i < control->op()->ControlInputCount(); i++) {
113     Node* input = NodeProperties::GetControlInput(control, i);
114     BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
115     const BlockEffectControlData& block_effect =
116         block_effects->For(predecessor, block);
117     if (input != block_effect.current_control) {
118       NodeProperties::ReplaceControlInput(control, block_effect.current_control,
119                                           i);
120     }
121   }
122 }
123 
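// A block has an incoming back edge if one of its predecessors does not come
// before it in reverse post-order; in a well-formed schedule this should only
// be the case for loop headers.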
124 bool HasIncomingBackEdges(BasicBlock* block) {
125   for (BasicBlock* pred : block->predecessors()) {
126     if (pred->rpo_number() >= block->rpo_number()) {
127       return true;
128     }
129   }
130   return false;
131 }
132 
133 void RemoveRegionNode(Node* node) {
134   DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
135          IrOpcode::kBeginRegion == node->opcode());
136   // Update the value/context uses to the value input of the finish node and
137   // the effect uses to the effect input.
138   for (Edge edge : node->use_edges()) {
139     DCHECK(!edge.from()->IsDead());
140     if (NodeProperties::IsEffectEdge(edge)) {
141       edge.UpdateTo(NodeProperties::GetEffectInput(node));
142     } else {
143       DCHECK(!NodeProperties::IsControlEdge(edge));
144       DCHECK(!NodeProperties::IsFrameStateEdge(edge));
145       edge.UpdateTo(node->InputAt(0));
146     }
147   }
148   node->Kill();
149 }
150 
151 void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
152                     CommonOperatorBuilder* common,
153                     BlockEffectControlMap* block_effects,
154                     SourcePositionTable* source_positions) {
155   DCHECK_EQ(IrOpcode::kBranch, node->opcode());
156 
157   // This optimization is a special case of (super)block cloning. It takes an
158   // input graph as shown below and clones the Branch node for every predecessor
159   // to the Merge, essentially removing the Merge completely. This avoids
160   // materializing the bit for the Phi and may offer potential for further
161   // branch folding optimizations (i.e. because one or more inputs to the Phi are
162   // constants). Note that there may be more Phi nodes hanging off the Merge,
163   // but we can only handle a certain subset of them currently (actually only Phi and
164   // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
165   // input).
166 
167   //   Control1 ... ControlN
168   //      ^            ^
169   //      |            |   Cond1 ... CondN
170   //      +----+  +----+     ^         ^
171   //           |  |          |         |
172   //           |  |     +----+         |
173   //          Merge<--+ | +------------+
174   //            ^      \|/
175   //            |      Phi
176   //            |       |
177   //          Branch----+
178   //            ^
179   //            |
180   //      +-----+-----+
181   //      |           |
182   //    IfTrue     IfFalse
183   //      ^           ^
184   //      |           |
185 
186   // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
187 
188   // Control1 Cond1 ... ControlN CondN
189   //    ^      ^           ^      ^
190   //    \      /           \      /
191   //     Branch     ...     Branch
192   //       ^                  ^
193   //       |                  |
194   //   +---+---+          +---+----+
195   //   |       |          |        |
196   // IfTrue IfFalse ... IfTrue  IfFalse
197   //   ^       ^          ^        ^
198   //   |       |          |        |
199   //   +--+ +-------------+        |
200   //      | |  +--------------+ +--+
201   //      | |                 | |
202   //     Merge               Merge
203   //       ^                   ^
204   //       |                   |
205 
206   SourcePositionTable::Scope scope(source_positions,
207                                    source_positions->GetSourcePosition(node));
208   Node* branch = node;
209   Node* cond = NodeProperties::GetValueInput(branch, 0);
210   if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
211   Node* merge = NodeProperties::GetControlInput(branch);
212   if (merge->opcode() != IrOpcode::kMerge ||
213       NodeProperties::GetControlInput(cond) != merge) {
214     return;
215   }
216   // Grab the IfTrue/IfFalse projections of the Branch.
217   BranchMatcher matcher(branch);
218   // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
219   NodeVector phis(graph->zone());
220   for (Node* const use : merge->uses()) {
221     if (use == branch || use == cond) continue;
222     // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
223     // Merge. Ideally, we would just clone the nodes (and everything that
224   // depends on them to some distant join point), but that requires knowledge
225     // about dominance/post-dominance.
226     if (!NodeProperties::IsPhi(use)) return;
227     for (Edge edge : use->use_edges()) {
228       // Right now we can only handle Phi/EffectPhi nodes whose uses are
229   // directly control-dependent on either the IfTrue or the IfFalse
230       // successor, because we know exactly how to update those uses.
231       if (edge.from()->op()->ControlInputCount() != 1) return;
232       Node* control = NodeProperties::GetControlInput(edge.from());
233       if (NodeProperties::IsPhi(edge.from())) {
234         control = NodeProperties::GetControlInput(control, edge.index());
235       }
236       if (control != matcher.IfTrue() && control != matcher.IfFalse()) return;
237     }
238     phis.push_back(use);
239   }
240   BranchHint const hint = BranchHintOf(branch->op());
241   int const input_count = merge->op()->ControlInputCount();
242   DCHECK_LE(1, input_count);
243   Node** const inputs = graph->zone()->NewArray<Node*>(2 * input_count);
244   Node** const merge_true_inputs = &inputs[0];
245   Node** const merge_false_inputs = &inputs[input_count];
246   for (int index = 0; index < input_count; ++index) {
247     Node* cond1 = NodeProperties::GetValueInput(cond, index);
248     Node* control1 = NodeProperties::GetControlInput(merge, index);
249     Node* branch1 = graph->NewNode(common->Branch(hint), cond1, control1);
250     merge_true_inputs[index] = graph->NewNode(common->IfTrue(), branch1);
251     merge_false_inputs[index] = graph->NewNode(common->IfFalse(), branch1);
252   }
253   Node* const merge_true = matcher.IfTrue();
254   Node* const merge_false = matcher.IfFalse();
255   merge_true->TrimInputCount(0);
256   merge_false->TrimInputCount(0);
257   for (int i = 0; i < input_count; ++i) {
258     merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
259     merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
260   }
261   DCHECK_EQ(2u, block->SuccessorCount());
262   NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
263   NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
264   int const true_index =
265       block->SuccessorAt(0)->NodeAt(0) == matcher.IfTrue() ? 0 : 1;
266   BlockEffectControlData* true_block_data =
267       &block_effects->For(block, block->SuccessorAt(true_index));
268   BlockEffectControlData* false_block_data =
269       &block_effects->For(block, block->SuccessorAt(true_index ^ 1));
270   for (Node* const phi : phis) {
271     for (int index = 0; index < input_count; ++index) {
272       inputs[index] = phi->InputAt(index);
273     }
274     inputs[input_count] = merge_true;
275     Node* phi_true = graph->NewNode(phi->op(), input_count + 1, inputs);
276     inputs[input_count] = merge_false;
277     Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
278     if (phi->UseCount() == 0) {
279       DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
280     } else {
281       for (Edge edge : phi->use_edges()) {
282         Node* control = NodeProperties::GetControlInput(edge.from());
283         if (NodeProperties::IsPhi(edge.from())) {
284           control = NodeProperties::GetControlInput(control, edge.index());
285         }
286         DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
287         edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
288       }
289     }
290     if (phi->opcode() == IrOpcode::kEffectPhi) {
291       true_block_data->current_effect = phi_true;
292       false_block_data->current_effect = phi_false;
293     }
294     phi->Kill();
295   }
296   // Fix up IfTrue and IfFalse and kill all dead nodes.
297   if (branch == block->control_input()) {
298     true_block_data->current_control = merge_true;
299     false_block_data->current_control = merge_false;
300   }
301   branch->Kill();
302   cond->Kill();
303   merge->Kill();
304 }
305 }  // namespace
306 
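// Walks the schedule in reverse post-order and wires every node of every block
// into a linear effect and control chain. Effect phis and loop-header controls
// that depend on back edges cannot be finished in this pass; they are recorded
// in pending_effect_phis/pending_block_controls and patched up afterwards.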
307 void EffectControlLinearizer::Run() {
308   BlockEffectControlMap block_effects(temp_zone());
309   ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
310   ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
311   NodeVector inputs_buffer(temp_zone());
312 
313   for (BasicBlock* block : *(schedule()->rpo_order())) {
314     size_t instr = 0;
315 
316     // The control node should be the first.
317     Node* control = block->NodeAt(instr);
318     DCHECK(NodeProperties::IsControl(control));
319     // Update the control inputs.
320     if (HasIncomingBackEdges(block)) {
321       // If there are back edges, we need to update later because we have not
322       // computed the control yet. This should only happen for loops.
323       DCHECK_EQ(IrOpcode::kLoop, control->opcode());
324       pending_block_controls.push_back(block);
325     } else {
326       // If there are no back edges, we can update now.
327       UpdateBlockControl(block, &block_effects);
328     }
329     instr++;
330 
331     // Iterate over the phis and update the effect phis.
332     Node* effect = nullptr;
333     Node* terminate = nullptr;
334     for (; instr < block->NodeCount(); instr++) {
335       Node* node = block->NodeAt(instr);
336       // Only go through the phis and effect phis.
337       if (node->opcode() == IrOpcode::kEffectPhi) {
338         // There should be at most one effect phi in a block.
339         DCHECK_NULL(effect);
340         // IfException blocks should not have effect phis.
341         DCHECK_NE(IrOpcode::kIfException, control->opcode());
342         effect = node;
343 
344         // Make sure we update the inputs to the incoming blocks' effects.
345         if (HasIncomingBackEdges(block)) {
346           // In case of loops, we do not update the effect phi immediately
347           // because the back predecessor has not been handled yet. We just
348           // record the effect phi for later processing.
349           pending_effect_phis.push_back(PendingEffectPhi(node, block));
350         } else {
351           UpdateEffectPhi(node, block, &block_effects);
352         }
353       } else if (node->opcode() == IrOpcode::kPhi) {
354         // Just skip phis.
355       } else if (node->opcode() == IrOpcode::kTerminate) {
356         DCHECK(terminate == nullptr);
357         terminate = node;
358       } else {
359         break;
360       }
361     }
362 
363     if (effect == nullptr) {
364       // There was no effect phi.
365       DCHECK(!HasIncomingBackEdges(block));
366       if (block == schedule()->start()) {
367         // Start block => effect is start.
368         DCHECK_EQ(graph()->start(), control);
369         effect = graph()->start();
370       } else if (control->opcode() == IrOpcode::kEnd) {
371         // End block is just a dummy, no effect needed.
372         DCHECK_EQ(BasicBlock::kNone, block->control());
373         DCHECK_EQ(1u, block->size());
374         effect = nullptr;
375       } else {
376         // If all the predecessors have the same effect, we can use it as our
377         // current effect.
378         effect =
379             block_effects.For(block->PredecessorAt(0), block).current_effect;
380         for (size_t i = 1; i < block->PredecessorCount(); ++i) {
381           if (block_effects.For(block->PredecessorAt(i), block)
382                   .current_effect != effect) {
383             effect = nullptr;
384             break;
385           }
386         }
387         if (effect == nullptr) {
388           DCHECK_NE(IrOpcode::kIfException, control->opcode());
389           // The input blocks do not have the same effect. We have
390           // to create an effect phi node.
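          // The Dead() placeholders below are only temporary; UpdateEffectPhi
          // replaces them with the real per-edge effects, either immediately
          // or, for loop headers, in the second pass.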
391           inputs_buffer.clear();
392           inputs_buffer.resize(block->PredecessorCount(), jsgraph()->Dead());
393           inputs_buffer.push_back(control);
394           effect = graph()->NewNode(
395               common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
396               static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
397           // For loops, we update the effect phi node later to break cycles.
398           if (control->opcode() == IrOpcode::kLoop) {
399             pending_effect_phis.push_back(PendingEffectPhi(effect, block));
400           } else {
401             UpdateEffectPhi(effect, block, &block_effects);
402           }
403         } else if (control->opcode() == IrOpcode::kIfException) {
404           // The IfException is connected into the effect chain, so we need
405           // to update the effect here.
406           NodeProperties::ReplaceEffectInput(control, effect);
407           effect = control;
408         }
409       }
410     }
411 
412     // Fixup the Terminate node.
413     if (terminate != nullptr) {
414       NodeProperties::ReplaceEffectInput(terminate, effect);
415     }
416 
417     // The frame state at block entry is determined by the frame states leaving
418     // all predecessors. In case there is no frame state dominating this block,
419     // we can rely on a checkpoint being present before the next deoptimization.
420     // TODO(mstarzinger): Eventually we will need to go hunt for a frame state
421     // once deoptimizing nodes roam freely through the schedule.
422     Node* frame_state = nullptr;
423     if (block != schedule()->start()) {
424       // If all the predecessors have the same effect, we can use it
425       // as our current effect.
426       frame_state =
427           block_effects.For(block->PredecessorAt(0), block).current_frame_state;
428       for (size_t i = 1; i < block->PredecessorCount(); i++) {
429         if (block_effects.For(block->PredecessorAt(i), block)
430                 .current_frame_state != frame_state) {
431           frame_state = nullptr;
432           break;
433         }
434       }
435     }
436 
437     // Process the ordinary instructions.
438     for (; instr < block->NodeCount(); instr++) {
439       Node* node = block->NodeAt(instr);
440       ProcessNode(node, &frame_state, &effect, &control);
441     }
442 
443     switch (block->control()) {
444       case BasicBlock::kGoto:
445       case BasicBlock::kNone:
446         break;
447 
448       case BasicBlock::kCall:
449       case BasicBlock::kTailCall:
450       case BasicBlock::kSwitch:
451       case BasicBlock::kReturn:
452       case BasicBlock::kDeoptimize:
453       case BasicBlock::kThrow:
454         ProcessNode(block->control_input(), &frame_state, &effect, &control);
455         break;
456 
457       case BasicBlock::kBranch:
458         ProcessNode(block->control_input(), &frame_state, &effect, &control);
459         TryCloneBranch(block->control_input(), block, graph(), common(),
460                        &block_effects, source_positions_);
461         break;
462     }
463 
464     // Store the effect, control and frame state for later use.
465     for (BasicBlock* successor : block->successors()) {
466       BlockEffectControlData* data = &block_effects.For(block, successor);
467       if (data->current_effect == nullptr) {
468         data->current_effect = effect;
469       }
470       if (data->current_control == nullptr) {
471         data->current_control = control;
472       }
473       data->current_frame_state = frame_state;
474     }
475   }
476 
477   // Update the incoming edges of the effect phis that could not be processed
478   // during the first pass (because they could have incoming back edges).
479   for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
480     UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
481                     &block_effects);
482   }
483   for (BasicBlock* pending_block_control : pending_block_controls) {
484     UpdateBlockControl(pending_block_control, &block_effects);
485   }
486 }
487 
488 namespace {
489 
490 void TryScheduleCallIfSuccess(Node* node, Node** control) {
491   // Schedule the call's IfSuccess node if there is no exception use.
492   if (!NodeProperties::IsExceptionalCall(node)) {
493     for (Edge edge : node->use_edges()) {
494       if (NodeProperties::IsControlEdge(edge) &&
495           edge.from()->opcode() == IrOpcode::kIfSuccess) {
496         *control = edge.from();
497       }
498     }
499   }
500 }
501 
502 }  // namespace
503 
504 void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
505                                           Node** effect, Node** control) {
506   SourcePositionTable::Scope scope(source_positions_,
507                                    source_positions_->GetSourcePosition(node));
508 
509   // If the node needs to be wired into the effect/control chain, do this
510   // here. Pass current frame state for lowering to eager deoptimization.
511   if (TryWireInStateEffect(node, *frame_state, effect, control)) {
512     return;
513   }
514 
515   // If the node has a visible effect, then there must be a checkpoint in the
516   // effect chain before we are allowed to place another eager deoptimization
517   // point. We zap the frame state to ensure this invariant is maintained.
518   if (region_observability_ == RegionObservability::kObservable &&
519       !node->op()->HasProperty(Operator::kNoWrite)) {
520     *frame_state = nullptr;
521   }
522 
523   // Remove the end markers of an 'atomic' allocation region because the
524   // region should be wired in now.
525   if (node->opcode() == IrOpcode::kFinishRegion) {
526     // Reset the current region observability.
527     region_observability_ = RegionObservability::kObservable;
528     // Update the value uses to the value input of the finish node and
529     // the effect uses to the effect input.
530     return RemoveRegionNode(node);
531   }
532   if (node->opcode() == IrOpcode::kBeginRegion) {
533     // Determine the observability for this region and use that for all
534     // nodes inside the region (i.e. ignore the absence of kNoWrite on
535     // StoreField and other operators).
536     DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
537     region_observability_ = RegionObservabilityOf(node->op());
538     // Update the value uses to the value input of the finish node and
539     // the effect uses to the effect input.
540     return RemoveRegionNode(node);
541   }
542 
543   // Special treatment for checkpoint nodes.
544   if (node->opcode() == IrOpcode::kCheckpoint) {
545     // Unlink the check point; effect uses will be updated to the incoming
546     // effect that is passed. The frame state is preserved for lowering.
547     DCHECK_EQ(RegionObservability::kObservable, region_observability_);
548     *frame_state = NodeProperties::GetFrameStateInput(node);
549     return;
550   }
551 
552   if (node->opcode() == IrOpcode::kIfSuccess) {
553     // We always schedule IfSuccess with its call, so skip it here.
554     DCHECK_EQ(IrOpcode::kCall, node->InputAt(0)->opcode());
555     // The IfSuccess node should not belong to an exceptional call node
556     // because such IfSuccess nodes should only start a basic block (and
557     // basic block start nodes are not handled in the ProcessNode method).
558     DCHECK(!NodeProperties::IsExceptionalCall(node->InputAt(0)));
559     return;
560   }
561 
562   // If the node takes an effect, replace with the current one.
563   if (node->op()->EffectInputCount() > 0) {
564     DCHECK_EQ(1, node->op()->EffectInputCount());
565     Node* input_effect = NodeProperties::GetEffectInput(node);
566 
567     if (input_effect != *effect) {
568       NodeProperties::ReplaceEffectInput(node, *effect);
569     }
570 
571     // If the node produces an effect, update our current effect. (However,
572     // ignore new effect chains started with ValueEffect.)
573     if (node->op()->EffectOutputCount() > 0) {
574       DCHECK_EQ(1, node->op()->EffectOutputCount());
575       *effect = node;
576     }
577   } else {
578     // New effect chain is only started with a Start or ValueEffect node.
579     DCHECK(node->op()->EffectOutputCount() == 0 ||
580            node->opcode() == IrOpcode::kStart);
581   }
582 
583   // Rewire control inputs.
584   for (int i = 0; i < node->op()->ControlInputCount(); i++) {
585     NodeProperties::ReplaceControlInput(node, *control, i);
586   }
587   // Update the current control and wire IfSuccess right after calls.
588   if (node->op()->ControlOutputCount() > 0) {
589     *control = node;
590     if (node->opcode() == IrOpcode::kCall) {
591       // Schedule the call's IfSuccess node (if there is no exception use).
592       TryScheduleCallIfSuccess(node, control);
593     }
594   }
595 }
596 
597 bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
598                                                    Node* frame_state,
599                                                    Node** effect,
600                                                    Node** control) {
601   gasm()->Reset(*effect, *control);
602   Node* result = nullptr;
603   switch (node->opcode()) {
604     case IrOpcode::kChangeBitToTagged:
605       result = LowerChangeBitToTagged(node);
606       break;
607     case IrOpcode::kChangeInt31ToTaggedSigned:
608       result = LowerChangeInt31ToTaggedSigned(node);
609       break;
610     case IrOpcode::kChangeInt32ToTagged:
611       result = LowerChangeInt32ToTagged(node);
612       break;
613     case IrOpcode::kChangeUint32ToTagged:
614       result = LowerChangeUint32ToTagged(node);
615       break;
616     case IrOpcode::kChangeFloat64ToTagged:
617       result = LowerChangeFloat64ToTagged(node);
618       break;
619     case IrOpcode::kChangeFloat64ToTaggedPointer:
620       result = LowerChangeFloat64ToTaggedPointer(node);
621       break;
622     case IrOpcode::kChangeTaggedSignedToInt32:
623       result = LowerChangeTaggedSignedToInt32(node);
624       break;
625     case IrOpcode::kChangeTaggedToBit:
626       result = LowerChangeTaggedToBit(node);
627       break;
628     case IrOpcode::kChangeTaggedToInt32:
629       result = LowerChangeTaggedToInt32(node);
630       break;
631     case IrOpcode::kChangeTaggedToUint32:
632       result = LowerChangeTaggedToUint32(node);
633       break;
634     case IrOpcode::kChangeTaggedToFloat64:
635       result = LowerChangeTaggedToFloat64(node);
636       break;
637     case IrOpcode::kChangeTaggedToTaggedSigned:
638       result = LowerChangeTaggedToTaggedSigned(node);
639       break;
640     case IrOpcode::kTruncateTaggedToBit:
641       result = LowerTruncateTaggedToBit(node);
642       break;
643     case IrOpcode::kTruncateTaggedToFloat64:
644       result = LowerTruncateTaggedToFloat64(node);
645       break;
646     case IrOpcode::kCheckBounds:
647       result = LowerCheckBounds(node, frame_state);
648       break;
649     case IrOpcode::kCheckMaps:
650       result = LowerCheckMaps(node, frame_state);
651       break;
652     case IrOpcode::kCheckNumber:
653       result = LowerCheckNumber(node, frame_state);
654       break;
655     case IrOpcode::kCheckReceiver:
656       result = LowerCheckReceiver(node, frame_state);
657       break;
658     case IrOpcode::kCheckString:
659       result = LowerCheckString(node, frame_state);
660       break;
661     case IrOpcode::kCheckInternalizedString:
662       result = LowerCheckInternalizedString(node, frame_state);
663       break;
664     case IrOpcode::kCheckIf:
665       result = LowerCheckIf(node, frame_state);
666       break;
667     case IrOpcode::kCheckedInt32Add:
668       result = LowerCheckedInt32Add(node, frame_state);
669       break;
670     case IrOpcode::kCheckedInt32Sub:
671       result = LowerCheckedInt32Sub(node, frame_state);
672       break;
673     case IrOpcode::kCheckedInt32Div:
674       result = LowerCheckedInt32Div(node, frame_state);
675       break;
676     case IrOpcode::kCheckedInt32Mod:
677       result = LowerCheckedInt32Mod(node, frame_state);
678       break;
679     case IrOpcode::kCheckedUint32Div:
680       result = LowerCheckedUint32Div(node, frame_state);
681       break;
682     case IrOpcode::kCheckedUint32Mod:
683       result = LowerCheckedUint32Mod(node, frame_state);
684       break;
685     case IrOpcode::kCheckedInt32Mul:
686       result = LowerCheckedInt32Mul(node, frame_state);
687       break;
688     case IrOpcode::kCheckedInt32ToTaggedSigned:
689       result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
690       break;
691     case IrOpcode::kCheckedUint32ToInt32:
692       result = LowerCheckedUint32ToInt32(node, frame_state);
693       break;
694     case IrOpcode::kCheckedUint32ToTaggedSigned:
695       result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
696       break;
697     case IrOpcode::kCheckedFloat64ToInt32:
698       result = LowerCheckedFloat64ToInt32(node, frame_state);
699       break;
700     case IrOpcode::kCheckedTaggedSignedToInt32:
701       result = LowerCheckedTaggedSignedToInt32(node, frame_state);
702       break;
703     case IrOpcode::kCheckedTaggedToInt32:
704       result = LowerCheckedTaggedToInt32(node, frame_state);
705       break;
706     case IrOpcode::kCheckedTaggedToFloat64:
707       result = LowerCheckedTaggedToFloat64(node, frame_state);
708       break;
709     case IrOpcode::kCheckedTaggedToTaggedSigned:
710       result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
711       break;
712     case IrOpcode::kCheckedTaggedToTaggedPointer:
713       result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
714       break;
715     case IrOpcode::kTruncateTaggedToWord32:
716       result = LowerTruncateTaggedToWord32(node);
717       break;
718     case IrOpcode::kCheckedTruncateTaggedToWord32:
719       result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
720       break;
721     case IrOpcode::kObjectIsDetectableCallable:
722       result = LowerObjectIsDetectableCallable(node);
723       break;
724     case IrOpcode::kObjectIsNonCallable:
725       result = LowerObjectIsNonCallable(node);
726       break;
727     case IrOpcode::kObjectIsNumber:
728       result = LowerObjectIsNumber(node);
729       break;
730     case IrOpcode::kObjectIsReceiver:
731       result = LowerObjectIsReceiver(node);
732       break;
733     case IrOpcode::kObjectIsSmi:
734       result = LowerObjectIsSmi(node);
735       break;
736     case IrOpcode::kObjectIsString:
737       result = LowerObjectIsString(node);
738       break;
739     case IrOpcode::kObjectIsUndetectable:
740       result = LowerObjectIsUndetectable(node);
741       break;
742     case IrOpcode::kNewRestParameterElements:
743       result = LowerNewRestParameterElements(node);
744       break;
745     case IrOpcode::kNewUnmappedArgumentsElements:
746       result = LowerNewUnmappedArgumentsElements(node);
747       break;
748     case IrOpcode::kArrayBufferWasNeutered:
749       result = LowerArrayBufferWasNeutered(node);
750       break;
751     case IrOpcode::kStringFromCharCode:
752       result = LowerStringFromCharCode(node);
753       break;
754     case IrOpcode::kStringFromCodePoint:
755       result = LowerStringFromCodePoint(node);
756       break;
757     case IrOpcode::kStringIndexOf:
758       result = LowerStringIndexOf(node);
759       break;
760     case IrOpcode::kStringCharAt:
761       result = LowerStringCharAt(node);
762       break;
763     case IrOpcode::kStringCharCodeAt:
764       result = LowerStringCharCodeAt(node);
765       break;
766     case IrOpcode::kStringEqual:
767       result = LowerStringEqual(node);
768       break;
769     case IrOpcode::kStringLessThan:
770       result = LowerStringLessThan(node);
771       break;
772     case IrOpcode::kStringLessThanOrEqual:
773       result = LowerStringLessThanOrEqual(node);
774       break;
775     case IrOpcode::kCheckFloat64Hole:
776       result = LowerCheckFloat64Hole(node, frame_state);
777       break;
778     case IrOpcode::kCheckTaggedHole:
779       result = LowerCheckTaggedHole(node, frame_state);
780       break;
781     case IrOpcode::kConvertTaggedHoleToUndefined:
782       result = LowerConvertTaggedHoleToUndefined(node);
783       break;
784     case IrOpcode::kPlainPrimitiveToNumber:
785       result = LowerPlainPrimitiveToNumber(node);
786       break;
787     case IrOpcode::kPlainPrimitiveToWord32:
788       result = LowerPlainPrimitiveToWord32(node);
789       break;
790     case IrOpcode::kPlainPrimitiveToFloat64:
791       result = LowerPlainPrimitiveToFloat64(node);
792       break;
793     case IrOpcode::kEnsureWritableFastElements:
794       result = LowerEnsureWritableFastElements(node);
795       break;
796     case IrOpcode::kMaybeGrowFastElements:
797       result = LowerMaybeGrowFastElements(node, frame_state);
798       break;
799     case IrOpcode::kTransitionElementsKind:
800       LowerTransitionElementsKind(node);
801       break;
802     case IrOpcode::kLoadTypedElement:
803       result = LowerLoadTypedElement(node);
804       break;
805     case IrOpcode::kStoreTypedElement:
806       LowerStoreTypedElement(node);
807       break;
808     case IrOpcode::kFloat64RoundUp:
809       if (!LowerFloat64RoundUp(node).To(&result)) {
810         return false;
811       }
812       break;
813     case IrOpcode::kFloat64RoundDown:
814       if (!LowerFloat64RoundDown(node).To(&result)) {
815         return false;
816       }
817       break;
818     case IrOpcode::kFloat64RoundTruncate:
819       if (!LowerFloat64RoundTruncate(node).To(&result)) {
820         return false;
821       }
822       break;
823     case IrOpcode::kFloat64RoundTiesEven:
824       if (!LowerFloat64RoundTiesEven(node).To(&result)) {
825         return false;
826       }
827       break;
828     default:
829       return false;
830   }
831   *effect = gasm()->ExtractCurrentEffect();
832   *control = gasm()->ExtractCurrentControl();
833   NodeProperties::ReplaceUses(node, result, *effect, *control);
834   return true;
835 }
836 
837 #define __ gasm()->
838 
839 Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
840   Node* value = node->InputAt(0);
841   return AllocateHeapNumberWithValue(value);
842 }
843 
844 Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
845   Node* value = node->InputAt(0);
846   return AllocateHeapNumberWithValue(value);
847 }
848 
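// Turns a bit value into the corresponding tagged boolean: a non-zero bit
// becomes the true oddball, zero becomes false.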
849 Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
850   Node* value = node->InputAt(0);
851 
852   auto if_true = __ MakeLabel<1>();
853   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
854 
855   __ GotoIf(value, &if_true);
856   __ Goto(&done, __ FalseConstant());
857 
858   __ Bind(&if_true);
859   __ Goto(&done, __ TrueConstant());
860 
861   __ Bind(&done);
862   return done.PhiAt(0);
863 }
864 
865 Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
866   Node* value = node->InputAt(0);
867   return ChangeInt32ToSmi(value);
868 }
869 
870 Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
871   Node* value = node->InputAt(0);
872 
873   if (machine()->Is64()) {
874     return ChangeInt32ToSmi(value);
875   }
876 
877   auto if_overflow = __ MakeDeferredLabel<1>();
878   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
879 
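  // On 32-bit targets, adding the value to itself is the Smi-tagging shift
  // (value << 1); the overflow projection signals that the int32 does not fit
  // into a Smi, in which case a HeapNumber is allocated instead.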
880   Node* add = __ Int32AddWithOverflow(value, value);
881   Node* ovf = __ Projection(1, add);
882   __ GotoIf(ovf, &if_overflow);
883   __ Goto(&done, __ Projection(0, add));
884 
885   __ Bind(&if_overflow);
886   Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
887   __ Goto(&done, number);
888 
889   __ Bind(&done);
890   return done.PhiAt(0);
891 }
892 
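// Unsigned values up to the Smi maximum are tagged directly; anything larger
// is boxed into a freshly allocated HeapNumber.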
893 Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
894   Node* value = node->InputAt(0);
895 
896   auto if_not_in_smi_range = __ MakeDeferredLabel<1>();
897   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
898 
899   Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
900   __ GotoUnless(check, &if_not_in_smi_range);
901   __ Goto(&done, ChangeUint32ToSmi(value));
902 
903   __ Bind(&if_not_in_smi_range);
904   Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));
905 
906   __ Goto(&done, number);
907   __ Bind(&done);
908 
909   return done.PhiAt(0);
910 }
911 
912 Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
913   Node* value = node->InputAt(0);
914   return ChangeSmiToInt32(value);
915 }
916 
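// Converts a tagged boolean into a bit by comparing it against the true
// oddball; false (and any other value) yields 0.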
917 Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
918   Node* value = node->InputAt(0);
919   return __ WordEqual(value, __ TrueConstant());
920 }
921 
922 Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
923   Node* value = node->InputAt(0);
924 
925   auto if_smi = __ MakeDeferredLabel<1>();
926   auto if_heapnumber = __ MakeDeferredLabel<1>();
927   auto done = __ MakeLabel<6>(MachineRepresentation::kBit);
928 
929   Node* zero = __ Int32Constant(0);
930   Node* fzero = __ Float64Constant(0.0);
931 
932   // Check if {value} is false.
933   __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
934 
935   // Check if {value} is a Smi.
936   Node* check_smi = ObjectIsSmi(value);
937   __ GotoIf(check_smi, &if_smi);
938 
939   // Check if {value} is the empty string.
940   __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
941 
942   // Load the map of {value}.
943   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
944 
945   // Check if the {value} is undetectable and immediately return false.
946   Node* value_map_bitfield =
947       __ LoadField(AccessBuilder::ForMapBitField(), value_map);
948   __ GotoUnless(
949       __ Word32Equal(__ Word32And(value_map_bitfield,
950                                   __ Int32Constant(1 << Map::kIsUndetectable)),
951                      zero),
952       &done, zero);
953 
954   // Check if {value} is a HeapNumber.
955   __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
956             &if_heapnumber);
957 
958   // All other values that reach here are true.
959   __ Goto(&done, __ Int32Constant(1));
960 
961   __ Bind(&if_heapnumber);
962   {
963     // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
964     // NaN.
965     Node* value_value =
966         __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
967     __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
968   }
969 
970   __ Bind(&if_smi);
971   {
972     // If {value} is a Smi, then we only need to check that it's not zero.
973     __ Goto(&done,
974             __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
975   }
976 
977   __ Bind(&done);
978   return done.PhiAt(0);
979 }
980 
981 Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
982   Node* value = node->InputAt(0);
983 
984   auto if_not_smi = __ MakeDeferredLabel<1>();
985   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
986 
987   Node* check = ObjectIsSmi(value);
988   __ GotoUnless(check, &if_not_smi);
989   __ Goto(&done, ChangeSmiToInt32(value));
990 
991   __ Bind(&if_not_smi);
992   STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
993   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
994   vfalse = __ ChangeFloat64ToInt32(vfalse);
995   __ Goto(&done, vfalse);
996 
997   __ Bind(&done);
998   return done.PhiAt(0);
999 }
1000 
1001 Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
1002   Node* value = node->InputAt(0);
1003 
1004   auto if_not_smi = __ MakeDeferredLabel<1>();
1005   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
1006 
1007   Node* check = ObjectIsSmi(value);
1008   __ GotoUnless(check, &if_not_smi);
1009   __ Goto(&done, ChangeSmiToInt32(value));
1010 
1011   __ Bind(&if_not_smi);
1012   STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
1013   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
1014   vfalse = __ ChangeFloat64ToUint32(vfalse);
1015   __ Goto(&done, vfalse);
1016 
1017   __ Bind(&done);
1018   return done.PhiAt(0);
1019 }
1020 
1021 Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
1022   return LowerTruncateTaggedToFloat64(node);
1023 }
1024 
1025 Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
1026   Node* value = node->InputAt(0);
1027 
1028   auto if_not_smi = __ MakeDeferredLabel<1>();
1029   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
1030 
1031   Node* check = ObjectIsSmi(value);
1032   __ GotoUnless(check, &if_not_smi);
1033   __ Goto(&done, value);
1034 
1035   __ Bind(&if_not_smi);
1036   STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
1037   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
1038   vfalse = __ ChangeFloat64ToInt32(vfalse);
1039   vfalse = ChangeInt32ToSmi(vfalse);
1040   __ Goto(&done, vfalse);
1041 
1042   __ Bind(&done);
1043   return done.PhiAt(0);
1044 }
1045 
1046 Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
1047   Node* value = node->InputAt(0);
1048 
1049   auto if_not_smi = __ MakeDeferredLabel<1>();
1050   auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
1051 
1052   Node* check = ObjectIsSmi(value);
1053   __ GotoUnless(check, &if_not_smi);
1054   Node* vtrue = ChangeSmiToInt32(value);
1055   vtrue = __ ChangeInt32ToFloat64(vtrue);
1056   __ Goto(&done, vtrue);
1057 
1058   __ Bind(&if_not_smi);
1059   STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
1060   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
1061   __ Goto(&done, vfalse);
1062 
1063   __ Bind(&done);
1064   return done.PhiAt(0);
1065 }
1066 
1067 Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
1068   Node* index = node->InputAt(0);
1069   Node* limit = node->InputAt(1);
1070 
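  // The unsigned comparison also rules out negative indices: reinterpreted as
  // uint32 they are far larger than any valid limit.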
1071   Node* check = __ Uint32LessThan(index, limit);
1072   __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check, frame_state);
1073   return index;
1074 }
1075 
1076 Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
1077   CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
1078   Node* value = node->InputAt(0);
1079 
1080   ZoneHandleSet<Map> const& maps = p.maps();
1081   size_t const map_count = maps.size();
1082 
1083   if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
1084     auto done =
1085         __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count * 2);
1086     auto migrate = __ MakeDeferredLabel<1>();
1087 
1088     // Load the current map of the {value}.
1089     Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1090 
1091     // Perform the map checks.
1092     for (size_t i = 0; i < map_count; ++i) {
1093       Node* map = __ HeapConstant(maps[i]);
1094       Node* check = __ WordEqual(value_map, map);
1095       if (i == map_count - 1) {
1096         __ GotoUnless(check, &migrate);
1097         __ Goto(&done);
1098       } else {
1099         __ GotoIf(check, &done);
1100       }
1101     }
1102 
1103     // Perform the (deferred) instance migration.
1104     __ Bind(&migrate);
1105     {
1106       // If the map is not deprecated, the migration attempt does not make sense.
1107       Node* bitfield3 =
1108           __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
1109       Node* if_not_deprecated = __ WordEqual(
1110           __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
1111           __ Int32Constant(0));
1112       __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
1113                       frame_state);
1114 
1115       Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
1116       Runtime::FunctionId id = Runtime::kTryMigrateInstance;
1117       CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
1118           graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
1119       Node* result =
1120           __ Call(desc, __ CEntryStubConstant(1), value,
1121                   __ ExternalConstant(ExternalReference(id, isolate())),
1122                   __ Int32Constant(1), __ NoContextConstant());
1123       Node* check = ObjectIsSmi(result);
1124       __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
1125                       frame_state);
1126     }
1127 
1128     // Reload the current map of the {value}.
1129     value_map = __ LoadField(AccessBuilder::ForMap(), value);
1130 
1131     // Perform the map checks again.
1132     for (size_t i = 0; i < map_count; ++i) {
1133       Node* map = __ HeapConstant(maps[i]);
1134       Node* check = __ WordEqual(value_map, map);
1135       if (i == map_count - 1) {
1136         __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
1137       } else {
1138         __ GotoIf(check, &done);
1139       }
1140     }
1141 
1142     __ Goto(&done);
1143     __ Bind(&done);
1144   } else {
1145     auto done =
1146         __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count);
1147 
1148     // Load the current map of the {value}.
1149     Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1150 
1151     for (size_t i = 0; i < map_count; ++i) {
1152       Node* map = __ HeapConstant(maps[i]);
1153       Node* check = __ WordEqual(value_map, map);
1154       if (i == map_count - 1) {
1155         __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
1156       } else {
1157         __ GotoIf(check, &done);
1158       }
1159     }
1160     __ Goto(&done);
1161     __ Bind(&done);
1162   }
1163   return value;
1164 }
1165 
1166 Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
1167   Node* value = node->InputAt(0);
1168 
1169   auto if_not_smi = __ MakeDeferredLabel<1>();
1170   auto done = __ MakeLabel<2>();
1171 
1172   Node* check0 = ObjectIsSmi(value);
1173   __ GotoUnless(check0, &if_not_smi);
1174   __ Goto(&done);
1175 
1176   __ Bind(&if_not_smi);
1177   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1178   Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
1179   __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
1180   __ Goto(&done);
1181 
1182   __ Bind(&done);
1183   return value;
1184 }
1185 
1186 Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
1187                                                   Node* frame_state) {
1188   Node* value = node->InputAt(0);
1189 
1190   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1191   Node* value_instance_type =
1192       __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1193 
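  // JSReceiver instance types occupy the upper end of the instance type range
  // (see the STATIC_ASSERT below), so a single unsigned lower-bound comparison
  // against FIRST_JS_RECEIVER_TYPE is sufficient.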
1194   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1195   Node* check = __ Uint32LessThanOrEqual(
1196       __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
1197   __ DeoptimizeUnless(DeoptimizeReason::kNotAJavaScriptObject, check,
1198                       frame_state);
1199   return value;
1200 }
1201 
1202 Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
1203   Node* value = node->InputAt(0);
1204 
1205   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1206   Node* value_instance_type =
1207       __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1208 
1209   Node* check = __ Uint32LessThan(value_instance_type,
1210                                   __ Uint32Constant(FIRST_NONSTRING_TYPE));
1211   __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
1212   return value;
1213 }
1214 
1215 Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
1216                                                             Node* frame_state) {
1217   Node* value = node->InputAt(0);
1218 
1219   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1220   Node* value_instance_type =
1221       __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1222 
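  // The value is an internalized string iff masking its instance type with
  // both the 'not a string' and the 'not internalized' bits yields
  // kInternalizedTag.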
1223   Node* check = __ Word32Equal(
1224       __ Word32And(value_instance_type,
1225                    __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
1226       __ Int32Constant(kInternalizedTag));
1227   __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
1228 
1229   return value;
1230 }
1231 
1232 Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
1233   Node* value = node->InputAt(0);
1234   __ DeoptimizeUnless(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason,
1235                       value, frame_state);
1236   return value;
1237 }
1238 
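// The checked int32 arithmetic lowerings below follow a common pattern:
// perform the machine operation with an overflow projection and deoptimize
// through {frame_state} if the speculation failed.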
1239 Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
1240                                                     Node* frame_state) {
1241   Node* lhs = node->InputAt(0);
1242   Node* rhs = node->InputAt(1);
1243 
1244   Node* value = __ Int32AddWithOverflow(lhs, rhs);
1245   Node* check = __ Projection(1, value);
1246   __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
1247   return __ Projection(0, value);
1248 }
1249 
1250 Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
1251                                                     Node* frame_state) {
1252   Node* lhs = node->InputAt(0);
1253   Node* rhs = node->InputAt(1);
1254 
1255   Node* value = __ Int32SubWithOverflow(lhs, rhs);
1256   Node* check = __ Projection(1, value);
1257   __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
1258   return __ Projection(0, value);
1259 }
1260 
1261 Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
1262                                                     Node* frame_state) {
1263   Node* lhs = node->InputAt(0);
1264   Node* rhs = node->InputAt(1);
1265 
1266   auto if_not_positive = __ MakeDeferredLabel<1>();
1267   auto if_is_minint = __ MakeDeferredLabel<1>();
1268   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
1269   auto minint_check_done = __ MakeLabel<2>();
1270 
1271   Node* zero = __ Int32Constant(0);
1272 
1273   // Check if {rhs} is positive (and not zero).
1274   Node* check0 = __ Int32LessThan(zero, rhs);
1275   __ GotoUnless(check0, &if_not_positive);
1276 
1277   // Fast case, no additional checking required.
1278   __ Goto(&done, __ Int32Div(lhs, rhs));
1279 
1280   {
1281     __ Bind(&if_not_positive);
1282 
1283     // Check if {rhs} is zero.
1284     Node* check = __ Word32Equal(rhs, zero);
1285     __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
1286 
1287     // Check if {lhs} is zero, as that would produce minus zero.
1288     check = __ Word32Equal(lhs, zero);
1289     __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
1290 
1291     // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
1292     // to return -kMinInt, which is not representable.
1293     Node* minint = __ Int32Constant(std::numeric_limits<int32_t>::min());
1294     Node* check1 = __ Word32Equal(lhs, minint);
1295     __ GotoIf(check1, &if_is_minint);
1296     __ Goto(&minint_check_done);
1297 
1298     __ Bind(&if_is_minint);
1299     // Check if {rhs} is -1.
1300     Node* minusone = __ Int32Constant(-1);
1301     Node* is_minus_one = __ Word32Equal(rhs, minusone);
1302     __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
1303     __ Goto(&minint_check_done);
1304 
1305     __ Bind(&minint_check_done);
1306     // Perform the actual integer division.
1307     __ Goto(&done, __ Int32Div(lhs, rhs));
1308   }
1309 
1310   __ Bind(&done);
1311   Node* value = done.PhiAt(0);
1312 
1313   // Check if the remainder is non-zero.
1314   Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
1315   __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
1316 
1317   return value;
1318 }
1319 
1320 Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
1321                                                     Node* frame_state) {
1322   // General case for signed integer modulus, with optimization for (unknown)
1323   // power of 2 right hand side.
1324   //
1325   //   if rhs <= 0 then
1326   //     rhs = -rhs
1327   //     deopt if rhs == 0
1328   //   if lhs < 0 then
1329   //     let res = lhs % rhs in
1330   //     deopt if res == 0
1331   //     res
1332   //   else
1333   //     let msk = rhs - 1 in
1334   //     if rhs & msk == 0 then
1335   //       lhs & msk
1336   //     else
1337   //       lhs % rhs
1338   //
1339   Node* lhs = node->InputAt(0);
1340   Node* rhs = node->InputAt(1);
1341 
1342   auto if_rhs_not_positive = __ MakeDeferredLabel<1>();
1343   auto if_lhs_negative = __ MakeDeferredLabel<1>();
1344   auto if_power_of_two = __ MakeLabel<1>();
1345   auto rhs_checked = __ MakeLabel<2>(MachineRepresentation::kWord32);
1346   auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
1347 
1348   Node* zero = __ Int32Constant(0);
1349 
1350   // Check if {rhs} is not strictly positive.
1351   Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
1352   __ GotoIf(check0, &if_rhs_not_positive);
1353   __ Goto(&rhs_checked, rhs);
1354 
1355   __ Bind(&if_rhs_not_positive);
1356   {
1357     // Negate {rhs}, might still produce a negative result in case of
1358     // -2^31, but that is handled safely below.
1359     Node* vtrue0 = __ Int32Sub(zero, rhs);
1360 
1361     // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
1362     Node* check = __ Word32Equal(vtrue0, zero);
1363     __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
1364     __ Goto(&rhs_checked, vtrue0);
1365   }
1366 
1367   __ Bind(&rhs_checked);
1368   rhs = rhs_checked.PhiAt(0);
1369 
1370   // Check if {lhs} is negative.
1371   Node* check1 = __ Int32LessThan(lhs, zero);
1372   __ GotoIf(check1, &if_lhs_negative);
1373 
1374   // {lhs} non-negative.
1375   {
1376     Node* one = __ Int32Constant(1);
1377     Node* msk = __ Int32Sub(rhs, one);
1378 
1379     // Check if {rhs} minus one is a valid mask ({rhs} is a power of two).
1380     Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
1381     __ GotoIf(check2, &if_power_of_two);
1382     // Compute the remainder using the generic {lhs % rhs}.
1383     __ Goto(&done, __ Int32Mod(lhs, rhs));
1384 
1385     __ Bind(&if_power_of_two);
1386     // Compute the remainder using {lhs & msk}.
1387     __ Goto(&done, __ Word32And(lhs, msk));
1388   }
1389 
1390   __ Bind(&if_lhs_negative);
1391   {
1392     // Compute the remainder using {lhs % rhs}.
1393     Node* vtrue1 = __ Int32Mod(lhs, rhs);
1394 
1395     // Check if we would have to return -0.
1396     Node* check = __ Word32Equal(vtrue1, zero);
1397     __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
1398     __ Goto(&done, vtrue1);
1399   }
1400 
1401   __ Bind(&done);
1402   return done.PhiAt(0);
1403 }
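
// Illustrative sketch (not part of the lowering): the behaviour encoded by
// the CheckedInt32Mod lowering above, following the pseudocode in its
// comment. The rhs normalization is done in 64 bits here so that negating
// -2^31 stays well defined; the lowering itself relies on 32-bit wraparound
// for the same effect. The helper name and deopt flag are illustrative
// assumptions.
static inline int32_t CheckedInt32ModSketch(int32_t lhs, int32_t rhs_in,
                                            bool* deopt) {
  int64_t rhs = rhs_in;
  if (rhs <= 0) {
    rhs = -rhs;
    if (rhs == 0) { *deopt = true; return 0; }  // kDivisionByZero
  }
  if (lhs < 0) {
    int32_t res = static_cast<int32_t>(lhs % rhs);
    if (res == 0) { *deopt = true; return 0; }  // kMinusZero
    return res;
  }
  int64_t msk = rhs - 1;
  if ((rhs & msk) == 0) {
    return static_cast<int32_t>(lhs & msk);  // {rhs} is a power of two.
  }
  return static_cast<int32_t>(lhs % rhs);
}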
1404 
1405 Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
1406                                                      Node* frame_state) {
1407   Node* lhs = node->InputAt(0);
1408   Node* rhs = node->InputAt(1);
1409 
1410   Node* zero = __ Int32Constant(0);
1411 
1412   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
1413   Node* check = __ Word32Equal(rhs, zero);
1414   __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
1415 
1416   // Perform the actual unsigned integer division.
1417   Node* value = __ Uint32Div(lhs, rhs);
1418 
1419   // Check that the division had no remainder; deoptimize otherwise.
1420   check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
1421   __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
1422   return value;
1423 }
1424 
1425 Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
1426                                                      Node* frame_state) {
1427   Node* lhs = node->InputAt(0);
1428   Node* rhs = node->InputAt(1);
1429 
1430   Node* zero = __ Int32Constant(0);
1431 
1432   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
1433   Node* check = __ Word32Equal(rhs, zero);
1434   __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
1435 
1436   // Perform the actual unsigned integer modulus.
1437   return __ Uint32Mod(lhs, rhs);
1438 }
1439 
1440 Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
1441                                                     Node* frame_state) {
1442   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
1443   Node* lhs = node->InputAt(0);
1444   Node* rhs = node->InputAt(1);
1445 
1446   Node* projection = __ Int32MulWithOverflow(lhs, rhs);
1447   Node* check = __ Projection(1, projection);
1448   __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
1449 
1450   Node* value = __ Projection(0, projection);
1451 
1452   if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
1453     auto if_zero = __ MakeDeferredLabel<1>();
1454     auto check_done = __ MakeLabel<2>();
1455     Node* zero = __ Int32Constant(0);
1456     Node* check_zero = __ Word32Equal(value, zero);
1457     __ GotoIf(check_zero, &if_zero);
1458     __ Goto(&check_done);
1459 
1460     __ Bind(&if_zero);
1461     // We may need to return negative zero.
1462     Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
1463     __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
1464     __ Goto(&check_done);
1465 
1466     __ Bind(&check_done);
1467   }
1468 
1469   return value;
1470 }
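
// Illustrative sketch (not part of the lowering): why the minus-zero check
// above tests (lhs | rhs) < 0 when the 32-bit product is zero. A zero product
// means at least one operand is zero, so a set sign bit in (lhs | rhs) means
// the other operand is negative and the exact result would be -0. The helper
// name is an illustrative assumption.
static inline bool Int32MulWouldBeMinusZeroSketch(int32_t lhs, int32_t rhs) {
  return (lhs == 0 || rhs == 0) && (lhs | rhs) < 0;
}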
1471 
1472 Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
1473     Node* node, Node* frame_state) {
1474   DCHECK(SmiValuesAre31Bits());
1475   Node* value = node->InputAt(0);
1476 
1477   Node* add = __ Int32AddWithOverflow(value, value);
1478   Node* check = __ Projection(1, add);
1479   __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
1480   return __ Projection(0, add);
1481 }
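
// Illustrative sketch (not part of the lowering): with 31-bit Smis the tagged
// representation is simply (value << 1) with a clear tag bit, so adding the
// value to itself with an overflow check both produces the tag and verifies
// the 31-bit range in one operation. The helper name and deopt flag are
// illustrative assumptions.
static inline int32_t Int32ToTaggedSignedSketch(int32_t value, bool* deopt) {
  int64_t doubled = static_cast<int64_t>(value) + value;
  if (doubled != static_cast<int32_t>(doubled)) {
    *deopt = true;  // kOverflow: {value} does not fit into a 31-bit Smi.
    return 0;
  }
  return static_cast<int32_t>(doubled);
}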
1482 
1483 Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
1484                                                          Node* frame_state) {
1485   Node* value = node->InputAt(0);
1486   Node* max_int = __ Int32Constant(std::numeric_limits<int32_t>::max());
1487   Node* is_safe = __ Uint32LessThanOrEqual(value, max_int);
1488   __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, is_safe, frame_state);
1489   return value;
1490 }
1491 
1492 Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
1493     Node* node, Node* frame_state) {
1494   Node* value = node->InputAt(0);
1495   Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
1496   __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
1497   return ChangeUint32ToSmi(value);
1498 }
1499 
1500 Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
1501     CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
1502   Node* value32 = __ RoundFloat64ToInt32(value);
1503   Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
1504   __ DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
1505                       frame_state);
1506 
1507   if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
1508     // Check if {value} is -0.
1509     auto if_zero = __ MakeDeferredLabel<1>();
1510     auto check_done = __ MakeLabel<2>();
1511 
1512     Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
1513     __ GotoIf(check_zero, &if_zero);
1514     __ Goto(&check_done);
1515 
1516     __ Bind(&if_zero);
1517     // In case of 0, we need to check the high bits for the IEEE -0 pattern.
1518     Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
1519                                             __ Int32Constant(0));
1520     __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
1521     __ Goto(&check_done);
1522 
1523     __ Bind(&check_done);
1524   }
1525   return value32;
1526 }
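
// Illustrative sketch (not part of the lowering): the checks performed above.
// The round-trip comparison catches fractional values, out-of-range values
// and NaN (NaN compares unequal to everything); a zero result additionally
// needs a sign test to reject -0.0 when minus-zero checking is requested.
// The helper name, the deopt flag and the use of std::signbit (<cmath>) are
// illustrative assumptions.
static inline int32_t CheckedFloat64ToInt32Sketch(double value,
                                                  bool check_minus_zero,
                                                  bool* deopt) {
  // Reject values outside the int32 range (and NaN) up front so that the
  // conversion below is well defined.
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) {
    *deopt = true;  // kLostPrecisionOrNaN
    return 0;
  }
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) {
    *deopt = true;  // kLostPrecisionOrNaN: {value} had a fractional part.
    return 0;
  }
  if (check_minus_zero && value32 == 0 && std::signbit(value)) {
    *deopt = true;  // kMinusZero: the high word of -0.0 has the sign bit set.
    return 0;
  }
  return value32;
}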
1527 
1528 Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
1529                                                           Node* frame_state) {
1530   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
1531   Node* value = node->InputAt(0);
1532   return BuildCheckedFloat64ToInt32(mode, value, frame_state);
1533 }
1534 
1535 Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
1536     Node* node, Node* frame_state) {
1537   Node* value = node->InputAt(0);
1538   Node* check = ObjectIsSmi(value);
1539   __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
1540   return ChangeSmiToInt32(value);
1541 }
1542 
1543 Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
1544                                                          Node* frame_state) {
1545   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
1546   Node* value = node->InputAt(0);
1547 
1548   auto if_not_smi = __ MakeDeferredLabel<1>();
1549   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
1550 
1551   Node* check = ObjectIsSmi(value);
1552   __ GotoUnless(check, &if_not_smi);
1553   // In the Smi case, just convert to int32.
1554   __ Goto(&done, ChangeSmiToInt32(value));
1555 
1556   // In the non-Smi case, check that {value} is a HeapNumber, load its
1557   // value and convert to int32.
1558   __ Bind(&if_not_smi);
1559   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1560   Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
1561   __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_map,
1562                       frame_state);
1563   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
1564   vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
1565   __ Goto(&done, vfalse);
1566 
1567   __ Bind(&done);
1568   return done.PhiAt(0);
1569 }
1570 
1571 Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
1572     CheckTaggedInputMode mode, Node* value, Node* frame_state) {
1573   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1574   Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
1575   switch (mode) {
1576     case CheckTaggedInputMode::kNumber: {
1577       __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_number,
1578                           frame_state);
1579       break;
1580     }
1581     case CheckTaggedInputMode::kNumberOrOddball: {
1582       auto check_done = __ MakeLabel<2>();
1583 
1584       __ GotoIf(check_number, &check_done);
1585     // Oddballs also contain the numeric value, so we just check that we
1586     // have an oddball here.
1587       Node* instance_type =
1588           __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1589       Node* check_oddball =
1590           __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
1591       __ DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
1592                           frame_state);
1593       STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
1594       __ Goto(&check_done);
1595 
1596       __ Bind(&check_done);
1597       break;
1598     }
1599   }
1600   return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
1601 }
1602 
1603 Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
1604                                                            Node* frame_state) {
1605   CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
1606   Node* value = node->InputAt(0);
1607 
1608   auto if_smi = __ MakeLabel<1>();
1609   auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
1610 
1611   Node* check = ObjectIsSmi(value);
1612   __ GotoIf(check, &if_smi);
1613 
1614   // In the Smi case, just convert to int32 and then float64.
1615   // Otherwise, check heap numberness and load the number.
1616   Node* number =
1617       BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
1618   __ Goto(&done, number);
1619 
1620   __ Bind(&if_smi);
1621   Node* from_smi = ChangeSmiToInt32(value);
1622   from_smi = __ ChangeInt32ToFloat64(from_smi);
1623   __ Goto(&done, from_smi);
1624 
1625   __ Bind(&done);
1626   return done.PhiAt(0);
1627 }
1628 
1629 Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
1630     Node* node, Node* frame_state) {
1631   Node* value = node->InputAt(0);
1632 
1633   Node* check = ObjectIsSmi(value);
1634   __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
1635 
1636   return value;
1637 }
1638 
1639 Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
1640     Node* node, Node* frame_state) {
1641   Node* value = node->InputAt(0);
1642 
1643   Node* check = ObjectIsSmi(value);
1644   __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
1645   return value;
1646 }
1647 
1648 Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
1649   Node* value = node->InputAt(0);
1650 
1651   auto if_not_smi = __ MakeDeferredLabel<1>();
1652   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
1653 
1654   Node* check = ObjectIsSmi(value);
1655   __ GotoUnless(check, &if_not_smi);
1656   __ Goto(&done, ChangeSmiToInt32(value));
1657 
1658   __ Bind(&if_not_smi);
1659   STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
1660   Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
1661   vfalse = __ TruncateFloat64ToWord32(vfalse);
1662   __ Goto(&done, vfalse);
1663 
1664   __ Bind(&done);
1665   return done.PhiAt(0);
1666 }
1667 
1668 Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
1669     Node* node, Node* frame_state) {
1670   Node* value = node->InputAt(0);
1671 
1672   auto if_not_smi = __ MakeLabel<1>();
1673   auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
1674 
1675   Node* check = ObjectIsSmi(value);
1676   __ GotoUnless(check, &if_not_smi);
1677   // In the Smi case, just convert to int32.
1678   __ Goto(&done, ChangeSmiToInt32(value));
1679 
1680   // Otherwise, check that it's a heap number or oddball and truncate the value
1681   // to int32.
1682   __ Bind(&if_not_smi);
1683   Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
1684       CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
1685   number = __ TruncateFloat64ToWord32(number);
1686   __ Goto(&done, number);
1687 
1688   __ Bind(&done);
1689   return done.PhiAt(0);
1690 }
1691 
1692 Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
1693   Node* value = node->InputAt(0);
1694 
1695   auto if_smi = __ MakeDeferredLabel<1>();
1696   auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
1697 
1698   Node* check = ObjectIsSmi(value);
1699   __ GotoIf(check, &if_smi);
1700 
1701   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1702   Node* value_bit_field =
1703       __ LoadField(AccessBuilder::ForMapBitField(), value_map);
1704   Node* vfalse = __ Word32Equal(
1705       __ Int32Constant(1 << Map::kIsCallable),
1706       __ Word32And(value_bit_field,
1707                    __ Int32Constant((1 << Map::kIsCallable) |
1708                                     (1 << Map::kIsUndetectable))));
1709   __ Goto(&done, vfalse);
1710 
1711   __ Bind(&if_smi);
1712   __ Goto(&done, __ Int32Constant(0));
1713 
1714   __ Bind(&done);
1715   return done.PhiAt(0);
1716 }
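
// Illustrative sketch (not part of the lowering): the bit-field test above
// requires the "callable" bit to be set and the "undetectable" bit to be
// clear, which the single masked compare expresses as
//   (bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable.
// The helper name and the plain uint32_t parameter are illustrative
// assumptions.
static inline bool IsDetectableCallableSketch(uint32_t bit_field) {
  uint32_t const mask =
      (1u << Map::kIsCallable) | (1u << Map::kIsUndetectable);
  return (bit_field & mask) == (1u << Map::kIsCallable);
}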
1717 
1718 Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
1719   Node* value = node->InputAt(0);
1720 
1721   auto if_primitive = __ MakeDeferredLabel<2>();
1722   auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
1723 
1724   Node* check0 = ObjectIsSmi(value);
1725   __ GotoIf(check0, &if_primitive);
1726 
1727   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1728   Node* value_instance_type =
1729       __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1730   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1731   Node* check1 = __ Uint32LessThanOrEqual(
1732       __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
1733   __ GotoUnless(check1, &if_primitive);
1734 
1735   Node* value_bit_field =
1736       __ LoadField(AccessBuilder::ForMapBitField(), value_map);
1737   Node* check2 = __ Word32Equal(
1738       __ Int32Constant(0),
1739       __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
1740   __ Goto(&done, check2);
1741 
1742   __ Bind(&if_primitive);
1743   __ Goto(&done, __ Int32Constant(0));
1744 
1745   __ Bind(&done);
1746   return done.PhiAt(0);
1747 }
1748 
1749 Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
1750   Node* value = node->InputAt(0);
1751 
1752   auto if_smi = __ MakeLabel<1>();
1753   auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
1754 
1755   __ GotoIf(ObjectIsSmi(value), &if_smi);
1756   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1757   __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant()));
1758 
1759   __ Bind(&if_smi);
1760   __ Goto(&done, __ Int32Constant(1));
1761 
1762   __ Bind(&done);
1763   return done.PhiAt(0);
1764 }
1765 
1766 Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
1767   Node* value = node->InputAt(0);
1768 
1769   auto if_smi = __ MakeDeferredLabel<1>();
1770   auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
1771 
1772   __ GotoIf(ObjectIsSmi(value), &if_smi);
1773 
1774   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1775   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1776   Node* value_instance_type =
1777       __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1778   Node* result = __ Uint32LessThanOrEqual(
1779       __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
1780   __ Goto(&done, result);
1781 
1782   __ Bind(&if_smi);
1783   __ Goto(&done, __ Int32Constant(0));
1784 
1785   __ Bind(&done);
1786   return done.PhiAt(0);
1787 }
1788 
1789 Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
1790   Node* value = node->InputAt(0);
1791   return ObjectIsSmi(value);
1792 }
1793 
1794 Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
1795   Node* value = node->InputAt(0);
1796 
1797   auto if_smi = __ MakeDeferredLabel<1>();
1798   auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
1799 
1800   Node* check = ObjectIsSmi(value);
1801   __ GotoIf(check, &if_smi);
1802   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1803   Node* value_instance_type =
1804       __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
1805   Node* vfalse = __ Uint32LessThan(value_instance_type,
1806                                    __ Uint32Constant(FIRST_NONSTRING_TYPE));
1807   __ Goto(&done, vfalse);
1808 
1809   __ Bind(&if_smi);
1810   __ Goto(&done, __ Int32Constant(0));
1811 
1812   __ Bind(&done);
1813   return done.PhiAt(0);
1814 }
1815 
1816 Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
1817   Node* value = node->InputAt(0);
1818 
1819   auto if_smi = __ MakeDeferredLabel<1>();
1820   auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
1821 
1822   Node* check = ObjectIsSmi(value);
1823   __ GotoIf(check, &if_smi);
1824 
1825   Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
1826   Node* value_bit_field =
1827       __ LoadField(AccessBuilder::ForMapBitField(), value_map);
1828   Node* vfalse = __ Word32Equal(
1829       __ Word32Equal(__ Int32Constant(0),
1830                      __ Word32And(value_bit_field,
1831                                   __ Int32Constant(1 << Map::kIsUndetectable))),
1832       __ Int32Constant(0));
1833   __ Goto(&done, vfalse);
1834 
1835   __ Bind(&if_smi);
1836   __ Goto(&done, __ Int32Constant(0));
1837 
1838   __ Bind(&done);
1839   return done.PhiAt(0);
1840 }
1841 
1842 Node* EffectControlLinearizer::LowerNewRestParameterElements(Node* node) {
1843   int const formal_parameter_count = ParameterCountOf(node->op());
1844 
1845   Callable const callable = CodeFactory::NewRestParameterElements(isolate());
1846   Operator::Properties const properties = node->op()->properties();
1847   CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
1848   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
1849       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
1850   return __ Call(desc, __ HeapConstant(callable.code()),
1851                  __ IntPtrConstant(formal_parameter_count),
1852                  __ NoContextConstant());
1853 }
1854 
1855 Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
1856   int const formal_parameter_count = ParameterCountOf(node->op());
1857 
1858   Callable const callable =
1859       CodeFactory::NewUnmappedArgumentsElements(isolate());
1860   Operator::Properties const properties = node->op()->properties();
1861   CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
1862   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
1863       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
1864   return __ Call(desc, __ HeapConstant(callable.code()),
1865                  __ IntPtrConstant(formal_parameter_count),
1866                  __ NoContextConstant());
1867 }
1868 
1869 Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
1870   Node* value = node->InputAt(0);
1871 
1872   Node* value_bit_field =
1873       __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), value);
1874   return __ Word32Equal(
1875       __ Word32Equal(
1876           __ Word32And(value_bit_field,
1877                        __ Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
1878           __ Int32Constant(0)),
1879       __ Int32Constant(0));
1880 }
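
// Illustrative sketch (not part of the lowering): the nested Word32Equal
// pattern above is just a machine-level "bit is set" test, i.e. it computes
// ((bit_field & WasNeutered::kMask) == 0) == 0. The helper name and the plain
// uint32_t parameter are illustrative assumptions.
static inline bool ArrayBufferWasNeuteredSketch(uint32_t bit_field) {
  return (bit_field & JSArrayBuffer::WasNeutered::kMask) != 0;
}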
1881 
1882 Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
1883   Node* receiver = node->InputAt(0);
1884   Node* position = node->InputAt(1);
1885 
1886   Callable const callable = CodeFactory::StringCharAt(isolate());
1887   Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
1888   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
1889   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
1890       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
1891   return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
1892                  __ NoContextConstant());
1893 }
1894 
1895 Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
1896   Node* receiver = node->InputAt(0);
1897   Node* position = node->InputAt(1);
1898 
1899   Callable const callable = CodeFactory::StringCharCodeAt(isolate());
1900   Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
1901   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
1902   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
1903       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
1904       MachineType::TaggedSigned());
1905   return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
1906                  __ NoContextConstant());
1907 }
1908 
1909 Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
1910   Node* value = node->InputAt(0);
1911 
1912   auto runtime_call = __ MakeDeferredLabel<2>();
1913   auto if_undefined = __ MakeDeferredLabel<1>();
1914   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
1915 
1916   // Compute the character code.
1917   Node* code = __ Word32And(value, __ Int32Constant(String::kMaxUtf16CodeUnit));
1918 
1919   // Check if the {code} is a one-byte char code.
1920   Node* check0 = __ Int32LessThanOrEqual(
1921       code, __ Int32Constant(String::kMaxOneByteCharCode));
1922   __ GotoUnless(check0, &runtime_call);
1923 
1924   // Load the isolate wide single character string cache.
1925   Node* cache = __ HeapConstant(factory()->single_character_string_cache());
1926 
1927   // Compute the {cache} index for {code}.
1928   Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
1929 
1930   // Check if we have an entry for the {code} in the single character string
1931   // cache already.
1932   Node* entry =
1933       __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
1934 
1935   Node* check1 = __ WordEqual(entry, __ UndefinedConstant());
1936   __ GotoIf(check1, &runtime_call);
1937   __ Goto(&done, entry);
1938 
1939   // Let %StringFromCharCode handle this case.
1940   // TODO(turbofan): At some point we may consider adding a stub for this
1941   // deferred case, so that we don't need to call into C++ here.
1942   __ Bind(&runtime_call);
1943   {
1944     Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
1945     Runtime::FunctionId id = Runtime::kStringCharFromCode;
1946     CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
1947         graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
1948     Node* vtrue1 =
1949         __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
1950                 __ ExternalConstant(ExternalReference(id, isolate())),
1951                 __ Int32Constant(1), __ NoContextConstant());
1952     __ Goto(&done, vtrue1);
1953   }
1954   __ Bind(&done);
1955   return done.PhiAt(0);
1956 }
1957 
1958 Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
1959   Node* value = node->InputAt(0);
1960   Node* code = value;
1961 
1962   auto if_not_single_code = __ MakeDeferredLabel<1>();
1963   auto if_not_one_byte = __ MakeDeferredLabel<1>();
1964   auto cache_miss = __ MakeDeferredLabel<1>();
1965   auto done = __ MakeLabel<4>(MachineRepresentation::kTagged);
1966 
1967   // Check if the {code} is a single code unit
1968   Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
1969   __ GotoUnless(check0, &if_not_single_code);
1970 
1971   {
1972     // Check if the {code} is a one byte character
1973     Node* check1 = __ Uint32LessThanOrEqual(
1974         code, __ Uint32Constant(String::kMaxOneByteCharCode));
1975     __ GotoUnless(check1, &if_not_one_byte);
1976     {
1977       // Load the isolate wide single character string cache.
1978       Node* cache = __ HeapConstant(factory()->single_character_string_cache());
1979 
1980       // Compute the {cache} index for {code}.
1981       Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
1982 
1983       // Check if we have an entry for the {code} in the single character string
1984       // cache already.
1985       Node* entry =
1986           __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
1987 
1988       Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
1989       __ GotoIf(check2, &cache_miss);
1990 
1991       // Use the {entry} from the {cache}.
1992       __ Goto(&done, entry);
1993 
1994       __ Bind(&cache_miss);
1995       {
1996         // Allocate a new SeqOneByteString for {code}.
1997         Node* vtrue2 = __ Allocate(
1998             NOT_TENURED, __ Int32Constant(SeqOneByteString::SizeFor(1)));
1999         __ StoreField(AccessBuilder::ForMap(), vtrue2,
2000                       __ HeapConstant(factory()->one_byte_string_map()));
2001         __ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
2002                       __ IntPtrConstant(Name::kEmptyHashField));
2003         __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
2004                       __ SmiConstant(1));
2005         __ Store(
2006             StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
2007             vtrue2,
2008             __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
2009             code);
2010 
2011         // Remember it in the {cache}.
2012         __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
2013                         vtrue2);
2014         __ Goto(&done, vtrue2);
2015       }
2016     }
2017 
2018     __ Bind(&if_not_one_byte);
2019     {
2020       // Allocate a new SeqTwoByteString for {code}.
2021       Node* vfalse1 = __ Allocate(
2022           NOT_TENURED, __ Int32Constant(SeqTwoByteString::SizeFor(1)));
2023       __ StoreField(AccessBuilder::ForMap(), vfalse1,
2024                     __ HeapConstant(factory()->string_map()));
2025       __ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
2026                     __ IntPtrConstant(Name::kEmptyHashField));
2027       __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
2028                     __ SmiConstant(1));
2029       __ Store(
2030           StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
2031           vfalse1,
2032           __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
2033           code);
2034       __ Goto(&done, vfalse1);
2035     }
2036   }
2037 
2038   __ Bind(&if_not_single_code);
2039   // Generate surrogate pair string
2040   {
2041     switch (UnicodeEncodingOf(node->op())) {
2042       case UnicodeEncoding::UTF16:
2043         break;
2044 
2045       case UnicodeEncoding::UTF32: {
2046         // Convert UTF32 to UTF16 code units and store them as a 32-bit word.
2047         Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
2048 
2049         // lead = (codepoint >> 10) + LEAD_OFFSET
2050         Node* lead =
2051             __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
2052 
2053         // trail = (codepoint & 0x3FF) + 0xDC00;
2054         Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
2055                                   __ Int32Constant(0xDC00));
2056 
2057         // codepoint = (trail << 16) | lead;
2058         code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
2059         break;
2060       }
2061     }
2062 
2063     // Allocate a new SeqTwoByteString for {code}.
2064     Node* vfalse0 = __ Allocate(NOT_TENURED,
2065                                 __ Int32Constant(SeqTwoByteString::SizeFor(2)));
2066     __ StoreField(AccessBuilder::ForMap(), vfalse0,
2067                   __ HeapConstant(factory()->string_map()));
2068     __ StoreField(AccessBuilder::ForNameHashField(), vfalse0,
2069                   __ IntPtrConstant(Name::kEmptyHashField));
2070     __ StoreField(AccessBuilder::ForStringLength(), vfalse0, __ SmiConstant(2));
2071     __ Store(
2072         StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
2073         vfalse0,
2074         __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
2075         code);
2076     __ Goto(&done, vfalse0);
2077   }
2078 
2079   __ Bind(&done);
2080   return done.PhiAt(0);
2081 }
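
// Illustrative sketch (not part of the lowering): the UTF-32 to UTF-16
// surrogate-pair conversion used above, with the pair packed into one 32-bit
// word as (trail << 16) | lead, i.e. the lead surrogate ends up in the lower
// half. The helper name is an illustrative assumption.
static inline uint32_t EncodeSurrogatePairSketch(uint32_t code_point) {
  uint32_t const lead_offset = 0xD800 - (0x10000 >> 10);
  uint32_t const lead = (code_point >> 10) + lead_offset;
  uint32_t const trail = (code_point & 0x3FF) + 0xDC00;
  return (trail << 16) | lead;
}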
2082 
2083 Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
2084   Node* subject = node->InputAt(0);
2085   Node* search_string = node->InputAt(1);
2086   Node* position = node->InputAt(2);
2087 
2088   Callable callable = CodeFactory::StringIndexOf(isolate());
2089   Operator::Properties properties = Operator::kEliminatable;
2090   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
2091   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
2092       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
2093   return __ Call(desc, __ HeapConstant(callable.code()), subject, search_string,
2094                  position, __ NoContextConstant());
2095 }
2096 
2097 Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
2098                                                      Node* node) {
2099   Node* lhs = node->InputAt(0);
2100   Node* rhs = node->InputAt(1);
2101 
2102   Operator::Properties properties = Operator::kEliminatable;
2103   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
2104   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
2105       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
2106   return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
2107                  __ NoContextConstant());
2108 }
2109 
2110 Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
2111   return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
2112 }
2113 
2114 Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
2115   return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
2116 }
2117 
2118 Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
2119   return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
2120                                node);
2121 }
2122 
2123 Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
2124                                                      Node* frame_state) {
2125   // If we reach this point without having eliminated the {node} that's
2126   // marked with allow-return-hole, we cannot do anything, so we just
2127   // deoptimize in case of the hole NaN (similar to Crankshaft).
2128   Node* value = node->InputAt(0);
2129   Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
2130                                __ Int32Constant(kHoleNanUpper32));
2131   __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
2132   return value;
2133 }
2134 
2135 Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
2136                                                     Node* frame_state) {
2137   Node* value = node->InputAt(0);
2138   Node* check = __ WordEqual(value, __ TheHoleConstant());
2139   __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
2140   return value;
2141 }
2142 
2143 Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
2144   Node* value = node->InputAt(0);
2145 
2146   auto if_is_hole = __ MakeDeferredLabel<1>();
2147   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
2148 
2149   Node* check = __ WordEqual(value, __ TheHoleConstant());
2150   __ GotoIf(check, &if_is_hole);
2151   __ Goto(&done, value);
2152 
2153   __ Bind(&if_is_hole);
2154   __ Goto(&done, __ UndefinedConstant());
2155 
2156   __ Bind(&done);
2157   return done.PhiAt(0);
2158 }
2159 
2160 Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
2161   Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(HeapNumber::kSize));
2162   __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
2163   __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
2164   return result;
2165 }
2166 
2167 Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
2168   if (machine()->Is64()) {
2169     value = __ ChangeInt32ToInt64(value);
2170   }
2171   return __ WordShl(value, SmiShiftBitsConstant());
2172 }
2173 
2174 Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
2175   if (machine()->Is64()) {
2176     value = __ ChangeUint32ToUint64(value);
2177   }
2178   return __ WordShl(value, SmiShiftBitsConstant());
2179 }
2180 
2181 Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
2182   value = __ WordSar(value, SmiShiftBitsConstant());
2183   if (machine()->Is64()) {
2184     value = __ TruncateInt64ToInt32(value);
2185   }
2186   return value;
2187 }
2188 
2189 Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
2190   return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
2191                       __ IntPtrConstant(kSmiTag));
2192 }
2193 
2194 Node* EffectControlLinearizer::SmiMaxValueConstant() {
2195   return __ Int32Constant(Smi::kMaxValue);
2196 }
2197 
2198 Node* EffectControlLinearizer::SmiShiftBitsConstant() {
2199   return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
2200 }
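
// Illustrative sketch (not part of the lowerings): how the Smi helpers above
// map to plain word operations. Tagging shifts the value left by
// kSmiShiftSize + kSmiTagSize bits, leaving the (zero) Smi tag in the low
// bits; untagging is an arithmetic shift right; ObjectIsSmi simply tests the
// tag bits. The helper names and the use of intptr_t for a tagged word are
// illustrative assumptions.
static inline intptr_t ChangeInt32ToSmiSketch(int32_t value) {
  // Shift on the unsigned type to avoid shifting a negative value.
  return static_cast<intptr_t>(static_cast<uintptr_t>(static_cast<intptr_t>(value))
                               << (kSmiShiftSize + kSmiTagSize));
}
static inline int32_t ChangeSmiToInt32Sketch(intptr_t smi) {
  return static_cast<int32_t>(smi >> (kSmiShiftSize + kSmiTagSize));
}
static inline bool ObjectIsSmiSketch(intptr_t value) {
  return (value & kSmiTagMask) == kSmiTag;
}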
2201 
2202 Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
2203   Node* value = node->InputAt(0);
2204   return __ ToNumber(value);
2205 }
2206 
2207 Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
2208   Node* value = node->InputAt(0);
2209 
2210   auto if_not_smi = __ MakeDeferredLabel<1>();
2211   auto if_to_number_smi = __ MakeLabel<1>();
2212   auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
2213 
2214   Node* check0 = ObjectIsSmi(value);
2215   __ GotoUnless(check0, &if_not_smi);
2216   __ Goto(&done, ChangeSmiToInt32(value));
2217 
2218   __ Bind(&if_not_smi);
2219   Node* to_number = __ ToNumber(value);
2220 
2221   Node* check1 = ObjectIsSmi(to_number);
2222   __ GotoIf(check1, &if_to_number_smi);
2223   Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
2224   __ Goto(&done, __ TruncateFloat64ToWord32(number));
2225 
2226   __ Bind(&if_to_number_smi);
2227   __ Goto(&done, ChangeSmiToInt32(to_number));
2228 
2229   __ Bind(&done);
2230   return done.PhiAt(0);
2231 }
2232 
2233 Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
2234   Node* value = node->InputAt(0);
2235 
2236   auto if_not_smi = __ MakeDeferredLabel<1>();
2237   auto if_to_number_smi = __ MakeLabel<1>();
2238   auto done = __ MakeLabel<3>(MachineRepresentation::kFloat64);
2239 
2240   Node* check0 = ObjectIsSmi(value);
2241   __ GotoUnless(check0, &if_not_smi);
2242   Node* from_smi = ChangeSmiToInt32(value);
2243   __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));
2244 
2245   __ Bind(&if_not_smi);
2246   Node* to_number = __ ToNumber(value);
2247   Node* check1 = ObjectIsSmi(to_number);
2248   __ GotoIf(check1, &if_to_number_smi);
2249 
2250   Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
2251   __ Goto(&done, number);
2252 
2253   __ Bind(&if_to_number_smi);
2254   Node* number_from_smi = ChangeSmiToInt32(to_number);
2255   number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
2256   __ Goto(&done, number_from_smi);
2257 
2258   __ Bind(&done);
2259   return done.PhiAt(0);
2260 }
2261 
2262 Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
2263   Node* object = node->InputAt(0);
2264   Node* elements = node->InputAt(1);
2265 
2266   auto if_not_fixed_array = __ MakeDeferredLabel<1>();
2267   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
2268 
2269   // Load the current map of {elements}.
2270   Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
2271 
2272   // Check if {elements} is not a copy-on-write FixedArray.
2273   Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant());
2274   __ GotoUnless(check, &if_not_fixed_array);
2275   // Nothing to do if the {elements} are not copy-on-write.
2276   __ Goto(&done, elements);
2277 
2278   __ Bind(&if_not_fixed_array);
2279   // We need to take a copy of the {elements} and set them up for {object}.
2280   Operator::Properties properties = Operator::kEliminatable;
2281   Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
2282   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
2283   CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
2284       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
2285   Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
2286                          __ NoContextConstant());
2287   __ Goto(&done, result);
2288 
2289   __ Bind(&done);
2290   return done.PhiAt(0);
2291 }
2292 
2293 Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
2294                                                           Node* frame_state) {
2295   GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
2296   Node* object = node->InputAt(0);
2297   Node* elements = node->InputAt(1);
2298   Node* index = node->InputAt(2);
2299   Node* length = node->InputAt(3);
2300 
2301   auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
2302   auto done_grow = __ MakeLabel<2>(MachineRepresentation::kTagged);
2303   auto if_grow = __ MakeDeferredLabel<1>();
2304   auto if_not_grow = __ MakeLabel<1>();
2305 
2306   Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
2307                      ? __ Uint32LessThanOrEqual(length, index)
2308                      : __ Word32Equal(length, index);
2309   __ GotoUnless(check0, &if_not_grow);
2310   {
2311     // Load the length of the {elements} backing store.
2312     Node* elements_length =
2313         __ LoadField(AccessBuilder::ForFixedArrayLength(), elements);
2314     elements_length = ChangeSmiToInt32(elements_length);
2315 
2316     // Check if we need to grow the {elements} backing store.
2317     Node* check1 = __ Uint32LessThan(index, elements_length);
2318     __ GotoUnless(check1, &if_grow);
2319     __ Goto(&done_grow, elements);
2320 
2321     __ Bind(&if_grow);
2322     // We need to grow the {elements} for {object}.
2323     Operator::Properties properties = Operator::kEliminatable;
2324     Callable callable =
2325         (flags & GrowFastElementsFlag::kDoubleElements)
2326             ? CodeFactory::GrowFastDoubleElements(isolate())
2327             : CodeFactory::GrowFastSmiOrObjectElements(isolate());
2328     CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
2329     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
2330         isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
2331         properties);
2332     Node* new_object = __ Call(desc, __ HeapConstant(callable.code()), object,
2333                                ChangeInt32ToSmi(index), __ NoContextConstant());
2334 
2335     // Ensure that we were able to grow the {elements}.
2336     // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
2337     // but maybe we should just introduce a reason that makes sense.
2338     __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_object),
2339                     frame_state);
2340     __ Goto(&done_grow, new_object);
2341 
2342     __ Bind(&done_grow);
2343 
2344     // For JSArray {object}s we also need to update the "length".
2345     if (flags & GrowFastElementsFlag::kArrayObject) {
2346       // Compute the new {length}.
2347       Node* object_length =
2348           ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
2349 
2350       // Update the "length" property of the {object}.
2351       __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
2352                     object_length);
2353     }
2354     __ Goto(&done, done_grow.PhiAt(0));
2355   }
2356 
2357   __ Bind(&if_not_grow);
2358   {
2359     // For non-holey {elements}, we need to verify that the {index} is in
2360     // bounds; for holey {elements}, the check above already guards the
2361     // index (and the operator forces {index} to be unsigned).
2362     if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
2363       Node* check1 = __ Uint32LessThan(index, length);
2364       __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check1, frame_state);
2365     }
2366     __ Goto(&done, elements);
2367   }
2368   __ Bind(&done);
2369   return done.PhiAt(0);
2370 }
2371 
2372 void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
2373   ElementsTransition const transition = ElementsTransitionOf(node->op());
2374   Node* object = node->InputAt(0);
2375 
2376   auto if_map_same = __ MakeDeferredLabel<1>();
2377   auto done = __ MakeLabel<2>();
2378 
2379   Node* source_map = __ HeapConstant(transition.source());
2380   Node* target_map = __ HeapConstant(transition.target());
2381 
2382   // Load the current map of {object}.
2383   Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
2384 
2385   // Check if {object_map} is the same as {source_map}.
2386   Node* check = __ WordEqual(object_map, source_map);
2387   __ GotoIf(check, &if_map_same);
2388   __ Goto(&done);
2389 
2390   __ Bind(&if_map_same);
2391   switch (transition.mode()) {
2392     case ElementsTransition::kFastTransition:
2393       // In-place migration of {object}, just store the {target_map}.
2394       __ StoreField(AccessBuilder::ForMap(), object, target_map);
2395       break;
2396     case ElementsTransition::kSlowTransition: {
2397       // Instance migration, call out to the runtime for {object}.
2398       Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
2399       Runtime::FunctionId id = Runtime::kTransitionElementsKind;
2400       CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
2401           graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
2402       __ Call(desc, __ CEntryStubConstant(1), object, target_map,
2403               __ ExternalConstant(ExternalReference(id, isolate())),
2404               __ Int32Constant(2), __ NoContextConstant());
2405       break;
2406     }
2407   }
2408   __ Goto(&done);
2409 
2410   __ Bind(&done);
2411 }
2412 
2413 Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
2414   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
2415   Node* buffer = node->InputAt(0);
2416   Node* base = node->InputAt(1);
2417   Node* external = node->InputAt(2);
2418   Node* index = node->InputAt(3);
2419 
2420   // We need to keep the {buffer} alive so that the GC will not release the
2421   // ArrayBuffer (if there's any) as long as we are still operating on it.
2422   __ Retain(buffer);
2423 
2424   // Compute the effective storage pointer, handling the case where the
2425   // {external} pointer is the effective storage pointer (i.e. the {base}
2426   // is Smi zero).
2427   Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
2428                                                              base, external);
2429 
2430   // Perform the actual typed element access.
2431   return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
2432                         storage, index);
2433 }
2434 
2435 void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
2436   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
2437   Node* buffer = node->InputAt(0);
2438   Node* base = node->InputAt(1);
2439   Node* external = node->InputAt(2);
2440   Node* index = node->InputAt(3);
2441   Node* value = node->InputAt(4);
2442 
2443   // We need to keep the {buffer} alive so that the GC will not release the
2444   // ArrayBuffer (if there's any) as long as we are still operating on it.
2445   __ Retain(buffer);
2446 
2447   // Compute the effective storage pointer, handling the case where the
2448   // {external} pointer is the effective storage pointer (i.e. the {base}
2449   // is Smi zero).
2450   Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
2451                                                              base, external);
2452 
2453   // Perform the actual typed element access.
2454   __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
2455                   storage, index, value);
2456 }
2457 
2458 Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
2459   // Nothing to be done if a fast hardware instruction is available.
2460   if (machine()->Float64RoundUp().IsSupported()) {
2461     return Nothing<Node*>();
2462   }
2463 
2464   Node* const input = node->InputAt(0);
2465 
2466   // General case for ceil.
2467   //
2468   //   if 0.0 < input then
2469   //     if 2^52 <= input then
2470   //       input
2471   //     else
2472   //       let temp1 = (2^52 + input) - 2^52 in
2473   //       if temp1 < input then
2474   //         temp1 + 1
2475   //       else
2476   //         temp1
2477   //   else
2478   //     if input == 0 then
2479   //       input
2480   //     else
2481   //       if input <= -2^52 then
2482   //         input
2483   //       else
2484   //         let temp1 = -0 - input in
2485   //         let temp2 = (2^52 + temp1) - 2^52 in
2486   //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
2487   //         -0 - temp3
2488 
2489   auto if_not_positive = __ MakeDeferredLabel<1>();
2490   auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
2491   auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
2492   auto if_zero = __ MakeDeferredLabel<1>();
2493   auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
2494   auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
2495 
2496   Node* const zero = __ Float64Constant(0.0);
2497   Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
2498   Node* const one = __ Float64Constant(1.0);
2499 
2500   Node* check0 = __ Float64LessThan(zero, input);
2501   __ GotoUnless(check0, &if_not_positive);
2502   {
2503     Node* check1 = __ Float64LessThanOrEqual(two_52, input);
2504     __ GotoIf(check1, &if_greater_than_two_52);
2505     {
2506       Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
2507       __ GotoUnless(__ Float64LessThan(temp1, input), &done, temp1);
2508       __ Goto(&done, __ Float64Add(temp1, one));
2509     }
2510 
2511     __ Bind(&if_greater_than_two_52);
2512     __ Goto(&done, input);
2513   }
2514 
2515   __ Bind(&if_not_positive);
2516   {
2517     Node* check1 = __ Float64Equal(input, zero);
2518     __ GotoIf(check1, &if_zero);
2519 
2520     Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
2521     Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
2522     __ GotoIf(check2, &if_less_than_minus_two_52);
2523 
2524     {
2525       Node* const minus_zero = __ Float64Constant(-0.0);
2526       Node* temp1 = __ Float64Sub(minus_zero, input);
2527       Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
2528       Node* check3 = __ Float64LessThan(temp1, temp2);
2529       __ GotoUnless(check3, &done_temp3, temp2);
2530       __ Goto(&done_temp3, __ Float64Sub(temp2, one));
2531 
2532       __ Bind(&done_temp3);
2533       Node* temp3 = done_temp3.PhiAt(0);
2534       __ Goto(&done, __ Float64Sub(minus_zero, temp3));
2535     }
2536     __ Bind(&if_less_than_minus_two_52);
2537     __ Goto(&done, input);
2538 
2539     __ Bind(&if_zero);
2540     __ Goto(&done, input);
2541   }
2542   __ Bind(&done);
2543   return Just(done.PhiAt(0));
2544 }
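
// Illustrative sketch (not part of the lowering): the 2^52 trick used by the
// software ceil fallback above. For a double in (0, 2^52), adding and then
// subtracting 2^52 forces the value onto a nearby integer (assuming the
// default IEEE round-to-nearest mode), after which one compare-and-adjust
// yields ceil. Only that branch is shown; the helper name is an illustrative
// assumption.
static inline double CeilOfSmallPositiveSketch(double input) {
  double const two_52 = 4503599627370496.0;  // 2^52
  double const temp1 = (two_52 + input) - two_52;  // Integral, within 0.5 of {input}.
  return temp1 < input ? temp1 + 1.0 : temp1;
}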
2545 
2546 Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
2547   Node* round_down = __ Float64RoundDown(value);
2548   if (round_down != nullptr) {
2549     return round_down;
2550   }
2551 
2552   Node* const input = value;
2553 
2554   // General case for floor.
2555   //
2556   //   if 0.0 < input then
2557   //     if 2^52 <= input then
2558   //       input
2559   //     else
2560   //       let temp1 = (2^52 + input) - 2^52 in
2561   //       if input < temp1 then
2562   //         temp1 - 1
2563   //       else
2564   //         temp1
2565   //   else
2566   //     if input == 0 then
2567   //       input
2568   //     else
2569   //       if input <= -2^52 then
2570   //         input
2571   //       else
2572   //         let temp1 = -0 - input in
2573   //         let temp2 = (2^52 + temp1) - 2^52 in
2574   //         if temp2 < temp1 then
2575   //           -1 - temp2
2576   //         else
2577   //           -0 - temp2
2578 
2579   auto if_not_positive = __ MakeDeferredLabel<1>();
2580   auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
2581   auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
2582   auto if_temp2_lt_temp1 = __ MakeLabel<1>();
2583   auto if_zero = __ MakeDeferredLabel<1>();
2584   auto done = __ MakeLabel<7>(MachineRepresentation::kFloat64);
2585 
2586   Node* const zero = __ Float64Constant(0.0);
2587   Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
2588 
2589   Node* check0 = __ Float64LessThan(zero, input);
2590   __ GotoUnless(check0, &if_not_positive);
2591   {
2592     Node* check1 = __ Float64LessThanOrEqual(two_52, input);
2593     __ GotoIf(check1, &if_greater_than_two_52);
2594     {
2595       Node* const one = __ Float64Constant(1.0);
2596       Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
2597       __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
2598       __ Goto(&done, __ Float64Sub(temp1, one));
2599     }
2600 
2601     __ Bind(&if_greater_than_two_52);
2602     __ Goto(&done, input);
2603   }
2604 
2605   __ Bind(&if_not_positive);
2606   {
2607     Node* check1 = __ Float64Equal(input, zero);
2608     __ GotoIf(check1, &if_zero);
2609 
2610     Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
2611     Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
2612     __ GotoIf(check2, &if_less_than_minus_two_52);
2613 
2614     {
2615       Node* const minus_zero = __ Float64Constant(-0.0);
2616       Node* temp1 = __ Float64Sub(minus_zero, input);
2617       Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
2618       Node* check3 = __ Float64LessThan(temp2, temp1);
2619       __ GotoIf(check3, &if_temp2_lt_temp1);
2620       __ Goto(&done, __ Float64Sub(minus_zero, temp2));
2621 
2622       __ Bind(&if_temp2_lt_temp1);
2623       __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
2624     }
2625     __ Bind(&if_less_than_minus_two_52);
2626     __ Goto(&done, input);
2627 
2628     __ Bind(&if_zero);
2629     __ Goto(&done, input);
2630   }
2631   __ Bind(&done);
2632   return done.PhiAt(0);
2633 }
2634 
2635 Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
2636   // Nothing to be done if a fast hardware instruction is available.
2637   if (machine()->Float64RoundDown().IsSupported()) {
2638     return Nothing<Node*>();
2639   }
2640 
2641   Node* const input = node->InputAt(0);
2642   return Just(BuildFloat64RoundDown(input));
2643 }
2644 
2645 Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
2646   // Nothing to be done if a fast hardware instruction is available.
2647   if (machine()->Float64RoundTiesEven().IsSupported()) {
2648     return Nothing<Node*>();
2649   }
2650 
2651   Node* const input = node->InputAt(0);
2652 
2653   // Generate case for round ties to even:
2654   //
2655   //   let value = floor(input) in
2656   //   let temp1 = input - value in
2657   //   if temp1 < 0.5 then
2658   //     value
2659   //   else if 0.5 < temp1 then
2660   //     value + 1.0
2661   //   else
2662   //     let temp2 = value % 2.0 in
2663   //     if temp2 == 0.0 then
2664   //       value
2665   //     else
2666   //       value + 1.0
2667 
2668   auto if_is_half = __ MakeLabel<1>();
2669   auto done = __ MakeLabel<4>(MachineRepresentation::kFloat64);
2670 
2671   Node* value = BuildFloat64RoundDown(input);
2672   Node* temp1 = __ Float64Sub(input, value);
2673 
2674   Node* const half = __ Float64Constant(0.5);
2675   Node* check0 = __ Float64LessThan(temp1, half);
2676   __ GotoIf(check0, &done, value);
2677 
2678   Node* const one = __ Float64Constant(1.0);
2679   Node* check1 = __ Float64LessThan(half, temp1);
2680   __ GotoUnless(check1, &if_is_half);
2681   __ Goto(&done, __ Float64Add(value, one));
2682 
2683   __ Bind(&if_is_half);
2684   Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
2685   Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
2686   __ GotoIf(check2, &done, value);
2687   __ Goto(&done, __ Float64Add(value, one));
2688 
2689   __ Bind(&done);
2690   return Just(done.PhiAt(0));
2691 }
2692 
2693 Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
2694   // Nothing to be done if a fast hardware instruction is available.
2695   if (machine()->Float64RoundTruncate().IsSupported()) {
2696     return Nothing<Node*>();
2697   }
2698 
2699   Node* const input = node->InputAt(0);
2700 
2701   // General case for trunc.
2702   //
2703   //   if 0.0 < input then
2704   //     if 2^52 <= input then
2705   //       input
2706   //     else
2707   //       let temp1 = (2^52 + input) - 2^52 in
2708   //       if input < temp1 then
2709   //         temp1 - 1
2710   //       else
2711   //         temp1
2712   //   else
2713   //     if input == 0 then
2714   //       input
2715   //     else
2716   //       if input <= -2^52 then
2717   //         input
2718   //       else
2719   //         let temp1 = -0 - input in
2720   //         let temp2 = (2^52 + temp1) - 2^52 in
2721   //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
2722   //         -0 - temp3
2723   //
2724   // Note: We do not use the Diamond helper class here, because it really hurts
2725   // readability with nested diamonds.
2726 
2727   auto if_not_positive = __ MakeDeferredLabel<1>();
2728   auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
2729   auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
2730   auto if_zero = __ MakeDeferredLabel<1>();
2731   auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
2732   auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
2733 
2734   Node* const zero = __ Float64Constant(0.0);
2735   Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
2736   Node* const one = __ Float64Constant(1.0);
2737 
2738   Node* check0 = __ Float64LessThan(zero, input);
2739   __ GotoUnless(check0, &if_not_positive);
2740   {
2741     Node* check1 = __ Float64LessThanOrEqual(two_52, input);
2742     __ GotoIf(check1, &if_greater_than_two_52);
2743     {
2744       Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
2745       __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
2746       __ Goto(&done, __ Float64Sub(temp1, one));
2747     }
2748 
2749     __ Bind(&if_greater_than_two_52);
2750     __ Goto(&done, input);
2751   }
2752 
2753   __ Bind(&if_not_positive);
2754   {
2755     Node* check1 = __ Float64Equal(input, zero);
2756     __ GotoIf(check1, &if_zero);
2757 
2758     Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
2759     Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
2760     __ GotoIf(check2, &if_less_than_minus_two_52);
2761 
2762     {
2763       Node* const minus_zero = __ Float64Constant(-0.0);
2764       Node* temp1 = __ Float64Sub(minus_zero, input);
2765       Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
2766       Node* check3 = __ Float64LessThan(temp1, temp2);
2767       __ GotoUnless(check3, &done_temp3, temp2);
2768       __ Goto(&done_temp3, __ Float64Sub(temp2, one));
2769 
2770       __ Bind(&done_temp3);
2771       Node* temp3 = done_temp3.PhiAt(0);
2772       __ Goto(&done, __ Float64Sub(minus_zero, temp3));
2773     }
2774     __ Bind(&if_less_than_minus_two_52);
2775     __ Goto(&done, input);
2776 
2777     __ Bind(&if_zero);
2778     __ Goto(&done, input);
2779   }
2780   __ Bind(&done);
2781   return Just(done.PhiAt(0));
2782 }
2783 
2784 #undef __
2785 
2786 Factory* EffectControlLinearizer::factory() const {
2787   return isolate()->factory();
2788 }
2789 
2790 Isolate* EffectControlLinearizer::isolate() const {
2791   return jsgraph()->isolate();
2792 }
2793 
2794 }  // namespace compiler
2795 }  // namespace internal
2796 }  // namespace v8
2797