// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/common-operator-reducer.h"

#include <algorithm>

#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

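// Statically evaluates a condition node: a non-zero Int32Constant or a
// HeapConstant with boolean value true decide kTrue, their counterparts
// decide kFalse, and any other node is kUnknown.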
Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
  switch (cond->opcode()) {
    case IrOpcode::kInt32Constant: {
      Int32Matcher mcond(cond);
      return mcond.Value() ? Decision::kTrue : Decision::kFalse;
    }
    case IrOpcode::kHeapConstant: {
      HeapObjectMatcher mcond(cond);
      return mcond.Ref(broker).BooleanValue() ? Decision::kTrue
                                              : Decision::kFalse;
    }
    default:
      return Decision::kUnknown;
  }
}

}  // namespace

CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
                                             JSHeapBroker* js_heap_broker,
                                             CommonOperatorBuilder* common,
                                             MachineOperatorBuilder* machine,
                                             Zone* temp_zone)
    : AdvancedReducer(editor),
      graph_(graph),
      js_heap_broker_(js_heap_broker),
      common_(common),
      machine_(machine),
      dead_(graph->NewNode(common->Dead())),
      zone_(temp_zone) {
  NodeProperties::SetType(dead_, Type::None());
}

Reduction CommonOperatorReducer::Reduce(Node* node) {
  DisallowHeapAccess no_heap_access;
  switch (node->opcode()) {
    case IrOpcode::kBranch:
      return ReduceBranch(node);
    case IrOpcode::kDeoptimizeIf:
    case IrOpcode::kDeoptimizeUnless:
      return ReduceDeoptimizeConditional(node);
    case IrOpcode::kMerge:
      return ReduceMerge(node);
    case IrOpcode::kEffectPhi:
      return ReduceEffectPhi(node);
    case IrOpcode::kPhi:
      return ReducePhi(node);
    case IrOpcode::kReturn:
      return ReduceReturn(node);
    case IrOpcode::kSelect:
      return ReduceSelect(node);
    case IrOpcode::kSwitch:
      return ReduceSwitch(node);
    default:
      break;
  }
  return NoChange();
}


Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
  DCHECK_EQ(IrOpcode::kBranch, node->opcode());
  Node* const cond = node->InputAt(0);
  // Swap IfTrue/IfFalse on {branch} if {cond} is a BooleanNot and use the input
  // to BooleanNot as new condition for {branch}. Note we assume that {cond} was
  // already properly optimized before we get here (as guaranteed by the graph
  // reduction logic). The same applies if {cond} is a Select acting as boolean
  // not (i.e. true being returned in the false case and vice versa).
  if (cond->opcode() == IrOpcode::kBooleanNot ||
      (cond->opcode() == IrOpcode::kSelect &&
       DecideCondition(js_heap_broker(), cond->InputAt(1)) ==
           Decision::kFalse &&
       DecideCondition(js_heap_broker(), cond->InputAt(2)) ==
           Decision::kTrue)) {
    for (Node* const use : node->uses()) {
      switch (use->opcode()) {
        case IrOpcode::kIfTrue:
          NodeProperties::ChangeOp(use, common()->IfFalse());
          break;
        case IrOpcode::kIfFalse:
          NodeProperties::ChangeOp(use, common()->IfTrue());
          break;
        default:
          UNREACHABLE();
      }
    }
    // Update the condition of {branch}. No need to mark the uses for revisit,
    // since we tell the graph reducer that the {branch} was changed and the
    // graph reduction logic will ensure that the uses are revisited properly.
    node->ReplaceInput(0, cond->InputAt(0));
    // Negate the hint for {branch}.
    NodeProperties::ChangeOp(
        node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
    return Changed(node);
  }
  Decision const decision = DecideCondition(js_heap_broker(), cond);
  if (decision == Decision::kUnknown) return NoChange();
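  // The condition is statically known: redirect each IfTrue/IfFalse use to
  // either the branch's incoming control or to {Dead}, then kill the branch
  // itself.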
  Node* const control = node->InputAt(1);
  for (Node* const use : node->uses()) {
    switch (use->opcode()) {
      case IrOpcode::kIfTrue:
        Replace(use, (decision == Decision::kTrue) ? control : dead());
        break;
      case IrOpcode::kIfFalse:
        Replace(use, (decision == Decision::kFalse) ? control : dead());
        break;
      default:
        UNREACHABLE();
    }
  }
  return Replace(dead());
}

Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
  DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
         node->opcode() == IrOpcode::kDeoptimizeUnless);
  bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  Node* condition = NodeProperties::GetValueInput(node, 0);
  Node* frame_state = NodeProperties::GetValueInput(node, 1);
  Node* effect = NodeProperties::GetEffectInput(node);
  Node* control = NodeProperties::GetControlInput(node);
  // Swap DeoptimizeIf/DeoptimizeUnless on {node} if {condition} is a
  // BooleanNot and use the input to BooleanNot as new condition for {node}.
  // Note we assume that {condition} was already properly optimized before we
  // get here (as guaranteed by the graph reduction logic).
  if (condition->opcode() == IrOpcode::kBooleanNot) {
    NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
    NodeProperties::ChangeOp(
        node,
        condition_is_true
            ? common()->DeoptimizeIf(p.kind(), p.reason(), p.feedback())
            : common()->DeoptimizeUnless(p.kind(), p.reason(), p.feedback()));
    return Changed(node);
  }
  Decision const decision = DecideCondition(js_heap_broker(), condition);
  if (decision == Decision::kUnknown) return NoChange();
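  // The condition is statically known: either the deoptimization can never be
  // taken and the node is removed from the effect and control chains, or it is
  // always taken and gets replaced by an unconditional {Deoptimize} that is
  // merged into the graph's end.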
  if (condition_is_true == (decision == Decision::kTrue)) {
    ReplaceWithValue(node, dead(), effect, control);
  } else {
    control = graph()->NewNode(
        common()->Deoptimize(p.kind(), p.reason(), p.feedback()), frame_state,
        effect, control);
    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
    NodeProperties::MergeControlToEnd(graph(), common(), control);
    Revisit(graph()->end());
  }
  return Replace(dead());
}

Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
  DCHECK_EQ(IrOpcode::kMerge, node->opcode());
  //
  // Check if this is a merge that belongs to an unused diamond, which means
  // that:
  //
  // a) the {Merge} has no {Phi} or {EffectPhi} uses, and
  // b) the {Merge} has two inputs, one {IfTrue} and one {IfFalse}, which are
  //    both owned by the Merge, and
  // c) the {IfTrue} and {IfFalse} nodes point to the same {Branch}.
  //
  if (node->InputCount() == 2) {
    for (Node* const use : node->uses()) {
      if (IrOpcode::IsPhiOpcode(use->opcode())) return NoChange();
    }
    Node* if_true = node->InputAt(0);
    Node* if_false = node->InputAt(1);
    if (if_true->opcode() != IrOpcode::kIfTrue) std::swap(if_true, if_false);
    if (if_true->opcode() == IrOpcode::kIfTrue &&
        if_false->opcode() == IrOpcode::kIfFalse &&
        if_true->InputAt(0) == if_false->InputAt(0) && if_true->OwnedBy(node) &&
        if_false->OwnedBy(node)) {
      Node* const branch = if_true->InputAt(0);
      DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
      DCHECK(branch->OwnedBy(if_true, if_false));
      Node* const control = branch->InputAt(1);
      // Mark the {branch} as {Dead}.
      branch->TrimInputCount(0);
      NodeProperties::ChangeOp(branch, common()->Dead());
      return Replace(control);
    }
  }
  return NoChange();
}


Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
  DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
  Node::Inputs inputs = node->inputs();
  int const effect_input_count = inputs.count() - 1;
  DCHECK_LE(1, effect_input_count);
  Node* const merge = inputs[effect_input_count];
  DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
  DCHECK_EQ(effect_input_count, merge->InputCount());
  Node* const effect = inputs[0];
  DCHECK_NE(node, effect);
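  // The {EffectPhi} is redundant if all of its effect inputs are the same,
  // ignoring self-references, which can only occur for loops.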
  for (int i = 1; i < effect_input_count; ++i) {
    Node* const input = inputs[i];
    if (input == node) {
      // Ignore redundant inputs.
      DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
      continue;
    }
    if (input != effect) return NoChange();
  }
  // We might now be able to further reduce the {merge} node.
  Revisit(merge);
  return Replace(effect);
}


Reduction CommonOperatorReducer::ReducePhi(Node* node) {
  DCHECK_EQ(IrOpcode::kPhi, node->opcode());
  Node::Inputs inputs = node->inputs();
  int const value_input_count = inputs.count() - 1;
  DCHECK_LE(1, value_input_count);
  Node* const merge = inputs[value_input_count];
  DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
  DCHECK_EQ(value_input_count, merge->InputCount());
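  // For a two-value {Phi} hanging off a diamond, try to match the pattern
  // 0.0 < v ? v : 0.0 - v and lower the {Phi} to Float32Abs/Float64Abs of v.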
  if (value_input_count == 2) {
    Node* vtrue = inputs[0];
    Node* vfalse = inputs[1];
    Node::Inputs merge_inputs = merge->inputs();
    Node* if_true = merge_inputs[0];
    Node* if_false = merge_inputs[1];
    if (if_true->opcode() != IrOpcode::kIfTrue) {
      std::swap(if_true, if_false);
      std::swap(vtrue, vfalse);
    }
    if (if_true->opcode() == IrOpcode::kIfTrue &&
        if_false->opcode() == IrOpcode::kIfFalse &&
        if_true->InputAt(0) == if_false->InputAt(0)) {
      Node* const branch = if_true->InputAt(0);
      // Check that the branch is not dead already.
      if (branch->opcode() != IrOpcode::kBranch) return NoChange();
      Node* const cond = branch->InputAt(0);
      if (cond->opcode() == IrOpcode::kFloat32LessThan) {
        Float32BinopMatcher mcond(cond);
        if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
            vfalse->opcode() == IrOpcode::kFloat32Sub) {
          Float32BinopMatcher mvfalse(vfalse);
          if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
            // We might now be able to further reduce the {merge} node.
            Revisit(merge);
            return Change(node, machine()->Float32Abs(), vtrue);
          }
        }
      } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
        Float64BinopMatcher mcond(cond);
        if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
            vfalse->opcode() == IrOpcode::kFloat64Sub) {
          Float64BinopMatcher mvfalse(vfalse);
          if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
            // We might now be able to further reduce the {merge} node.
            Revisit(merge);
            return Change(node, machine()->Float64Abs(), vtrue);
          }
        }
      }
    }
  }
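  // Otherwise the {Phi} is redundant if all of its value inputs are the same,
  // ignoring self-references, which can only occur for loops.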
  Node* const value = inputs[0];
  DCHECK_NE(node, value);
  for (int i = 1; i < value_input_count; ++i) {
    Node* const input = inputs[i];
    if (input == node) {
      // Ignore redundant inputs.
      DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
      continue;
    }
    if (input != value) return NoChange();
  }
  // We might now be able to further reduce the {merge} node.
  Revisit(merge);
  return Replace(value);
}

Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
  DCHECK_EQ(IrOpcode::kReturn, node->opcode());
  Node* effect = NodeProperties::GetEffectInput(node);
  if (effect->opcode() == IrOpcode::kCheckpoint) {
    // A {Return} node can never be used to insert a deoptimization point,
    // hence checkpoints can be cut out of the effect chain flowing into it.
    effect = NodeProperties::GetEffectInput(effect);
    NodeProperties::ReplaceEffectInput(node, effect);
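    // Retry the reduction now that the {Checkpoint} has been cut out; even if
    // nothing further applies, {node} itself has still changed.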
    Reduction const reduction = ReduceReturn(node);
    return reduction.Changed() ? reduction : Changed(node);
  }
  // TODO(ahaas): Extend the reduction below to multiple return values.
  if (ValueInputCountOfReturn(node->op()) != 1) {
    return NoChange();
  }
  Node* pop_count = NodeProperties::GetValueInput(node, 0);
  Node* value = NodeProperties::GetValueInput(node, 1);
  Node* control = NodeProperties::GetControlInput(node);
  if (value->opcode() == IrOpcode::kPhi &&
      NodeProperties::GetControlInput(value) == control &&
      control->opcode() == IrOpcode::kMerge) {
    // This optimization pushes {Return} nodes through merges. It checks that
    // the return value is actually a {Phi} and the return control dependency
    // is the {Merge} to which the {Phi} belongs.

    // Value1 ... ValueN Control1 ... ControlN
    //    ^          ^      ^            ^
    //    |          |      |            |
    //    +----+-----+      +------+-----+
    //         |                   |
    //        Phi --------------> Merge
    //         ^                   ^
    //         |                   |
    //         |  +----------------+
    //         |  |
    //        Return -----> Effect
    //          ^
    //          |
    //         End

    // Now the effect input to the {Return} node can be either an {EffectPhi}
    // hanging off the same {Merge}, or the {Merge} node is only connected to
    // the {Return} and the {Phi}, in which case we know that the effect input
    // must somehow dominate all merged branches.

    Node::Inputs control_inputs = control->inputs();
    Node::Inputs value_inputs = value->inputs();
    DCHECK_NE(0, control_inputs.count());
    DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
    DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
    DCHECK_NE(0, graph()->end()->InputCount());
    if (control->OwnedBy(node, value)) {
      for (int i = 0; i < control_inputs.count(); ++i) {
        // Create a new {Return} and connect it to {end}. We don't need to mark
        // {end} as revisit, because we mark {node} as {Dead} below, which was
        // previously connected to {end}, so we know for sure that at some
        // point the reducer logic will visit {end} again.
        Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
                                     effect, control_inputs[i]);
        NodeProperties::MergeControlToEnd(graph(), common(), ret);
      }
      // Mark the Merge {control} and Return {node} as {dead}.
      Replace(control, dead());
      return Replace(dead());
    } else if (effect->opcode() == IrOpcode::kEffectPhi &&
               NodeProperties::GetControlInput(effect) == control) {
      Node::Inputs effect_inputs = effect->inputs();
      DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
      for (int i = 0; i < control_inputs.count(); ++i) {
        // Create a new {Return} and connect it to {end}. We don't need to mark
        // {end} as revisit, because we mark {node} as {Dead} below, which was
        // previously connected to {end}, so we know for sure that at some
        // point the reducer logic will visit {end} again.
        Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
                                     effect_inputs[i], control_inputs[i]);
        NodeProperties::MergeControlToEnd(graph(), common(), ret);
      }
      // Mark the Merge {control} and Return {node} as {dead}.
      Replace(control, dead());
      return Replace(dead());
    }
  }
  return NoChange();
}

Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
  DCHECK_EQ(IrOpcode::kSelect, node->opcode());
  Node* const cond = node->InputAt(0);
  Node* const vtrue = node->InputAt(1);
  Node* const vfalse = node->InputAt(2);
  if (vtrue == vfalse) return Replace(vtrue);
  switch (DecideCondition(js_heap_broker(), cond)) {
    case Decision::kTrue:
      return Replace(vtrue);
    case Decision::kFalse:
      return Replace(vfalse);
    case Decision::kUnknown:
      break;
  }
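  // The condition is not statically known; try to match the pattern
  // Select(0.0 < v, v, 0.0 - v) and lower it to Float32Abs/Float64Abs of v.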
  switch (cond->opcode()) {
    case IrOpcode::kFloat32LessThan: {
      Float32BinopMatcher mcond(cond);
      if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
          vfalse->opcode() == IrOpcode::kFloat32Sub) {
        Float32BinopMatcher mvfalse(vfalse);
        if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
          return Change(node, machine()->Float32Abs(), vtrue);
        }
      }
      break;
    }
    case IrOpcode::kFloat64LessThan: {
      Float64BinopMatcher mcond(cond);
      if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
          vfalse->opcode() == IrOpcode::kFloat64Sub) {
        Float64BinopMatcher mvfalse(vfalse);
        if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
          return Change(node, machine()->Float64Abs(), vtrue);
        }
      }
      break;
    }
    default:
      break;
  }
  return NoChange();
}

Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
  DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
  Node* const switched_value = node->InputAt(0);
  Node* const control = node->InputAt(1);

  // Attempt to match the switched value, if it is a constant, against the
  // IfValue cases. If no case matches, then use the IfDefault. We don't bother
  // marking non-matching cases as dead code (same for an unused IfDefault),
  // because the Switch itself will be marked as dead code.
  Int32Matcher mswitched(switched_value);
  if (mswitched.HasValue()) {
    bool matched = false;

    size_t const projection_count = node->op()->ControlOutputCount();
    Node** projections = zone_->NewArray<Node*>(projection_count);
    NodeProperties::CollectControlProjections(node, projections,
                                              projection_count);
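    // The last collected projection is the {IfDefault} (checked below); scan
    // the preceding {IfValue} projections for a matching case value.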
    for (size_t i = 0; i < projection_count - 1; i++) {
      Node* if_value = projections[i];
      DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
      const IfValueParameters& p = IfValueParametersOf(if_value->op());
      if (p.value() == mswitched.Value()) {
        matched = true;
        Replace(if_value, control);
        break;
      }
    }
    if (!matched) {
      Node* if_default = projections[projection_count - 1];
      DCHECK_EQ(IrOpcode::kIfDefault, if_default->opcode());
      Replace(if_default, control);
    }
    return Replace(dead());
  }
  return NoChange();
}

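// Helpers that rewire {node} in place: the inputs are replaced by {a} (and
// {b}), any remaining inputs are trimmed away, and the operator is changed
// to {op}.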
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
                                        Node* a) {
  node->ReplaceInput(0, a);
  node->TrimInputCount(1);
  NodeProperties::ChangeOp(node, op);
  return Changed(node);
}


Reduction CommonOperatorReducer::Change(Node* node, Operator const* op, Node* a,
                                        Node* b) {
  node->ReplaceInput(0, a);
  node->ReplaceInput(1, b);
  node->TrimInputCount(2);
  NodeProperties::ChangeOp(node, op);
  return Changed(node);
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8