// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds Arm-specific methods for generating InstructionOperands.
class ArmOperandGenerator : public OperandGenerator {
 public:
  explicit ArmOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

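  // An ARM data-processing ("addressing mode 1") immediate is an 8-bit value
  // rotated right by an even amount, so masks such as 0xff, 0x3fc or
  // 0xff000000 encode but arbitrary 32-bit constants do not; the assembler
  // performs the exact check.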
  bool CanBeImmediate(int32_t value) const {
    return Assembler::ImmediateFitsAddrMode1Instruction(value);
  }

  bool CanBeImmediate(uint32_t value) const {
    return CanBeImmediate(bit_cast<int32_t>(value));
  }

  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      case kArmAnd:
      case kArmMov:
      case kArmMvn:
      case kArmBic:
        return CanBeImmediate(value) || CanBeImmediate(~value);

      case kArmAdd:
      case kArmSub:
      case kArmCmp:
      case kArmCmn:
        return CanBeImmediate(value) || CanBeImmediate(-value);

      case kArmTst:
      case kArmTeq:
      case kArmOrr:
      case kArmEor:
      case kArmRsb:
        return CanBeImmediate(value);

      case kArmVldrF32:
      case kArmVstrF32:
      case kArmVldrF64:
      case kArmVstrF64:
        return value >= -1020 && value <= 1020 && (value % 4) == 0;

      case kArmLdrb:
      case kArmLdrsb:
      case kArmStrb:
      case kArmLdr:
      case kArmStr:
        return value >= -4095 && value <= 4095;

      case kArmLdrh:
      case kArmLdrsh:
      case kArmStrh:
        return value >= -255 && value <= 255;

      default:
        break;
    }
    return false;
  }
};


namespace {

void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  ArmOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  ArmOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

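// Matches a shift node of kind |kOpcode| into an ARM Operand2 shifted
// register, using the immediate-shift encoding when the shift amount is a
// constant in [kImmMin, kImmMax] and the register encoding otherwise.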
template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
          AddressingMode kImmMode, AddressingMode kRegMode>
bool TryMatchShift(InstructionSelector* selector,
                   InstructionCode* opcode_return, Node* node,
                   InstructionOperand* value_return,
                   InstructionOperand* shift_return) {
  ArmOperandGenerator g(selector);
  if (node->opcode() == kOpcode) {
    Int32BinopMatcher m(node);
    *value_return = g.UseRegister(m.left().node());
    if (m.right().IsInRange(kImmMin, kImmMax)) {
      *opcode_return |= AddressingModeField::encode(kImmMode);
      *shift_return = g.UseImmediate(m.right().node());
    } else {
      *opcode_return |= AddressingModeField::encode(kRegMode);
      *shift_return = g.UseRegister(m.right().node());
    }
    return true;
  }
  return false;
}

template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
          AddressingMode kImmMode>
bool TryMatchShiftImmediate(InstructionSelector* selector,
                            InstructionCode* opcode_return, Node* node,
                            InstructionOperand* value_return,
                            InstructionOperand* shift_return) {
  ArmOperandGenerator g(selector);
  if (node->opcode() == kOpcode) {
    Int32BinopMatcher m(node);
    if (m.right().IsInRange(kImmMin, kImmMax)) {
      *opcode_return |= AddressingModeField::encode(kImmMode);
      *value_return = g.UseRegister(m.left().node());
      *shift_return = g.UseImmediate(m.right().node());
      return true;
    }
  }
  return false;
}

bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
                       kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}


bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
                       kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}


bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
                       kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}

bool TryMatchLSLImmediate(InstructionSelector* selector,
                          InstructionCode* opcode_return, Node* node,
                          InstructionOperand* value_return,
                          InstructionOperand* shift_return) {
  return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
                                kMode_Operand2_R_LSL_I>(
      selector, opcode_return, node, value_return, shift_return);
}

bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
                 Node* node, InstructionOperand* value_return,
                 InstructionOperand* shift_return) {
  return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
                       kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
                                               value_return, shift_return);
}


bool TryMatchShift(InstructionSelector* selector,
                   InstructionCode* opcode_return, Node* node,
                   InstructionOperand* value_return,
                   InstructionOperand* shift_return) {
  return (
      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
}


bool TryMatchImmediateOrShift(InstructionSelector* selector,
                              InstructionCode* opcode_return, Node* node,
                              size_t* input_count_return,
                              InstructionOperand* inputs) {
  ArmOperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
    *input_count_return = 2;
    return true;
  }
  return false;
}

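// Shared routine for binary operations. If neither input fits an immediate
// or shift Operand2, both are used as registers; when only the left input
// does, the operands are swapped and |reverse_opcode| (e.g. RSB for SUB) is
// used instead.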
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, InstructionCode reverse_opcode,
                FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  if (m.left().node() == m.right().node()) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov r0, r1, asr #16
    //   adds r0, r0, r1, asr #16
    //   bvs label
    InstructionOperand const input = g.UseRegister(m.left().node());
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                                      &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
                                      m.left().node(), &input_count,
                                      &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count,
                             inputs, cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}


void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}

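// Emits an integer division. Without the SUDIV extension ARM has no integer
// divide instruction, so the quotient is computed in double precision
// instead: both operands are converted to float64, divided with vdiv.f64,
// and the result converted back to int32.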
void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
             ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
             InstructionOperand result_operand, InstructionOperand left_operand,
             InstructionOperand right_operand) {
  ArmOperandGenerator g(selector);
  if (selector->IsSupported(SUDIV)) {
    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    return;
  }
  InstructionOperand left_double_operand = g.TempDoubleRegister();
  InstructionOperand right_double_operand = g.TempDoubleRegister();
  InstructionOperand result_double_operand = g.TempDoubleRegister();
  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
                 right_double_operand);
  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}


void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
          g.UseRegister(m.right().node()));
}

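// Computes the remainder as left - (left / right) * right, using a single
// MLS on ARMv7 and a MUL/SUB pair otherwise.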
void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand div_operand = g.TempRegister();
  InstructionOperand result_operand = g.DefineAsRegister(node);
  InstructionOperand left_operand = g.UseRegister(m.left().node());
  InstructionOperand right_operand = g.UseRegister(m.right().node());
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (selector->IsSupported(ARMv7)) {
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
  } else {
    InstructionOperand mul_operand = g.TempRegister();
    selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
    selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
                   result_operand, left_operand, mul_operand);
  }
}

}  // namespace

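// Loads pick between three addressing modes: an immediate offset (RI), a
// register offset (RR) and, for plain word loads only, a register offset
// scaled by a constant shift (Operand2_R_LSL_I).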
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[1];

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kArmVldrF32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kArmVldrF64;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kArmLdr;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  outputs[0] = g.DefineAsRegister(node);
  inputs[0] = g.UseRegister(base);

  if (g.CanBeImmediate(index, opcode)) {
    input_count = 2;
    inputs[1] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RI);
  } else if ((opcode == kArmLdr) &&
             TryMatchLSLImmediate(this, &opcode, index, &inputs[1],
                                  &inputs[2])) {
    input_count = 3;
  } else {
    input_count = 2;
    inputs[1] = g.UseRegister(index);
    opcode |= AddressingModeField::encode(kMode_Offset_RR);
  }

  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}

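// Stores either emit a kArchStoreWithWriteBarrier (for tagged values that
// need a write barrier) or a plain store that mirrors the addressing-mode
// selection used for loads.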
void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_Offset_RI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_Offset_RR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    InstructionOperand inputs[4];
    size_t input_count = 0;

    InstructionCode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kArmVstrF32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kArmVstrF64;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kArmStrb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kArmStrh;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kArmStr;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    inputs[0] = g.UseRegister(value);
    inputs[1] = g.UseRegister(base);

    if (g.CanBeImmediate(index, opcode)) {
      input_count = 3;
      inputs[2] = g.UseImmediate(index);
      opcode |= AddressingModeField::encode(kMode_Offset_RI);
    } else if ((opcode == kArmStr) &&
               TryMatchLSLImmediate(this, &opcode, index, &inputs[2],
                                    &inputs[3])) {
      input_count = 4;
    } else {
      input_count = 3;
      inputs[2] = g.UseRegister(index);
      opcode |= AddressingModeField::encode(kMode_Offset_RR);
    }
    Emit(opcode, 0, nullptr, input_count, inputs);
  }
}

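// Checked loads pass the offset, length and buffer as inputs so that the
// generated code can bounds-check the access before loading.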
void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
                                          ? g.UseImmediate(length)
                                          : g.UseRegister(length);
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
       g.DefineAsRegister(node), offset_operand, length_operand,
       g.UseRegister(buffer), offset_operand);
}


void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* const buffer = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:      // Fall through.
    case MachineRepresentation::kTagged:   // Fall through.
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  InstructionOperand offset_operand = g.UseRegister(offset);
  InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
                                          ? g.UseImmediate(length)
                                          : g.UseRegister(length);
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
       offset_operand, length_operand, g.UseRegister(value),
       g.UseRegister(buffer), offset_operand);
}


namespace {

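// Emits BIC (AND with the bitwise complement of the second operand), folding
// a shift of the complemented input into the Operand2 when possible.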
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
             Node* right) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmBic;
  InstructionOperand value_operand;
  InstructionOperand shift_operand;
  if (TryMatchShift(selector, &opcode, right, &value_operand,
                    &shift_operand)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
                   value_operand, shift_operand);
    return;
  }
  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
                 g.DefineAsRegister(node), g.UseRegister(left),
                 g.UseRegister(right));
}


void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
              uint32_t lsb, uint32_t width) {
  DCHECK_LE(1u, width);
  DCHECK_LE(width, 32u - lsb);
  ArmOperandGenerator g(selector);
  selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
                 g.TempImmediate(lsb), g.TempImmediate(width));
}

}  // namespace

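// AND with a constant mask is strength-reduced where profitable: UXTB/UXTH
// for 0xff/0xffff masks of a (possibly shifted) value, UBFX or BFC for
// contiguous bit ranges on ARMv7, and BIC when the inverted mask fits an
// immediate.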
void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBic(this, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBic(this, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (m.right().HasValue()) {
    uint32_t const value = m.right().Value();
    uint32_t width = base::bits::CountPopulation32(value);
    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);

    // Try to merge SHR operations on the left hand input into this AND.
    if (m.left().IsWord32Shr()) {
      Int32BinopMatcher mshr(m.left().node());
      if (mshr.right().HasValue()) {
        uint32_t const shift = mshr.right().Value();

        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
            ((value == 0xff) || (value == 0xffff))) {
          // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
          // bytewise rotation.
          Emit((value == 0xff) ? kArmUxtb : kArmUxth,
               g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
               g.TempImmediate(mshr.right().Value()));
          return;
        } else if (IsSupported(ARMv7) && (width != 0) &&
                   ((leading_zeros + width) == 32)) {
          // Merge Shr into And by emitting a UBFX instruction.
          DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
          if ((1 <= shift) && (shift <= 31)) {
            // UBFX cannot extract bits past the register size, however since
            // shifting the original value would have introduced some zeros we
            // can still use UBFX with a smaller mask and the remaining bits
            // will be zeros.
            EmitUbfx(this, node, mshr.left().node(), shift,
                     std::min(width, 32 - shift));
            return;
          }
        }
      }
    } else if (value == 0xffff) {
      // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
      // better than AND 0xff for this operation.
      Emit(kArmUxth, g.DefineAsRegister(m.node()),
           g.UseRegister(m.left().node()), g.TempImmediate(0));
      return;
    }
    if (g.CanBeImmediate(~value)) {
      // Emit BIC for this AND by inverting the immediate value first.
      Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(~value));
      return;
    }
    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
      // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
      // replace this AND with UBFX. Other contiguous bit patterns have already
      // been handled by BIC or will be handled by AND.
      if ((width != 0) && ((leading_zeros + width) == 32) &&
          (9 <= leading_zeros) && (leading_zeros <= 23)) {
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
        EmitUbfx(this, node, m.left().node(), 0, width);
        return;
      }

      width = 32 - width;
      leading_zeros = base::bits::CountLeadingZeros32(~value);
      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
      if ((leading_zeros + width + lsb) == 32) {
        // This AND can be replaced with BFC.
        Emit(kArmBfc, g.DefineSameAsFirst(node),
             g.UseRegister(m.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(width));
        return;
      }
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}

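// x ^ -1 is bitwise NOT: emitted as MVN, folding a shift of x into the
// Operand2 when possible.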
void InstructionSelector::VisitWord32Xor(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    InstructionCode opcode = kArmMvn;
    InstructionOperand value_operand;
    InstructionOperand shift_operand;
    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
                      &shift_operand)) {
      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
      return;
    }
    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmEor, kArmEor);
}


namespace {

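// Shared routine for shift operations; these are implemented as a
// flag-setting MOV with a shifted-register Operand2.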
template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
                TryMatchShift try_match_shift, FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmMov;
  InstructionOperand inputs[4];
  size_t input_count = 2;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);
  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count,
                             inputs, cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}


template <typename TryMatchShift>
void VisitShift(InstructionSelector* selector, Node* node,
                TryMatchShift try_match_shift) {
  FlagsContinuation cont;
  VisitShift(selector, node, try_match_shift, &cont);
}

}  // namespace


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, TryMatchLSL);
}

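// (x & mask) >> lsb with a mask of contiguous bits starting at lsb is
// emitted as a single UBFX on ARMv7.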
void InstructionSelector::VisitWord32Shr(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
      m.right().IsInRange(0, 31)) {
    uint32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
      uint32_t width = base::bits::CountPopulation32(value);
      uint32_t msb = base::bits::CountLeadingZeros32(value);
      if (msb + width + lsb == 32) {
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
        return EmitUbfx(this, node, mleft.left().node(), lsb, width);
      }
    }
  }
  VisitShift(this, node, TryMatchLSR);
}

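// An arithmetic (x << k) >> k is a sign extension: emitted as SXTH for
// k == 16, SXTB for k == 24, and as SBFX on ARMv7 for the general
// (x << shl) >> sar case with sar >= shl.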
void InstructionSelector::VisitWord32Sar(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        Emit(kArmSxth, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kArmSxtb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      } else if (IsSupported(ARMv7) && (sar >= shl)) {
        Emit(kArmSbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
             g.TempImmediate(32 - sar));
        return;
      }
    }
  }
  VisitShift(this, node, TryMatchASR);
}

void InstructionSelector::VisitInt32PairAdd(Node* node) {
  ArmOperandGenerator g(this);

  // We use UseUniqueRegister here to avoid register sharing with the output
  // registers.
  InstructionOperand inputs[] = {
      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmAddPair, 2, outputs, 4, inputs);
}

void InstructionSelector::VisitInt32PairSub(Node* node) {
  ArmOperandGenerator g(this);

  // We use UseUniqueRegister here to avoid register sharing with the output
  // registers.
  InstructionOperand inputs[] = {
      g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
      g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmSubPair, 2, outputs, 4, inputs);
}

void InstructionSelector::VisitInt32PairMul(Node* node) {
  ArmOperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 g.UseUniqueRegister(node->InputAt(2)),
                                 g.UseUniqueRegister(node->InputAt(3))};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmMulPair, 2, outputs, 4, inputs);
}

void InstructionSelector::VisitWord32PairShl(Node* node) {
  ArmOperandGenerator g(this);
  // We use g.UseUniqueRegister here for InputAt(0) to guarantee that there is
  // no register aliasing with the output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseRegister(node->InputAt(1)),
                                 shift_operand};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmLslPair, 2, outputs, 3, inputs);
}

void InstructionSelector::VisitWord32PairShr(Node* node) {
  ArmOperandGenerator g(this);
  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
  // guarantee that there is no register aliasing with the output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmLsrPair, 2, outputs, 3, inputs);
}

void InstructionSelector::VisitWord32PairSar(Node* node) {
  ArmOperandGenerator g(this);
  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
  // guarantee that there is no register aliasing with the output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmAsrPair, 2, outputs, 3, inputs);
}

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, TryMatchROR);
}


void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kArmClz, node);
}


void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }


void InstructionSelector::VisitWord32ReverseBits(Node* node) {
  DCHECK(IsSupported(ARMv7));
  VisitRR(this, kArmRbit, node);
}


void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }

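// Additions fold a covered multiply on either side into MLA (or SMMLA for
// the high word) and fold masked or sign-extended operands into
// UXTAB/UXTAH/SXTAB/SXTAH.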
void InstructionSelector::VisitInt32Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(node, m.left().node())) {
    switch (m.left().opcode()) {
      case IrOpcode::kInt32Mul: {
        Int32BinopMatcher mleft(m.left().node());
        Emit(kArmMla, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseRegister(mleft.right().node()),
             g.UseRegister(m.right().node()));
        return;
      }
      case IrOpcode::kInt32MulHigh: {
        Int32BinopMatcher mleft(m.left().node());
        Emit(kArmSmmla, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseRegister(mleft.right().node()),
             g.UseRegister(m.right().node()));
        return;
      }
      case IrOpcode::kWord32And: {
        Int32BinopMatcher mleft(m.left().node());
        if (mleft.right().Is(0xff)) {
          Emit(kArmUxtab, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
          return;
        } else if (mleft.right().Is(0xffff)) {
          Emit(kArmUxtah, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
          return;
        }
        break;  // Avoid falling through into the kWord32Sar matcher below.
      }
      case IrOpcode::kWord32Sar: {
        Int32BinopMatcher mleft(m.left().node());
        if (CanCover(mleft.node(), mleft.left().node()) &&
            mleft.left().IsWord32Shl()) {
          Int32BinopMatcher mleftleft(mleft.left().node());
          if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
            Emit(kArmSxtab, g.DefineAsRegister(node),
                 g.UseRegister(m.right().node()),
                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
            return;
          } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
            Emit(kArmSxtah, g.DefineAsRegister(node),
                 g.UseRegister(m.right().node()),
                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  if (CanCover(node, m.right().node())) {
    switch (m.right().opcode()) {
      case IrOpcode::kInt32Mul: {
        Int32BinopMatcher mright(m.right().node());
        Emit(kArmMla, g.DefineAsRegister(node),
             g.UseRegister(mright.left().node()),
             g.UseRegister(mright.right().node()),
             g.UseRegister(m.left().node()));
        return;
      }
      case IrOpcode::kInt32MulHigh: {
        Int32BinopMatcher mright(m.right().node());
        Emit(kArmSmmla, g.DefineAsRegister(node),
             g.UseRegister(mright.left().node()),
             g.UseRegister(mright.right().node()),
             g.UseRegister(m.left().node()));
        return;
      }
      case IrOpcode::kWord32And: {
        Int32BinopMatcher mright(m.right().node());
        if (mright.right().Is(0xff)) {
          Emit(kArmUxtab, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()), g.TempImmediate(0));
          return;
        } else if (mright.right().Is(0xffff)) {
          Emit(kArmUxtah, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()), g.TempImmediate(0));
          return;
        }
        break;  // Avoid falling through into the kWord32Sar matcher below.
      }
      case IrOpcode::kWord32Sar: {
        Int32BinopMatcher mright(m.right().node());
        if (CanCover(mright.node(), mright.left().node()) &&
            mright.left().IsWord32Shl()) {
          Int32BinopMatcher mrightleft(mright.left().node());
          if (mright.right().Is(24) && mrightleft.right().Is(24)) {
            Emit(kArmSxtab, g.DefineAsRegister(node),
                 g.UseRegister(m.left().node()),
                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
            return;
          } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
            Emit(kArmSxtah, g.DefineAsRegister(node),
                 g.UseRegister(m.left().node()),
                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}

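// left - (a * b) is matched into a single MLS (multiply-and-subtract) on
// ARMv7.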
void InstructionSelector::VisitInt32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
      CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMls, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}

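// Multiplications by constants of the form 2^k + 1 or 2^k - 1 are
// strength-reduced to a shift plus an add or reverse-subtract:
//   x * (2^k + 1) == x + (x << k)
//   x * (2^k - 1) == (x << k) - x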
void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  VisitRRR(this, kArmMul, node);
}


void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kArmSmmul, node);
}

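// UMULL yields a 64-bit product; the high word defines this node and the low
// word goes into a scratch register.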
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  ArmOperandGenerator g(this);
  InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
                                 g.UseRegister(node->InputAt(1))};
  Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kArmVcvtF64F32, node);
}


void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kArmVcvtF32S32, node);
}


void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kArmVcvtF32U32, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kArmVcvtF64S32, node);
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kArmVcvtF64U32, node);
}


void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kArmVcvtS32F32, node);
}


void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kArmVcvtU32F32, node);
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kArmVcvtS32F64, node);
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kArmVcvtU32F64, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kArmVcvtU32F64, node);
}

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kArmVcvtF32F64, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kArmVcvtS32F64, node);
}

void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kArmVmovU32F32, node);
}

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kArmVmovF32U32, node);
}

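// A float addition with a covered multiplication on either side is fused
// into a multiply-accumulate (VMLA).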
void InstructionSelector::VisitFloat32Add(Node* node) {
  ArmOperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
    Float32BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF32, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
    Float32BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF32, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(this, kArmVaddF32, node);
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    Float64BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Float64BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(this, kArmVaddF64, node);
}

namespace {
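// A multiplication on the right-hand side of a subtraction is fused into a
// multiply-subtract (VMLS).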
void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
  ArmOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
    Float32BinopMatcher mright(m.right().node());
    selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
                   g.UseRegister(m.left().node()),
                   g.UseRegister(mright.left().node()),
                   g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(selector, kArmVsubF32, node);
}

void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
  ArmOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
    Float64BinopMatcher mright(m.right().node());
    selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
                   g.UseRegister(m.left().node()),
                   g.UseRegister(mright.left().node()),
                   g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(selector, kArmVsubF64, node);
}
}  // namespace

void InstructionSelector::VisitFloat32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Float32BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    Emit(kArmVnegF32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitFloat32SubHelper(this, node);
}

void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
  VisitFloat32SubHelper(this, node);
}

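// -0.0 - RoundDown(-0.0 - x) is recognized as RoundUp(x), since
// -floor(-x) == ceil(x), and emitted as a single VRINTP; a plain -0.0 - x is
// just a negation.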
void InstructionSelector::VisitFloat64Sub(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero()) {
    if (m.right().IsFloat64RoundDown() &&
        CanCover(m.node(), m.right().node())) {
      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
          CanCover(m.right().node(), m.right().InputAt(0))) {
        Float64BinopMatcher mright0(m.right().InputAt(0));
        if (mright0.left().IsMinusZero()) {
          Emit(kArmVrintpF64, g.DefineAsRegister(node),
               g.UseRegister(mright0.right().node()));
          return;
        }
      }
    }
    Emit(kArmVnegF64, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitFloat64SubHelper(this, node);
}

void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
  VisitFloat64SubHelper(this, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kArmVmulF32, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kArmVmulF64, node);
}


void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kArmVdivF32, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kArmVdivF64, node);
}

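// Float64 modulus is implemented by a runtime call, hence the fixed d0/d1
// argument registers and MarkAsCall().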
VisitFloat64Mod(Node * node)1355 void InstructionSelector::VisitFloat64Mod(Node* node) {
1356 ArmOperandGenerator g(this);
1357 Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
1358 g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
1359 }
1360
VisitFloat32Max(Node * node)1361 void InstructionSelector::VisitFloat32Max(Node* node) {
1362 DCHECK(IsSupported(ARMv8));
1363 VisitRRR(this, kArmFloat32Max, node);
1364 }
1365
VisitFloat64Max(Node * node)1366 void InstructionSelector::VisitFloat64Max(Node* node) {
1367 DCHECK(IsSupported(ARMv8));
1368 VisitRRR(this, kArmFloat64Max, node);
1369 }
1370
VisitFloat64SilenceNaN(Node * node)1371 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1372 VisitRR(this, kArmFloat64SilenceNaN, node);
1373 }
1374
VisitFloat32Min(Node * node)1375 void InstructionSelector::VisitFloat32Min(Node* node) {
1376 DCHECK(IsSupported(ARMv8));
1377 VisitRRR(this, kArmFloat32Min, node);
1378 }
1379
VisitFloat64Min(Node * node)1380 void InstructionSelector::VisitFloat64Min(Node* node) {
1381 DCHECK(IsSupported(ARMv8));
1382 VisitRRR(this, kArmFloat64Min, node);
1383 }
1384
VisitFloat32Abs(Node * node)1385 void InstructionSelector::VisitFloat32Abs(Node* node) {
1386 VisitRR(this, kArmVabsF32, node);
1387 }
1388
1389
VisitFloat64Abs(Node * node)1390 void InstructionSelector::VisitFloat64Abs(Node* node) {
1391 VisitRR(this, kArmVabsF64, node);
1392 }
1393
VisitFloat32Sqrt(Node * node)1394 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
1395 VisitRR(this, kArmVsqrtF32, node);
1396 }
1397
1398
VisitFloat64Sqrt(Node * node)1399 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
1400 VisitRR(this, kArmVsqrtF64, node);
1401 }
1402
1403
VisitFloat32RoundDown(Node * node)1404 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1405 VisitRR(this, kArmVrintmF32, node);
1406 }
1407
1408
VisitFloat64RoundDown(Node * node)1409 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1410 VisitRR(this, kArmVrintmF64, node);
1411 }
1412
1413
VisitFloat32RoundUp(Node * node)1414 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
1415 VisitRR(this, kArmVrintpF32, node);
1416 }
1417
1418
VisitFloat64RoundUp(Node * node)1419 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
1420 VisitRR(this, kArmVrintpF64, node);
1421 }
1422
1423
VisitFloat32RoundTruncate(Node * node)1424 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
1425 VisitRR(this, kArmVrintzF32, node);
1426 }
1427
1428
VisitFloat64RoundTruncate(Node * node)1429 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1430 VisitRR(this, kArmVrintzF64, node);
1431 }
1432
1433
VisitFloat64RoundTiesAway(Node * node)1434 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1435 VisitRR(this, kArmVrintaF64, node);
1436 }
1437
1438
VisitFloat32RoundTiesEven(Node * node)1439 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
1440 VisitRR(this, kArmVrintnF32, node);
1441 }
1442
1443
VisitFloat64RoundTiesEven(Node * node)1444 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
1445 VisitRR(this, kArmVrintnF64, node);
1446 }
1447
VisitFloat32Neg(Node * node)1448 void InstructionSelector::VisitFloat32Neg(Node* node) {
1449 VisitRR(this, kArmVnegF32, node);
1450 }
1451
VisitFloat64Neg(Node * node)1452 void InstructionSelector::VisitFloat64Neg(Node* node) {
1453 VisitRR(this, kArmVnegF64, node);
1454 }
1455
VisitFloat64Ieee754Binop(Node * node,InstructionCode opcode)1456 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1457 InstructionCode opcode) {
1458 ArmOperandGenerator g(this);
1459 Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
1460 g.UseFixed(node->InputAt(1), d1))
1461 ->MarkAsCall();
1462 }
1463
VisitFloat64Ieee754Unop(Node * node,InstructionCode opcode)1464 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1465 InstructionCode opcode) {
1466 ArmOperandGenerator g(this);
1467 Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
1468 ->MarkAsCall();
1469 }
1470
EmitPrepareArguments(ZoneVector<PushParameter> * arguments,const CallDescriptor * descriptor,Node * node)1471 void InstructionSelector::EmitPrepareArguments(
1472 ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1473 Node* node) {
1474 ArmOperandGenerator g(this);
1475
1476 // Prepare for C function call.
1477 if (descriptor->IsCFunctionCall()) {
1478 Emit(kArchPrepareCallCFunction |
1479 MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
1480 0, nullptr, 0, nullptr);
1481
1482 // Poke any stack arguments.
1483 for (size_t n = 0; n < arguments->size(); ++n) {
1484 PushParameter input = (*arguments)[n];
1485 if (input.node()) {
1486 int slot = static_cast<int>(n);
1487 Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
1488 g.UseRegister(input.node()));
1489 }
1490 }
1491 } else {
1492 // Push any stack arguments.
1493 for (PushParameter input : base::Reversed(*arguments)) {
1494 // Skip any alignment holes in pushed nodes.
1495 if (input.node() == nullptr) continue;
1496 Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
1497 }
1498 }
1499 }
1500
1501
IsTailCallAddressImmediate()1502 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1503
GetTempsCountForTailCallFromJSFunction()1504 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1505
1506 namespace {
1507
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple float32 compare operations.
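// Comparisons against 0.0 (here and in the float64 variant below) use the
// immediate form of vcmp, which compares a register against the constant
// +0.0 directly; when the zero is on the left, the continuation is commuted
// so the constant can still go on the right.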
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  if (m.right().Is(0.0f)) {
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0f)) {
    cont->Commute();
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}


// Shared routine for multiple word compare operations.
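// The right (or, for non-commutative operators, commuted left) operand may
// be encoded as an ARM immediate or as a shifted register, so that, for
// example, a comparison against (y << 3) can be selected as a single
// "cmp x, y, lsl #3" rather than a separate shift plus a compare.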
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[5];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                      &input_count, &inputs[1])) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    inputs[0] = g.UseRegister(m.right().node());
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  } else if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count,
                             inputs, cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}


void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kArmCmp, cont);
}


// Shared routine for word comparisons against zero.
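// As long as the value is used only by this comparison (CanCover), the
// operation producing it can be folded into the flag-setting instruction
// itself: an addition compared against zero becomes "cmn", a subtraction
// "cmp", a bitwise and "tst", and an exclusive or "teq", so no separate
// result register is needed.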
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(selector, value, kArmCmn, cont);
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(selector, value, kArmCmp, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(selector, value, kArmTst, cont);
      case IrOpcode::kWord32Or:
        return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
      case IrOpcode::kWord32Xor:
        return VisitWordCompare(selector, value, kArmTeq, cont);
      case IrOpcode::kWord32Sar:
        return VisitShift(selector, value, TryMatchASR, cont);
      case IrOpcode::kWord32Shl:
        return VisitShift(selector, value, TryMatchLSL, cont);
      case IrOpcode::kWord32Shr:
        return VisitShift(selector, value, TryMatchLSR, cont);
      case IrOpcode::kWord32Ror:
        return VisitShift(selector, value, TryMatchROR, cont);
      default:
        break;
    }
    break;
  }

  // The continuation could not be combined with a compare; emit a "tst" of
  // the value against itself to set the flags for a comparison against zero.
  ArmOperandGenerator g(selector);
  InstructionCode const opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
                             value_operand, cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   value_operand);
  }
}

}  // namespace

void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

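// Choose between a table switch and a chain of conditional branches using a
// simple size/speed cost model (space plus three times the expected time, in
// abstract units). For example, 8 dense cases spanning a value range of 10
// give a table cost of (4 + 10) + 3 * 3 = 23 versus a lookup cost of
// (3 + 2 * 8) + 3 * 8 = 43, so a table switch is emitted.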
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  ArmOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      index_operand = g.TempRegister();
      Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
           index_operand, value_operand, g.TempImmediate(sw.min_value));
    }
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}


void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
  }
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}

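// When the overflow projection of an <Operation>WithOverflow node is used,
// the flags continuation defines that projection directly from the overflow
// flag; otherwise an empty continuation emits the plain arithmetic. kArmRsb
// serves as the reversed opcode for subtraction in case VisitBinop commutes
// the operands (reverse subtract).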
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kArmSub, kArmRsb, &cont);
}


void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kArmVmovLowU32F64, node);
}


void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kArmVmovHighU32F64, node);
}

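// A Float64InsertLowWord32 whose input is a covered Float64InsertHighWord32
// (or vice versa) supplies both halves of the double, so the pair is fused
// into a single kArmVmovF64U32U32 (a "vmov dN, rLo, rHi" that writes the
// whole register at once) instead of two dependent half-word moves.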
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  ArmOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    left = left->InputAt(1);
    Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
         g.UseRegister(left));
    return;
  }
  Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
       g.UseRegister(right));
}


void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  ArmOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    left = left->InputAt(1);
    Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
         g.UseRegister(right));
    return;
  }
  Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
       g.UseRegister(right));
}

void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}

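// The base, index and value of an atomic store are kept in distinct registers
// (UseUniqueRegister) because the code generator may expand the store into a
// multi-instruction sequence (e.g. with memory barriers) during which none of
// the inputs may be clobbered or aliased.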
void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  AddressingMode addressing_mode = kMode_Offset_RR;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  Emit(code, 0, nullptr, input_count, inputs);
}

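// The optional machine operators are gated on CPU features: SUDIV provides
// hardware sdiv/udiv, ARMv7 provides rbit for Word32ReverseBits, and the
// ARMv8-only operators are backed by instructions such as the VRINT*
// rounding family and VMINNM/VMAXNM.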
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags;
  if (CpuFeatures::IsSupported(SUDIV)) {
    // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
    // but the fall-back implementation does not.
    flags |= MachineOperatorBuilder::kInt32DivIsSafe |
             MachineOperatorBuilder::kUint32DivIsSafe;
  }
  if (CpuFeatures::IsSupported(ARMv7)) {
    flags |= MachineOperatorBuilder::kWord32ReverseBits;
  }
  if (CpuFeatures::IsSupported(ARMv8)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTiesAway |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven |
             MachineOperatorBuilder::kFloat32Min |
             MachineOperatorBuilder::kFloat32Max |
             MachineOperatorBuilder::kFloat64Min |
             MachineOperatorBuilder::kFloat64Max |
             MachineOperatorBuilder::kFloat32Neg |
             MachineOperatorBuilder::kFloat64Neg;
  }
  return flags;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8