// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"
#include "test/unittests/compiler/instruction-sequence-unittest.h"

namespace v8 {
namespace internal {
namespace compiler {


namespace {

// We can't just use the size of the moves collection, because redundant and
// eliminated moves need to be discounted.
int GetMoveCount(const ParallelMove& moves) {
  int move_count = 0;
  for (auto move : moves) {
    if (move->IsEliminated() || move->IsRedundant()) continue;
    ++move_count;
  }
  return move_count;
}


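// Returns true if the allocated operand and the test operand agree on kind:
// both are registers, or both are stack slots.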
bool AreOperandsOfSameType(
    const AllocatedOperand& op,
    const InstructionSequenceTest::TestOperand& test_op) {
  bool test_op_is_reg =
      (test_op.type_ ==
           InstructionSequenceTest::TestOperandType::kFixedRegister ||
       test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);

  return (op.IsRegister() && test_op_is_reg) ||
         (op.IsStackSlot() && !test_op_is_reg);
}


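// Returns true if the allocated operand has the same kind and the same
// register code (or slot index) as the test operand; kNoValue on the test
// operand acts as a wildcard for the location.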
bool AllocatedOperandMatches(
    const AllocatedOperand& op,
    const InstructionSequenceTest::TestOperand& test_op) {
  return AreOperandsOfSameType(op, test_op) &&
         ((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
              test_op.value_ ||
          test_op.value_ == InstructionSequenceTest::kNoValue);
}


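// Counts the non-redundant moves in the parallel move at the given gap
// position of instruction instr_index, or returns 0 if there is none.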
int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
                         const InstructionSequence* sequence) {
  const ParallelMove* moves =
      sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
  if (moves == nullptr) return 0;
  return GetMoveCount(*moves);
}


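// Returns true if the parallel move at the given gap position contains a
// non-redundant move matching src -> dest.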
bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
                           const InstructionSequence* sequence,
                           const InstructionSequenceTest::TestOperand& src,
                           const InstructionSequenceTest::TestOperand& dest) {
  const ParallelMove* moves =
      sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
  EXPECT_NE(nullptr, moves);

  bool found_match = false;
  for (auto move : *moves) {
    if (move->IsEliminated() || move->IsRedundant()) continue;
    if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
        AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
                                dest)) {
      found_match = true;
      break;
    }
  }
  return found_match;
}

}  // namespace


class RegisterAllocatorTest : public InstructionSequenceTest {
 public:
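  // Wires up the control flow of the blocks built by the test body and runs
  // the register allocator on the resulting instruction sequence.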
  void Allocate() {
    WireBlocks();
    Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
  }
};


TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
  // return p0 + p1;
  StartBlock();
  auto a_reg = Parameter();
  auto b_reg = Parameter();
  auto c_reg = EmitOI(Reg(1), Reg(a_reg, 1), Reg(b_reg, 0));
  Return(c_reg);
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, SimpleLoop) {
  // i = K;
  // while (true) { i++; }
  StartBlock();
  auto i_reg = DefineConstant();
  EndBlock();

  {
    StartLoop(1);

    StartBlock();
    auto phi = Phi(i_reg, 2);
    auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
    SetInput(phi, 1, ipp);
    EndBlock(Jump(0));

    EndLoop();
  }

  Allocate();
}


TEST_F(RegisterAllocatorTest, SimpleBranch) {
  // return i ? K1 : K2
  StartBlock();
  auto i = DefineConstant();
  EndBlock(Branch(Reg(i), 1, 2));

  StartBlock();
  Return(DefineConstant());
  EndBlock(Last());

  StartBlock();
  Return(DefineConstant());
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, SimpleDiamond) {
  // return p0 ? p0 : p0
  StartBlock();
  auto param = Parameter();
  EndBlock(Branch(Reg(param), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  StartBlock();
  Return(param);
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
  // return i ? K1 : K2
  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  auto t_val = DefineConstant();
  EndBlock(Jump(2));

  StartBlock();
  auto f_val = DefineConstant();
  EndBlock(Jump(1));

  StartBlock();
  Return(Reg(Phi(t_val, f_val)));
  EndBlock();

  Allocate();
}


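// Merges twice as many phis as there are registers, so not all of the merged
// values can be kept in registers at the join block.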
TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
  const int kPhis = kDefaultNRegs * 2;

  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  VReg t_vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    t_vals[i] = DefineConstant();
  }
  EndBlock(Jump(2));

  StartBlock();
  VReg f_vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    f_vals[i] = DefineConstant();
  }
  EndBlock(Jump(1));

  StartBlock();
  TestOperand merged[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    merged[i] = Use(Phi(t_vals[i], f_vals[i]));
  }
  Return(EmitCall(Slot(-1), kPhis, merged));
  EndBlock();

  Allocate();
}


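// Every phi merges the same value on both incoming paths, so all of them are
// redundant and should not require moves of their own.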
TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
  const int kPhis = kDefaultNRegs * 2;

  // First diamond.
  StartBlock();
  VReg vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    vals[i] = Parameter(Slot(-1 - i));
  }
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  // Second diamond.
  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  StartBlock();
  TestOperand merged[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    merged[i] = Use(Phi(vals[i], vals[i]));
  }
  Return(EmitCall(Reg(0), kPhis, merged));
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
  const size_t kNumRegs = 3;
  const size_t kParams = kNumRegs + 1;
  // Override the number of registers.
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();
  auto constant = DefineConstant();
  VReg parameters[kParams];
  for (size_t i = 0; i < arraysize(parameters); ++i) {
    parameters[i] = DefineConstant();
  }
  EndBlock();

  PhiInstruction* phis[kParams];
  {
    StartLoop(2);

    // Loop header.
    StartBlock();

    for (size_t i = 0; i < arraysize(parameters); ++i) {
      phis[i] = Phi(parameters[i], 2);
    }

    // Perform some computations, roughly phi[i] += constant.
    for (size_t i = 0; i < arraysize(parameters); ++i) {
      auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
      SetInput(phis[i], 1, result);
    }

    EndBlock(Branch(Reg(DefineConstant()), 1, 2));

    // Jump back to the loop header.
    StartBlock();
    EndBlock(Jump(-1));

    EndLoop();
  }

  StartBlock();
  Return(DefineConstant());
  EndBlock();

  Allocate();
}


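// Both phi inputs are fixed to register 0 and a call precedes the use, so the
// phi value is expected to be spilled across the call.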
TEST_F(RegisterAllocatorTest, SpillPhi) {
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto left = Define(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  auto right = Define(Reg(0));
  EndBlock();

  StartBlock();
  auto phi = Phi(left, right);
  EmitCall(Slot(-1));
  Return(Reg(phi));
  EndBlock();

  Allocate();
}


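// Passes each constant to a call twice, once fixed to a register and once
// fixed to a stack slot, exercising constant-materializing gap moves.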
TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
  StartBlock();
  VReg constants[kDefaultNRegs];
  for (size_t i = 0; i < arraysize(constants); ++i) {
    constants[i] = DefineConstant();
  }
  TestOperand call_ops[kDefaultNRegs * 2];
  for (int i = 0; i < kDefaultNRegs; ++i) {
    call_ops[i] = Reg(constants[i], i);
  }
  for (int i = 0; i < kDefaultNRegs; ++i) {
    call_ops[i + kDefaultNRegs] = Slot(constants[i], i);
  }
  EmitCall(Slot(-1), arraysize(call_ops), call_ops);
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
  const int kNumRegs = 6;
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();

  // Stack parameters/spilled values.
  auto p_0 = Define(Slot(-1));
  auto p_1 = Define(Slot(-2));

  // Fill registers.
  VReg values[kNumRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  // values[0] will be split in the second half of this instruction.
  // Models Intel mod instructions.
  EmitOI(Reg(0), Reg(p_0, 1), UniqueReg(p_1));
  EmitI(Reg(values[0], 0));
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
  const int kNumRegs = 6;
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();

  // Stack parameters/spilled values.
  auto p_0 = Define(Slot(-1));
  auto p_1 = Define(Slot(-2));

  // Fill registers.
  VReg values[kNumRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  // values[0] and values[1] will be split in the second half of this
  // instruction.
  EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
  EmitI(Reg(values[0]), Reg(values[1]));
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
  // Outer diamond.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 5));

  // Diamond 1
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto ll = Define(Reg());
  EndBlock(Jump(2));

  StartBlock();
  auto lr = Define(Reg());
  EndBlock();

  StartBlock();
  auto l_phi = Phi(ll, lr);
  EndBlock(Jump(5));

  // Diamond 2
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto rl = Define(Reg());
  EndBlock(Jump(2));

  StartBlock();
  auto rr = Define(Reg());
  EndBlock();

  StartBlock();
  auto r_phi = Phi(rl, rr);
  EndBlock();

  // Outer diamond merge.
  StartBlock();
  auto phi = Phi(l_phi, r_phi);
  Return(Reg(phi));
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
  // Outer diamond.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 5));

  // Diamond 1
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto ll = Define(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  auto lr = Define(Reg(1));
  EndBlock();

  StartBlock();
  auto l_phi = Phi(ll, lr);
  EndBlock(Jump(5));

  // Diamond 2
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto rl = Define(Reg(2));
  EndBlock(Jump(2));

  StartBlock();
  auto rr = Define(Reg(3));
  EndBlock();

  StartBlock();
  auto r_phi = Phi(rl, rr);
  EndBlock();

  // Outer diamond merge.
  StartBlock();
  auto phi = Phi(l_phi, r_phi);
  Return(Reg(phi));
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
  StartBlock();

  // Fill registers.
  VReg values[kDefaultNRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    if (i == 0 || i == 1) continue;  // Leave a hole for c_1 to take.
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  auto c_0 = DefineConstant();
  auto c_1 = DefineConstant();

  EmitOI(Reg(1), Reg(c_0, 0), UniqueReg(c_1));

  // Use previous values to force c_1 to split before the previous instruction.
  for (size_t i = 0; i < arraysize(values); ++i) {
    if (i == 0 || i == 1) continue;
    EmitI(Reg(values[i], static_cast<int>(i)));
  }

  EndBlock(Last());

  Allocate();
}


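// Regression test: the parameter is passed to the call both as an
// unconstrained unique operand and fixed to the register it already occupies.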
TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
  StartBlock();
  auto p_0 = Parameter(Reg(1));
  EmitCall(Slot(-2), Unique(p_0), Reg(p_0, 1));
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
  StartBlock();
  // Fill registers.
  VReg values[kDefaultNRegs];
  for (size_t i = arraysize(values); i > 0; --i) {
    values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
  }
  auto c = DefineConstant();
  auto to_spill = Define(Reg());
  EndBlock(Jump(1));

  {
    StartLoop(1);

    StartBlock();
    // Create a use for c in the second half of the previous block's last gap.
    Phi(c);
    for (size_t i = arraysize(values); i > 0; --i) {
      Phi(values[i - 1]);
    }
    EndBlock(Jump(1));

    EndLoop();
  }

  StartBlock();
  // Force c to split within to_spill's definition.
  EmitI(Reg(c));
  EmitI(Reg(to_spill));
  EndBlock(Last());

  Allocate();
}


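// x is live across a call that occurs only on the left branch, where occupy
// then claims x's fixed register.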
TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
  StartBlock();
  auto x = EmitOI(Reg(0));
  EndBlock(Branch(Reg(x), 1, 2));

  StartBlock();
  EmitCall(Slot(-1));
  auto occupy = EmitOI(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(FallThrough());

  StartBlock();
  Use(occupy);
  Return(Reg(x));
  EndBlock();
  Allocate();
}


TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
  StartBlock();
  auto x = EmitOI(Reg(0));
  EndBlock(Branch(Reg(x), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EmitCall(Slot(-1));
  auto occupy = EmitOI(Reg(0));
  EndBlock(FallThrough());

  StartBlock();
  Use(occupy);
  Return(Reg(x));
  EndBlock();
  Allocate();
}


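// var is used only from a stack slot inside the deferred block B2. Depending
// on FLAG_turbo_preprocess_ranges, the spill move is expected either at the
// call in the deferred block or at var's definition.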
TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
  StartBlock();  // B0
  auto var = EmitOI(Reg(0));
  EndBlock(Branch(Reg(var), 1, 2));

  StartBlock();  // B1
  EndBlock(Jump(2));

  StartBlock(true);  // B2
  EmitCall(Slot(-1), Slot(var));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var, 0));
  EndBlock();

  Allocate();

  const int var_def_index = 1;
  const int call_index = 3;
  int expect_no_moves =
      FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
  int expect_spill_move =
      FLAG_turbo_preprocess_ranges ? call_index : var_def_index;

  // We should have no parallel moves at the "expect_no_moves" position.
  EXPECT_EQ(
      0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));

  // The spill should be performed at the "expect_spill_move" position.
  EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
                                    sequence(), Reg(0), Slot(0)));
}


TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
  if (!FLAG_turbo_preprocess_ranges) return;

  StartBlock();  // B0
  auto var1 = EmitOI(Reg(0));
  auto var2 = EmitOI(Reg(1));
  auto var3 = EmitOI(Reg(2));
  EndBlock(Branch(Reg(var1, 0), 1, 2));

  StartBlock(true);  // B1
  EmitCall(Slot(-2), Slot(var1));
  EndBlock(Jump(2));

  StartBlock(true);  // B2
  EmitCall(Slot(-1), Slot(var2));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var3, 2));
  EndBlock();

  const int def_of_v2 = 3;
  const int call_in_b1 = 4;
  const int call_in_b2 = 6;
  const int end_of_b1 = 5;
  const int end_of_b2 = 7;
  const int start_of_b3 = 8;

  Allocate();
  // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
  // so only var3 is spilled in deferred blocks. The greedy allocator avoids
  // spilling var1 and var2; expand the test once greedy is back online with
  // this facility.
  const int var3_reg = 2;
  const int var3_slot = 2;

  EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
                                     Reg(var3_reg), Slot()));
  EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
                                    Reg(var3_reg), Slot(var3_slot)));
  EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
                                    Slot(var3_slot), Reg()));

  EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
                                    Reg(var3_reg), Slot(var3_slot)));
  EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
                                    Slot(var3_slot), Reg()));

  EXPECT_EQ(0,
            GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
}


namespace {

enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };

const ParameterType kParameterTypes[] = {
    ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
    ParameterType::kFixedRegister};

class SlotConstraintTest : public RegisterAllocatorTest,
                           public ::testing::WithParamInterface<
                               ::testing::tuple<ParameterType, int>> {
 public:
  static const int kMaxVariant = 5;

 protected:
  ParameterType parameter_type() const {
    return ::testing::get<0>(B::GetParam());
  }
  int variant() const { return ::testing::get<1>(B::GetParam()); }

 private:
  typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
};

}  // namespace


#if GTEST_HAS_COMBINE

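// Each variant emits a different mix of register and slot uses of p_0,
// checking that slot constraints are satisfiable regardless of where the
// parameter initially lives.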
TEST_P(SlotConstraintTest, SlotConstraint) {
  StartBlock();
  VReg p_0;
  switch (parameter_type()) {
    case ParameterType::kFixedSlot:
      p_0 = Parameter(Slot(-1));
      break;
    case ParameterType::kSlot:
      // Unconstrained slot (kFixedSlot above pins the exact slot index).
      p_0 = Parameter(Slot());
      break;
    case ParameterType::kRegister:
      p_0 = Parameter(Reg());
      break;
    case ParameterType::kFixedRegister:
      p_0 = Parameter(Reg(1));
      break;
  }
  switch (variant()) {
    case 0:
      EmitI(Slot(p_0), Reg(p_0));
      break;
    case 1:
      EmitI(Slot(p_0));
      break;
    case 2:
      EmitI(Reg(p_0));
      EmitI(Slot(p_0));
      break;
    case 3:
      EmitI(Slot(p_0));
      EmitI(Reg(p_0));
      break;
    case 4:
      EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
      break;
    default:
      UNREACHABLE();
      break;
  }
  EndBlock(Last());

  Allocate();
}


INSTANTIATE_TEST_CASE_P(
    RegisterAllocatorTest, SlotConstraintTest,
    ::testing::Combine(::testing::ValuesIn(kParameterTypes),
                       ::testing::Range(0, SlotConstraintTest::kMaxVariant)));

#endif  // GTEST_HAS_COMBINE

}  // namespace compiler
}  // namespace internal
}  // namespace v8