1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_COMPILER_BACKEND_INSTRUCTION_H_
6 #define V8_COMPILER_BACKEND_INSTRUCTION_H_
7
8 #include <iosfwd>
9 #include <map>
10
11 #include "src/base/compiler-specific.h"
12 #include "src/base/numbers/double.h"
13 #include "src/codegen/external-reference.h"
14 #include "src/codegen/register.h"
15 #include "src/codegen/source-position.h"
16 #include "src/common/globals.h"
17 #include "src/compiler/backend/instruction-codes.h"
18 #include "src/compiler/common-operator.h"
19 #include "src/compiler/feedback-source.h"
20 #include "src/compiler/frame.h"
21 #include "src/compiler/opcodes.h"
22 #include "src/zone/zone-allocator.h"
23
24 namespace v8 {
25 namespace internal {
26
27 class RegisterConfiguration;
28
29 namespace compiler {
30
31 class Schedule;
32 class SourcePositionTable;
33
34 #if defined(V8_CC_MSVC) && defined(V8_TARGET_ARCH_IA32)
35 // MSVC on x86 has issues with ALIGNAS(8) on InstructionOperand, but does
36 // align the object to 8 bytes anyway (covered by a static assert below).
37 // See crbug.com/v8/10796
38 #define INSTRUCTION_OPERAND_ALIGN
39 #else
40 #define INSTRUCTION_OPERAND_ALIGN ALIGNAS(8)
41 #endif
42
// An operand of an Instruction, encoded in a single 64-bit word. The low
// three bits hold the Kind tag (KindField below); the meaning of the
// remaining bits is defined by the concrete subclass (UnallocatedOperand,
// ConstantOperand, ImmediateOperand, PendingOperand, LocationOperand).
// Operands have value semantics and compare by raw bit pattern, except
// PENDING operands, which compare by identity.
class V8_EXPORT_PRIVATE INSTRUCTION_OPERAND_ALIGN InstructionOperand {
 public:
  static const int kInvalidVirtualRegister = -1;

  enum Kind {
    INVALID,
    UNALLOCATED,
    CONSTANT,
    IMMEDIATE,
    PENDING,
    // Location operand kinds.
    ALLOCATED,
    FIRST_LOCATION_OPERAND_KIND = ALLOCATED
    // Location operand kinds must be last.
  };

  // Default-constructed operands are INVALID placeholders.
  InstructionOperand() : InstructionOperand(INVALID) {}

  Kind kind() const { return KindField::decode(value_); }

#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
  bool Is##name() const { return kind() == type; }
  INSTRUCTION_OPERAND_PREDICATE(Invalid, INVALID)
  // UnallocatedOperands are place-holder operands created before register
  // allocation. They later are assigned registers and become AllocatedOperands.
  INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
  // Constant operands participate in register allocation. They are allocated to
  // registers but have a special "spilling" behavior. When a ConstantOperand
  // value must be rematerialized, it is loaded from an immediate constant
  // rather from an unspilled slot.
  INSTRUCTION_OPERAND_PREDICATE(Constant, CONSTANT)
  // ImmediateOperands do not participate in register allocation and are only
  // embedded directly in instructions, e.g. small integers and on some
  // platforms Objects.
  INSTRUCTION_OPERAND_PREDICATE(Immediate, IMMEDIATE)
  // PendingOperands are pending allocation during register allocation and
  // shouldn't be seen elsewhere. They chain together multiple operators that
  // will be replaced together with the same value when finalized.
  INSTRUCTION_OPERAND_PREDICATE(Pending, PENDING)
  // AllocatedOperands are registers or stack slots that are assigned by the
  // register allocator and are always associated with a virtual register.
  INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef INSTRUCTION_OPERAND_PREDICATE

  // Finer-grained location queries; defined inline after LocationOperand
  // below because they need its accessors.
  inline bool IsAnyLocationOperand() const;
  inline bool IsLocationOperand() const;
  inline bool IsFPLocationOperand() const;
  inline bool IsAnyRegister() const;
  inline bool IsRegister() const;
  inline bool IsFPRegister() const;
  inline bool IsFloatRegister() const;
  inline bool IsDoubleRegister() const;
  inline bool IsSimd128Register() const;
  inline bool IsAnyStackSlot() const;
  inline bool IsStackSlot() const;
  inline bool IsFPStackSlot() const;
  inline bool IsFloatStackSlot() const;
  inline bool IsDoubleStackSlot() const;
  inline bool IsSimd128StackSlot() const;

  // Copies an operand of a concrete subclass into zone-allocated storage.
  template <typename SubKindOperand>
  static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
    return zone->New<SubKindOperand>(op);
  }

  static void ReplaceWith(InstructionOperand* dest,
                          const InstructionOperand* src) {
    *dest = *src;
  }

  bool Equals(const InstructionOperand& that) const {
    if (IsPending()) {
      // Pending operands are only equal if they are the same operand.
      return this == &that;
    }
    return this->value_ == that.value_;
  }

  // Arbitrary-but-stable ordering on the raw encodings (used by maps/sets).
  bool Compare(const InstructionOperand& that) const {
    return this->value_ < that.value_;
  }

  // Equality modulo machine representation; see GetCanonicalizedValue().
  bool EqualsCanonicalized(const InstructionOperand& that) const {
    if (IsPending()) {
      // Pending operands can't be canonicalized, so just compare for equality.
      return Equals(that);
    }
    return this->GetCanonicalizedValue() == that.GetCanonicalizedValue();
  }

  bool CompareCanonicalized(const InstructionOperand& that) const {
    DCHECK(!IsPending());
    return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
  }

  bool InterferesWith(const InstructionOperand& other) const;

  // APIs to aid debugging. For general-stream APIs, use operator<<.
  void Print() const;

  bool operator==(InstructionOperand& other) const { return Equals(other); }
  bool operator!=(InstructionOperand& other) const { return !Equals(other); }

 protected:
  explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}

  // Canonical form backing EqualsCanonicalized/CompareCanonicalized;
  // defined inline below.
  inline uint64_t GetCanonicalizedValue() const;

  // Low 3 bits of value_ hold the Kind tag; subclasses layer their own
  // fields on top of this one.
  using KindField = base::BitField64<Kind, 0, 3>;

  uint64_t value_;
};
155
156 using InstructionOperandVector = ZoneVector<InstructionOperand>;
157
158 std::ostream& operator<<(std::ostream&, const InstructionOperand&);
159
// Generates the standard trio of checked down-casts (mutable pointer, const
// pointer, and by-value) from InstructionOperand to a concrete subclass.
// Each cast DCHECKs that the operand's kind matches: casting an operand of
// the wrong kind is a bug in the caller.
#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind)      \
                                                                 \
  static OperandType* cast(InstructionOperand* op) {             \
    DCHECK_EQ(OperandKind, op->kind());                          \
    return static_cast<OperandType*>(op);                        \
  }                                                              \
                                                                 \
  static const OperandType* cast(const InstructionOperand* op) { \
    DCHECK_EQ(OperandKind, op->kind());                          \
    return static_cast<const OperandType*>(op);                  \
  }                                                              \
                                                                 \
  static OperandType cast(const InstructionOperand& op) {        \
    DCHECK_EQ(OperandKind, op.kind());                           \
    return *static_cast<const OperandType*>(&op);                \
  }
176
// Pre-register-allocation placeholder describing the constraints ("policy")
// the register allocator must satisfy for one use or definition of a virtual
// register. See the bit-layout comment above the field definitions below.
class UnallocatedOperand final : public InstructionOperand {
 public:
  enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };

  enum ExtendedPolicy {
    NONE,
    REGISTER_OR_SLOT,
    REGISTER_OR_SLOT_OR_CONSTANT,
    FIXED_REGISTER,
    FIXED_FP_REGISTER,
    MUST_HAVE_REGISTER,
    MUST_HAVE_SLOT,
    SAME_AS_INPUT
  };

  // Lifetime of operand inside the instruction.
  enum Lifetime {
    // USED_AT_START operand is guaranteed to be live only at instruction start.
    // The register allocator is free to assign the same register to some other
    // operand used inside instruction (i.e. temporary or output).
    USED_AT_START,

    // USED_AT_END operand is treated as live until the end of instruction.
    // This means that register allocator will not reuse its register for any
    // other operand inside instruction.
    USED_AT_END
  };

  // Extended policy with the default USED_AT_END lifetime.
  UnallocatedOperand(ExtendedPolicy policy, int virtual_register)
      : UnallocatedOperand(virtual_register) {
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(policy);
    value_ |= LifetimeField::encode(USED_AT_END);
  }

  // SAME_AS_INPUT: this operand must be allocated to the same location as
  // the instruction input at |input_index|.
  UnallocatedOperand(int virtual_register, int input_index)
      : UnallocatedOperand(virtual_register) {
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(SAME_AS_INPUT);
    value_ |= LifetimeField::encode(USED_AT_END);
    value_ |= InputIndexField::encode(input_index);
  }

  // FIXED_SLOT: |index| is a (possibly negative) stack-slot index. It is
  // stored with a manual shift rather than BitField::encode so that the
  // sign-extending decode in fixed_slot_index() round-trips negative values;
  // the trailing DCHECK verifies the round-trip.
  UnallocatedOperand(BasicPolicy policy, int index, int virtual_register)
      : UnallocatedOperand(virtual_register) {
    DCHECK(policy == FIXED_SLOT);
    value_ |= BasicPolicyField::encode(policy);
    value_ |= static_cast<uint64_t>(static_cast<int64_t>(index))
              << FixedSlotIndexField::kShift;
    DCHECK(this->fixed_slot_index() == index);
  }

  // FIXED_REGISTER / FIXED_FP_REGISTER: |index| is the register code.
  UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
      : UnallocatedOperand(virtual_register) {
    DCHECK(policy == FIXED_REGISTER || policy == FIXED_FP_REGISTER);
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(policy);
    value_ |= LifetimeField::encode(USED_AT_END);
    value_ |= FixedRegisterField::encode(index);
  }

  // Extended policy with an explicit lifetime.
  UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime,
                     int virtual_register)
      : UnallocatedOperand(virtual_register) {
    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
    value_ |= ExtendedPolicyField::encode(policy);
    value_ |= LifetimeField::encode(lifetime);
  }

  // Fixed register |reg_id| with an additional secondary storage slot
  // |slot_id|.
  UnallocatedOperand(int reg_id, int slot_id, int virtual_register)
      : UnallocatedOperand(FIXED_REGISTER, reg_id, virtual_register) {
    value_ |= HasSecondaryStorageField::encode(true);
    value_ |= SecondaryStorageField::encode(slot_id);
  }

  // Copy of |other| with only the virtual register replaced.
  UnallocatedOperand(const UnallocatedOperand& other, int virtual_register) {
    DCHECK_NE(kInvalidVirtualRegister, virtual_register);
    value_ = VirtualRegisterField::update(
        other.value_, static_cast<uint32_t>(virtual_register));
  }

  // Predicates for the operand policy.
  bool HasRegisterOrSlotPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == REGISTER_OR_SLOT;
  }
  bool HasRegisterOrSlotOrConstantPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == REGISTER_OR_SLOT_OR_CONSTANT;
  }
  // Relies on short-circuiting: if basic_policy() is not FIXED_SLOT it is
  // necessarily EXTENDED_POLICY, so reading extended_policy() is safe.
  bool HasFixedPolicy() const {
    return basic_policy() == FIXED_SLOT ||
           extended_policy() == FIXED_REGISTER ||
           extended_policy() == FIXED_FP_REGISTER;
  }
  bool HasRegisterPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == MUST_HAVE_REGISTER;
  }
  bool HasSlotPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == MUST_HAVE_SLOT;
  }
  bool HasSameAsInputPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == SAME_AS_INPUT;
  }
  bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
  bool HasFixedRegisterPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == FIXED_REGISTER;
  }
  bool HasFixedFPRegisterPolicy() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == FIXED_FP_REGISTER;
  }
  bool HasSecondaryStorage() const {
    return basic_policy() == EXTENDED_POLICY &&
           extended_policy() == FIXED_REGISTER &&
           HasSecondaryStorageField::decode(value_);
  }
  int GetSecondaryStorage() const {
    DCHECK(HasSecondaryStorage());
    return SecondaryStorageField::decode(value_);
  }

  // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
  BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }

  // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
  ExtendedPolicy extended_policy() const {
    DCHECK(basic_policy() == EXTENDED_POLICY);
    return ExtendedPolicyField::decode(value_);
  }

  // [input_index]: Only valid under SAME_AS_INPUT.
  int input_index() const {
    DCHECK(HasSameAsInputPolicy());
    return InputIndexField::decode(value_);
  }

  // [fixed_slot_index]: Only for FIXED_SLOT. Decoded with an arithmetic
  // right shift to sign-extend negative slot indices.
  int fixed_slot_index() const {
    DCHECK(HasFixedSlotPolicy());
    return static_cast<int>(static_cast<int64_t>(value_) >>
                            FixedSlotIndexField::kShift);
  }

  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_FP_REGISTER.
  int fixed_register_index() const {
    DCHECK(HasFixedRegisterPolicy() || HasFixedFPRegisterPolicy());
    return FixedRegisterField::decode(value_);
  }

  // [virtual_register]: The virtual register ID for this operand.
  int32_t virtual_register() const {
    return static_cast<int32_t>(VirtualRegisterField::decode(value_));
  }

  // [lifetime]: Only for non-FIXED_SLOT.
  bool IsUsedAtStart() const {
    return basic_policy() == EXTENDED_POLICY &&
           LifetimeField::decode(value_) == USED_AT_START;
  }

  INSTRUCTION_OPERAND_CASTS(UnallocatedOperand, UNALLOCATED)

  // The encoding used for UnallocatedOperand operands depends on the policy
  // that is
  // stored within the operand. The FIXED_SLOT policy uses a compact encoding
  // because it accommodates a larger pay-load.
  //
  // For FIXED_SLOT policy:
  //     +------------------------------------------------+
  //     |      slot_index   | 0 | virtual_register | 001 |
  //     +------------------------------------------------+
  //
  // For all other (extended) policies:
  //     +-----------------------------------------------------+
  //     |  reg_index  | L | PPP | 1 | virtual_register | 001  |
  //     +-----------------------------------------------------+
  //     L ... Lifetime
  //     P ... Policy
  //
  // The slot index is a signed value which requires us to decode it manually
  // instead of using the base::BitField utility class.

  STATIC_ASSERT(KindField::kSize == 3);

  using VirtualRegisterField = base::BitField64<uint32_t, 3, 32>;

  // base::BitFields for all unallocated operands.
  using BasicPolicyField = base::BitField64<BasicPolicy, 35, 1>;

  // BitFields specific to BasicPolicy::FIXED_SLOT.
  using FixedSlotIndexField = base::BitField64<int, 36, 28>;

  // BitFields specific to BasicPolicy::EXTENDED_POLICY.
  using ExtendedPolicyField = base::BitField64<ExtendedPolicy, 36, 3>;
  using LifetimeField = base::BitField64<Lifetime, 39, 1>;
  using HasSecondaryStorageField = base::BitField64<bool, 40, 1>;
  using FixedRegisterField = base::BitField64<int, 41, 6>;
  using SecondaryStorageField = base::BitField64<int, 47, 3>;
  using InputIndexField = base::BitField64<int, 50, 3>;

 private:
  // Common tail of all public constructors: tag as UNALLOCATED and store
  // the virtual register.
  explicit UnallocatedOperand(int virtual_register)
      : InstructionOperand(UNALLOCATED) {
    value_ |=
        VirtualRegisterField::encode(static_cast<uint32_t>(virtual_register));
  }
};
388
389 class ConstantOperand : public InstructionOperand {
390 public:
ConstantOperand(int virtual_register)391 explicit ConstantOperand(int virtual_register)
392 : InstructionOperand(CONSTANT) {
393 value_ |=
394 VirtualRegisterField::encode(static_cast<uint32_t>(virtual_register));
395 }
396
virtual_register()397 int32_t virtual_register() const {
398 return static_cast<int32_t>(VirtualRegisterField::decode(value_));
399 }
400
New(Zone * zone,int virtual_register)401 static ConstantOperand* New(Zone* zone, int virtual_register) {
402 return InstructionOperand::New(zone, ConstantOperand(virtual_register));
403 }
404
405 INSTRUCTION_OPERAND_CASTS(ConstantOperand, CONSTANT)
406
407 STATIC_ASSERT(KindField::kSize == 3);
408 using VirtualRegisterField = base::BitField64<uint32_t, 3, 32>;
409 };
410
// A value embedded directly in the instruction stream; never seen by the
// register allocator.
class ImmediateOperand : public InstructionOperand {
 public:
  // INLINE_* types carry the value in the operand itself; INDEXED_* types
  // carry an index (an RPO block number, or a slot in an immediates table).
  enum ImmediateType { INLINE_INT32, INLINE_INT64, INDEXED_RPO, INDEXED_IMM };

  explicit ImmediateOperand(ImmediateType type, int32_t value)
      : InstructionOperand(IMMEDIATE) {
    value_ |= TypeField::encode(type);
    // The payload is stored with a manual shift (not ValueField::encode) so
    // that the arithmetic right shifts in the accessors below sign-extend,
    // round-tripping negative values.
    value_ |= static_cast<uint64_t>(static_cast<int64_t>(value))
              << ValueField::kShift;
  }

  ImmediateType type() const { return TypeField::decode(value_); }

  int32_t inline_int32_value() const {
    DCHECK_EQ(INLINE_INT32, type());
    return static_cast<int64_t>(value_) >> ValueField::kShift;
  }

  // NOTE(review): the constructor payload is an int32_t, so only a
  // sign-extended 32-bit value can come back out here — confirm callers
  // never expect a full 64-bit payload.
  int64_t inline_int64_value() const {
    DCHECK_EQ(INLINE_INT64, type());
    return static_cast<int64_t>(value_) >> ValueField::kShift;
  }

  int32_t indexed_value() const {
    DCHECK(type() == INDEXED_IMM || type() == INDEXED_RPO);
    return static_cast<int64_t>(value_) >> ValueField::kShift;
  }

  static ImmediateOperand* New(Zone* zone, ImmediateType type, int32_t value) {
    return InstructionOperand::New(zone, ImmediateOperand(type, value));
  }

  INSTRUCTION_OPERAND_CASTS(ImmediateOperand, IMMEDIATE)

  STATIC_ASSERT(KindField::kSize == 3);
  using TypeField = base::BitField64<ImmediateType, 3, 2>;
  // Payload occupies the upper 32 bits; decoded via sign-extending shift.
  using ValueField = base::BitField64<int32_t, 32, 32>;
};
449
// Link in an intrusive singly-linked chain of operands that will all be
// replaced with the same value when register allocation finalizes them.
// The "next" pointer is stored, shifted, inside value_ itself.
class PendingOperand : public InstructionOperand {
 public:
  PendingOperand() : InstructionOperand(PENDING) {}
  explicit PendingOperand(PendingOperand* next_operand) : PendingOperand() {
    set_next(next_operand);
  }

  // Stores |next| in the payload bits. May only be called while next() is
  // still null (the field is OR-ed in, not overwritten).
  void set_next(PendingOperand* next) {
    DCHECK_NULL(this->next());
    uintptr_t shifted_value =
        reinterpret_cast<uintptr_t>(next) >> kPointerShift;
    // Verify that no address bits were lost by the shift.
    DCHECK_EQ(reinterpret_cast<uintptr_t>(next),
              shifted_value << kPointerShift);
    value_ |= NextOperandField::encode(static_cast<uint64_t>(shifted_value));
  }

  PendingOperand* next() const {
    uintptr_t shifted_value =
        static_cast<uint64_t>(NextOperandField::decode(value_));
    return reinterpret_cast<PendingOperand*>(shifted_value << kPointerShift);
  }

  static PendingOperand* New(Zone* zone, PendingOperand* previous_operand) {
    return InstructionOperand::New(zone, PendingOperand(previous_operand));
  }

  INSTRUCTION_OPERAND_CASTS(PendingOperand, PENDING)

 private:
  // Operands are uint64_t values and so are aligned to 8 byte boundaries,
  // therefore we can shift off the bottom three zeros without losing data.
  static const uint64_t kPointerShift = 3;
  STATIC_ASSERT(alignof(InstructionOperand) >= (1 << kPointerShift));

  STATIC_ASSERT(KindField::kSize == 3);
  using NextOperandField = base::BitField64<uint64_t, 3, 61>;
};
487
// Base for operands that name a concrete location — a register or a stack
// slot — together with the machine representation of the value stored there.
class LocationOperand : public InstructionOperand {
 public:
  enum LocationKind { REGISTER, STACK_SLOT };

  LocationOperand(InstructionOperand::Kind operand_kind,
                  LocationOperand::LocationKind location_kind,
                  MachineRepresentation rep, int index)
      : InstructionOperand(operand_kind) {
    DCHECK_IMPLIES(location_kind == REGISTER, index >= 0);
    DCHECK(IsSupportedRepresentation(rep));
    value_ |= LocationKindField::encode(location_kind);
    value_ |= RepresentationField::encode(rep);
    // The index is signed (stack-slot indices may be negative), so it is
    // stored with a manual shift and read back with a sign-extending shift.
    value_ |= static_cast<uint64_t>(static_cast<int64_t>(index))
              << IndexField::kShift;
  }

  // Stack-slot index; only valid for stack-slot locations.
  int index() const {
    DCHECK(IsStackSlot() || IsFPStackSlot());
    return static_cast<int64_t>(value_) >> IndexField::kShift;
  }

  // Register code; only valid for register locations.
  int register_code() const {
    DCHECK(IsRegister() || IsFPRegister());
    return static_cast<int64_t>(value_) >> IndexField::kShift;
  }

  Register GetRegister() const {
    DCHECK(IsRegister());
    return Register::from_code(register_code());
  }

  FloatRegister GetFloatRegister() const {
    DCHECK(IsFloatRegister());
    return FloatRegister::from_code(register_code());
  }

  DoubleRegister GetDoubleRegister() const {
    // On platforms where FloatRegister, DoubleRegister, and Simd128Register
    // are all the same type, it's convenient to treat everything as a
    // DoubleRegister, so be lax about type checking here.
    DCHECK(IsFPRegister());
    return DoubleRegister::from_code(register_code());
  }

  Simd128Register GetSimd128Register() const {
    DCHECK(IsSimd128Register());
    return Simd128Register::from_code(register_code());
  }

  LocationKind location_kind() const {
    return LocationKindField::decode(value_);
  }

  MachineRepresentation representation() const {
    return RepresentationField::decode(value_);
  }

  // Representations a location operand may legally carry. kMapWord falls
  // through to UNREACHABLE().
  static bool IsSupportedRepresentation(MachineRepresentation rep) {
    switch (rep) {
      case MachineRepresentation::kWord32:
      case MachineRepresentation::kWord64:
      case MachineRepresentation::kFloat32:
      case MachineRepresentation::kFloat64:
      case MachineRepresentation::kSimd128:
      case MachineRepresentation::kTaggedSigned:
      case MachineRepresentation::kTaggedPointer:
      case MachineRepresentation::kTagged:
      case MachineRepresentation::kCompressedPointer:
      case MachineRepresentation::kCompressed:
      case MachineRepresentation::kSandboxedPointer:
        return true;
      case MachineRepresentation::kBit:
      case MachineRepresentation::kWord8:
      case MachineRepresentation::kWord16:
      case MachineRepresentation::kNone:
        return false;
      case MachineRepresentation::kMapWord:
        break;
    }
    UNREACHABLE();
  }

  // Return true if the locations can be moved to one another.
  bool IsCompatible(LocationOperand* op);

  // Hand-written casts (instead of INSTRUCTION_OPERAND_CASTS) because any
  // location operand kind — not one specific Kind value — may be cast here.
  static LocationOperand* cast(InstructionOperand* op) {
    DCHECK(op->IsAnyLocationOperand());
    return static_cast<LocationOperand*>(op);
  }

  static const LocationOperand* cast(const InstructionOperand* op) {
    DCHECK(op->IsAnyLocationOperand());
    return static_cast<const LocationOperand*>(op);
  }

  static LocationOperand cast(const InstructionOperand& op) {
    DCHECK(op.IsAnyLocationOperand());
    return *static_cast<const LocationOperand*>(&op);
  }

  STATIC_ASSERT(KindField::kSize == 3);
  using LocationKindField = base::BitField64<LocationKind, 3, 2>;
  using RepresentationField = base::BitField64<MachineRepresentation, 5, 8>;
  using IndexField = base::BitField64<int32_t, 35, 29>;
};
593
594 class AllocatedOperand : public LocationOperand {
595 public:
AllocatedOperand(LocationKind kind,MachineRepresentation rep,int index)596 AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
597 : LocationOperand(ALLOCATED, kind, rep, index) {}
598
New(Zone * zone,LocationKind kind,MachineRepresentation rep,int index)599 static AllocatedOperand* New(Zone* zone, LocationKind kind,
600 MachineRepresentation rep, int index) {
601 return InstructionOperand::New(zone, AllocatedOperand(kind, rep, index));
602 }
603
604 INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED)
605 };
606
607 #undef INSTRUCTION_OPERAND_CASTS
608
IsAnyLocationOperand()609 bool InstructionOperand::IsAnyLocationOperand() const {
610 return this->kind() >= FIRST_LOCATION_OPERAND_KIND;
611 }
612
IsLocationOperand()613 bool InstructionOperand::IsLocationOperand() const {
614 return IsAnyLocationOperand() &&
615 !IsFloatingPoint(LocationOperand::cast(this)->representation());
616 }
617
IsFPLocationOperand()618 bool InstructionOperand::IsFPLocationOperand() const {
619 return IsAnyLocationOperand() &&
620 IsFloatingPoint(LocationOperand::cast(this)->representation());
621 }
622
IsAnyRegister()623 bool InstructionOperand::IsAnyRegister() const {
624 return IsAnyLocationOperand() &&
625 LocationOperand::cast(this)->location_kind() ==
626 LocationOperand::REGISTER;
627 }
628
IsRegister()629 bool InstructionOperand::IsRegister() const {
630 return IsAnyRegister() &&
631 !IsFloatingPoint(LocationOperand::cast(this)->representation());
632 }
633
IsFPRegister()634 bool InstructionOperand::IsFPRegister() const {
635 return IsAnyRegister() &&
636 IsFloatingPoint(LocationOperand::cast(this)->representation());
637 }
638
IsFloatRegister()639 bool InstructionOperand::IsFloatRegister() const {
640 return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
641 MachineRepresentation::kFloat32;
642 }
643
IsDoubleRegister()644 bool InstructionOperand::IsDoubleRegister() const {
645 return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
646 MachineRepresentation::kFloat64;
647 }
648
IsSimd128Register()649 bool InstructionOperand::IsSimd128Register() const {
650 return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
651 MachineRepresentation::kSimd128;
652 }
653
IsAnyStackSlot()654 bool InstructionOperand::IsAnyStackSlot() const {
655 return IsAnyLocationOperand() &&
656 LocationOperand::cast(this)->location_kind() ==
657 LocationOperand::STACK_SLOT;
658 }
659
IsStackSlot()660 bool InstructionOperand::IsStackSlot() const {
661 return IsAnyStackSlot() &&
662 !IsFloatingPoint(LocationOperand::cast(this)->representation());
663 }
664
IsFPStackSlot()665 bool InstructionOperand::IsFPStackSlot() const {
666 return IsAnyStackSlot() &&
667 IsFloatingPoint(LocationOperand::cast(this)->representation());
668 }
669
IsFloatStackSlot()670 bool InstructionOperand::IsFloatStackSlot() const {
671 return IsAnyLocationOperand() &&
672 LocationOperand::cast(this)->location_kind() ==
673 LocationOperand::STACK_SLOT &&
674 LocationOperand::cast(this)->representation() ==
675 MachineRepresentation::kFloat32;
676 }
677
IsDoubleStackSlot()678 bool InstructionOperand::IsDoubleStackSlot() const {
679 return IsAnyLocationOperand() &&
680 LocationOperand::cast(this)->location_kind() ==
681 LocationOperand::STACK_SLOT &&
682 LocationOperand::cast(this)->representation() ==
683 MachineRepresentation::kFloat64;
684 }
685
IsSimd128StackSlot()686 bool InstructionOperand::IsSimd128StackSlot() const {
687 return IsAnyLocationOperand() &&
688 LocationOperand::cast(this)->location_kind() ==
689 LocationOperand::STACK_SLOT &&
690 LocationOperand::cast(this)->representation() ==
691 MachineRepresentation::kSimd128;
692 }
693
// Returns a normalized encoding so that location operands that should be
// treated as the same storage compare equal: for FP registers the
// representation is collapsed according to the platform's FP aliasing mode,
// and the kind is rewritten to ALLOCATED. Non-location operands canonicalize
// to themselves.
uint64_t InstructionOperand::GetCanonicalizedValue() const {
  if (IsAnyLocationOperand()) {
    MachineRepresentation canonical = MachineRepresentation::kNone;
    if (IsFPRegister()) {
      if (kFPAliasing == AliasingKind::kOverlap) {
        // We treat all FP register operands the same for simple aliasing.
        canonical = MachineRepresentation::kFloat64;
      } else if (kFPAliasing == AliasingKind::kIndependent) {
        // SIMD registers are independent of float/double registers; only
        // collapse the float32/float64 distinction.
        if (IsSimd128Register()) {
          canonical = MachineRepresentation::kSimd128;
        } else {
          canonical = MachineRepresentation::kFloat64;
        }
      } else {
        // We need to distinguish FP register operands of different reps when
        // aliasing is AliasingKind::kCombine (e.g. ARM).
        DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
        canonical = LocationOperand::cast(this)->representation();
      }
    }
    // Erase representation (to |canonical|) and kind (to ALLOCATED) before
    // comparing.
    return InstructionOperand::KindField::update(
        LocationOperand::RepresentationField::update(this->value_, canonical),
        LocationOperand::ALLOCATED);
  }
  return this->value_;
}
720
// Required for maps that don't care about machine type.
// Strict-weak-ordering functor that compares operands by canonicalized
// value, i.e. ignoring differences erased by GetCanonicalizedValue().
struct CompareOperandModuloType {
  bool operator()(const InstructionOperand& a,
                  const InstructionOperand& b) const {
    return a.CompareCanonicalized(b);
  }
};
728
// A single source -> destination move, the unit of a ParallelMove. Two
// sentinel states are encoded with INVALID operands: "pending" (destination
// cleared, source kept) and "eliminated" (both cleared).
class V8_EXPORT_PRIVATE MoveOperands final
    : public NON_EXPORTED_BASE(ZoneObject) {
 public:
  MoveOperands(const InstructionOperand& source,
               const InstructionOperand& destination)
      : source_(source), destination_(destination) {
    DCHECK(!source.IsInvalid() && !destination.IsInvalid());
  }

  MoveOperands(const MoveOperands&) = delete;
  MoveOperands& operator=(const MoveOperands&) = delete;

  const InstructionOperand& source() const { return source_; }
  InstructionOperand& source() { return source_; }
  void set_source(const InstructionOperand& operand) { source_ = operand; }

  const InstructionOperand& destination() const { return destination_; }
  InstructionOperand& destination() { return destination_; }
  void set_destination(const InstructionOperand& operand) {
    destination_ = operand;
  }

  // The gap resolver marks moves as "in-progress" by clearing the
  // destination (but not the source).
  bool IsPending() const {
    return destination_.IsInvalid() && !source_.IsInvalid();
  }
  void SetPending() { destination_ = InstructionOperand(); }

  // A move is redundant if it's been eliminated or if its source and
  // destination are the same.
  bool IsRedundant() const {
    DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
    return IsEliminated() || source_.EqualsCanonicalized(destination_);
  }

  // We clear both operands to indicate move that's been eliminated.
  void Eliminate() { source_ = destination_ = InstructionOperand(); }
  bool IsEliminated() const {
    DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
    return source_.IsInvalid();
  }

  // APIs to aid debugging. For general-stream APIs, use operator<<.
  void Print() const;

 private:
  InstructionOperand source_;
  InstructionOperand destination_;
};
779
780 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const MoveOperands&);
781
// A set of moves that are performed semantically in parallel (one move's
// source may be another move's destination). Implemented as a zone vector
// of MoveOperands pointers; also a ZoneObject so the set itself can be
// zone-allocated.
class V8_EXPORT_PRIVATE ParallelMove final
    : public NON_EXPORTED_BASE(ZoneVector<MoveOperands*>),
      public NON_EXPORTED_BASE(ZoneObject) {
 public:
  explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {}
  ParallelMove(const ParallelMove&) = delete;
  ParallelMove& operator=(const ParallelMove&) = delete;

  // Convenience overload allocating from this vector's own zone.
  MoveOperands* AddMove(const InstructionOperand& from,
                        const InstructionOperand& to) {
    Zone* zone = get_allocator().zone();
    return AddMove(from, to, zone);
  }

  // Adds a move; returns nullptr (and adds nothing) when the move would be
  // a no-op, i.e. source and destination are canonically equal.
  MoveOperands* AddMove(const InstructionOperand& from,
                        const InstructionOperand& to,
                        Zone* operand_allocation_zone) {
    if (from.EqualsCanonicalized(to)) return nullptr;
    MoveOperands* move = operand_allocation_zone->New<MoveOperands>(from, to);
    if (empty()) reserve(4);  // Avoid repeated small grows on first use.
    push_back(move);
    return move;
  }

  // True when every contained move is redundant.
  bool IsRedundant() const;

  // Prepare this ParallelMove to insert move as if it happened in a subsequent
  // ParallelMove. move->source() may be changed. Any MoveOperands added to
  // to_eliminate must be Eliminated.
  void PrepareInsertAfter(MoveOperands* move,
                          ZoneVector<MoveOperands*>* to_eliminate) const;
};
814
815 std::ostream& operator<<(std::ostream&, const ParallelMove&);
816
// Records, for a single instruction position, the set of allocated operands
// registered via RecordReference. (Presumably these are the operands holding
// tagged references at that position — confirm against the register
// allocator's usage.)
class ReferenceMap final : public ZoneObject {
 public:
  explicit ReferenceMap(Zone* zone)
      : reference_operands_(8, zone), instruction_position_(-1) {}

  const ZoneVector<InstructionOperand>& reference_operands() const {
    return reference_operands_;
  }
  int instruction_position() const { return instruction_position_; }

  // May be set exactly once; the position starts out as -1.
  void set_instruction_position(int pos) {
    DCHECK_EQ(-1, instruction_position_);
    instruction_position_ = pos;
  }

  void RecordReference(const AllocatedOperand& op);

 private:
  friend std::ostream& operator<<(std::ostream&, const ReferenceMap&);

  ZoneVector<InstructionOperand> reference_operands_;
  int instruction_position_;
};
840
841 std::ostream& operator<<(std::ostream&, const ReferenceMap&);
842
843 class InstructionBlock;
844
845 class V8_EXPORT_PRIVATE Instruction final {
846 public:
847 Instruction(const Instruction&) = delete;
848 Instruction& operator=(const Instruction&) = delete;
849
OutputCount()850 size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
OutputAt(size_t i)851 const InstructionOperand* OutputAt(size_t i) const {
852 DCHECK_LT(i, OutputCount());
853 return &operands_[i];
854 }
OutputAt(size_t i)855 InstructionOperand* OutputAt(size_t i) {
856 DCHECK_LT(i, OutputCount());
857 return &operands_[i];
858 }
859
HasOutput()860 bool HasOutput() const { return OutputCount() > 0; }
Output()861 const InstructionOperand* Output() const { return OutputAt(0); }
Output()862 InstructionOperand* Output() { return OutputAt(0); }
863
InputCount()864 size_t InputCount() const { return InputCountField::decode(bit_field_); }
InputAt(size_t i)865 const InstructionOperand* InputAt(size_t i) const {
866 DCHECK_LT(i, InputCount());
867 return &operands_[OutputCount() + i];
868 }
InputAt(size_t i)869 InstructionOperand* InputAt(size_t i) {
870 DCHECK_LT(i, InputCount());
871 return &operands_[OutputCount() + i];
872 }
873
TempCount()874 size_t TempCount() const { return TempCountField::decode(bit_field_); }
TempAt(size_t i)875 const InstructionOperand* TempAt(size_t i) const {
876 DCHECK_LT(i, TempCount());
877 return &operands_[OutputCount() + InputCount() + i];
878 }
TempAt(size_t i)879 InstructionOperand* TempAt(size_t i) {
880 DCHECK_LT(i, TempCount());
881 return &operands_[OutputCount() + InputCount() + i];
882 }
883
opcode()884 InstructionCode opcode() const { return opcode_; }
arch_opcode()885 ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
addressing_mode()886 AddressingMode addressing_mode() const {
887 return AddressingModeField::decode(opcode());
888 }
flags_mode()889 FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
flags_condition()890 FlagsCondition flags_condition() const {
891 return FlagsConditionField::decode(opcode());
892 }
misc()893 int misc() const { return MiscField::decode(opcode()); }
HasMemoryAccessMode()894 bool HasMemoryAccessMode() const {
895 return compiler::HasMemoryAccessMode(arch_opcode());
896 }
memory_access_mode()897 MemoryAccessMode memory_access_mode() const {
898 DCHECK(HasMemoryAccessMode());
899 return AccessModeField::decode(opcode());
900 }
901
New(Zone * zone,InstructionCode opcode)902 static Instruction* New(Zone* zone, InstructionCode opcode) {
903 return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
904 }
905
New(Zone * zone,InstructionCode opcode,size_t output_count,InstructionOperand * outputs,size_t input_count,InstructionOperand * inputs,size_t temp_count,InstructionOperand * temps)906 static Instruction* New(Zone* zone, InstructionCode opcode,
907 size_t output_count, InstructionOperand* outputs,
908 size_t input_count, InstructionOperand* inputs,
909 size_t temp_count, InstructionOperand* temps) {
910 DCHECK(output_count == 0 || outputs != nullptr);
911 DCHECK(input_count == 0 || inputs != nullptr);
912 DCHECK(temp_count == 0 || temps != nullptr);
913 // TODO(turbofan): Handle this gracefully. See crbug.com/582702.
914 CHECK(InputCountField::is_valid(input_count));
915
916 size_t total_extra_ops = output_count + input_count + temp_count;
917 if (total_extra_ops != 0) total_extra_ops--;
918 int size = static_cast<int>(
919 RoundUp(sizeof(Instruction), sizeof(InstructionOperand)) +
920 total_extra_ops * sizeof(InstructionOperand));
921 return new (zone->Allocate<Instruction>(size)) Instruction(
922 opcode, output_count, outputs, input_count, inputs, temp_count, temps);
923 }
924
MarkAsCall()925 Instruction* MarkAsCall() {
926 bit_field_ = IsCallField::update(bit_field_, true);
927 return this;
928 }
IsCall()929 bool IsCall() const { return IsCallField::decode(bit_field_); }
NeedsReferenceMap()930 bool NeedsReferenceMap() const { return IsCall(); }
HasReferenceMap()931 bool HasReferenceMap() const { return reference_map_ != nullptr; }
932
ClobbersRegisters()933 bool ClobbersRegisters() const { return IsCall(); }
ClobbersTemps()934 bool ClobbersTemps() const { return IsCall(); }
ClobbersDoubleRegisters()935 bool ClobbersDoubleRegisters() const { return IsCall(); }
reference_map()936 ReferenceMap* reference_map() const { return reference_map_; }
937
set_reference_map(ReferenceMap * map)938 void set_reference_map(ReferenceMap* map) {
939 DCHECK(NeedsReferenceMap());
940 DCHECK(!reference_map_);
941 reference_map_ = map;
942 }
943
OverwriteWithNop()944 void OverwriteWithNop() {
945 opcode_ = ArchOpcodeField::encode(kArchNop);
946 bit_field_ = 0;
947 reference_map_ = nullptr;
948 }
949
IsNop()950 bool IsNop() const { return arch_opcode() == kArchNop; }
951
IsDeoptimizeCall()952 bool IsDeoptimizeCall() const {
953 return arch_opcode() == ArchOpcode::kArchDeoptimize ||
954 FlagsModeField::decode(opcode()) == kFlags_deoptimize;
955 }
956
IsTrap()957 bool IsTrap() const {
958 return FlagsModeField::decode(opcode()) == kFlags_trap;
959 }
960
IsJump()961 bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
IsRet()962 bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
IsTailCall()963 bool IsTailCall() const {
964 #if V8_ENABLE_WEBASSEMBLY
965 return arch_opcode() <= ArchOpcode::kArchTailCallWasm;
966 #else
967 return arch_opcode() <= ArchOpcode::kArchTailCallAddress;
968 #endif // V8_ENABLE_WEBASSEMBLY
969 }
IsThrow()970 bool IsThrow() const {
971 return arch_opcode() == ArchOpcode::kArchThrowTerminator;
972 }
973
IsCallWithDescriptorFlags(InstructionCode arch_opcode)974 static constexpr bool IsCallWithDescriptorFlags(InstructionCode arch_opcode) {
975 return arch_opcode <= ArchOpcode::kArchCallBuiltinPointer;
976 }
IsCallWithDescriptorFlags()977 bool IsCallWithDescriptorFlags() const {
978 return IsCallWithDescriptorFlags(arch_opcode());
979 }
HasCallDescriptorFlag(CallDescriptor::Flag flag)980 bool HasCallDescriptorFlag(CallDescriptor::Flag flag) const {
981 DCHECK(IsCallWithDescriptorFlags());
982 STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
983 #ifdef DEBUG
984 static constexpr int kInstructionCodeFlagsMask =
985 ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
986 DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
987 #endif
988 return MiscField::decode(opcode()) & flag;
989 }
990
991 enum GapPosition {
992 START,
993 END,
994 FIRST_GAP_POSITION = START,
995 LAST_GAP_POSITION = END
996 };
997
GetOrCreateParallelMove(GapPosition pos,Zone * zone)998 ParallelMove* GetOrCreateParallelMove(GapPosition pos, Zone* zone) {
999 if (parallel_moves_[pos] == nullptr) {
1000 parallel_moves_[pos] = zone->New<ParallelMove>(zone);
1001 }
1002 return parallel_moves_[pos];
1003 }
1004
GetParallelMove(GapPosition pos)1005 ParallelMove* GetParallelMove(GapPosition pos) {
1006 return parallel_moves_[pos];
1007 }
1008
GetParallelMove(GapPosition pos)1009 const ParallelMove* GetParallelMove(GapPosition pos) const {
1010 return parallel_moves_[pos];
1011 }
1012
1013 bool AreMovesRedundant() const;
1014
parallel_moves()1015 ParallelMove* const* parallel_moves() const { return ¶llel_moves_[0]; }
parallel_moves()1016 ParallelMove** parallel_moves() { return ¶llel_moves_[0]; }
1017
1018 // The block_id may be invalidated in JumpThreading. It is only important for
1019 // register allocation, to avoid searching for blocks from instruction
1020 // indexes.
block()1021 InstructionBlock* block() const { return block_; }
set_block(InstructionBlock * block)1022 void set_block(InstructionBlock* block) {
1023 DCHECK_NOT_NULL(block);
1024 block_ = block;
1025 }
1026
1027 // APIs to aid debugging. For general-stream APIs, use operator<<.
1028 void Print() const;
1029
1030 using OutputCountField = base::BitField<size_t, 0, 8>;
1031 using InputCountField = base::BitField<size_t, 8, 16>;
1032 using TempCountField = base::BitField<size_t, 24, 6>;
1033
1034 static const size_t kMaxOutputCount = OutputCountField::kMax;
1035 static const size_t kMaxInputCount = InputCountField::kMax;
1036 static const size_t kMaxTempCount = TempCountField::kMax;
1037
1038 private:
1039 explicit Instruction(InstructionCode opcode);
1040
1041 Instruction(InstructionCode opcode, size_t output_count,
1042 InstructionOperand* outputs, size_t input_count,
1043 InstructionOperand* inputs, size_t temp_count,
1044 InstructionOperand* temps);
1045
1046 using IsCallField = base::BitField<bool, 30, 1>;
1047
1048 InstructionCode opcode_;
1049 uint32_t bit_field_;
1050 ParallelMove* parallel_moves_[2];
1051 ReferenceMap* reference_map_;
1052 InstructionBlock* block_;
1053 InstructionOperand operands_[1];
1054 };
1055
1056 std::ostream& operator<<(std::ostream&, const Instruction&);
1057
1058 class RpoNumber final {
1059 public:
1060 static const int kInvalidRpoNumber = -1;
RpoNumber()1061 RpoNumber() : index_(kInvalidRpoNumber) {}
1062
ToInt()1063 int ToInt() const {
1064 DCHECK(IsValid());
1065 return index_;
1066 }
ToSize()1067 size_t ToSize() const {
1068 DCHECK(IsValid());
1069 return static_cast<size_t>(index_);
1070 }
IsValid()1071 bool IsValid() const { return index_ >= 0; }
FromInt(int index)1072 static RpoNumber FromInt(int index) { return RpoNumber(index); }
Invalid()1073 static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
1074
IsNext(const RpoNumber other)1075 bool IsNext(const RpoNumber other) const {
1076 DCHECK(IsValid());
1077 return other.index_ == this->index_ + 1;
1078 }
1079
Next()1080 RpoNumber Next() const {
1081 DCHECK(IsValid());
1082 return RpoNumber(index_ + 1);
1083 }
1084
1085 // Comparison operators.
1086 bool operator==(RpoNumber other) const { return index_ == other.index_; }
1087 bool operator!=(RpoNumber other) const { return index_ != other.index_; }
1088 bool operator>(RpoNumber other) const { return index_ > other.index_; }
1089 bool operator<(RpoNumber other) const { return index_ < other.index_; }
1090 bool operator<=(RpoNumber other) const { return index_ <= other.index_; }
1091 bool operator>=(RpoNumber other) const { return index_ >= other.index_; }
1092
1093 private:
RpoNumber(int32_t index)1094 explicit RpoNumber(int32_t index) : index_(index) {}
1095 int32_t index_;
1096 };
1097
1098 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const RpoNumber&);
1099
// A compile-time constant operand value. The 64-bit payload |value_| is
// interpreted according to |type_|: integers directly, floats via their bit
// pattern, and references/pointers via bit_cast of the address.
class V8_EXPORT_PRIVATE Constant final {
 public:
  enum Type {
    kInt32,
    kInt64,
    kFloat32,
    kFloat64,
    kExternalReference,
    kCompressedHeapObject,
    kHeapObject,
    kRpoNumber,
    kDelayedStringConstant
  };

  explicit Constant(int32_t v);  // Defined out of line.
  explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
  explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
  explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
  explicit Constant(ExternalReference ref)
      : type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
  explicit Constant(Handle<HeapObject> obj, bool is_compressed = false)
      : type_(is_compressed ? kCompressedHeapObject : kHeapObject),
        value_(bit_cast<intptr_t>(obj)) {}
  explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
  explicit Constant(const StringConstantBase* str)
      : type_(kDelayedStringConstant), value_(bit_cast<intptr_t>(str)) {}
  explicit Constant(RelocatablePtrConstantInfo info);  // Defined out of line.

  Type type() const { return type_; }

  // Relocation mode; defaults to NO_INFO. NOTE(review): presumably set only
  // by the out-of-line constructors — confirm in the .cc file.
  RelocInfo::Mode rmode() const { return rmode_; }

  // Only valid for kInt32/kInt64 constants: whether the value fits in 32 bits.
  bool FitsInInt32() const {
    if (type() == kInt32) return true;
    DCHECK(type() == kInt64);
    return value_ >= std::numeric_limits<int32_t>::min() &&
           value_ <= std::numeric_limits<int32_t>::max();
  }

  int32_t ToInt32() const {
    DCHECK(FitsInInt32());
    const int32_t value = static_cast<int32_t>(value_);
    DCHECK_EQ(value_, static_cast<int64_t>(value));
    return value;
  }

  // Accepts both kInt32 and kInt64 constants.
  int64_t ToInt64() const {
    if (type() == kInt32) return ToInt32();
    DCHECK_EQ(kInt64, type());
    return value_;
  }

  float ToFloat32() const {
    // TODO(ahaas): We should remove this function. If value_ has the bit
    // representation of a signalling NaN, then returning it as float can cause
    // the signalling bit to flip, and value_ is returned as a quiet NaN.
    DCHECK_EQ(kFloat32, type());
    return bit_cast<float>(static_cast<int32_t>(value_));
  }

  // Raw bit pattern of a kFloat32 constant (avoids the NaN issue above).
  uint32_t ToFloat32AsInt() const {
    DCHECK_EQ(kFloat32, type());
    return bit_cast<uint32_t>(static_cast<int32_t>(value_));
  }

  // Returned as base::Double (a bit-pattern wrapper), not a raw double.
  base::Double ToFloat64() const {
    DCHECK_EQ(kFloat64, type());
    return base::Double(bit_cast<uint64_t>(value_));
  }

  ExternalReference ToExternalReference() const {
    DCHECK_EQ(kExternalReference, type());
    return ExternalReference::FromRawAddress(static_cast<Address>(value_));
  }

  RpoNumber ToRpoNumber() const {
    DCHECK_EQ(kRpoNumber, type());
    return RpoNumber::FromInt(static_cast<int>(value_));
  }

  // Defined out of line.
  Handle<HeapObject> ToHeapObject() const;
  Handle<CodeT> ToCode() const;
  const StringConstantBase* ToDelayedStringConstant() const;

 private:
  Type type_;
  RelocInfo::Mode rmode_ = RelocInfo::NO_INFO;
  int64_t value_;  // Interpreted according to |type_|; see class comment.
};
1189
1190 std::ostream& operator<<(std::ostream&, const Constant&);
1191
1192 // Forward declarations.
1193 class FrameStateDescriptor;
1194
// Kind tags for StateValueDescriptor (below).
enum class StateValueKind : uint8_t {
  kArgumentsElements,  // Carries an ArgumentsStateType (see arguments_type()).
  kArgumentsLength,    // Arguments length marker; no extra payload.
  kPlain,              // A plain value described only by its MachineType.
  kOptimizedOut,       // Placeholder for a value that was optimized away.
  kNested,             // References a nested StateValueList via id().
  kDuplicate           // Duplicate of an earlier value, referenced via id().
};
1203
1204 class StateValueDescriptor {
1205 public:
StateValueDescriptor()1206 StateValueDescriptor()
1207 : kind_(StateValueKind::kPlain), type_(MachineType::AnyTagged()) {}
1208
ArgumentsElements(ArgumentsStateType type)1209 static StateValueDescriptor ArgumentsElements(ArgumentsStateType type) {
1210 StateValueDescriptor descr(StateValueKind::kArgumentsElements,
1211 MachineType::AnyTagged());
1212 descr.args_type_ = type;
1213 return descr;
1214 }
ArgumentsLength()1215 static StateValueDescriptor ArgumentsLength() {
1216 return StateValueDescriptor(StateValueKind::kArgumentsLength,
1217 MachineType::AnyTagged());
1218 }
Plain(MachineType type)1219 static StateValueDescriptor Plain(MachineType type) {
1220 return StateValueDescriptor(StateValueKind::kPlain, type);
1221 }
OptimizedOut()1222 static StateValueDescriptor OptimizedOut() {
1223 return StateValueDescriptor(StateValueKind::kOptimizedOut,
1224 MachineType::AnyTagged());
1225 }
Recursive(size_t id)1226 static StateValueDescriptor Recursive(size_t id) {
1227 StateValueDescriptor descr(StateValueKind::kNested,
1228 MachineType::AnyTagged());
1229 descr.id_ = id;
1230 return descr;
1231 }
Duplicate(size_t id)1232 static StateValueDescriptor Duplicate(size_t id) {
1233 StateValueDescriptor descr(StateValueKind::kDuplicate,
1234 MachineType::AnyTagged());
1235 descr.id_ = id;
1236 return descr;
1237 }
1238
IsArgumentsElements()1239 bool IsArgumentsElements() const {
1240 return kind_ == StateValueKind::kArgumentsElements;
1241 }
IsArgumentsLength()1242 bool IsArgumentsLength() const {
1243 return kind_ == StateValueKind::kArgumentsLength;
1244 }
IsPlain()1245 bool IsPlain() const { return kind_ == StateValueKind::kPlain; }
IsOptimizedOut()1246 bool IsOptimizedOut() const { return kind_ == StateValueKind::kOptimizedOut; }
IsNested()1247 bool IsNested() const { return kind_ == StateValueKind::kNested; }
IsDuplicate()1248 bool IsDuplicate() const { return kind_ == StateValueKind::kDuplicate; }
type()1249 MachineType type() const { return type_; }
id()1250 size_t id() const {
1251 DCHECK(kind_ == StateValueKind::kDuplicate ||
1252 kind_ == StateValueKind::kNested);
1253 return id_;
1254 }
arguments_type()1255 ArgumentsStateType arguments_type() const {
1256 DCHECK(kind_ == StateValueKind::kArgumentsElements);
1257 return args_type_;
1258 }
1259
1260 private:
StateValueDescriptor(StateValueKind kind,MachineType type)1261 StateValueDescriptor(StateValueKind kind, MachineType type)
1262 : kind_(kind), type_(type) {}
1263
1264 StateValueKind kind_;
1265 MachineType type_;
1266 union {
1267 size_t id_;
1268 ArgumentsStateType args_type_;
1269 };
1270 };
1271
// An ordered list of StateValueDescriptors. Nested lists are stored in a
// parallel vector: |fields_| holds every descriptor in order, while |nested_|
// holds one StateValueList* for each kNested descriptor, in the same relative
// order. The custom iterator below keeps the two in sync.
class StateValueList {
 public:
  explicit StateValueList(Zone* zone) : fields_(zone), nested_(zone) {}

  size_t size() { return fields_.size(); }

  size_t nested_count() { return nested_.size(); }

  // A descriptor together with its nested list (nullptr unless IsNested()).
  struct Value {
    StateValueDescriptor* desc;
    StateValueList* nested;

    Value(StateValueDescriptor* desc, StateValueList* nested)
        : desc(desc), nested(nested) {}
  };

  class iterator {
   public:
    // Bare minimum of operators needed for range iteration.
    bool operator!=(const iterator& other) const {
      return field_iterator != other.field_iterator;
    }
    bool operator==(const iterator& other) const {
      return field_iterator == other.field_iterator;
    }
    iterator& operator++() {
      // Advance the nested iterator only past nested fields, keeping it
      // aligned with the kNested descriptors seen so far.
      if (field_iterator->IsNested()) {
        nested_iterator++;
      }
      ++field_iterator;
      return *this;
    }
    Value operator*() {
      StateValueDescriptor* desc = &(*field_iterator);
      StateValueList* nested = desc->IsNested() ? *nested_iterator : nullptr;
      return Value(desc, nested);
    }

   private:
    friend class StateValueList;

    iterator(ZoneVector<StateValueDescriptor>::iterator it,
             ZoneVector<StateValueList*>::iterator nested)
        : field_iterator(it), nested_iterator(nested) {}

    ZoneVector<StateValueDescriptor>::iterator field_iterator;
    ZoneVector<StateValueList*>::iterator nested_iterator;
  };

  // A contiguous run of (non-nested) fields, used for caching; see
  // MakeSlice() and PushCachedSlice().
  struct Slice {
    Slice(ZoneVector<StateValueDescriptor>::iterator start, size_t fields)
        : start_position(start), fields_count(fields) {}

    ZoneVector<StateValueDescriptor>::iterator start_position;
    size_t fields_count;
  };

  void ReserveSize(size_t size) { fields_.reserve(size); }

  // Appends a kNested descriptor and allocates its child list, which is
  // returned so the caller can populate it.
  StateValueList* PushRecursiveField(Zone* zone, size_t id) {
    fields_.push_back(StateValueDescriptor::Recursive(id));
    StateValueList* nested = zone->New<StateValueList>(zone);
    nested_.push_back(nested);
    return nested;
  }
  void PushArgumentsElements(ArgumentsStateType type) {
    fields_.push_back(StateValueDescriptor::ArgumentsElements(type));
  }
  void PushArgumentsLength() {
    fields_.push_back(StateValueDescriptor::ArgumentsLength());
  }
  void PushDuplicate(size_t id) {
    fields_.push_back(StateValueDescriptor::Duplicate(id));
  }
  void PushPlain(MachineType type) {
    fields_.push_back(StateValueDescriptor::Plain(type));
  }
  // Appends |num| optimized-out placeholders.
  void PushOptimizedOut(size_t num = 1) {
    fields_.insert(fields_.end(), num, StateValueDescriptor::OptimizedOut());
  }
  // Copies the fields of a previously-made Slice onto the end of this list.
  void PushCachedSlice(const Slice& cached) {
    fields_.insert(fields_.end(), cached.start_position,
                   cached.start_position + cached.fields_count);
  }

  // Returns a Slice representing the (non-nested) fields in StateValueList from
  // values_start to the current end position.
  Slice MakeSlice(size_t values_start) {
    DCHECK(!HasNestedFieldsAfter(values_start));
    size_t fields_count = fields_.size() - values_start;
    return Slice(fields_.begin() + values_start, fields_count);
  }

  iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
  iterator end() { return iterator(fields_.end(), nested_.end()); }

 private:
  // True if any field at or after |values_start| is a kNested descriptor
  // (slices must not span nested fields; see MakeSlice).
  bool HasNestedFieldsAfter(size_t values_start) {
    auto it = fields_.begin() + values_start;
    for (; it != fields_.end(); it++) {
      if (it->IsNested()) return true;
    }
    return false;
  }

  ZoneVector<StateValueDescriptor> fields_;  // All descriptors, in order.
  ZoneVector<StateValueList*> nested_;       // One entry per kNested field.
};
1380
// Describes the layout of an unoptimized frame (parameters, locals, stack
// slots, plus an optional chain of outer frames) that execution must be able
// to return to on deoptimization (see DeoptimizationEntry below).
class FrameStateDescriptor : public ZoneObject {
 public:
  FrameStateDescriptor(Zone* zone, FrameStateType type,
                       BytecodeOffset bailout_id,
                       OutputFrameStateCombine state_combine,
                       size_t parameters_count, size_t locals_count,
                       size_t stack_count,
                       MaybeHandle<SharedFunctionInfo> shared_info,
                       FrameStateDescriptor* outer_state = nullptr);

  FrameStateType type() const { return type_; }
  BytecodeOffset bailout_id() const { return bailout_id_; }
  OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
  size_t parameters_count() const { return parameters_count_; }
  size_t locals_count() const { return locals_count_; }
  size_t stack_count() const { return stack_count_; }
  MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
  // The next-outer frame state, or nullptr for the outermost frame.
  FrameStateDescriptor* outer_state() const { return outer_state_; }
  // Whether frames of this type carry a context value.
  bool HasContext() const {
    return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
           type_ == FrameStateType::kBuiltinContinuation ||
#if V8_ENABLE_WEBASSEMBLY
           type_ == FrameStateType::kJSToWasmBuiltinContinuation ||
#endif  // V8_ENABLE_WEBASSEMBLY
           type_ == FrameStateType::kConstructStub;
  }

  // The frame height on the stack, in number of slots, as serialized into a
  // Translation and later used by the deoptimizer. Does *not* include
  // information from the chain of outer states. Unlike |GetSize| this does not
  // always include parameters, locals, and stack slots; instead, the returned
  // slot kinds depend on the frame type.
  size_t GetHeight() const;

  // Returns an overapproximation of the unoptimized stack frame size in bytes,
  // as later produced by the deoptimizer. Considers both this and the chain of
  // outer states.
  size_t total_conservative_frame_size_in_bytes() const {
    return total_conservative_frame_size_in_bytes_;
  }

  // Size queries; defined out of line. The Total/FrameCount variants
  // presumably walk the outer_state chain — confirm in the .cc file.
  size_t GetSize() const;
  size_t GetTotalSize() const;
  size_t GetFrameCount() const;
  size_t GetJSFrameCount() const;

  // The values captured by this frame state (see StateValueList).
  StateValueList* GetStateValueDescriptors() { return &values_; }

  static const int kImpossibleValue = 0xdead;

 private:
  FrameStateType type_;
  BytecodeOffset bailout_id_;
  OutputFrameStateCombine frame_state_combine_;
  const size_t parameters_count_;
  const size_t locals_count_;
  const size_t stack_count_;
  const size_t total_conservative_frame_size_in_bytes_;
  StateValueList values_;
  MaybeHandle<SharedFunctionInfo> const shared_info_;
  FrameStateDescriptor* const outer_state_;  // nullptr for outermost frame.
};
1443
1444 #if V8_ENABLE_WEBASSEMBLY
// FrameStateDescriptor for JS-to-Wasm continuation frames; additionally
// records an optional wasm return kind (presumably derived from
// |wasm_signature| in the out-of-line constructor — confirm in the .cc file).
class JSToWasmFrameStateDescriptor : public FrameStateDescriptor {
 public:
  JSToWasmFrameStateDescriptor(Zone* zone, FrameStateType type,
                               BytecodeOffset bailout_id,
                               OutputFrameStateCombine state_combine,
                               size_t parameters_count, size_t locals_count,
                               size_t stack_count,
                               MaybeHandle<SharedFunctionInfo> shared_info,
                               FrameStateDescriptor* outer_state,
                               const wasm::FunctionSig* wasm_signature);

  base::Optional<wasm::ValueKind> return_kind() const { return return_kind_; }

 private:
  base::Optional<wasm::ValueKind> return_kind_;
};
1461 #endif // V8_ENABLE_WEBASSEMBLY
1462
1463 // A deoptimization entry is a pair of the reason why we deoptimize and the
1464 // frame state descriptor that we have to go back to.
class DeoptimizationEntry final {
 public:
  DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
                      DeoptimizeReason reason, NodeId node_id,
                      FeedbackSource const& feedback)
      : descriptor_(descriptor),
        kind_(kind),
        reason_(reason),
#ifdef DEBUG
        node_id_(node_id),
#endif  // DEBUG
        feedback_(feedback) {
    // Silences the unused-parameter warning in non-DEBUG builds, where
    // node_id is not stored.
    USE(node_id);
  }

  // The frame state to restore when deoptimizing through this entry.
  FrameStateDescriptor* descriptor() const { return descriptor_; }
  DeoptimizeKind kind() const { return kind_; }
  DeoptimizeReason reason() const { return reason_; }
#ifdef DEBUG
  // The originating node; only tracked in DEBUG builds.
  NodeId node_id() const { return node_id_; }
#endif  // DEBUG
  FeedbackSource const& feedback() const { return feedback_; }

 private:
  FrameStateDescriptor* const descriptor_;
  const DeoptimizeKind kind_;
  const DeoptimizeReason reason_;
#ifdef DEBUG
  const NodeId node_id_;
#endif  // DEBUG
  const FeedbackSource feedback_;
};
1497
1498 using DeoptimizationVector = ZoneVector<DeoptimizationEntry>;
1499
// A phi in the instruction graph: produces one output virtual register from
// |input_count| input virtual registers.
class V8_EXPORT_PRIVATE PhiInstruction final
    : public NON_EXPORTED_BASE(ZoneObject) {
 public:
  using Inputs = ZoneVector<InstructionOperand>;

  // Defined out of line.
  PhiInstruction(Zone* zone, int virtual_register, size_t input_count);

  // Set or rename the virtual register at input position |offset|
  // (both defined out of line).
  void SetInput(size_t offset, int virtual_register);
  void RenameInput(size_t offset, int virtual_register);

  int virtual_register() const { return virtual_register_; }
  // The input virtual registers, one per input position.
  const IntVector& operands() const { return operands_; }

  // TODO(dcarney): this has no real business being here, since it's internal to
  // the register allocator, but putting it here was convenient.
  const InstructionOperand& output() const { return output_; }
  InstructionOperand& output() { return output_; }

 private:
  const int virtual_register_;
  InstructionOperand output_;
  IntVector operands_;  // Input virtual registers.
};
1523
1524 // Analogue of BasicBlock for Instructions instead of Nodes.
class V8_EXPORT_PRIVATE InstructionBlock final
    : public NON_EXPORTED_BASE(ZoneObject) {
 public:
  InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
                   RpoNumber loop_end, RpoNumber dominator, bool deferred,
                   bool handler);

  // Instruction indexes (used by the register allocator).
  // |code_end_| is exclusive, so the last index is code_end_ - 1.
  int first_instruction_index() const {
    DCHECK_LE(0, code_start_);
    DCHECK_LT(0, code_end_);
    DCHECK_GE(code_end_, code_start_);
    return code_start_;
  }
  int last_instruction_index() const {
    DCHECK_LE(0, code_start_);
    DCHECK_LT(0, code_end_);
    DCHECK_GE(code_end_, code_start_);
    return code_end_ - 1;
  }

  int32_t code_start() const { return code_start_; }
  void set_code_start(int32_t start) { code_start_ = start; }

  int32_t code_end() const { return code_end_; }
  void set_code_end(int32_t end) { code_end_ = end; }

  bool IsDeferred() const { return deferred_; }
  bool IsHandler() const { return handler_; }
  void MarkHandler() { handler_ = true; }
  void UnmarkHandler() { handler_ = false; }

  RpoNumber ao_number() const { return ao_number_; }
  RpoNumber rpo_number() const { return rpo_number_; }
  RpoNumber loop_header() const { return loop_header_; }
  // Only valid on loop headers.
  RpoNumber loop_end() const {
    DCHECK(IsLoopHeader());
    return loop_end_;
  }
  // A block is a loop header iff it has a valid loop end.
  inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
  inline bool IsSwitchTarget() const { return switch_target_; }
  inline bool ShouldAlignCodeTarget() const { return code_target_alignment_; }
  inline bool ShouldAlignLoopHeader() const { return loop_header_alignment_; }

  // Predecessors and successors, identified by RpoNumber.
  using Predecessors = ZoneVector<RpoNumber>;
  Predecessors& predecessors() { return predecessors_; }
  const Predecessors& predecessors() const { return predecessors_; }
  size_t PredecessorCount() const { return predecessors_.size(); }
  size_t PredecessorIndexOf(RpoNumber rpo_number) const;

  using Successors = ZoneVector<RpoNumber>;
  Successors& successors() { return successors_; }
  const Successors& successors() const { return successors_; }
  size_t SuccessorCount() const { return successors_.size(); }

  RpoNumber dominator() const { return dominator_; }
  void set_dominator(RpoNumber dominator) { dominator_ = dominator; }

  // Phis at the start of this block.
  using PhiInstructions = ZoneVector<PhiInstruction*>;
  const PhiInstructions& phis() const { return phis_; }
  PhiInstruction* PhiAt(size_t i) const { return phis_[i]; }
  void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }

  void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }

  void set_code_target_alignment(bool val) { code_target_alignment_ = val; }
  void set_loop_header_alignment(bool val) { loop_header_alignment_ = val; }

  void set_switch_target(bool val) { switch_target_ = val; }

  // Frame bookkeeping flags; set-only (except must_deconstruct_frame, which
  // can also be cleared).
  bool needs_frame() const { return needs_frame_; }
  void mark_needs_frame() { needs_frame_ = true; }

  bool must_construct_frame() const { return must_construct_frame_; }
  void mark_must_construct_frame() { must_construct_frame_ = true; }

  bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
  void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
  void clear_must_deconstruct_frame() { must_deconstruct_frame_ = false; }

 private:
  Successors successors_;
  Predecessors predecessors_;
  PhiInstructions phis_;
  RpoNumber ao_number_;  // Assembly order number.
  const RpoNumber rpo_number_;
  const RpoNumber loop_header_;
  const RpoNumber loop_end_;   // Invalid unless this block is a loop header.
  RpoNumber dominator_;
  int32_t code_start_;   // start index of arch-specific code.
  int32_t code_end_ = -1;  // end index of arch-specific code (exclusive).
  const bool deferred_ : 1;  // Block contains deferred code.
  bool handler_ : 1;         // Block is a handler entry point.
  bool switch_target_ : 1;
  bool code_target_alignment_ : 1;  // insert code target alignment before this
                                    // block
  bool loop_header_alignment_ : 1;  // insert loop header alignment before this
                                    // block
  bool needs_frame_ : 1;
  bool must_construct_frame_ : 1;
  bool must_deconstruct_frame_ : 1;
};
1627
1628 class InstructionSequence;
1629
// Pairs a block with its owning sequence so operator<< can print the block's
// instructions, not just its metadata.
struct PrintableInstructionBlock {
  const InstructionBlock* block_;
  const InstructionSequence* code_;
};
1634
1635 std::ostream& operator<<(std::ostream&, const PrintableInstructionBlock&);
1636
using ConstantDeque = ZoneDeque<Constant>;
// Maps virtual register numbers to constants; zone-allocated (see
// InstructionSequence::AddConstant).
using ConstantMap = std::map<int, Constant, std::less<int>,
                             ZoneAllocator<std::pair<const int, Constant> > >;

using InstructionDeque = ZoneDeque<Instruction*>;
using ReferenceMapDeque = ZoneDeque<ReferenceMap*>;
using InstructionBlocks = ZoneVector<InstructionBlock*>;
1644
1645 // Represents architecture-specific generated code before, during, and after
1646 // register allocation.
class V8_EXPORT_PRIVATE InstructionSequence final
    : public NON_EXPORTED_BASE(ZoneObject) {
 public:
  // Builds the InstructionBlocks corresponding to |schedule|'s basic blocks.
  static InstructionBlocks* InstructionBlocksFor(Zone* zone,
                                                 const Schedule* schedule);
  InstructionSequence(Isolate* isolate, Zone* zone,
                      InstructionBlocks* instruction_blocks);
  InstructionSequence(const InstructionSequence&) = delete;
  InstructionSequence& operator=(const InstructionSequence&) = delete;

  // Allocates and returns the next unused virtual register number.
  int NextVirtualRegister();
  int VirtualRegisterCount() const { return next_virtual_register_; }

  // Blocks in construction (RPO) order.
  const InstructionBlocks& instruction_blocks() const {
    return *instruction_blocks_;
  }

  // Blocks in assembly order; see ComputeAssemblyOrder().
  const InstructionBlocks& ao_blocks() const { return *ao_blocks_; }

  int InstructionBlockCount() const {
    return static_cast<int>(instruction_blocks_->size());
  }

  InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) {
    return instruction_blocks_->at(rpo_number.ToSize());
  }

  // Index of the last instruction of the loop associated with |block|.
  // NOTE(review): assumes block->loop_end() is valid, i.e. |block| is a loop
  // header — confirm at call sites.
  int LastLoopInstructionIndex(const InstructionBlock* block) {
    return instruction_blocks_->at(block->loop_end().ToSize() - 1)
        ->last_instruction_index();
  }

  const InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) const {
    return instruction_blocks_->at(rpo_number.ToSize());
  }

  // Returns the block containing the instruction at |instruction_index|.
  InstructionBlock* GetInstructionBlock(int instruction_index) const;

  static MachineRepresentation DefaultRepresentation() {
    return MachineType::PointerRepresentation();
  }
  MachineRepresentation GetRepresentation(int virtual_register) const;
  void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);

  // True if the virtual register may hold a tagged or compressed pointer.
  bool IsReference(int virtual_register) const {
    return CanBeTaggedOrCompressedPointer(GetRepresentation(virtual_register));
  }
  bool IsFP(int virtual_register) const {
    return IsFloatingPoint(GetRepresentation(virtual_register));
  }
  // Bit mask over MachineRepresentation of all representations used by any
  // virtual register in this sequence.
  int representation_mask() const { return representation_mask_; }
  bool HasFPVirtualRegisters() const {
    constexpr int kFPRepMask =
        RepresentationBit(MachineRepresentation::kFloat32) |
        RepresentationBit(MachineRepresentation::kFloat64) |
        RepresentationBit(MachineRepresentation::kSimd128);
    return (representation_mask() & kFPRepMask) != 0;
  }

  bool HasSimd128VirtualRegisters() const {
    constexpr int kSimd128RepMask =
        RepresentationBit(MachineRepresentation::kSimd128);
    return (representation_mask() & kSimd128RepMask) != 0;
  }

  Instruction* GetBlockStart(RpoNumber rpo) const;

  using const_iterator = InstructionDeque::const_iterator;
  const_iterator begin() const { return instructions_.begin(); }
  const_iterator end() const { return instructions_.end(); }
  const InstructionDeque& instructions() const { return instructions_; }
  // Index of the last instruction, or -1 if the sequence is empty.
  int LastInstructionIndex() const {
    return static_cast<int>(instructions().size()) - 1;
  }

  Instruction* InstructionAt(int index) const {
    DCHECK_LE(0, index);
    DCHECK_GT(instructions_.size(), index);
    return instructions_[index];
  }

  Isolate* isolate() const { return isolate_; }
  const ReferenceMapDeque* reference_maps() const { return &reference_maps_; }
  Zone* zone() const { return zone_; }

  // Used by the instruction selector while adding instructions.
  int AddInstruction(Instruction* instr);
  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);

  // Associates |constant| with |virtual_register|. Each virtual register may
  // be bound to at most one constant (enforced by the DCHECK below).
  int AddConstant(int virtual_register, Constant constant) {
    // TODO(titzer): allow RPO numbers as constants?
    DCHECK_NE(Constant::kRpoNumber, constant.type());
    DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
    DCHECK(constants_.find(virtual_register) == constants_.end());
    constants_.insert(std::make_pair(virtual_register, constant));
    return virtual_register;
  }
  Constant GetConstant(int virtual_register) const {
    auto it = constants_.find(virtual_register);
    DCHECK(it != constants_.end());
    DCHECK_EQ(virtual_register, it->first);
    return it->second;
  }

  using Immediates = ZoneVector<Constant>;
  Immediates& immediates() { return immediates_; }

  using RpoImmediates = ZoneVector<RpoNumber>;
  RpoImmediates& rpo_immediates() { return rpo_immediates_; }

  // Encodes |constant| as an ImmediateOperand: small integers without reloc
  // info are inlined, RPO numbers go through rpo_immediates_, everything else
  // is appended to immediates_ and referenced by index.
  ImmediateOperand AddImmediate(const Constant& constant) {
    if (RelocInfo::IsNoInfo(constant.rmode())) {
      if (constant.type() == Constant::kRpoNumber) {
        // Ideally we would inline RPO numbers into the operand, however jump-
        // threading modifies RPO values and so we indirect through a vector
        // of rpo_immediates to enable rewriting. We keep this separate from
        // the immediates vector so that we don't repeatedly push the same rpo
        // number.
        RpoNumber rpo_number = constant.ToRpoNumber();
        DCHECK(!rpo_immediates().at(rpo_number.ToSize()).IsValid() ||
               rpo_immediates().at(rpo_number.ToSize()) == rpo_number);
        rpo_immediates()[rpo_number.ToSize()] = rpo_number;
        return ImmediateOperand(ImmediateOperand::INDEXED_RPO,
                                rpo_number.ToInt());
      } else if (constant.type() == Constant::kInt32) {
        return ImmediateOperand(ImmediateOperand::INLINE_INT32,
                                constant.ToInt32());
      } else if (constant.type() == Constant::kInt64 &&
                 constant.FitsInInt32()) {
        return ImmediateOperand(ImmediateOperand::INLINE_INT64,
                                constant.ToInt32());
      }
    }
    int index = static_cast<int>(immediates_.size());
    immediates_.push_back(constant);
    return ImmediateOperand(ImmediateOperand::INDEXED_IMM, index);
  }

  // Decodes an ImmediateOperand produced by AddImmediate back into a Constant.
  Constant GetImmediate(const ImmediateOperand* op) const {
    switch (op->type()) {
      case ImmediateOperand::INLINE_INT32:
        return Constant(op->inline_int32_value());
      case ImmediateOperand::INLINE_INT64:
        return Constant(op->inline_int64_value());
      case ImmediateOperand::INDEXED_RPO: {
        int index = op->indexed_value();
        DCHECK_LE(0, index);
        DCHECK_GT(rpo_immediates_.size(), index);
        return Constant(rpo_immediates_[index]);
      }
      case ImmediateOperand::INDEXED_IMM: {
        int index = op->indexed_value();
        DCHECK_LE(0, index);
        DCHECK_GT(immediates_.size(), index);
        return immediates_[index];
      }
    }
    UNREACHABLE();
  }

  int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
                             DeoptimizeKind kind, DeoptimizeReason reason,
                             NodeId node_id, FeedbackSource const& feedback);
  DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
  int GetDeoptimizationEntryCount() const {
    return static_cast<int>(deoptimization_entries_.size());
  }

  RpoNumber InputRpo(Instruction* instr, size_t index);

  // Returns true and stores the position in |*result| if a source position
  // was recorded for |instr|.
  bool GetSourcePosition(const Instruction* instr,
                         SourcePosition* result) const;
  void SetSourcePosition(const Instruction* instr, SourcePosition value);

  // Linear scan over all instructions; O(n) in sequence length.
  bool ContainsCall() const {
    for (Instruction* instr : instructions_) {
      if (instr->IsCall()) return true;
    }
    return false;
  }

  // APIs to aid debugging. For general-stream APIs, use operator<<.
  void Print() const;

  void PrintBlock(int block_id) const;

  void ValidateEdgeSplitForm() const;
  void ValidateDeferredBlockExitPaths() const;
  void ValidateDeferredBlockEntryPaths() const;
  void ValidateSSA() const;

  static void SetRegisterConfigurationForTesting(
      const RegisterConfiguration* regConfig);
  static void ClearRegisterConfigurationForTesting();

  void RecomputeAssemblyOrderForTesting();

  // Grows the rpo_immediates table to |rpo_count| entries; test-only helper.
  void IncreaseRpoForTesting(size_t rpo_count) {
    DCHECK_GE(rpo_count, rpo_immediates().size());
    rpo_immediates().resize(rpo_count);
  }

 private:
  friend V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
                                                    const InstructionSequence&);

  using SourcePositionMap = ZoneMap<const Instruction*, SourcePosition>;

  static const RegisterConfiguration* RegisterConfigurationForTesting();
  static const RegisterConfiguration* registerConfigurationForTesting_;

  // Puts the deferred blocks last and may rotate loops.
  void ComputeAssemblyOrder();

  Isolate* isolate_;
  Zone* const zone_;
  InstructionBlocks* const instruction_blocks_;
  InstructionBlocks* ao_blocks_;
  SourcePositionMap source_positions_;
  ConstantMap constants_;
  Immediates immediates_;
  RpoImmediates rpo_immediates_;
  InstructionDeque instructions_;
  int next_virtual_register_;
  ReferenceMapDeque reference_maps_;
  ZoneVector<MachineRepresentation> representations_;
  int representation_mask_;
  DeoptimizationVector deoptimization_entries_;

  // Used at construction time
  InstructionBlock* current_block_;
};
1880
1881 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
1882 const InstructionSequence&);
1883 #undef INSTRUCTION_OPERAND_ALIGN
1884
1885 } // namespace compiler
1886 } // namespace internal
1887 } // namespace v8
1888
1889 #endif // V8_COMPILER_BACKEND_INSTRUCTION_H_
1890