1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_COMPILER_BACKEND_INSTRUCTION_CODES_H_
6 #define V8_COMPILER_BACKEND_INSTRUCTION_CODES_H_
7
8 #include <iosfwd>
9
10 #if V8_TARGET_ARCH_ARM
11 #include "src/compiler/backend/arm/instruction-codes-arm.h"
12 #elif V8_TARGET_ARCH_ARM64
13 #include "src/compiler/backend/arm64/instruction-codes-arm64.h"
14 #elif V8_TARGET_ARCH_IA32
15 #include "src/compiler/backend/ia32/instruction-codes-ia32.h"
16 #elif V8_TARGET_ARCH_MIPS
17 #include "src/compiler/backend/mips/instruction-codes-mips.h"
18 #elif V8_TARGET_ARCH_MIPS64
19 #include "src/compiler/backend/mips64/instruction-codes-mips64.h"
20 #elif V8_TARGET_ARCH_LOONG64
21 #include "src/compiler/backend/loong64/instruction-codes-loong64.h"
22 #elif V8_TARGET_ARCH_X64
23 #include "src/compiler/backend/x64/instruction-codes-x64.h"
24 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
25 #include "src/compiler/backend/ppc/instruction-codes-ppc.h"
26 #elif V8_TARGET_ARCH_S390
27 #include "src/compiler/backend/s390/instruction-codes-s390.h"
28 #elif V8_TARGET_ARCH_RISCV64
29 #include "src/compiler/backend/riscv64/instruction-codes-riscv64.h"
30 #else
31 #define TARGET_ARCH_OPCODE_LIST(V)
32 #define TARGET_ADDRESSING_MODE_LIST(V)
33 #endif
34 #include "src/base/bit-field.h"
35 #include "src/codegen/atomic-memory-order.h"
36 #include "src/compiler/write-barrier-kind.h"
37
38 namespace v8 {
39 namespace internal {
40 namespace compiler {
41
42 // Modes for ArchStoreWithWriteBarrier below.
// Modes for ArchStoreWithWriteBarrier below, describing what kind of value
// is being stored so the write barrier can be specialized accordingly.
// (Restored: extraction artifacts had embedded line numbers in the code.)
enum class RecordWriteMode {
  kValueIsMap,           // Stored value is known to be a map.
  kValueIsPointer,       // Stored value is known to be a heap pointer.
  kValueIsEphemeronKey,  // Stored value is an ephemeron (weak table) key.
  kValueIsAny,           // No static knowledge about the stored value.
};
49
WriteBarrierKindToRecordWriteMode(WriteBarrierKind write_barrier_kind)50 inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
51 WriteBarrierKind write_barrier_kind) {
52 switch (write_barrier_kind) {
53 case kMapWriteBarrier:
54 return RecordWriteMode::kValueIsMap;
55 case kPointerWriteBarrier:
56 return RecordWriteMode::kValueIsPointer;
57 case kEphemeronKeyWriteBarrier:
58 return RecordWriteMode::kValueIsEphemeronKey;
59 case kFullWriteBarrier:
60 return RecordWriteMode::kValueIsAny;
61 case kNoWriteBarrier:
62 // Should not be passed as argument.
63 default:
64 break;
65 }
66 UNREACHABLE();
67 }
68
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
// (Restored: extraction artifacts had embedded line numbers in the code.)
#define COMMON_ARCH_OPCODE_LIST(V)                                         \
  /* Tail call opcodes are grouped together to make IsTailCall fast */     \
  /* and Arch call opcodes are grouped together to make */                 \
  /* IsCallWithDescriptorFlags fast */                                     \
  V(ArchTailCallCodeObject)                                                \
  V(ArchTailCallAddress)                                                   \
  IF_WASM(V, ArchTailCallWasm)                                             \
  /* Update IsTailCall if further TailCall opcodes are added */            \
                                                                           \
  V(ArchCallCodeObject)                                                    \
  V(ArchCallJSFunction)                                                    \
  IF_WASM(V, ArchCallWasmFunction)                                         \
  V(ArchCallBuiltinPointer)                                                \
  /* Update IsCallWithDescriptorFlags if further Call opcodes are added */ \
                                                                           \
  V(ArchPrepareCallCFunction)                                              \
  V(ArchSaveCallerRegisters)                                               \
  V(ArchRestoreCallerRegisters)                                            \
  V(ArchCallCFunction)                                                     \
  V(ArchPrepareTailCall)                                                   \
  V(ArchJmp)                                                               \
  V(ArchBinarySearchSwitch)                                                \
  V(ArchTableSwitch)                                                       \
  V(ArchNop)                                                               \
  V(ArchAbortCSADcheck)                                                    \
  V(ArchDebugBreak)                                                        \
  V(ArchComment)                                                           \
  V(ArchThrowTerminator)                                                   \
  V(ArchDeoptimize)                                                        \
  V(ArchRet)                                                               \
  V(ArchFramePointer)                                                      \
  V(ArchParentFramePointer)                                                \
  V(ArchTruncateDoubleToI)                                                 \
  V(ArchStoreWithWriteBarrier)                                             \
  V(ArchAtomicStoreWithWriteBarrier)                                       \
  V(ArchStackSlot)                                                         \
  V(ArchStackPointerGreaterThan)                                           \
  V(ArchStackCheckOffset)                                                  \
  V(AtomicLoadInt8)                                                        \
  V(AtomicLoadUint8)                                                       \
  V(AtomicLoadInt16)                                                       \
  V(AtomicLoadUint16)                                                      \
  V(AtomicLoadWord32)                                                      \
  V(AtomicStoreWord8)                                                      \
  V(AtomicStoreWord16)                                                     \
  V(AtomicStoreWord32)                                                     \
  V(AtomicExchangeInt8)                                                    \
  V(AtomicExchangeUint8)                                                   \
  V(AtomicExchangeInt16)                                                   \
  V(AtomicExchangeUint16)                                                  \
  V(AtomicExchangeWord32)                                                  \
  V(AtomicCompareExchangeInt8)                                             \
  V(AtomicCompareExchangeUint8)                                            \
  V(AtomicCompareExchangeInt16)                                            \
  V(AtomicCompareExchangeUint16)                                           \
  V(AtomicCompareExchangeWord32)                                           \
  V(AtomicAddInt8)                                                         \
  V(AtomicAddUint8)                                                        \
  V(AtomicAddInt16)                                                        \
  V(AtomicAddUint16)                                                       \
  V(AtomicAddWord32)                                                       \
  V(AtomicSubInt8)                                                         \
  V(AtomicSubUint8)                                                        \
  V(AtomicSubInt16)                                                        \
  V(AtomicSubUint16)                                                       \
  V(AtomicSubWord32)                                                       \
  V(AtomicAndInt8)                                                         \
  V(AtomicAndUint8)                                                        \
  V(AtomicAndInt16)                                                        \
  V(AtomicAndUint16)                                                       \
  V(AtomicAndWord32)                                                       \
  V(AtomicOrInt8)                                                          \
  V(AtomicOrUint8)                                                         \
  V(AtomicOrInt16)                                                         \
  V(AtomicOrUint16)                                                        \
  V(AtomicOrWord32)                                                        \
  V(AtomicXorInt8)                                                         \
  V(AtomicXorUint8)                                                        \
  V(AtomicXorInt16)                                                        \
  V(AtomicXorUint16)                                                       \
  V(AtomicXorWord32)                                                       \
  V(Ieee754Float64Acos)                                                    \
  V(Ieee754Float64Acosh)                                                   \
  V(Ieee754Float64Asin)                                                    \
  V(Ieee754Float64Asinh)                                                   \
  V(Ieee754Float64Atan)                                                    \
  V(Ieee754Float64Atanh)                                                   \
  V(Ieee754Float64Atan2)                                                   \
  V(Ieee754Float64Cbrt)                                                    \
  V(Ieee754Float64Cos)                                                     \
  V(Ieee754Float64Cosh)                                                    \
  V(Ieee754Float64Exp)                                                     \
  V(Ieee754Float64Expm1)                                                   \
  V(Ieee754Float64Log)                                                     \
  V(Ieee754Float64Log1p)                                                   \
  V(Ieee754Float64Log10)                                                   \
  V(Ieee754Float64Log2)                                                    \
  V(Ieee754Float64Pow)                                                     \
  V(Ieee754Float64Sin)                                                     \
  V(Ieee754Float64Sinh)                                                    \
  V(Ieee754Float64Tan)                                                     \
  V(Ieee754Float64Tanh)
173
// The full opcode list: the architecture-independent opcodes above plus the
// target-specific ones pulled in by the #if chain at the top of this file.
#define ARCH_OPCODE_LIST(V) \
  COMMON_ARCH_OPCODE_LIST(V) \
  TARGET_ARCH_OPCODE_LIST(V)
177
178 enum ArchOpcode {
179 #define DECLARE_ARCH_OPCODE(Name) k##Name,
180 ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
181 #undef DECLARE_ARCH_OPCODE
182 #define COUNT_ARCH_OPCODE(Name) +1
183 kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
184 #undef COUNT_ARCH_OPCODE
185 };
186
187 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
188 const ArchOpcode& ao);
189
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
// kMode_None is always present; the rest come from the target backend.
#define ADDRESSING_MODE_LIST(V) \
  V(None)                       \
  TARGET_ADDRESSING_MODE_LIST(V)
197
198 enum AddressingMode {
199 #define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
200 ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
201 #undef DECLARE_ADDRESSING_MODE
202 #define COUNT_ADDRESSING_MODE(Name) +1
203 kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
204 #undef COUNT_ADDRESSING_MODE
205 };
206
207 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
208 const AddressingMode& am);
209
// The mode of the flags continuation (see below): what the instruction does
// with the condition flags it produces.
enum FlagsMode {
  kFlags_none = 0,        // Flags are not used.
  kFlags_branch = 1,      // Branch on the condition.
  kFlags_deoptimize = 2,  // Deoptimize on the condition.
  kFlags_set = 3,         // Materialize the condition as a boolean.
  kFlags_trap = 4,        // Trap on the condition.
  kFlags_select = 5,      // Select between two values on the condition.
};
219
220 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
221 const FlagsMode& fm);
222
// The condition of flags continuation (see below).
// NOTE: Enumerators are laid out in adjacent negation pairs -- the condition
// at each even index and the one immediately after it are logical
// complements. NegateFlagsCondition relies on this ordering (it flips the
// lowest bit), so keep pairs together when adding conditions.
enum FlagsCondition {
  kEqual,
  kNotEqual,
  kSignedLessThan,
  kSignedGreaterThanOrEqual,
  kSignedLessThanOrEqual,
  kSignedGreaterThan,
  kUnsignedLessThan,
  kUnsignedGreaterThanOrEqual,
  kUnsignedLessThanOrEqual,
  kUnsignedGreaterThan,
  kFloatLessThanOrUnordered,
  kFloatGreaterThanOrEqual,
  kFloatLessThanOrEqual,
  kFloatGreaterThanOrUnordered,
  kFloatLessThan,
  kFloatGreaterThanOrEqualOrUnordered,
  kFloatLessThanOrEqualOrUnordered,
  kFloatGreaterThan,
  kUnorderedEqual,
  kUnorderedNotEqual,
  kOverflow,
  kNotOverflow,
  kPositiveOrZero,
  kNegative
};

// Condition used by ArchStackPointerGreaterThan (stack-limit checks).
static constexpr FlagsCondition kStackPointerGreaterThanCondition =
    kUnsignedGreaterThan;
253
NegateFlagsCondition(FlagsCondition condition)254 inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
255 return static_cast<FlagsCondition>(condition ^ 1);
256 }
257
258 FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
259
260 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
261 const FlagsCondition& fc);
262
// Whether a memory access may trap and needs trap-handler protection
// (encoded via AccessModeField below).
enum MemoryAccessMode {
  kMemoryAccessDirect = 0,     // Plain access; cannot trap.
  kMemoryAccessProtected = 1,  // Access registered with the trap handler.
};
267
// Operand width of an atomic memory operation: 32-bit or 64-bit word.
enum class AtomicWidth { kWord32, kWord64 };
269
AtomicWidthSize(AtomicWidth width)270 inline size_t AtomicWidthSize(AtomicWidth width) {
271 switch (width) {
272 case AtomicWidth::kWord32:
273 return 4;
274 case AtomicWidth::kWord64:
275 return 8;
276 }
277 UNREACHABLE();
278 }
279
280 // The InstructionCode is an opaque, target-specific integer that encodes
281 // what code to emit for an instruction in the code generator. It is not
282 // interesting to the register allocator, as the inputs and flags on the
283 // instructions specify everything of interest.
284 using InstructionCode = uint32_t;
285
286 // Helpers for encoding / decoding InstructionCode into the fields needed
287 // for code generation. We encode the instruction, addressing mode, and flags
288 // continuation into a single InstructionCode which is stored as part of
289 // the instruction.
290 using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
291 static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
292 "All opcodes must fit in the 9-bit ArchOpcodeField.");
293 using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
294 static_assert(
295 AddressingModeField::is_valid(kLastAddressingMode),
296 "All addressing modes must fit in the 5-bit AddressingModeField.");
297 using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
298 using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
299 using MiscField = base::BitField<int, 22, 10>;
300
301 // {MiscField} is used for a variety of things, depending on the opcode.
302 // TODO(turbofan): There should be an abstraction that ensures safe encoding and
303 // decoding. {HasMemoryAccessMode} and its uses are a small step in that
304 // direction.
305
306 // LaneSizeField and AccessModeField are helper types to encode/decode a lane
307 // size, an access mode, or both inside the overlapping MiscField.
308 using LaneSizeField = base::BitField<int, 22, 8>;
309 using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
310 // TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
311 // decoding (in CodeGenerator and InstructionScheduler). Encoding (in
312 // InstructionSelector) is not yet guarded. There are in fact instructions for
313 // which InstructionSelector does set a MemoryAccessMode but CodeGenerator
314 // doesn't care to consume it (e.g. kArm64LdrDecompressTaggedSigned). This is
315 // scary. {HasMemoryAccessMode} does not include these instructions, so they can
316 // be easily found by guarding encoding.
HasMemoryAccessMode(ArchOpcode opcode)317 inline bool HasMemoryAccessMode(ArchOpcode opcode) {
318 #if defined(TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST)
319 switch (opcode) {
320 #define CASE(Name) \
321 case k##Name: \
322 return true;
323 TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
324 #undef CASE
325 default:
326 return false;
327 }
328 #else
329 return false;
330 #endif
331 }
332
333 using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
334 using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
335
336 // AtomicWidthField overlaps with MiscField and is used for the various Atomic
337 // opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
338 // architectures are assumed to be 32bit wide.
339 using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
340
341 // AtomicMemoryOrderField overlaps with MiscField and is used for the various
342 // Atomic opcodes. This field is not used on all architectures. It is used on
343 // architectures where the codegen for kSeqCst and kAcqRel differ only by
344 // emitting fences.
345 using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
346 using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
347
348 // ParamField and FPParamField overlap with MiscField, as the latter is never
349 // used for Call instructions. These 2 fields represent the general purpose
350 // and floating point parameter counts of a direct call into C and are given 5
351 // bits each, which allow storing a number up to the current maximum parameter
352 // count, which is 20 (see kMaxCParameters defined in macro-assembler.h).
353 using ParamField = base::BitField<int, 22, 5>;
354 using FPParamField = base::BitField<int, 27, 5>;
355
356 // This static assertion serves as an early warning if we are about to exhaust
357 // the available opcode space. If we are about to exhaust it, we should start
358 // looking into options to compress some opcodes (see
359 // https://crbug.com/v8/12093) before we fully run out of available opcodes.
360 // Otherwise we risk being unable to land an important security fix or merge
361 // back fixes that add new opcodes.
362 // It is OK to temporarily reduce the required slack if we have a tracking bug
363 // to reduce the number of used opcodes again.
364 static_assert(ArchOpcodeField::kMax - kLastArchOpcode >= 16,
365 "We are running close to the number of available opcodes.");
366
367 } // namespace compiler
368 } // namespace internal
369 } // namespace v8
370
371 #endif // V8_COMPILER_BACKEND_INSTRUCTION_CODES_H_
372