// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/codegen/arm64/instructions-arm64.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"

namespace v8 {
namespace internal {

bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q:
        return true;
      default:
        return false;
    }
  }
}

bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q:
        return true;
      default:
        return false;
    }
  }
}

static uint64_t RotateRight(uint64_t value, unsigned int rotate,
                            unsigned int width) {
  DCHECK_LE(width, 64);
  rotate &= 63;
  if (rotate == 0) return value;
  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
         (value >> rotate);
}
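// Worked example (follows directly from the definition above):
// RotateRight(0b0011, 1, 4) == 0b1001 -- the bit rotated out at the bottom
// reappears at bit (width - 1).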

static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value,
                                    unsigned width) {
  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  uint64_t result = value & ((1ULL << width) - 1ULL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
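// Worked example: RepeatBitsAcrossReg(32, 0b01, 2) tiles the 2-bit pattern
// 0b01 across a W-sized value, producing 0x55555555.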

// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, when the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32- or 64-bit value, depending on destination register width.
  //
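  // For example, N=0, imms=0b111100, immr=0b000000 selects the 2-bit pattern
  // 0b01 with no rotation; repeated, it yields 0x55555555 for a W destination
  // and 0x5555555555555555 for an X destination.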

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1ULL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(
            reg_size, RotateRight(bits, imm_r & mask, width), width);
      }
    }
  }
  UNREACHABLE();
}

uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
}

float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }

double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }

float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }

double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}

unsigned CalcLSDataSize(LoadStoreOp op) {
  DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
            kInstrSize * 8);
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}
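// For instance, the 128-bit LDR_q encodes a "size" field of 0b00 with an opc
// of at least 2, so the size computed from the "size" field alone is promoted
// to kQRegSizeLog2, i.e. a 16-byte access.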

unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
  static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
  switch (op) {
    case STP_q:
    case LDP_q:
      return kQRegSizeLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d:
      return kXRegSizeLog2;
    default:
      return kWRegSizeLog2;
  }
}
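// The result is the log2 of the per-register access size: for example,
// CalcLSPairDataSize(LDP_x) == kXRegSizeLog2 (3), since each register of the
// pair transfers eight bytes.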

int64_t Instruction::ImmPCOffset() {
  int64_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() * kInstrSize;
  } else if (IsUnresolvedInternalReference()) {
    // Internal references are always word-aligned.
    offset = ImmUnresolvedInternalReference() * kInstrSize;
  } else {
    // Load literal (offset from PC).
    DCHECK(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() * kInstrSize;
  }
  return offset;
}
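// For example, a conditional branch whose imm19 field holds 4 has an
// ImmPCOffset() of 4 * kInstrSize = 16 bytes relative to the branch itself.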

Instruction* Instruction::ImmPCOffsetTarget() {
  return InstructionAtOffset(ImmPCOffset());
}

bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     ptrdiff_t offset) {
  DCHECK_EQ(offset % kInstrSize, 0);
  return is_intn(offset / kInstrSize, ImmBranchRangeBitwidth(branch_type));
}

bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}

void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
                                       Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(options, target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else if (IsUnresolvedInternalReference()) {
    SetUnresolvedInternalReferenceImmTarget(options, target);
  } else {
    // Load literal (offset from PC).
    SetImmLLiteral(target);
  }
}

void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
                                    Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  DCHECK(IsAdr());

  ptrdiff_t target_offset = DistanceTo(target);
  Instr imm;
  if (Instruction::IsValidPCRelOffset(target_offset)) {
    imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
    SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
  } else {
    PatchingAssembler patcher(options, reinterpret_cast<byte*>(this),
                              PatchingAssembler::kAdrFarPatchableNInstrs);
    patcher.PatchAdrFar(target_offset);
  }
}

void Instruction::SetBranchImmTarget(Instruction* target) {
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(IsValidImmPCOffset(BranchType(), DistanceTo(target)));
  int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default:
      UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}

void Instruction::SetUnresolvedInternalReferenceImmTarget(
    const AssemblerOptions& options, Instruction* target) {
  DCHECK(IsUnresolvedInternalReference());
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
  int32_t target_offset =
      static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
  uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
  uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);

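  // Stash the 32-bit instruction offset, split into its high and low halves,
  // in the 16-bit immediate fields of two consecutive BRK instructions.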
  PatchingAssembler patcher(options, reinterpret_cast<byte*>(this), 2);
  patcher.brk(high16);
  patcher.brk(low16);
}

void Instruction::SetImmLLiteral(Instruction* source) {
  DCHECK(IsLdrLiteral());
  DCHECK(IsAligned(DistanceTo(source), kInstrSize));
  DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
  Instr imm = Assembler::ImmLLiteral(
      static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(IntegerFormatMap());
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(format);
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(format0, format1);
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1,
                                     const NEONFormatMap* format2) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(format0, format1, format2);
}

void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
                                      const NEONFormatMap* format1,
                                      const NEONFormatMap* format2) {
  DCHECK_NOT_NULL(format0);
  formats_[0] = format0;
  formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
  formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
  // Support the four-parameter form (e.g. ld4r)
  // to avoid using positional arguments in DisassemblingDecoder.
  // See: https://crbug.com/v8/10365
  formats_[3] = formats_[2];
}

void NEONFormatDecoder::SetFormatMap(unsigned index,
                                     const NEONFormatMap* format) {
  DCHECK_LT(index, arraysize(formats_));
  DCHECK_NOT_NULL(format);
  formats_[index] = format;
}

const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
  return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder,
                    kPlaceholder);
}

const char* NEONFormatDecoder::Substitute(const char* string,
                                          SubstitutionMode mode0,
                                          SubstitutionMode mode1,
                                          SubstitutionMode mode2,
                                          SubstitutionMode mode3) {
  snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
           GetSubstitute(1, mode1), GetSubstitute(2, mode2),
           GetSubstitute(3, mode3));
  return form_buffer_;
}
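// As a sketch: with both active maps decoding to the 4S format, a format
// string such as "'Vd.%s, 'Vn.%s" (hypothetical) comes back as
// "'Vd.4s, 'Vn.4s". Surplus substitutes passed to snprintf are simply
// ignored when the string has fewer than four conversions.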

const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
  if ((instrbits_ & NEON_Q) != 0) {
    snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
    return mne_buffer_;
  }
  return mnemonic;
}
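// For example, Mnemonic("saddl") yields "saddl2" when the instruction's Q bit
// is set, matching the architectural naming of upper-half NEON variants.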

VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
  return GetVectorFormat(formats_[format_index]);
}

VectorFormat NEONFormatDecoder::GetVectorFormat(
    const NEONFormatMap* format_map) {
  static const VectorFormat vform[] = {
      kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
      kFormat2S,        kFormat4S, kFormat1D,  kFormat2D, kFormatB,
      kFormatH,         kFormatS,  kFormatD};
  DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
  return vform[GetNEONFormat(format_map)];
}

const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
  if (mode == kFormat) {
    return NEONFormatAsString(GetNEONFormat(formats_[index]));
  }
  DCHECK_EQ(mode, kPlaceholder);
  return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}

NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
  return format_map->map[PickBits(format_map->bits)];
}

const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
  static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
                                  "2s",        "4s", "1d",  "2d", "b",
                                  "h",         "s",  "d"};
  DCHECK_LT(format, arraysize(formats));
  return formats[format];
}

const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
  DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
         (format == NF_D) || (format == NF_UNDEF));
  static const char* formats[] = {
      "undefined", "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined", "'B",
      "'H",        "'S",        "'D"};
  return formats[format];
}

uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
  uint8_t result = 0;
  for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
    if (bits[b] == 0) break;
    result <<= 1;
    result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
  }
  return result;
}
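// For example, given a (hypothetical) map with bits = {30, 22}, an instruction
// with bit 30 set and bit 22 clear produces 0b10: listed bits are concatenated
// MSB-first into the map index, and a zero entry terminates the list.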
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64