// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_

#include <type_traits>

#include "src/base/memory.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"

namespace v8 {
namespace internal {

bool CpuFeatures::SupportsOptimizer() { return true; }

void RelocInfo::apply(intptr_t delta) {
  // On arm64 only internal references and immediate branches need extra work.
  if (RelocInfo::IsInternalReference(rmode_)) {
    // Absolute code pointer inside code object moves with the code object.
    intptr_t internal_ref = ReadUnalignedValue<intptr_t>(pc_);
    internal_ref += delta;  // Relocate entry.
    WriteUnalignedValue<intptr_t>(pc_, internal_ref);
  } else {
    Instruction* instr = reinterpret_cast<Instruction*>(pc_);
    if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
      Address old_target =
          reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
      Address new_target = old_target - delta;
      instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(new_target));
    }
  }
}
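
// Illustrative sketch (not part of the original source), assuming delta is
// the distance the code object moved: an absolute internal reference must
// follow the object, hence "+= delta". A PC-relative branch is re-read at
// its new pc, so ImmPCOffsetTarget() yields old_target + delta; writing
// back "read_value - delta" restores the original absolute destination:
//
//   old pc = 0x1000, encoded offset = +0x40  =>  target = 0x1040
//   after moving by delta = 0x2000, pc = 0x3000 and the instruction reads
//   0x3040; 0x3040 - 0x2000 = 0x1040, the target we want to keep.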

inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}

inline bool CPURegister::IsZero() const {
  DCHECK(is_valid());
  return IsRegister() && (code() == kZeroRegCode);
}

inline bool CPURegister::IsSP() const {
  DCHECK(is_valid());
  return IsRegister() && (code() == kSPRegInternalCode);
}

inline void CPURegList::Combine(const CPURegList& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.RegisterSizeInBits() == size_);
  list_ |= other.list_;
}

inline void CPURegList::Remove(const CPURegList& other) {
  if (other.type() == type_) {
    list_ &= ~other.list_;
  }
}

inline void CPURegList::Combine(const CPURegister& other) {
  DCHECK(other.type() == type_);
  DCHECK(other.SizeInBits() == size_);
  Combine(other.code());
}

inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}

inline void CPURegList::Combine(int code) {
  DCHECK(CPURegister::Create(code, size_, type_).is_valid());
  list_ |= (1ULL << code);
  DCHECK(is_valid());
}

inline void CPURegList::Remove(int code) {
  DCHECK(CPURegister::Create(code, size_, type_).is_valid());
  list_ &= ~(1ULL << code);
}

inline Register Register::XRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return sp;
  } else {
    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
    return Register::Create(code, kXRegSizeInBits);
  }
}

inline Register Register::WRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) {
    return wsp;
  } else {
    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
    return Register::Create(code, kWRegSizeInBits);
  }
}

inline VRegister VRegister::BRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kBRegSizeInBits);
}

inline VRegister VRegister::HRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kHRegSizeInBits);
}

inline VRegister VRegister::SRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kSRegSizeInBits);
}

inline VRegister VRegister::DRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kDRegSizeInBits);
}

inline VRegister VRegister::QRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kQRegSizeInBits);
}

inline VRegister VRegister::VRegFromCode(unsigned code) {
  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
  return VRegister::Create(code, kVRegSizeInBits);
}

inline Register CPURegister::W() const {
  DCHECK(IsRegister());
  return Register::WRegFromCode(code());
}

inline Register CPURegister::Reg() const {
  DCHECK(IsRegister());
  return Register::Create(code(), reg_size_);
}

inline VRegister CPURegister::VReg() const {
  DCHECK(IsVRegister());
  return VRegister::Create(code(), reg_size_);
}

inline Register CPURegister::X() const {
  DCHECK(IsRegister());
  return Register::XRegFromCode(code());
}

inline VRegister CPURegister::V() const {
  DCHECK(IsVRegister());
  return VRegister::VRegFromCode(code());
}

inline VRegister CPURegister::B() const {
  DCHECK(IsVRegister());
  return VRegister::BRegFromCode(code());
}

inline VRegister CPURegister::H() const {
  DCHECK(IsVRegister());
  return VRegister::HRegFromCode(code());
}

inline VRegister CPURegister::S() const {
  DCHECK(IsVRegister());
  return VRegister::SRegFromCode(code());
}

inline VRegister CPURegister::D() const {
  DCHECK(IsVRegister());
  return VRegister::DRegFromCode(code());
}

inline VRegister CPURegister::Q() const {
  DCHECK(IsVRegister());
  return VRegister::QRegFromCode(code());
}

// Immediate.
// The default initializer handles integral and enum types.
template <typename T>
struct ImmediateInitializer {
  static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NO_INFO; }
  static inline int64_t immediate_for(T t) {
    STATIC_ASSERT(sizeof(T) <= 8);
    STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value);
    return t;
  }
};

template <>
struct ImmediateInitializer<Smi> {
  static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NO_INFO; }
  static inline int64_t immediate_for(Smi t) {
    return static_cast<int64_t>(t.ptr());
  }
};

template <>
struct ImmediateInitializer<ExternalReference> {
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return static_cast<int64_t>(t.address());
  }
};

template <typename T>
Immediate::Immediate(Handle<T> handle, RelocInfo::Mode mode)
    : value_(static_cast<intptr_t>(handle.address())), rmode_(mode) {
  DCHECK(RelocInfo::IsEmbeddedObjectMode(mode));
}

template <typename T>
Immediate::Immediate(T t)
    : value_(ImmediateInitializer<T>::immediate_for(t)),
      rmode_(ImmediateInitializer<T>::rmode_for(t)) {}

template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
    : value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
  STATIC_ASSERT(std::is_integral<T>::value);
}
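
// Usage sketch (illustrative, not from the original source): the
// ImmediateInitializer trait derives the relocation mode from the operand
// type, so plain integers and Smis carry no reloc info while external
// references are tagged for relocation:
//
//   Immediate a(42);        // value 42, rmode RelocInfo::NO_INFO
//   Immediate b(ExternalReference::isolate_address(isolate));
//                           // rmode RelocInfo::EXTERNAL_REFERENCE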

template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}

template <typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(t, rmode), reg_(NoReg) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}
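
// Usage sketch (illustrative): Operand(x1, LSL, 4) denotes the shifted
// register "x1, LSL #4", e.g. as the right-hand operand of an add. The
// DCHECKs above reject shift amounts that exceed the register width and
// any shift applied to sp.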

Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : immediate_(0),
      reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(reg.is_valid());
  DCHECK_LE(shift_amount, 4);
  DCHECK(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
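
// Usage sketch (illustrative): Operand(w1, UXTW, 2) denotes "w1, UXTW #2",
// i.e. zero-extend w1 to 64 bits, then shift left by two. The left shift
// is capped at 4, matching the A64 extended-register encodings.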

bool Operand::IsHeapObjectRequest() const {
  DCHECK_IMPLIES(heap_object_request_.has_value(), reg_ == NoReg);
  DCHECK_IMPLIES(heap_object_request_.has_value(),
                 immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT ||
                     immediate_.rmode() == RelocInfo::CODE_TARGET);
  return heap_object_request_.has_value();
}

HeapObjectRequest Operand::heap_object_request() const {
  DCHECK(IsHeapObjectRequest());
  return *heap_object_request_;
}

bool Operand::IsImmediate() const {
  return reg_ == NoReg && !IsHeapObjectRequest();
}

bool Operand::IsShiftedRegister() const {
  return reg_.is_valid() && (shift_ != NO_SHIFT);
}

bool Operand::IsExtendedRegister() const {
  return reg_.is_valid() && (extend_ != NO_EXTEND);
}

bool Operand::IsZero() const {
  if (IsImmediate()) {
    return ImmediateValue() == 0;
  } else {
    return reg().IsZero();
  }
}

Operand Operand::ToExtendedRegister() const {
  DCHECK(IsShiftedRegister());
  DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
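
// Rationale sketch (illustrative): an LSL by at most 4 is expressible as an
// extend with the same left shift, UXTX for an X register and UXTW for a W
// register, so e.g. Operand(x1, LSL, 2).ToExtendedRegister() yields
// Operand(x1, UXTX, 2). This produces the extended-register form for
// instructions that accept only that encoding.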

Operand Operand::ToW() const {
  if (IsShiftedRegister()) {
    DCHECK(reg_.Is64Bits());
    return Operand(reg_.W(), shift(), shift_amount());
  } else if (IsExtendedRegister()) {
    DCHECK(reg_.Is64Bits());
    return Operand(reg_.W(), extend(), shift_amount());
  }
  DCHECK(IsImmediate());
  return *this;
}

Immediate Operand::immediate_for_heap_object_request() const {
  DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
          immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||
         (heap_object_request().kind() == HeapObjectRequest::kStringConstant &&
          immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT));
  return immediate_;
}

Immediate Operand::immediate() const {
  DCHECK(IsImmediate());
  return immediate_;
}

int64_t Operand::ImmediateValue() const {
  DCHECK(IsImmediate());
  return immediate_.value();
}

RelocInfo::Mode Operand::ImmediateRMode() const {
  DCHECK(IsImmediate() || IsHeapObjectRequest());
  return immediate_.rmode();
}

Register Operand::reg() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}

Shift Operand::shift() const {
  DCHECK(IsShiftedRegister());
  return shift_;
}

Extend Operand::extend() const {
  DCHECK(IsExtendedRegister());
  return extend_;
}

unsigned Operand::shift_amount() const {
  DCHECK(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}

MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  DCHECK(base.Is64Bits() && !base.IsZero());
}

MemOperand::MemOperand(Register base, Register regoffset, Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(!regoffset.IsSP());
  DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}

MemOperand::MemOperand(Register base, Register regoffset, Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  DCHECK(base.Is64Bits() && !base.IsZero());
  DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
  DCHECK(shift == LSL);
}
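
// Usage sketch (illustrative): MemOperand(x0, x1, LSL, 3) addresses
// x0 + (x1 << 3), the usual scaled index for 8-byte elements, while
// MemOperand(x0, 16, PostIndex) accesses memory at x0 and then advances
// x0 by 16.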

MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
  DCHECK(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.ImmediateValue();
  } else if (offset.IsShiftedRegister()) {
    DCHECK((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
    DCHECK(shift_ == LSL);
  } else {
    DCHECK(offset.IsExtendedRegister());
    DCHECK(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    DCHECK(!regoffset_.IsSP());
    DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}

bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_ == NoReg;
}

bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && regoffset_ != NoReg;
}

bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }

bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }

void Assembler::Unreachable() { debug("UNREACHABLE", __LINE__, BREAK); }

Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}

// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc, Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Memory<Address>(target_pointer_address_at(pc));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
  }
}

Tagged_t Assembler::target_compressed_address_at(Address pc,
                                                 Address constant_pool) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  return Memory<Tagged_t>(target_pointer_address_at(pc));
}

Handle<CodeT> Assembler::code_target_object_handle_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Handle<CodeT>(reinterpret_cast<Address*>(
        Assembler::target_address_at(pc, 0 /* unused */)));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
    return Handle<CodeT>::cast(
        GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
  }
}

AssemblerBase::EmbeddedObjectIndex
Assembler::embedded_object_index_referenced_from(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    STATIC_ASSERT(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t));
    return Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc));
  } else {
    DCHECK(instr->IsLdrLiteralW());
    return Memory<uint32_t>(target_pointer_address_at(pc));
  }
}

void Assembler::set_embedded_object_index_referenced_from(
    Address pc, EmbeddedObjectIndex data) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)) = data;
  } else {
    DCHECK(instr->IsLdrLiteralW());
    DCHECK(is_uint32(data));
    WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc),
                                  static_cast<uint32_t>(data));
  }
}

Handle<HeapObject> Assembler::target_object_handle_at(Address pc) {
  return GetEmbeddedObject(
      Assembler::embedded_object_index_referenced_from(pc));
}

Address Assembler::runtime_entry_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    return Assembler::target_address_at(pc, 0 /* unused */);
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return instr->ImmPCOffset() + options().code_range_base;
  }
}

int Assembler::deserialization_special_target_size(Address location) {
  Instruction* instr = reinterpret_cast<Instruction*>(location);
  if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
    return kSpecialTargetSize;
  } else {
    DCHECK_EQ(instr->InstructionBits(), 0);
    return kSystemPointerSize;
  }
}

void Assembler::deserialization_set_special_target_at(Address location,
                                                      Code code,
                                                      Address target) {
  Instruction* instr = reinterpret_cast<Instruction*>(location);
  if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
    if (target == 0) {
      // We are simply wiping the target out for serialization. Set the offset
      // to zero instead.
      target = location;
    }
    instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
    FlushInstructionCache(location, kInstrSize);
  } else {
    DCHECK_EQ(instr->InstructionBits(), 0);
    Memory<Address>(location) = target;
    // Intuitively, we would think it is necessary to always flush the
    // instruction cache after patching a target address in the code. However,
    // in this case, only the constant pool contents change. The instruction
    // accessing the constant pool remains unchanged, so a flush is not
    // required.
  }
}

void Assembler::deserialization_set_target_internal_reference_at(
    Address pc, Address target, RelocInfo::Mode mode) {
  WriteUnalignedValue<Address>(pc, target);
}

void Assembler::set_target_address_at(Address pc, Address constant_pool,
                                      Address target,
                                      ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsLdrLiteralX()) {
    Memory<Address>(target_pointer_address_at(pc)) = target;
    // Intuitively, we would think it is necessary to always flush the
    // instruction cache after patching a target address in the code. However,
    // in this case, only the constant pool contents change. The instruction
    // accessing the constant pool remains unchanged, so a flush is not
    // required.
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    if (target == 0) {
      // We are simply wiping the target out for serialization. Set the offset
      // to zero instead.
      target = pc;
    }
    instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
      FlushInstructionCache(pc, kInstrSize);
    }
  }
}

void Assembler::set_target_compressed_address_at(
    Address pc, Address constant_pool, Tagged_t target,
    ICacheFlushMode icache_flush_mode) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  CHECK(instr->IsLdrLiteralW());
  Memory<Tagged_t>(target_pointer_address_at(pc)) = target;
}

int RelocInfo::target_address_size() {
  if (IsCodedSpecially()) {
    return Assembler::kSpecialTargetSize;
  } else {
    Instruction* instr = reinterpret_cast<Instruction*>(pc_);
    DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
    return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize;
  }
}

Address RelocInfo::target_address() {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
  return Assembler::target_address_at(pc_, constant_pool_);
}

Address RelocInfo::target_address_address() {
  DCHECK(HasTargetAddressAddress());
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  // Read the address of the word containing the target_address in an
  // instruction stream.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target.
  // For an instruction like B/BL, where the target bits are mixed into the
  // instruction bits, the size of the target will be zero, indicating that the
  // serializer should not step forward in memory after a target is resolved
  // and written.
  // For LDR literal instructions, we can skip up to the constant pool entry
  // address. We make sure that RelocInfo is ordered by the
  // target_address_address so that we do not skip over any relocatable
  // instruction sequences.
  if (instr->IsLdrLiteralX()) {
    return constant_pool_entry_address();
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return pc_;
  }
}

Address RelocInfo::constant_pool_entry_address() {
  DCHECK(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}

HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
  DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
  if (IsDataEmbeddedObject(rmode_)) {
    return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
  } else if (IsCompressedEmbeddedObject(rmode_)) {
    Tagged_t compressed =
        Assembler::target_compressed_address_at(pc_, constant_pool_);
    DCHECK(!HAS_SMI_TAG(compressed));
    Object obj(DecompressTaggedPointer(cage_base, compressed));
    // Embedding of compressed Code objects must not happen when external code
    // space is enabled, because CodeDataContainers must be used instead.
    DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
                   !IsCodeSpaceObject(HeapObject::cast(obj)));
    return HeapObject::cast(obj);
  } else {
    return HeapObject::cast(
        Object(Assembler::target_address_at(pc_, constant_pool_)));
  }
}

Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
  if (IsDataEmbeddedObject(rmode_)) {
    return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
  } else if (IsEmbeddedObjectMode(rmode_)) {
    return origin->target_object_handle_at(pc_);
  } else {
    DCHECK(IsCodeTarget(rmode_));
    return origin->code_target_object_handle_at(pc_);
  }
}

void RelocInfo::set_target_object(Heap* heap, HeapObject target,
                                  WriteBarrierMode write_barrier_mode,
                                  ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
  if (IsDataEmbeddedObject(rmode_)) {
    WriteUnalignedValue(pc_, target.ptr());
    // No need to flush icache since no instructions were changed.
  } else if (IsCompressedEmbeddedObject(rmode_)) {
    Assembler::set_target_compressed_address_at(
        pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
  } else {
    DCHECK(IsFullEmbeddedObject(rmode_));
    Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
                                     icache_flush_mode);
  }
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
      !FLAG_disable_write_barriers) {
    WriteBarrierForCode(host(), this, target);
  }
}

Address RelocInfo::target_external_reference() {
  DCHECK(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, constant_pool_);
}

void RelocInfo::set_target_external_reference(
    Address target, ICacheFlushMode icache_flush_mode) {
  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
  Assembler::set_target_address_at(pc_, constant_pool_, target,
                                   icache_flush_mode);
}

Address RelocInfo::target_internal_reference() {
  DCHECK(rmode_ == INTERNAL_REFERENCE);
  return ReadUnalignedValue<Address>(pc_);
}

Address RelocInfo::target_internal_reference_address() {
  DCHECK(rmode_ == INTERNAL_REFERENCE);
  return pc_;
}

Address RelocInfo::target_runtime_entry(Assembler* origin) {
  DCHECK(IsRuntimeEntry(rmode_));
  return origin->runtime_entry_at(pc_);
}

void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode write_barrier_mode,
                                         ICacheFlushMode icache_flush_mode) {
  DCHECK(IsRuntimeEntry(rmode_));
  if (target_address() != target) {
    set_target_address(target, write_barrier_mode, icache_flush_mode);
  }
}

Address RelocInfo::target_off_heap_target() {
  DCHECK(IsOffHeapTarget(rmode_));
  return Assembler::target_address_at(pc_, constant_pool_);
}

void RelocInfo::WipeOut() {
  DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
         IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
  if (IsInternalReference(rmode_)) {
    WriteUnalignedValue<Address>(pc_, kNullAddress);
  } else if (IsCompressedEmbeddedObject(rmode_)) {
    Assembler::set_target_compressed_address_at(pc_, constant_pool_,
                                                kNullAddress);
  } else {
    Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
  }
}

LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  DCHECK(rt.is_valid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kBRegSizeInBits:
        return LDR_b;
      case kHRegSizeInBits:
        return LDR_h;
      case kSRegSizeInBits:
        return LDR_s;
      case kDRegSizeInBits:
        return LDR_d;
      default:
        DCHECK(rt.IsQ());
        return LDR_q;
    }
  }
}

LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  DCHECK(rt.is_valid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kBRegSizeInBits:
        return STR_b;
      case kHRegSizeInBits:
        return STR_h;
      case kSRegSizeInBits:
        return STR_s;
      case kDRegSizeInBits:
        return STR_d;
      default:
        DCHECK(rt.IsQ());
        return STR_q;
    }
  }
}

LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
                                      LoadStorePairLBit);
}

LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    DCHECK(rt.IsVRegister());
    switch (rt.SizeInBits()) {
      case kSRegSizeInBits:
        return STP_s;
      case kDRegSizeInBits:
        return STP_d;
      default:
        DCHECK(rt.IsQ());
        return STP_q;
    }
  }
}

LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
  } else {
    DCHECK(rt.IsVRegister());
    return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
  }
}

int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  DCHECK_EQ(kStartOfLabelLinkChain, 0);
  int offset = LinkAndGetByteOffsetTo(label);
  DCHECK(IsAligned(offset, kInstrSize));
  return offset >> kInstrSizeLog2;
}

Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
}

Instr Assembler::Cond(Condition cond) { return cond << Condition_offset; }

Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
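
// Worked example (illustrative): ADR-style PC-relative immediates split the
// 21-bit offset into a low field (immlo, the ImmPCRelLo_width lowest bits)
// and a high field (immhi, the remaining bits), which occupy separate parts
// of the instruction. With the two-bit A64 immlo field, imm21 = 5 (0b101)
// gives immlo = 0b01 and immhi = 0b1.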

Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}

Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}

Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}

Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}

Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  DCHECK(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
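
// Worked example (illustrative): TBZ/TBNZ encode the tested bit position in
// two fields, b5 (bit 5 of bit_pos, which also distinguishes W from X
// registers) and b40 (bits 4:0). For bit_pos = 33 (0b100001), b40 holds
// 0b00001 and b5 holds 1.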

Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}

Instr Assembler::ImmAddSub(int imm) {
  DCHECK(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    imm <<= ImmAddSub_offset;
  } else {
    imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
  return imm;
}
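
// Worked example (illustrative): ADD/SUB immediates are 12 bits wide with
// an optional LSL #12. imm = 42 fits in 12 bits and is encoded directly;
// imm = 0x5000 has bits set only above bit 11, so it is encoded as 0x5
// with the shift bit set.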

Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}

Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  DCHECK(is_uint6(immr));
  return immr << ImmR_offset;
}

Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(is_uint6(imms));
  DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}

Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}

Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}

Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}

Instr Assembler::ShiftDP(Shift shift) {
  DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}

Instr Assembler::ImmDPShift(unsigned amount) {
  DCHECK(is_uint6(amount));
  return amount << ImmDPShift_offset;
}

Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}

Instr Assembler::ImmExtendShift(unsigned left_shift) {
  DCHECK_LE(left_shift, 4);
  return left_shift << ImmExtendShift_offset;
}

Instr Assembler::ImmCondCmp(unsigned imm) {
  DCHECK(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}

Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}

Instr Assembler::ImmLSUnsigned(int imm12) {
  DCHECK(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}

Instr Assembler::ImmLS(int imm9) {
  DCHECK(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}

Instr Assembler::ImmLSPair(int imm7, unsigned size) {
  DCHECK_EQ(imm7,
            static_cast<int>(static_cast<uint32_t>(imm7 >> size) << size));
  int scaled_imm7 = imm7 >> size;
  DCHECK(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}

Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  DCHECK(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}

Instr Assembler::ImmException(int imm16) {
  DCHECK(is_uint16(imm16));
  return imm16 << ImmException_offset;
}

Instr Assembler::ImmSystemRegister(int imm15) {
  DCHECK(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}

Instr Assembler::ImmHint(int imm7) {
  DCHECK(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}

Instr Assembler::ImmBarrierDomain(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}

Instr Assembler::ImmBarrierType(int imm2) {
  DCHECK(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}

unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
  DCHECK((LSSize_offset + LSSize_width) == (kInstrSize * 8));
  unsigned size = static_cast<Instr>(op >> LSSize_offset);
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}
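
// Example (illustrative): Q-register loads and stores have a "size" field
// of 0 but an "opc" field of 2 or 3, so the access size is corrected to
// kQRegSizeLog2, i.e. log2 of a 16-byte access.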

Instr Assembler::ImmMoveWide(int imm) {
  DCHECK(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}

Instr Assembler::ShiftMoveWide(int shift) {
  DCHECK(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}

Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }

Instr Assembler::FPScale(unsigned scale) {
  DCHECK(is_uint6(scale));
  return scale << FPScale_offset;
}

const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}

inline void Assembler::CheckBufferSpace() {
  DCHECK_LT(pc_, buffer_start_ + buffer_->size());
  if (V8_UNLIKELY(buffer_space() < kGap)) {
    GrowBuffer();
  }
}

V8_INLINE void Assembler::CheckBuffer() {
  CheckBufferSpace();
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  constpool_.MaybeCheck();
}

EnsureSpace::EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
  assembler->CheckBufferSpace();
}

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_