// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS

#include "src/arm64/assembler-arm64-inl.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures implementation.

void CpuFeatures::ProbeImpl(bool cross_compile) {
  if (cross_compile) {
    // Always align csp in cross-compiled code. This is safe and ensures that
    // csp will always be aligned if the feature is enabled by probing at
    // runtime.
    if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
  } else {
    CPU cpu;
    if (FLAG_enable_always_align_csp && (cpu.implementer() == CPU::NVIDIA ||
                                         FLAG_debug_code)) {
      supported_ |= 1u << ALWAYS_ALIGN_CSP;
    }
  }
}
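
// Usage sketch (not in the original file): once probing has run, code
// elsewhere can test the probed bit through the usual CpuFeatures query,
// assuming the standard CpuFeatures::IsSupported() accessor, e.g.
//   if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
//     // Emit code that keeps csp 16-byte aligned on every update.
//   }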


void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }


// -----------------------------------------------------------------------------
// CPURegList utilities.

CPURegister CPURegList::PopLowestIndex() {
  ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  ASSERT((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  ASSERT((1 << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}
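
// Usage sketch (not in the original file): draining a list from both ends.
// GetCalleeSaved() below covers x19-x29, so:
//   CPURegList list = CPURegList::GetCalleeSaved();
//   CPURegister lowest = list.PopLowestIndex();    // x19
//   CPURegister highest = list.PopHighestIndex();  // x29
// Popping an empty list returns NoCPUReg instead of asserting.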


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    ASSERT(type() == CPURegister::kNoRegister);
    ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
// code to index in the safepoint register slots. Any change here can affect
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * kInstructionSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  UNIMPLEMENTED();
}


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
  return NoReg;
}


bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      ASSERT(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
    CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
    CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  ASSERT(number_of_valid_regs >= number_of_unique_regs);
  ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
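
// Illustrative cases (not in the original file):
//   AreAliased(x0, x1, x0)  // true: x0 is passed twice.
//   AreAliased(x0, w0)      // true: w0 is the 32-bit view of x0, so they
//                           // share a register code.
//   AreAliased(x0, d0)      // false: core and FP registers are tracked in
//                           // separate lists.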


bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  ASSERT(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}
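
// Illustrative cases (not in the original file):
//   AreSameSizeAndType(x0, x1)  // true: two 64-bit core registers.
//   AreSameSizeAndType(x0, w1)  // false: the sizes differ.
//   AreSameSizeAndType(x0, d0)  // false: the types differ.
// Invalid registers after reg1 are ignored, so unused trailing arguments
// never affect the result.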


void Immediate::InitializeHandle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify that all Objects referred to by the code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    value_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    value_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation(const Assembler* assembler) const {
  RelocInfo::Mode rmode = immediate_.rmode();

  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return assembler->serializer_enabled();
  }

  return !RelocInfo::IsNone(rmode);
}


// Assembler

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(const_pool_blocked_nesting_ == 0);
  ASSERT(veneer_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  ASSERT(const_pool_blocked_nesting_ == 0);
  ASSERT(veneer_pool_blocked_nesting_ == 0);
  ASSERT(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  num_pending_reloc_info_ = 0;
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
  first_const_pool_use_ = -1;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_pending_reloc_info_ == 0);

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                       reloc_info_writer.pos();
    desc->origin = this;
  }
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
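
// Usage sketch (not in the original file): since instructions are 4 bytes,
// aligning to 8 emits at most one nop.
//   Align(8);  // pc_offset() is now a multiple of 8.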


void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
  if (label->is_linked()) {
    int linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      Instruction * link = InstructionAt(linkoffset);
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  ASSERT(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  ASSERT(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
    }

  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the chain.
    prev_link->SetImmPCOffsetTarget(prev_link);

  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(label_veneer);
        link = next_link;
      }
    } else {
      // The assert below will fire.
      // Some other work could be attempted to fix up the chain, but it would
      // be rather complicated. If we crash here, we may want to consider
      // using another mechanism than a chain of branches.
      //
      // Note that this situation currently should not happen, as we always call
      // this function with a veneer to the target label.
      // However this could happen with a MacroAssembler in the following state:
      //    [previous code]
      //    B(label);
      //    [20KB code]
      //    Tbz(label);   // First tbz. Pointing to unconditional branch.
      //    [20KB code]
      //    Tbz(label);   // Second tbz. Pointing to the first tbz.
      //    [more code]
      // and this function is called to remove the first tbz from the label link
      // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
      // the unconditional branch.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
      UNREACHABLE();
    }
  }

  CheckLabelLinkChain(label);
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  ASSERT(!label->is_near_linked());
  ASSERT(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // If the label is linked, the link chain looks something like this:
  //
  // |--I----I-------I-------L
  // |---------------------->| pc_offset
  // |-------------->|         linkoffset = label->pos()
  //         |<------|         link->ImmPCOffset()
  // |------>|                 prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    ASSERT(linkoffset >= 0);
    ASSERT(linkoffset < pc_offset());
    ASSERT((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    ASSERT(prevlinkoffset >= 0);

    // Update the link to point to the label.
    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  ASSERT(label->is_bound());
  ASSERT(!label->is_linked());
}


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  ASSERT(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    ASSERT(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      ASSERT(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}


void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  ASSERT(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction * link = InstructionAt(link_offset);
    link_pcoffset = link->ImmPCOffset();

    // ADR instructions are not handled by veneers.
    if (link->IsImmBranch()) {
      int max_reachable_pc = InstructionOffset(link) +
          Instruction::ImmBranchRange(link->BranchType());
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    ASSERT(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be resolved when the label is bound, normally
    // just after all the associated info has been deleted.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks happening by setting the next check to
    // the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
    // Two cases:
    //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
    //    still blocked
    //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
    //    will trigger a check.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));

  return result;
}


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the constants.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message =
        reinterpret_cast<const char*>(
            instr->InstructionAtOffset(kDebugMessageOffset));
    int size = kDebugMessageOffset + strlen(message) + 1;
    return RoundUp(size, kInstructionSize) / kInstructionSize;
  }
  // Same for printf support, see MacroAssembler::CallPrintf().
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstructionSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}


void Assembler::ConstantPoolMarker(uint32_t size) {
  ASSERT(is_const_pool_blocked());
  // + 1 is for the crash guard.
  Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
}
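
// Resulting pool header (illustrative sketch, not in the original file), as
// matched by IsConstantPoolAt() above for a pool of N words:
//   ldr xzr, #<N + 1>   // marker; the +1 counts the guard below
//   blr xzr             // guard; faults if execution falls into the pool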


void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}


void Assembler::ConstantPoolGuard() {
#ifdef DEBUG
  // Currently this is only used after a constant pool marker.
  ASSERT(is_const_pool_blocked());
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  ASSERT(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif
  EmitPoolGuard();
}


void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}


void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    ASSERT(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}


void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  ASSERT(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  ASSERT(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}

// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}

// Bitfield operations.
void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  ASSERT(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}
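
// Aliasing sketch (not in the original file): the familiar shift and bitfield
// extract operations are ubfm with particular immr/imms values, e.g. for a
// 64-bit register:
//   lsr x0, x1, #shift          ==  ubfm(x0, x1, shift, 63)
//   ubfx x0, x1, #lsb, #width   ==  ubfm(x0, x1, lsb, lsb + width - 1)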


void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register &rd, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, NegateCondition(cond));
}
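
// Expansion sketch (not in the original file): cset materializes a condition
// as 0 or 1 by csinc from the zero register, e.g.
//   cset(x0, eq);  // emits csinc x0, xzr, xzr, ne
//                  // i.e. x0 = (condition eq holds) ? 1 : 0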


void Assembler::csetm(const Register &rd, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, NegateCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
  ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rn, NegateCondition(cond));
}

void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  ASSERT(rd.Is64Bits() && ra.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  ASSERT(rd.Is64Bits());
  ASSERT(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  ASSERT(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  ASSERT(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}


void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}

void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}


void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}


void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  ASSERT(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}


void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  ASSERT(AreSameSizeAndType(rt, rt2));

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    ASSERT(!rt.Is(addr.base()));
    ASSERT(!rt2.Is(addr.base()));
    ASSERT(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      ASSERT(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}


void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}


void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}


void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  ASSERT(!rt.Is(rt2));
  ASSERT(AreSameSizeAndType(rt, rt2));
  ASSERT(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
    static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}

1430 
1431 // Memory instructions.
ldrb(const Register & rt,const MemOperand & src)1432 void Assembler::ldrb(const Register& rt, const MemOperand& src) {
1433   LoadStore(rt, src, LDRB_w);
1434 }
1435 
1436 
strb(const Register & rt,const MemOperand & dst)1437 void Assembler::strb(const Register& rt, const MemOperand& dst) {
1438   LoadStore(rt, dst, STRB_w);
1439 }
1440 
1441 
ldrsb(const Register & rt,const MemOperand & src)1442 void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
1443   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
1444 }
1445 
1446 
ldrh(const Register & rt,const MemOperand & src)1447 void Assembler::ldrh(const Register& rt, const MemOperand& src) {
1448   LoadStore(rt, src, LDRH_w);
1449 }
1450 
1451 
strh(const Register & rt,const MemOperand & dst)1452 void Assembler::strh(const Register& rt, const MemOperand& dst) {
1453   LoadStore(rt, dst, STRH_w);
1454 }
1455 
1456 
ldrsh(const Register & rt,const MemOperand & src)1457 void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
1458   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
1459 }
1460 
1461 
ldr(const CPURegister & rt,const MemOperand & src)1462 void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
1463   LoadStore(rt, src, LoadOpFor(rt));
1464 }
1465 
1466 
str(const CPURegister & rt,const MemOperand & src)1467 void Assembler::str(const CPURegister& rt, const MemOperand& src) {
1468   LoadStore(rt, src, StoreOpFor(rt));
1469 }
1470 
1471 
ldrsw(const Register & rt,const MemOperand & src)1472 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
1473   ASSERT(rt.Is64Bits());
1474   LoadStore(rt, src, LDRSW_x);
1475 }
1476 
1477 
ldr_pcrel(const CPURegister & rt,int imm19)1478 void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
1479   // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
1480   // constant pool. It should not be emitted.
1481   ASSERT(!rt.IsZero());
1482   Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
1483 }
1484 
1485 
void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
  // Currently we only support 64-bit literals.
  ASSERT(rt.Is64Bits());

  RecordRelocInfo(imm.rmode(), imm.value());
  BlockConstPoolFor(1);
  // The load will be patched when the constant pool is emitted; the patching
  // code expects a load literal with offset 0.
  ldr_pcrel(rt, 0);
}


void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as an add immediate with a
  // zero second operand. Otherwise, an orr with the zero register as first
  // operand is used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}
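

// Illustrative encodings (sketch): mov(x0, x1) emits "orr x0, xzr, x1", while
// mov(csp, x0) emits "add csp, x0, #0", because the orr form cannot name the
// stack pointer.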


void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}


void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  ASSERT(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}


void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  ASSERT(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}


void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}


void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}


void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}


void Assembler::fmov(FPRegister fd, double imm) {
  ASSERT(fd.Is64Bits());
  ASSERT(IsImmFP64(imm));
  Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
}


void Assembler::fmov(FPRegister fd, float imm) {
  ASSERT(fd.Is32Bits());
  ASSERT(IsImmFP32(imm));
  Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
}


void Assembler::fmov(Register rd, FPRegister fn) {
  ASSERT(rd.SizeInBits() == fn.SizeInBits());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}


void Assembler::fmov(FPRegister fd, Register rn) {
  ASSERT(fd.SizeInBits() == rn.SizeInBits());
  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(fd) | Rn(rn));
}


void Assembler::fmov(FPRegister fd, FPRegister fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}


void Assembler::fadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);
}


void Assembler::fsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);
}


void Assembler::fmul(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);
}


void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}


void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}


void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}


void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}


void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}


void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}


void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}


void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}


void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}


void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}


void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);
}


void Assembler::fsqrt(const FPRegister& fd,
                      const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);
}


void Assembler::frinta(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);
}


void Assembler::frintm(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTM);
}


void Assembler::frintn(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);
}


void Assembler::frintz(const FPRegister& fd,
                       const FPRegister& fn) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
}


void Assembler::fcmp(const FPRegister& fn,
                     const FPRegister& fm) {
  ASSERT(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}


void Assembler::fcmp(const FPRegister& fn,
                     double value) {
  USE(value);
  // Although the fcmp instruction strictly accepts only an immediate value of
  // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
  // affect the result of the comparison.
  ASSERT(value == 0.0);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}


void Assembler::fccmp(const FPRegister& fn,
                      const FPRegister& fm,
                      StatusFlags nzcv,
                      Condition cond) {
  ASSERT(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}


void Assembler::fcsel(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      Condition cond) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  ASSERT(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}


void Assembler::FPConvertToInt(const Register& rd,
                               const FPRegister& fn,
                               FPIntegerConvertOp op) {
  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
}


void Assembler::fcvt(const FPRegister& fd,
                     const FPRegister& fn) {
  if (fd.Is64Bits()) {
    // Convert float to double.
    ASSERT(fn.Is32Bits());
    FPDataProcessing1Source(fd, fn, FCVT_ds);
  } else {
    // Convert double to float.
    ASSERT(fn.Is64Bits());
    FPDataProcessing1Source(fd, fn, FCVT_sd);
  }
}


void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAU);
}


void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAS);
}


void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMU);
}


void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMS);
}


void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNU);
}


void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNS);
}


void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZU);
}


void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZS);
}


void Assembler::scvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}


void Assembler::ucvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}
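

// Worked example (illustrative): scvtf(d0, x0, 16) treats x0 as a signed
// fixed-point value with 16 fractional bits, so x0 == 0x10000 converts to
// 1.0; the instruction's scale field holds 64 - fbits == 48.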


// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
// If b is 1, then B is 0.
Instr Assembler::ImmFP32(float imm) {
  ASSERT(IsImmFP32(imm));
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}


Instr Assembler::ImmFP64(double imm) {
  ASSERT(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
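

// Worked example (illustrative): ImmFP32(1.0f) starts from raw bits
// 0x3f800000. bit7 (the sign, 'a') is 0; bit6 comes from raw bit 29 (the
// negated 'B' bit) and is 1; bits 5..0 come from raw bits 24..19 and are
// 0b110000. The encoded imm8 is 0x70, the usual AArch64 FMOV encoding of 1.0.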


// Code generation helpers.
void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  if (shift >= 0) {
    // Explicit shift specified.
    ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
    ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & ~0xffffUL) == 0) {
      // Nothing to do.
    } else if ((imm & ~(0xffffUL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xffffUL << 32)) == 0) {
      ASSERT(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xffffUL << 48)) == 0) {
      ASSERT(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  ASSERT(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}
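

// Worked example (illustrative): a call such as MoveWide(x0, 0xabcd0000, -1,
// MOVZ) (the negative shift requests inference) finds that only halfword 1 is
// populated, so it emits "movz x0, #0xabcd, lsl #16" with imm16 == 0xabcd and
// hw == 1.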


void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    ASSERT(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    ASSERT(operand.shift() != ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted
    // to extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      ASSERT(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    ASSERT(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}


void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
  ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  ASSERT(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::hlt(int code) {
  ASSERT(is_uint16(code));
  Emit(HLT | ImmException(code));
}


void Assembler::brk(int code) {
  ASSERT(is_uint16(code));
  Emit(BRK | ImmException(code));
}


void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
  // Don't generate simulator-specific code if we are building a snapshot,
  // which might be run on real hardware.
  if (!serializer_enabled()) {
    // The arguments to the debug marker need to be contiguous in memory, so
    // make sure we don't try to emit pools.
    BlockPoolsScope scope(this);

    Label start;
    bind(&start);

    // Refer to instructions-arm64.h for a description of the marker and its
    // arguments.
    hlt(kImmExceptionIsDebug);
    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
    dc32(code);
    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
    dc32(params);
    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
    EmitStringData(message);
    hlt(kImmExceptionIsUnreachable);

    return;
  }
  // Fall through if the serializer is enabled.
#endif

  if (params & BREAK) {
    hlt(kImmExceptionIsDebug);
  }
}


void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  ASSERT(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    ASSERT(immediate != 0);
    ASSERT(immediate != -1);
    ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else {
    ASSERT(operand.IsShiftedRegister());
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}


void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.SizeInBits();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}


void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  ASSERT(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    ASSERT(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
  } else {
    ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}


void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}


void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  ASSERT(fd.SizeInBits() == fn.SizeInBits());
  ASSERT(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}


void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}


void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      UNREACHABLE();
  }
}


void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  ASSERT(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();
  // Use the correct size of register.
  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX: {
        ASSERT(rn.SizeInBits() == kXRegSizeInBits);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default: UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}


void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Register& rn,
                                        const Operand& operand,
                                        FlagsUpdate S,
                                        Instr op) {
  ASSERT(operand.IsShiftedRegister());
  ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  ASSERT(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) |
       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
       Rm(operand.reg()) | Rn(rn) | Rd(rd));
}


void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
  ASSERT(!operand.NeedsRelocation(this));
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}


bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}
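

// Examples (illustrative): IsImmAddSub(0xfff) holds (plain 12-bit form) and
// IsImmAddSub(0xfff000) holds (12-bit form shifted left by 12), but
// IsImmAddSub(0xfff001) does not, since it needs more than 12 bits and its
// low 12 bits are non-zero.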

void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  ptrdiff_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    ASSERT((shift_amount == 0) ||
           (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    // Pre-index and post-index modes.
    ASSERT(!rt.Is(addr.base()));
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        ASSERT(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  }
}


bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
  return is_int9(offset);
}


bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}
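

// Worked example (illustrative): for a 64-bit access (size == 3), offset 8 is
// scaled-encodable since 8 >> 3 == 1 fits in 12 unsigned bits; offset 12 is
// not a multiple of 8, and offset -8 is negative, so both fall back to the
// unscaled (ldur/stur-style) form, which accepts any 9-bit signed offset.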


// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
// imm_s and imm_r are updated with immediates encoded in the format required
// by the corresponding fields in the logical instruction.
// If it cannot be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
  ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //
  // TODO(mcapewel) This code does not consider using X/W register overlap to
  // support 64-bit immediates where the top 32-bits are zero, and the bottom
  // 32-bits are an encodable logical immediate.

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if ((value == 0) || (value == 0xffffffffffffffffUL) ||
      ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
    return false;
  }

  unsigned lead_zero = CountLeadingZeros(value, width);
  unsigned lead_one = CountLeadingZeros(~value, width);
  unsigned trail_zero = CountTrailingZeros(value, width);
  unsigned trail_one = CountTrailingZeros(~value, width);
  unsigned set_bits = CountSetBits(value, width);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
  int imm_s_mask = 0x3F;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      *n = 0;
      *imm_s = 0x3C;
      *imm_r = (value & 3) - 1;
      return true;
    }

    *n = (width == 64) ? 1 : 0;
    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      *imm_r = 0;
    } else {
      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
    //    the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      return true;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      return true;
    }

    // 5. If the most-significant half of the bitwise value is equal to the
    //    least-significant half, return to step 2 using the least-significant
    //    half of the value.
    uint64_t mask = (1UL << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return false;
  }
}
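

// Worked example (illustrative): for value == 0x0000ffff0000ffff with
// width == 64, the two 32-bit halves are equal, so step 5 halves the width
// once. The remaining 32-bit pattern 0x0000ffff is 16 set bits with no
// rotation, so the function returns N == 0, imm_s == 0b001111 (15) and
// imm_r == 0.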


bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}


bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  // aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }

  return true;
}


bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  // 0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & 0xffffffffffffL) != 0) {
    return false;
  }

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0) {
    return false;
  }

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
    return false;
  }

  return true;
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  byte* buffer = reinterpret_cast<byte*>(buffer_);

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc-relative pointing outside the code
  // buffer, nor pc-absolute pointing inside it, so none of the emitted
  // relocation entries need to be relocated.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
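

// Illustrative growth sequence (sketch): starting below 4KB the buffer jumps
// to 4KB, then doubles (8KB, 16KB, ...) while under 1MB, and grows linearly
// in 1MB steps thereafter, bounding the copy cost for very large code
// objects.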


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      (rmode == RelocInfo::VENEER_POOL)) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || RelocInfo::IsVeneerPool(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
    if (num_pending_reloc_info_ == 0) {
      first_const_pool_use_ = pc_offset();
    }
    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }

  if (!RelocInfo::IsNone(rmode)) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(
          reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;
  if (no_const_pool_before_ < pc_limit) {
    // If there are some pending entries, the constant pool cannot be blocked
    // further than first_const_pool_use_ + kMaxDistToConstPool.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
    no_const_pool_before_ = pc_limit;
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (num_pending_reloc_info_ == 0) {
    // Calculate the offset of the next check.
    next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
    return;
  }

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance to the first instruction accessing the constant pool is
  //    kAvgDistToConstPool or more.
  //  * no jump is required and the distance to the first instruction accessing
  //    the constant pool is at least kMaxDistToConstPool / 2.
  ASSERT(first_const_pool_use_ >= 0);
  int dist = pc_offset() - first_const_pool_use_;
  if (!force_emit && dist < kAvgDistToConstPool &&
      (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
    return;
  }

  int jump_instr = require_jump ? kInstructionSize : 0;
  int size_pool_marker = kInstructionSize;
  int size_pool_guard = kInstructionSize;
  int pool_size = jump_instr + size_pool_marker + size_pool_guard +
    num_pending_reloc_info_ * kPointerSize;
  int needed_space = pool_size + kGap;

  // Emit veneers for branches that would go out of range during emission of
  // the constant pool.
  CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);

  Label size_check;
  bind(&size_check);

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool, the constant pool marker, the
  // constant pool guard, and the gap to the relocation information).
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  {
    // Block recursive calls to CheckConstPool and protect from veneer pools.
    BlockPoolsScope block_pools(this);
    RecordConstPool(pool_size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Emit a constant pool header. The header has two goals:
    //  1) Encode the size of the constant pool, for use by the disassembler.
    //  2) Terminate the program, to try to prevent execution from accidentally
    //     flowing into the constant pool.
    // The header is therefore made of two arm64 instructions:
    //   ldr xzr, #<size of the constant pool in 32-bit words>
    //   blr xzr
    // If executed the code will likely segfault and lr will point to the
    // beginning of the constant pool.
    // TODO(all): currently each relocated constant is 64 bits, consider adding
    // support for 32-bit entries.
    RecordComment("[ Constant Pool");
    ConstantPoolMarker(2 * num_pending_reloc_info_);
    ConstantPoolGuard();

    // Emit constant pool entries.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL &&
             rinfo.rmode() != RelocInfo::VENEER_POOL);

      Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      ASSERT(instr->IsLdrLiteral() &&
             instr->ImmLLiteral() == 0);

      instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
      dc64(rinfo.data());
    }

    num_pending_reloc_info_ = 0;
    first_const_pool_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;

  ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(pool_size));
}
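

// Illustrative pool layout (sketch) for two pending 64-bit entries with
// require_jump set:
//   b after_pool       ; jump over the pool
//   ldr xzr, #4        ; marker: pool size in 32-bit words (2 entries * 2)
//   blr xzr            ; guard: faults if execution falls through
//   .quad <entry 0>
//   .quad <entry 1>
// after_pool: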


bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  // Account for the branch around the veneers and the guard.
  int protection_offset = 2 * kInstructionSize;
  return pc_offset() > max_reachable_pc - margin - protection_offset -
    static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}


void Assembler::RecordVeneerPool(int location_offset, int size) {
  RelocInfo rinfo(buffer_ + location_offset,
                  RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
                  NULL);
  reloc_info_writer.Write(&rinfo);
}


void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
  BlockPoolsScope scope(this);
  RecordComment("[ Veneers");

  // The exact size of the veneer pool must be recorded (see the comment at the
  // declaration site of RecordConstPool()), but computing the number of
  // veneers that will be generated is not obvious. So instead we remember the
  // current position and will record the size after the pool has been
  // generated.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    b(&end);
  }

  EmitVeneersGuard();

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    if (force_emit || ShouldEmitVeneer(it->first, margin)) {
      Instruction* branch = InstructionAt(it->second.pc_offset_);
      Label* label = it->second.label_;

#ifdef DEBUG
      bind(&veneer_size_check);
#endif
      // Patch the branch to point to the current position, and emit a branch
      // to the label.
      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
      RemoveBranchFromLabelLinkChain(branch, label, veneer);
      branch->SetImmPCOffsetTarget(veneer);
      b(label);
#ifdef DEBUG
      ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
             static_cast<uint64_t>(kMaxVeneerCodeSize));
      veneer_size_check.Unuse();
#endif

      it_to_delete = it++;
      unresolved_branches_.erase(it_to_delete);
    } else {
      ++it;
    }
  }

  // Record the veneer pool size.
  int pool_size = SizeOfCodeGeneratedSince(&size_check);
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  bind(&end);

  RecordComment("]");
}


void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                int margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    ASSERT(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  ASSERT(pc_offset() < unresolved_branches_first_limit());

  // Some short sequences of instructions must not be broken up by veneer pool
  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
  // BlockVeneerPoolScope.
  if (is_veneer_pool_blocked()) {
    ASSERT(!force_emit);
    return;
  }

  if (!require_jump) {
    // Prefer emitting veneers protected by an existing instruction.
    margin *= kVeneerNoProtectionFactor;
  }
  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


int Assembler::buffer_space() const {
  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}


void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}


Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  ASSERT(!FLAG_enable_ool_constant_pool);
  return;
}


void PatchingAssembler::MovInt64(const Register& rd, int64_t imm) {
  Label start;
  bind(&start);

  ASSERT(rd.Is64Bits());
  ASSERT(!rd.IsSP());

  for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
    uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
    movk(rd, imm16, 16 * i);
  }

  ASSERT(SizeOfCodeGeneratedSince(&start) ==
         kMovInt64NInstrs * kInstructionSize);
}


void PatchingAssembler::PatchAdrFar(Instruction* target) {
  // The code at the current instruction should be:
  //   adr  rd, 0
  //   nop  (adr_far)
  //   nop  (adr_far)
  //   nop  (adr_far)
  //   movz scratch, 0
  //   add  rd, rd, scratch

  // Verify the expected code.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 2) * kInstructionSize);
  CHECK(expected_movz->IsMovz() &&
        (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();
  Instruction* expected_add =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
  CHECK(expected_add->IsAddSubShifted() &&
        (expected_add->Mask(AddSubOpMask) == ADD) &&
        expected_add->SixtyFourBits() &&
        (expected_add->Rd() == rd_code) && (expected_add->Rn() == rd_code) &&
        (expected_add->Rm() == scratch_code) &&
        (static_cast<Shift>(expected_add->ShiftDP()) == LSL) &&
        (expected_add->ImmDPShift() == 0));

  // Patch to load the correct address.
  Label start;
  bind(&start);
  Register rd = Register::XRegFromCode(rd_code);
  // If the target is in range, we only patch the adr. Otherwise we patch the
  // nops with fixup instructions.
  int target_offset = expected_adr->DistanceTo(target);
  if (Instruction::IsValidPCRelOffset(target_offset)) {
    adr(rd, target_offset);
    for (int i = 0; i < kAdrFarPatchableNInstrs - 2; ++i) {
      nop(ADR_FAR_NOP);
    }
  } else {
    Register scratch = Register::XRegFromCode(scratch_code);
    adr(rd, 0);
    MovInt64(scratch, target_offset);
    add(rd, rd, scratch);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64