/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm64.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   vixl_masm_->
#endif

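// Emit any pending exception poll slow paths collected during assembly, then
// let the underlying VIXL macro-assembler finalize the code buffer.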
void Arm64Assembler::FinalizeCode() {
  for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
    EmitExceptionPoll(exception.get());
  }
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return vixl_masm_->BufferCapacity() - vixl_masm_->RemainingBufferSpace();
}

const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
  return vixl_masm_->GetStartAddress<uint8_t*>();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
  region.CopyFrom(0, from);
}

void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
}

void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(TR, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
  cfi().AdjustCFAOffset(adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
  cfi().AdjustCFAOffset(-adjust);
}

void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

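// Add a constant to rn and write the result to rd. For a real condition code
// the sum is computed into a scratch register and committed with Csel, so rd
// is only updated when the condition holds.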
void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value,
                                 Condition cond) {
  if ((cond == al) || (cond == nv)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // temp = rn + value
    // rd = cond ? temp : rd
    vixl::UseScratchRegisterScope temps(vixl_masm_);
    temps.Exclude(reg_x(rd), reg_x(rn));
    vixl::Register temp = temps.AcquireX();
    ___ Add(temp, reg_x(rn), value);
    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
  }
}

void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
                                    XRegister base, int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64Assembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsXRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsXRegister()) << src;
  StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsXRegister()) << src;
  StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadImmediate(scratch.AsXRegister(), imm);
  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
                                              ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadImmediate(scratch.AsXRegister(), imm);
  StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
}

void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}

void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  vixl::Register temp = temps.AcquireX();
  ___ Mov(temp, reg_x(SP));
  ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}

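// Store |m_source| at dest_off and copy the stack slot at in_off into the
// adjacent slot at dest_off + 8.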
void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
                                   FrameOffset in_off, ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
}

// Load routines.
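// Load an immediate into dest. For a real condition code the immediate is
// materialized in a scratch register (or XZR for zero) and committed with
// Csel, leaving dest unchanged when the condition does not hold.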
void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value,
                                   Condition cond) {
  if ((cond == al) || (cond == nv)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // temp = value
    // rd = cond ? temp : rd
    if (value != 0) {
      vixl::UseScratchRegisterScope temps(vixl_masm_);
      temps.Exclude(reg_x(dest));
      vixl::Register temp = temps.AcquireX();
      ___ Mov(temp, value);
      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
    } else {
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
    }
  }
}

void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
                                     XRegister base, int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: We can extend this member by adding load type info - see
// sign extended A64 load variants.
void Arm64Assembler::LoadFromOffset(XRegister dest, XRegister base,
                                    int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadSFromOffset(SRegister dest, XRegister base,
                                     int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadDFromOffset(DRegister dest, XRegister base,
                                     int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

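// Generic load helper: dispatches on the kind of the destination register and
// on |size| (4 or 8 bytes for X registers) to the matching LDR variant.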
void Arm64Assembler::Load(Arm64ManagedRegister dest, XRegister base,
                          int32_t offset, size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsXRegister()) {
    CHECK_NE(dest.AsXRegister(), SP) << dest;
    if (size == 4u) {
      ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
    } else {
      CHECK_EQ(8u, size) << dest;
      ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
    }
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsXRegister()) << dst;
  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
}

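// Load a 32-bit heap reference from base + offs into the W view of dst and,
// if requested, unpoison it.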
void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
                             bool unpoison_reference) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
                  offs.Int32Value());
  if (unpoison_reference) {
    WRegister ref_reg = dst.AsOverlappingWRegister();
    MaybeUnpoisonHeapReference(reg_w(ref_reg));
  }
}

void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsXRegister()) << dst;
  LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
}

// Copying routines.
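// Register-to-register move; picks Mov or Fmov based on the destination kind
// and uses the overlapping W view when moving a 32-bit value into an X register.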
void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsXRegister()) {
      if (size == 4) {
        CHECK(src.IsWRegister());
        ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
      } else {
        if (src.IsXRegister()) {
          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
        } else {
          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
        }
      }
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                            ThreadOffset<8> tr_offs,
                                            ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}

void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}

void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                             ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
                  SP, src.Int32Value());
  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
                 SP, dest.Int32Value());
}

void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
                          ManagedRegister m_src, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsXRegister()) << dest;
  CHECK(src.IsXRegister()) << src;
  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    if (scratch.IsWRegister()) {
      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
                     dest_offset.Int32Value());
    } else {
      LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
                     dest_offset.Int32Value());
    }
  } else if (size == 8) {
    LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
                          FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

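// Full data memory barrier over the inner shareable domain (DMB ISH).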
void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
  // TODO: Should we check that m_scratch is IP? - see arm.
  ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
}

void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsXRegister()));
}

void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
  ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsXRegister()));
}

void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsXRegister()));
}

void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

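// Compute a handle scope entry address in out_reg: when null is allowed, a
// null reference maps to 0, everything else to SP + handle_scope_offs.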
void Arm64Assembler::CreateHandleScopeEntry(
    ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
    bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale handle scope entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
  CHECK(out_reg.IsXRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
                      handle_scope_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsXRegister(), 0, eq);
    }
    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
  } else {
    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
  }
}

void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
                                            ManagedRegister m_scratch, bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsXRegister()) << scratch;
  if (null_allowed) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
                    handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
    // Move this logic into AddConstant() with flags.
    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
  } else {
    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
  }
  StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
}

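// Dereference a handle scope entry: intended behavior is
// out_reg = (in_reg == null) ? null : *in_reg (see the FIXME below about the
// conditional zero).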
void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
                                                  ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsXRegister()) << out_reg;
  CHECK(in_reg.IsXRegister()) << in_reg;
  vixl::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsXRegister(), 0, eq);
  }
  ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
  LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
  ___ Bind(&exit);
}

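// Poll Thread::exception_: if it is non-null, branch to a slow path emitted
// later by EmitExceptionPoll() during FinalizeCode().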
void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
  LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
  ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}

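// Slow path for ExceptionPoll(): undo any outstanding stack adjustment, pass
// the pending exception in X0 and call pDeliverException, which does not return.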
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
  vixl::Register temp = temps.AcquireX();

  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
  ___ Ldr(temp, MEM_OP(reg_x(TR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));

  ___ Blr(temp);
  // Call should never return.
  ___ Brk();
}

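// Map a VIXL CPURegister to the DWARF register number used for CFI.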
static inline dwarf::Reg DWARFReg(CPURegister reg) {
  if (reg.IsFPRegister()) {
    return dwarf::Reg::Arm64Fp(reg.code());
  } else {
    DCHECK_LT(reg.code(), 31u);  // X0 - X30.
    return dwarf::Reg::Arm64Core(reg.code());
  }
}

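// Store the given register list to the stack starting at |offset|, using STP
// pairs where possible, and record the corresponding CFI offsets.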
void Arm64Assembler::SpillRegisters(vixl::CPURegList registers, int offset) {
  int size = registers.RegisterSizeInBytes();
  const Register sp = vixl_masm_->StackPointer();
  while (registers.Count() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Stp(dst0, dst1, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    cfi_.RelOffset(DWARFReg(dst1), offset + size);
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
  }
  DCHECK(registers.IsEmpty());
}

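// Reload the given register list from the stack starting at |offset| and mark
// each register as restored in the CFI.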
void Arm64Assembler::UnspillRegisters(vixl::CPURegList registers, int offset) {
  int size = registers.RegisterSizeInBytes();
  const Register sp = vixl_masm_->StackPointer();
  while (registers.Count() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Ldp(dst0, dst1, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    cfi_.Restore(DWARFReg(dst1));
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
  }
  DCHECK(registers.IsEmpty());
}

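// Method prologue: grow the stack frame, spill callee-saves at the top of the
// frame, store the ArtMethod* (X0) at SP, and write any entry spills to their
// slots above the new frame.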
void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                const std::vector<ManagedRegister>& callee_save_regs,
                                const ManagedRegisterEntrySpills& entry_spills) {
  // Setup VIXL CPURegList for callee-saves.
  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
  for (auto r : callee_save_regs) {
    Arm64ManagedRegister reg = r.AsArm64();
    if (reg.IsXRegister()) {
      core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
    } else {
      DCHECK(reg.IsDRegister());
      fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
    }
  }
  size_t core_reg_size = core_reg_list.TotalSizeInBytes();
  size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();

  // Increase frame to required size.
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
  IncreaseFrameSize(frame_size);

  // Save callee-saves.
  SpillRegisters(core_reg_list, frame_size - core_reg_size);
  SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));

  // Write ArtMethod*
  DCHECK(X0 == method_reg.AsArm64().AsXRegister());
  StoreToOffset(X0, SP, 0);

  // Write out entry spills
  int32_t offset = frame_size + kArm64PointerSize;
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
    if (reg.IsNoRegister()) {
      // only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsXRegister()) {
      StoreToOffset(reg.AsXRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsWRegister()) {
      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    }
  }
}

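// Method epilogue: reload callee-saves, drop the frame and return. CFI state
// is remembered and restored so code after the exit block keeps valid unwind
// information.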
void Arm64Assembler::RemoveFrame(size_t frame_size,
                                 const std::vector<ManagedRegister>& callee_save_regs) {
  // Setup VIXL CPURegList for callee-saves.
  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
  for (auto r : callee_save_regs) {
    Arm64ManagedRegister reg = r.AsArm64();
    if (reg.IsXRegister()) {
      core_reg_list.Combine(reg_x(reg.AsXRegister()).code());
    } else {
      DCHECK(reg.IsDRegister());
      fp_reg_list.Combine(reg_d(reg.AsDRegister()).code());
    }
  }
  size_t core_reg_size = core_reg_list.TotalSizeInBytes();
  size_t fp_reg_size = fp_reg_list.TotalSizeInBytes();

  // For now we only check that the size of the frame is large enough to hold spills and method
  // reference.
  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
  DCHECK_ALIGNED(frame_size, kStackAlignment);

  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));

  cfi_.RememberState();

  // Restore callee-saves.
  UnspillRegisters(core_reg_list, frame_size - core_reg_size);
  UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

  // Decrease frame size to start of callee saved regs.
  DecreaseFrameSize(frame_size);

  // Pop callee saved and return to LR.
  ___ Ret();

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}

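// Heap reference poisoning is arithmetic negation of the 32-bit reference, so
// poisoning and unpoisoning are the same operation.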
void Arm64Assembler::PoisonHeapReference(vixl::Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, vixl::Operand(reg));
}

void Arm64Assembler::UnpoisonHeapReference(vixl::Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, vixl::Operand(reg));
}

void Arm64Assembler::MaybeUnpoisonHeapReference(vixl::Register reg) {
  if (kPoisonHeapReferences) {
    UnpoisonHeapReference(reg);
  }
}

#undef ___

}  // namespace arm64
}  // namespace art