/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arch/arm64/instruction_set_features_arm64.h"
#include "assembler_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl::aarch64;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   vixl_masm_.
#endif

// Sets vixl::CPUFeatures according to ART instruction set features.
static void SetVIXLCPUFeaturesFromART(vixl::aarch64::MacroAssembler* vixl_masm_,
                                      const Arm64InstructionSetFeatures* art_features) {
  // Retrieve already initialized default features of vixl.
  vixl::CPUFeatures* features = vixl_masm_->GetCPUFeatures();

  DCHECK(features->Has(vixl::CPUFeatures::kFP));
  DCHECK(features->Has(vixl::CPUFeatures::kNEON));
  DCHECK(art_features != nullptr);
  if (art_features->HasCRC()) {
    features->Combine(vixl::CPUFeatures::kCRC32);
  }
  if (art_features->HasDotProd()) {
    features->Combine(vixl::CPUFeatures::kDotProduct);
  }
  if (art_features->HasFP16()) {
    features->Combine(vixl::CPUFeatures::kFPHalf);
  }
  if (art_features->HasLSE()) {
    features->Combine(vixl::CPUFeatures::kAtomics);
  }
}

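// Forwards the ART feature set (if provided) to the underlying VIXL macro
// assembler so that feature-gated instructions can be emitted.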
Arm64Assembler::Arm64Assembler(ArenaAllocator* allocator,
                               const Arm64InstructionSetFeatures* art_features)
    : Assembler(allocator) {
  if (art_features != nullptr) {
    SetVIXLCPUFeaturesFromART(&vixl_masm_, art_features);
  }
}

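// Completes code generation; VIXL flushes any pending literal pools before the
// buffer is read back via CodeSize()/CodeBufferBaseAddress().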
void Arm64Assembler::FinalizeCode() {
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return vixl_masm_.GetSizeOfCodeGenerated();
}

const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
  region.CopyFrom(0, from);
}

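// Loads a pointer-sized value from [base + offs] into dst.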
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

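// Loads the branch target from [base + offs] into scratch and performs an
// indirect branch to it.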
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
  ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsXRegister()));
}

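// Maps a VIXL CPURegister to the DWARF register number used for CFI.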
static inline dwarf::Reg DWARFReg(CPURegister reg) {
  if (reg.IsFPRegister()) {
    return dwarf::Reg::Arm64Fp(reg.GetCode());
  } else {
    DCHECK_LT(reg.GetCode(), 31u);  // X0 - X30.
    return dwarf::Reg::Arm64Core(reg.GetCode());
  }
}

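// Stores the registers in `registers` to the stack starting at `offset`,
// pairing them with STP where possible and recording CFI offsets as it goes.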
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Since we are operating on register pairs, we would like to align on
  // double the standard size; on the other hand, we don't want to insert
  // an extra store, which will happen if the number of registers is even.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Stp(dst0, dst1, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    cfi_.RelOffset(DWARFReg(dst1), offset + size);
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
  }
  DCHECK(registers.IsEmpty());
}

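// Reloads registers spilled by SpillRegisters, mirroring its pairing logic,
// and marks each one as restored in the CFI.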
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Be consistent with the logic for spilling registers.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Ldp(dst0, dst1, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    cfi_.Restore(DWARFReg(dst1));
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
  }
  DCHECK(registers.IsEmpty());
}

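// Poisoned heap references are stored in negated form; negation is its own
// inverse, so poisoning and unpoisoning both emit a single Neg.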
void Arm64Assembler::PoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

void Arm64Assembler::UnpoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

void Arm64Assembler::MaybePoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    PoisonHeapReference(reg);
  }
}

void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    UnpoisonHeapReference(reg);
  }
}

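// Emits a debug check that the Marking Register (MR) agrees with the thread's
// is_gc_marking flag, trapping with a BRK carrying `code` on a mismatch.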
void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
  // The Marking Register is only used in the Baker read barrier configuration.
  DCHECK(kEmitCompilerReadBarrier);
  DCHECK(kUseBakerReadBarrier);

  vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
  vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
  vixl::aarch64::Label mr_is_ok;

  // temp = self.tls32_.is_gc_marking
  ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
  // Check that mr == self.tls32_.is_gc_marking.
  ___ Cmp(mr.W(), temp);
  ___ B(eq, &mr_is_ok);
  ___ Brk(code);
  ___ Bind(&mr_is_ok);
}

#undef ___

}  // namespace arm64
}  // namespace art