/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arch/arm64/instruction_set_features_arm64.h"
#include "assembler_arm64.h"
#include "base/bit_utils_iterator.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "offsets.h"
#include "thread.h"

using namespace vixl::aarch64;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   vixl_masm_.
#endif
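
// The ___ macro forwards to the wrapped VIXL macro-assembler; for example,
// "___ Ldr(reg, mem)" expands to "vixl_masm_.Ldr(reg, mem)".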

// Sets vixl::CPUFeatures according to ART instruction set features.
static void SetVIXLCPUFeaturesFromART(vixl::aarch64::MacroAssembler* vixl_masm_,
                                      const Arm64InstructionSetFeatures* art_features) {
  // Retrieve already initialized default features of vixl.
  vixl::CPUFeatures* features = vixl_masm_->GetCPUFeatures();

  DCHECK(features->Has(vixl::CPUFeatures::kFP));
  DCHECK(features->Has(vixl::CPUFeatures::kNEON));
  DCHECK(art_features != nullptr);
  if (art_features->HasCRC()) {
    features->Combine(vixl::CPUFeatures::kCRC32);
  }
  if (art_features->HasDotProd()) {
    features->Combine(vixl::CPUFeatures::kDotProduct);
  }
  if (art_features->HasFP16()) {
    features->Combine(vixl::CPUFeatures::kFPHalf);
    features->Combine(vixl::CPUFeatures::kNEONHalf);
  }
  if (art_features->HasLSE()) {
    features->Combine(vixl::CPUFeatures::kAtomics);
  }
  if (art_features->HasSVE()) {
    features->Combine(vixl::CPUFeatures::kSVE);
  }
}

Arm64Assembler::Arm64Assembler(ArenaAllocator* allocator,
                               const Arm64InstructionSetFeatures* art_features)
    : Assembler(allocator) {
  if (art_features != nullptr) {
    SetVIXLCPUFeaturesFromART(&vixl_masm_, art_features);
  }
}

void Arm64Assembler::FinalizeCode() {
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return vixl_masm_.GetSizeOfCodeGenerated();
}

const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
  region.CopyFrom(0, from);
}

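// Load a raw pointer from [base + offs] into dst.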
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}

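// Jump through a code pointer: load *(base + offs) into scratch and branch to it.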
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsXRegister()) << base;
  CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
  UseScratchRegisterScope temps(&vixl_masm_);
  temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
  ___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsXRegister()));
}

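// Store the given registers at [sp + offset], pairing them with stp where
// possible and emitting a CFI record for every spilled register.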
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Since we are operating on register pairs, we would like to align on
  // double the standard size; on the other hand, we don't want to insert
  // an extra store, which will happen if the number of registers is even.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Stp(dst0, dst1, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
    cfi_.RelOffset(DWARFReg(dst1), offset + size);
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Str(dst0, MemOperand(sp, offset));
    cfi_.RelOffset(DWARFReg(dst0), offset);
  }
  DCHECK(registers.IsEmpty());
}

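// Reload registers previously saved by SpillRegisters, mirroring its pairing
// logic, and emit a CFI restore record for every reloaded register.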
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
  int size = registers.GetRegisterSizeInBytes();
  const Register sp = vixl_masm_.StackPointer();
  // Be consistent with the logic for spilling registers.
  if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    offset += size;
  }
  while (registers.GetCount() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    ___ Ldp(dst0, dst1, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
    cfi_.Restore(DWARFReg(dst1));
    offset += 2 * size;
  }
  if (!registers.IsEmpty()) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    ___ Ldr(dst0, MemOperand(sp, offset));
    cfi_.Restore(DWARFReg(dst0));
  }
  DCHECK(registers.IsEmpty());
}

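// A poisoned heap reference is the negation of the original value, so
// poisoning and unpoisoning both emit a single Neg instruction.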
void Arm64Assembler::PoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

void Arm64Assembler::UnpoisonHeapReference(Register reg) {
  DCHECK(reg.IsW());
  // reg = -reg.
  ___ Neg(reg, Operand(reg));
}

void Arm64Assembler::MaybePoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    PoisonHeapReference(reg);
  }
}

void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
  if (kPoisonHeapReferences) {
    UnpoisonHeapReference(reg);
  }
}

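// Emit a run-time check that the marking register (MR) is in sync with the
// thread-local is-gc-marking flag; if they differ, trap with brk(code).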
void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
  // The Marking Register is only used in the Baker read barrier configuration.
  DCHECK(kEmitCompilerReadBarrier);
  DCHECK(kUseBakerReadBarrier);

  vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
  vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
  vixl::aarch64::Label mr_is_ok;

  // temp = self.tls32_.is.gc_marking
  ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
  // Check that mr == self.tls32_.is.gc_marking.
  ___ Cmp(mr.W(), temp);
  ___ B(eq, &mr_is_ok);
  ___ Brk(code);
  ___ Bind(&mr_is_ok);
}

#undef ___

}  // namespace arm64
}  // namespace art