• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2015 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <memory>
18 #include <vector>
19 
20 #include "arch/instruction_set.h"
21 #include "base/runtime_debug.h"
22 #include "cfi_test.h"
23 #include "driver/compiler_options.h"
24 #include "gtest/gtest.h"
25 #include "optimizing/code_generator.h"
26 #include "optimizing/optimizing_unit_test.h"
27 #include "read_barrier_config.h"
28 #include "utils/arm/assembler_arm_vixl.h"
29 #include "utils/assembler.h"
30 #include "utils/mips/assembler_mips.h"
31 #include "utils/mips64/assembler_mips64.h"
32 
33 #include "optimizing/optimizing_cfi_test_expected.inc"
34 
35 namespace vixl32 = vixl::aarch32;
36 
37 namespace art {
38 
39 // Run the tests only on host.
40 #ifndef ART_TARGET_ANDROID
41 
42 class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
43  public:
44   // Enable this flag to generate the expected outputs.
45   static constexpr bool kGenerateExpected = false;
46 
OptimizingCFITest()47   OptimizingCFITest()
48       : graph_(nullptr),
49         code_gen_(),
50         blocks_(GetAllocator()->Adapter()) {}
51 
SetUpFrame(InstructionSet isa)52   void SetUpFrame(InstructionSet isa) {
53     OverrideInstructionSetFeatures(isa, "default");
54 
55     // Ensure that slow-debug is off, so that there is no unexpected read-barrier check emitted.
56     SetRuntimeDebugFlagsEnabled(false);
57 
58     // Setup simple context.
59     graph_ = CreateGraph();
60     // Generate simple frame with some spills.
61     code_gen_ = CodeGenerator::Create(graph_, *compiler_options_);
62     code_gen_->GetAssembler()->cfi().SetEnabled(true);
63     code_gen_->InitializeCodeGenerationData();
64     const int frame_size = 64;
65     int core_reg = 0;
66     int fp_reg = 0;
67     for (int i = 0; i < 2; i++) {  // Two registers of each kind.
68       for (; core_reg < 32; core_reg++) {
69         if (code_gen_->IsCoreCalleeSaveRegister(core_reg)) {
70           auto location = Location::RegisterLocation(core_reg);
71           code_gen_->AddAllocatedRegister(location);
72           core_reg++;
73           break;
74         }
75       }
76       for (; fp_reg < 32; fp_reg++) {
77         if (code_gen_->IsFloatingPointCalleeSaveRegister(fp_reg)) {
78           auto location = Location::FpuRegisterLocation(fp_reg);
79           code_gen_->AddAllocatedRegister(location);
80           fp_reg++;
81           break;
82         }
83       }
84     }
85     code_gen_->block_order_ = &blocks_;
86     code_gen_->ComputeSpillMask();
87     code_gen_->SetFrameSize(frame_size);
88     code_gen_->GenerateFrameEntry();
89   }
90 
Finish()91   void Finish() {
92     code_gen_->GenerateFrameExit();
93     code_gen_->Finalize(&code_allocator_);
94   }
95 
Check(InstructionSet isa,const char * isa_str,const std::vector<uint8_t> & expected_asm,const std::vector<uint8_t> & expected_cfi)96   void Check(InstructionSet isa,
97              const char* isa_str,
98              const std::vector<uint8_t>& expected_asm,
99              const std::vector<uint8_t>& expected_cfi) {
100     // Get the outputs.
101     ArrayRef<const uint8_t> actual_asm = code_allocator_.GetMemory();
102     Assembler* opt_asm = code_gen_->GetAssembler();
103     ArrayRef<const uint8_t> actual_cfi(*(opt_asm->cfi().data()));
104 
105     if (kGenerateExpected) {
106       GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
107     } else {
108       EXPECT_EQ(ArrayRef<const uint8_t>(expected_asm), actual_asm);
109       EXPECT_EQ(ArrayRef<const uint8_t>(expected_cfi), actual_cfi);
110     }
111   }
112 
TestImpl(InstructionSet isa,const char * isa_str,const std::vector<uint8_t> & expected_asm,const std::vector<uint8_t> & expected_cfi)113   void TestImpl(InstructionSet isa, const char*
114                 isa_str,
115                 const std::vector<uint8_t>& expected_asm,
116                 const std::vector<uint8_t>& expected_cfi) {
117     SetUpFrame(isa);
118     Finish();
119     Check(isa, isa_str, expected_asm, expected_cfi);
120   }
121 
GetCodeGenerator()122   CodeGenerator* GetCodeGenerator() {
123     return code_gen_.get();
124   }
125 
126  private:
127   class InternalCodeAllocator : public CodeAllocator {
128    public:
InternalCodeAllocator()129     InternalCodeAllocator() {}
130 
Allocate(size_t size)131     uint8_t* Allocate(size_t size) override {
132       memory_.resize(size);
133       return memory_.data();
134     }
135 
GetMemory() const136     ArrayRef<const uint8_t> GetMemory() const override { return ArrayRef<const uint8_t>(memory_); }
137 
138    private:
139     std::vector<uint8_t> memory_;
140 
141     DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
142   };
143 
144   HGraph* graph_;
145   std::unique_ptr<CodeGenerator> code_gen_;
146   ArenaVector<HBasicBlock*> blocks_;
147   InternalCodeAllocator code_allocator_;
148 };
149 
// Instantiates a CFI test for the given ISA: builds the expected assembly and
// CFI byte vectors from the arrays in optimizing_cfi_test_expected.inc and
// runs TestImpl() against them.
#define TEST_ISA(isa)                                                 \
  TEST_F(OptimizingCFITest, isa) {                                    \
    std::vector<uint8_t> expected_asm(                                \
        expected_asm_##isa,                                           \
        expected_asm_##isa + arraysize(expected_asm_##isa));          \
    std::vector<uint8_t> expected_cfi(                                \
        expected_cfi_##isa,                                           \
        expected_cfi_##isa + arraysize(expected_cfi_##isa));          \
    TestImpl(InstructionSet::isa, #isa, expected_asm, expected_cfi);  \
  }
160 
#ifdef ART_ENABLE_CODEGEN_arm
TEST_ISA(kThumb2)
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
// Run the tests for ARM64 only with Baker read barriers, as the
// expected generated code saves and restore X21 and X22 (instead of
// X20 and X21), as X20 is used as Marking Register in the Baker read
// barrier configuration, and as such is removed from the set of
// callee-save registers in the ARM64 code generator of the Optimizing
// compiler.
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
TEST_ISA(kArm64)
#endif
#endif

#ifdef ART_ENABLE_CODEGEN_x86
TEST_ISA(kX86)
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
TEST_ISA(kX86_64)
#endif

#ifdef ART_ENABLE_CODEGEN_mips
TEST_ISA(kMips)
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
TEST_ISA(kMips64)
#endif
192 
#ifdef ART_ENABLE_CODEGEN_arm
// Checks that the CFI stays correct when a Thumb2 CBZ has to be rewritten
// into CMP+B because the branch target is pushed out of CBZ's range.
TEST_F(OptimizingCFITest, kThumb2Adjust) {
  using vixl32::r0;
  std::vector<uint8_t> expected_asm(
      expected_asm_kThumb2_adjust,
      expected_asm_kThumb2_adjust + arraysize(expected_asm_kThumb2_adjust));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kThumb2_adjust,
      expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
  SetUpFrame(InstructionSet::kThumb2);
#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
    ->GetAssembler())->GetVIXLAssembler()->
  vixl32::Label target;
  __ CompareAndBranchIfZero(r0, &target);
  // Push the target out of range of CBZ.
  for (size_t i = 0; i != 65; ++i) {
    __ Ldr(r0, vixl32::MemOperand(r0));
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kThumb2, "kThumb2_adjust", expected_asm, expected_cfi);
}
#endif
217 
#ifdef ART_ENABLE_CODEGEN_mips
// Checks that the CFI stays correct when a MIPS BEQZ has to be rewritten
// into its long form because the branch target is out of range.
TEST_F(OptimizingCFITest, kMipsAdjust) {
  // One NOP in delay slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips_adjust_head,
      expected_asm_kMips_adjust_head + arraysize(expected_asm_kMips_adjust_head));
  // Expected assembly is head + kNumNops NOPs (4 zero bytes each) + tail.
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips_adjust_tail,
      expected_asm_kMips_adjust_tail + arraysize(expected_asm_kMips_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips_adjust,
      expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
  SetUpFrame(InstructionSet::kMips);
#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
  mips::MipsLabel target;
  __ Beqz(mips::A0, &target);
  // Push the target out of range of BEQZ.
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
}
#endif
247 
#ifdef ART_ENABLE_CODEGEN_mips64
// Checks that the CFI stays correct when a MIPS64 BEQC has to be rewritten
// into its long form because the branch target is out of range.
TEST_F(OptimizingCFITest, kMips64Adjust) {
  // One NOP in forbidden slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum.
  static constexpr size_t kNumNops = 1u + (1u << 15);
  std::vector<uint8_t> expected_asm(
      expected_asm_kMips64_adjust_head,
      expected_asm_kMips64_adjust_head + arraysize(expected_asm_kMips64_adjust_head));
  // Expected assembly is head + kNumNops NOPs (4 zero bytes each) + tail.
  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
  expected_asm.insert(
      expected_asm.end(),
      expected_asm_kMips64_adjust_tail,
      expected_asm_kMips64_adjust_tail + arraysize(expected_asm_kMips64_adjust_tail));
  std::vector<uint8_t> expected_cfi(
      expected_cfi_kMips64_adjust,
      expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
  SetUpFrame(InstructionSet::kMips64);
#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
  mips64::Mips64Label target;
  __ Beqc(mips64::A1, mips64::A2, &target);
  // Push the target out of range of BEQC.
  for (size_t i = 0; i != kNumNops; ++i) {
    __ Nop();
  }
  __ Bind(&target);
#undef __
  Finish();
  Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
}
#endif
277 
278 #endif  // ART_TARGET_ANDROID
279 
280 }  // namespace art
281