1 /*
2  * Copyright (C) 2015 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "base/casts.h"
18 #include "linker/relative_patcher_test.h"
19 #include "linker/arm64/relative_patcher_arm64.h"
20 #include "lock_word.h"
21 #include "mirror/object.h"
22 #include "oat_quick_method_header.h"
23 
24 namespace art {
25 namespace linker {
26 
27 class Arm64RelativePatcherTest : public RelativePatcherTest {
28  public:
29   explicit Arm64RelativePatcherTest(const std::string& variant)
30       : RelativePatcherTest(kArm64, variant) { }
31 
32  protected:
33   static const uint8_t kCallRawCode[];
34   static const ArrayRef<const uint8_t> kCallCode;
35   static const uint8_t kNopRawCode[];
36   static const ArrayRef<const uint8_t> kNopCode;
37 
38   // NOP instruction.
39   static constexpr uint32_t kNopInsn = 0xd503201f;
40 
41   // All branches can be created from kBlPlus0 or kBPlus0 by adding the low 26 bits.
42   static constexpr uint32_t kBlPlus0 = 0x94000000u;
43   static constexpr uint32_t kBPlus0 = 0x14000000u;
44 
45   // Special BL values.
46   static constexpr uint32_t kBlPlusMax = 0x95ffffffu;
47   static constexpr uint32_t kBlMinusMax = 0x96000000u;
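  // For example, kBlPlusMax == (kBlPlus0 | 0x01ffffffu), a branch by +((1 << 27) - 4) bytes, and
  // kBlMinusMax == (kBlPlus0 | 0x02000000u), a branch by -(1 << 27) bytes; imm26 holds the
  // displacement in 4-byte units and is sign-extended.
  static_assert(kBlPlusMax == (kBlPlus0 | 0x01ffffffu), "Maximum forward BL displacement.");
  static_assert(kBlMinusMax == (kBlPlus0 | 0x02000000u), "Maximum backward BL displacement.");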
48 
49   // LDR immediate, unsigned offset.
50   static constexpr uint32_t kLdrWInsn = 0xb9400000u;
51 
52   // ADD/ADDS/SUB/SUBS immediate, 64-bit.
53   static constexpr uint32_t kAddXInsn = 0x91000000u;
54   static constexpr uint32_t kAddsXInsn = 0xb1000000u;
55   static constexpr uint32_t kSubXInsn = 0xd1000000u;
56   static constexpr uint32_t kSubsXInsn = 0xf1000000u;
57 
58   // LDUR x2, [sp, #4], i.e. unaligned load crossing 64-bit boundary (assuming aligned sp).
59   static constexpr uint32_t kLdurInsn = 0xf840405fu;
60 
61   // LDR w12, <label> and LDR x12, <label>. Bits 5-23 contain label displacement in 4-byte units.
62   static constexpr uint32_t kLdrWPcRelInsn = 0x1800000cu;
63   static constexpr uint32_t kLdrXPcRelInsn = 0x5800000cu;
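  // (The tests below splice in a displacement as insn | (((disp >> 2) & 0x7ffffu) << 5);
  // e.g. a +8 byte label displacement for kLdrWPcRelInsn yields 0x1800004cu.)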
64 
65   // LDR w13, [SP, #<pimm>] and LDR x13, [SP, #<pimm>]. Bits 10-21 contain displacement from SP
66   // in units of 4-bytes (for 32-bit load) or 8-bytes (for 64-bit load).
67   static constexpr uint32_t kLdrWSpRelInsn = 0xb94003edu;
68   static constexpr uint32_t kLdrXSpRelInsn = 0xf94003edu;
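  // (The tests below splice in the scaled displacement as insn | ((disp_in_load_units & 0xfffu) << 10);
  // e.g. kLdrWSpRelInsn with a 4-byte offset (1 load unit) yields 0xb94007edu.)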
69 
70   // CBNZ x17, +0. Bits 5-23 are a placeholder for target offset from PC in units of 4-bytes.
71   static constexpr uint32_t kCbnzIP1Plus0Insn = 0xb5000011;
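  // (Patched in the tests as insn | (target_offset << (5 - 2)); e.g. a +8 byte target yields 0xb5000051u.)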
72 
73   void InsertInsn(std::vector<uint8_t>* code, size_t pos, uint32_t insn) {
74     CHECK_LE(pos, code->size());
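    // Encode the 32-bit instruction as four little-endian bytes, matching the AArch64 instruction stream.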
75     const uint8_t insn_code[] = {
76         static_cast<uint8_t>(insn),
77         static_cast<uint8_t>(insn >> 8),
78         static_cast<uint8_t>(insn >> 16),
79         static_cast<uint8_t>(insn >> 24),
80     };
81     static_assert(sizeof(insn_code) == 4u, "Invalid sizeof(insn_code).");
82     code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code));
83   }
84 
85   void PushBackInsn(std::vector<uint8_t>* code, uint32_t insn) {
86     InsertInsn(code, code->size(), insn);
87   }
88 
89   std::vector<uint8_t> RawCode(std::initializer_list<uint32_t> insns) {
90     std::vector<uint8_t> raw_code;
91     raw_code.reserve(insns.size() * 4u);
92     for (uint32_t insn : insns) {
93       PushBackInsn(&raw_code, insn);
94     }
95     return raw_code;
96   }
97 
98   uint32_t Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
99                                  const ArrayRef<const LinkerPatch>& method1_patches,
100                                  const ArrayRef<const uint8_t>& last_method_code,
101                                  const ArrayRef<const LinkerPatch>& last_method_patches,
102                                  uint32_t distance_without_thunks) {
103     CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
104     uint32_t method1_offset =
105         kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
106     AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
107     const uint32_t gap_start = method1_offset + method1_code.size();
108 
109     // We want to put the last method at a very precise offset.
110     const uint32_t last_method_offset = method1_offset + distance_without_thunks;
111     CHECK_ALIGNED(last_method_offset, kArm64Alignment);
112     const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
113 
114     // Fill the gap with intermediate methods in chunks of 2MiB and the first in [2MiB, 4MiB).
115     // (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB
116     // offsets by this test. Making the first chunk bigger makes it easy to give all intermediate
117     // methods the same alignment of the end, so the thunk insertion adds a predictable size as
118     // long as it's after the first chunk.)
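    // (For example, a 5 MiB gap is filled with a first chunk of 3 MiB followed by one small chunk of 2 MiB.)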
119     uint32_t method_idx = 2u;
120     constexpr uint32_t kSmallChunkSize = 2 * MB;
121     std::vector<uint8_t> gap_code;
122     uint32_t gap_size = gap_end - gap_start;
123     uint32_t num_small_chunks = std::max(gap_size / kSmallChunkSize, 1u) - 1u;
124     uint32_t chunk_start = gap_start;
125     uint32_t chunk_size = gap_size - num_small_chunks * kSmallChunkSize;
126     for (uint32_t i = 0; i <= num_small_chunks; ++i) {  // num_small_chunks+1 iterations.
127       uint32_t chunk_code_size =
128           chunk_size - CodeAlignmentSize(chunk_start) - sizeof(OatQuickMethodHeader);
129       gap_code.resize(chunk_code_size, 0u);
130       AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code));
131       method_idx += 1u;
132       chunk_start += chunk_size;
133       chunk_size = kSmallChunkSize;  // For all but the first chunk.
134       DCHECK_EQ(CodeAlignmentSize(gap_end), CodeAlignmentSize(chunk_start));
135     }
136 
137     // Add the last method and link.
138     AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
139     Link();
140 
141     // Check assumptions.
142     CHECK_EQ(GetMethodOffset(1), method1_offset);
143     auto last_result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
144     CHECK(last_result.first);
145     // There may be a thunk before the last method.
146     if (last_result.second != last_method_offset) {
147       // Thunk present. Check that there's only one.
148       uint32_t thunk_end = CompiledCode::AlignCode(gap_end, kArm64) + MethodCallThunkSize();
149       uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end);
150       CHECK_EQ(last_result.second, header_offset + sizeof(OatQuickMethodHeader));
151     }
152     return method_idx;
153   }
154 
155   uint32_t GetMethodOffset(uint32_t method_idx) {
156     auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
157     CHECK(result.first);
158     CHECK_ALIGNED(result.second, 4u);
159     return result.second;
160   }
161 
162   std::vector<uint8_t> CompileMethodCallThunk() {
163     ArmBaseRelativePatcher::ThunkKey key(
164         ArmBaseRelativePatcher::ThunkType::kMethodCall,
165         ArmBaseRelativePatcher::ThunkParams{{ 0, 0 }});  // NOLINT(whitespace/braces)
166     return down_cast<Arm64RelativePatcher*>(patcher_.get())->CompileThunk(key);
167   }
168 
169   uint32_t MethodCallThunkSize() {
170     return CompileMethodCallThunk().size();
171   }
172 
173   bool CheckThunk(uint32_t thunk_offset) {
174     const std::vector<uint8_t> expected_code = CompileMethodCallThunk();
175     if (output_.size() < thunk_offset + expected_code.size()) {
176       LOG(ERROR) << "output_.size() == " << output_.size() << " < "
177           << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size());
178       return false;
179     }
180     ArrayRef<const uint8_t> linked_code(&output_[thunk_offset], expected_code.size());
181     if (linked_code == ArrayRef<const uint8_t>(expected_code)) {
182       return true;
183     }
184     // Log failure info.
185     DumpDiff(ArrayRef<const uint8_t>(expected_code), linked_code);
186     return false;
187   }
188 
189   std::vector<uint8_t> GenNops(size_t num_nops) {
190     std::vector<uint8_t> result;
191     result.reserve(num_nops * 4u + 4u);
192     for (size_t i = 0; i != num_nops; ++i) {
193       PushBackInsn(&result, kNopInsn);
194     }
195     return result;
196   }
197 
198   std::vector<uint8_t> GenNopsAndBl(size_t num_nops, uint32_t bl) {
199     std::vector<uint8_t> result;
200     result.reserve(num_nops * 4u + 4u);
201     for (size_t i = 0; i != num_nops; ++i) {
202       PushBackInsn(&result, kNopInsn);
203     }
204     PushBackInsn(&result, bl);
205     return result;
206   }
207 
208   std::vector<uint8_t> GenNopsAndAdrpAndUse(size_t num_nops,
209                                             uint32_t method_offset,
210                                             uint32_t target_offset,
211                                             uint32_t use_insn) {
212     std::vector<uint8_t> result;
213     result.reserve(num_nops * 4u + 8u);
214     for (size_t i = 0; i != num_nops; ++i) {
215       PushBackInsn(&result, kNopInsn);
216     }
217     CHECK_ALIGNED(method_offset, 4u);
218     CHECK_ALIGNED(target_offset, 4u);
219     uint32_t adrp_offset = method_offset + num_nops * 4u;
220     uint32_t disp = target_offset - (adrp_offset & ~0xfffu);
221     if (use_insn == kLdrWInsn) {
222       DCHECK_ALIGNED(disp, 1u << 2);
223       use_insn |= 1 |                         // LDR x1, [x0, #(imm12 << 2)]
224           ((disp & 0xfffu) << (10 - 2));      // imm12 = ((disp & 0xfffu) >> 2) is at bit 10.
225     } else if (use_insn == kAddXInsn) {
226       use_insn |= 1 |                         // ADD x1, x0, #imm
227           (disp & 0xfffu) << 10;              // imm12 = (disp & 0xfffu) is at bit 10.
228     } else {
229       LOG(FATAL) << "Unexpected instruction: 0x" << std::hex << use_insn;
230     }
231     uint32_t adrp = 0x90000000 |              // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
232         ((disp & 0x3000u) << (29 - 12)) |     // immlo = ((disp & 0x3000u) >> 12) is at bit 29,
233         ((disp & 0xffffc000) >> (14 - 5)) |   // immhi = (disp >> 14) is at bit 5,
234         // We take the sign bit from the disp, limiting disp to +- 2GiB.
235         ((disp & 0x80000000) >> (31 - 23));   // sign bit in immhi is at bit 23.
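    // (E.g. disp = 0x5000u encodes as adrp == 0xb0000020u, i.e. "ADRP x0, +5 pages".)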
236     PushBackInsn(&result, adrp);
237     PushBackInsn(&result, use_insn);
238     return result;
239   }
240 
241   std::vector<uint8_t> GenNopsAndAdrpLdr(size_t num_nops,
242                                          uint32_t method_offset,
243                                          uint32_t target_offset) {
244     return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kLdrWInsn);
245   }
246 
247   void TestNopsAdrpLdr(size_t num_nops, uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
248     dex_cache_arrays_begin_ = dex_cache_arrays_begin;
249     auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
250     const LinkerPatch patches[] = {
251         LinkerPatch::DexCacheArrayPatch(num_nops * 4u     , nullptr, num_nops * 4u, element_offset),
252         LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, element_offset),
253     };
254     AddCompiledMethod(MethodRef(1u),
255                       ArrayRef<const uint8_t>(code),
256                       ArrayRef<const LinkerPatch>(patches));
257     Link();
258 
259     uint32_t method1_offset = GetMethodOffset(1u);
260     uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
261     auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset);
262     EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
263   }
264 
265   std::vector<uint8_t> GenNopsAndAdrpAdd(size_t num_nops,
266                                          uint32_t method_offset,
267                                          uint32_t target_offset) {
268     return GenNopsAndAdrpAndUse(num_nops, method_offset, target_offset, kAddXInsn);
269   }
270 
271   void TestNopsAdrpAdd(size_t num_nops, uint32_t string_offset) {
272     constexpr uint32_t kStringIndex = 1u;
273     string_index_to_offset_map_.Put(kStringIndex, string_offset);
274     auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u);  // Unpatched.
275     const LinkerPatch patches[] = {
276         LinkerPatch::RelativeStringPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
277         LinkerPatch::RelativeStringPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, kStringIndex),
278     };
279     AddCompiledMethod(MethodRef(1u),
280                       ArrayRef<const uint8_t>(code),
281                       ArrayRef<const LinkerPatch>(patches));
282     Link();
283 
284     uint32_t method1_offset = GetMethodOffset(1u);
285     auto expected_code = GenNopsAndAdrpAdd(num_nops, method1_offset, string_offset);
286     EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
287   }
288 
289   void PrepareNopsAdrpInsn2Ldr(size_t num_nops,
290                                uint32_t insn2,
291                                uint32_t dex_cache_arrays_begin,
292                                uint32_t element_offset) {
293     dex_cache_arrays_begin_ = dex_cache_arrays_begin;
294     auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
295     InsertInsn(&code, num_nops * 4u + 4u, insn2);
296     const LinkerPatch patches[] = {
297         LinkerPatch::DexCacheArrayPatch(num_nops * 4u     , nullptr, num_nops * 4u, element_offset),
298         LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, element_offset),
299     };
300     AddCompiledMethod(MethodRef(1u),
301                       ArrayRef<const uint8_t>(code),
302                       ArrayRef<const LinkerPatch>(patches));
303     Link();
304   }
305 
306   void PrepareNopsAdrpInsn2Add(size_t num_nops, uint32_t insn2, uint32_t string_offset) {
307     constexpr uint32_t kStringIndex = 1u;
308     string_index_to_offset_map_.Put(kStringIndex, string_offset);
309     auto code = GenNopsAndAdrpAdd(num_nops, 0u, 0u);  // Unpatched.
310     InsertInsn(&code, num_nops * 4u + 4u, insn2);
311     const LinkerPatch patches[] = {
312         LinkerPatch::RelativeStringPatch(num_nops * 4u     , nullptr, num_nops * 4u, kStringIndex),
313         LinkerPatch::RelativeStringPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, kStringIndex),
314     };
315     AddCompiledMethod(MethodRef(1u),
316                       ArrayRef<const uint8_t>(code),
317                       ArrayRef<const LinkerPatch>(patches));
318     Link();
319   }
320 
321   void TestNopsAdrpInsn2AndUse(size_t num_nops,
322                                uint32_t insn2,
323                                uint32_t target_offset,
324                                uint32_t use_insn) {
325     uint32_t method1_offset = GetMethodOffset(1u);
326     auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn);
327     InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
328     EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
329   }
330 
331   void TestNopsAdrpInsn2AndUseHasThunk(size_t num_nops,
332                                        uint32_t insn2,
333                                        uint32_t target_offset,
334                                        uint32_t use_insn) {
335     uint32_t method1_offset = GetMethodOffset(1u);
336     CHECK(!compiled_method_refs_.empty());
337     CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u);
338     CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
339     uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
340     uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
341     uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
342     CHECK_ALIGNED(b_diff, 4u);
343     ASSERT_LT(b_diff, 128 * MB);
344     uint32_t b_out = kBPlus0 + ((b_diff >> 2) & 0x03ffffffu);
345     uint32_t b_in = kBPlus0 + ((-b_diff >> 2) & 0x03ffffffu);
346 
347     auto expected_code = GenNopsAndAdrpAndUse(num_nops, method1_offset, target_offset, use_insn);
348     InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
349     // Replace adrp with bl.
350     expected_code.erase(expected_code.begin() + num_nops * 4u,
351                         expected_code.begin() + num_nops * 4u + 4u);
352     InsertInsn(&expected_code, num_nops * 4u, b_out);
353     EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
354 
355     auto expected_thunk_code = GenNopsAndAdrpLdr(0u, thunk_offset, target_offset);
356     ASSERT_EQ(expected_thunk_code.size(), 8u);
357     expected_thunk_code.erase(expected_thunk_code.begin() + 4u, expected_thunk_code.begin() + 8u);
358     InsertInsn(&expected_thunk_code, 4u, b_in);
359     ASSERT_EQ(expected_thunk_code.size(), 8u);
360 
361     uint32_t thunk_size = MethodCallThunkSize();
362     ASSERT_EQ(thunk_offset + thunk_size, output_.size());
363     ASSERT_EQ(thunk_size, expected_thunk_code.size());
364     ArrayRef<const uint8_t> thunk_code(&output_[thunk_offset], thunk_size);
365     if (ArrayRef<const uint8_t>(expected_thunk_code) != thunk_code) {
366       DumpDiff(ArrayRef<const uint8_t>(expected_thunk_code), thunk_code);
367       FAIL();
368     }
369   }
370 
371   void TestAdrpInsn2Ldr(uint32_t insn2,
372                         uint32_t adrp_offset,
373                         bool has_thunk,
374                         uint32_t dex_cache_arrays_begin,
375                         uint32_t element_offset) {
376     uint32_t method1_offset =
377         kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
378     ASSERT_LT(method1_offset, adrp_offset);
379     CHECK_ALIGNED(adrp_offset, 4u);
380     uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
381     PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset);
382     uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
383     if (has_thunk) {
384       TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, target_offset, kLdrWInsn);
385     } else {
386       TestNopsAdrpInsn2AndUse(num_nops, insn2, target_offset, kLdrWInsn);
387     }
388     ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
389   }
390 
391   void TestAdrpLdurLdr(uint32_t adrp_offset,
392                        bool has_thunk,
393                        uint32_t dex_cache_arrays_begin,
394                        uint32_t element_offset) {
395     TestAdrpInsn2Ldr(kLdurInsn, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
396   }
397 
398   void TestAdrpLdrPcRelLdr(uint32_t pcrel_ldr_insn,
399                            int32_t pcrel_disp,
400                            uint32_t adrp_offset,
401                            bool has_thunk,
402                            uint32_t dex_cache_arrays_begin,
403                            uint32_t element_offset) {
404     ASSERT_LT(pcrel_disp, 0x100000);
405     ASSERT_GE(pcrel_disp, -0x100000);
406     ASSERT_EQ(pcrel_disp & 0x3, 0);
407     uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
408     TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
409   }
410 
411   void TestAdrpLdrSpRelLdr(uint32_t sprel_ldr_insn,
412                            uint32_t sprel_disp_in_load_units,
413                            uint32_t adrp_offset,
414                            bool has_thunk,
415                            uint32_t dex_cache_arrays_begin,
416                            uint32_t element_offset) {
417     ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
418     uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
419     TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
420   }
421 
422   void TestAdrpInsn2Add(uint32_t insn2,
423                         uint32_t adrp_offset,
424                         bool has_thunk,
425                         uint32_t string_offset) {
426     uint32_t method1_offset =
427         kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader);
428     ASSERT_LT(method1_offset, adrp_offset);
429     CHECK_ALIGNED(adrp_offset, 4u);
430     uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
431     PrepareNopsAdrpInsn2Add(num_nops, insn2, string_offset);
432     if (has_thunk) {
433       TestNopsAdrpInsn2AndUseHasThunk(num_nops, insn2, string_offset, kAddXInsn);
434     } else {
435       TestNopsAdrpInsn2AndUse(num_nops, insn2, string_offset, kAddXInsn);
436     }
437     ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
438   }
439 
440   void TestAdrpLdurAdd(uint32_t adrp_offset, bool has_thunk, uint32_t string_offset) {
441     TestAdrpInsn2Add(kLdurInsn, adrp_offset, has_thunk, string_offset);
442   }
443 
444   void TestAdrpLdrPcRelAdd(uint32_t pcrel_ldr_insn,
445                            int32_t pcrel_disp,
446                            uint32_t adrp_offset,
447                            bool has_thunk,
448                            uint32_t string_offset) {
449     ASSERT_LT(pcrel_disp, 0x100000);
450     ASSERT_GE(pcrel_disp, -0x100000);
451     ASSERT_EQ(pcrel_disp & 0x3, 0);
452     uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
453     TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
454   }
455 
456   void TestAdrpLdrSpRelAdd(uint32_t sprel_ldr_insn,
457                            uint32_t sprel_disp_in_load_units,
458                            uint32_t adrp_offset,
459                            bool has_thunk,
460                            uint32_t string_offset) {
461     ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
462     uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
463     TestAdrpInsn2Add(insn2, adrp_offset, has_thunk, string_offset);
464   }
465 
466   std::vector<uint8_t> CompileBakerOffsetThunk(uint32_t base_reg, uint32_t holder_reg) {
467     const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
468         0u, Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg));
469     auto* patcher = down_cast<Arm64RelativePatcher*>(patcher_.get());
470     ArmBaseRelativePatcher::ThunkKey key = patcher->GetBakerReadBarrierKey(patch);
471     return patcher->CompileThunk(key);
472   }
473 
474   std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg) {
475     LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
476         0u, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg));
477     auto* patcher = down_cast<Arm64RelativePatcher*>(patcher_.get());
478     ArmBaseRelativePatcher::ThunkKey key = patcher->GetBakerReadBarrierKey(patch);
479     return patcher->CompileThunk(key);
480   }
481 
482   uint32_t GetOutputInsn(uint32_t offset) {
483     CHECK_LE(offset, output_.size());
484     CHECK_GE(output_.size() - offset, 4u);
485     return (static_cast<uint32_t>(output_[offset]) << 0) |
486            (static_cast<uint32_t>(output_[offset + 1]) << 8) |
487            (static_cast<uint32_t>(output_[offset + 2]) << 16) |
488            (static_cast<uint32_t>(output_[offset + 3]) << 24);
489   }
490 
491   void TestBakerField(uint32_t offset, uint32_t root_reg);
492 };
493 
494 const uint8_t Arm64RelativePatcherTest::kCallRawCode[] = {
495     0x00, 0x00, 0x00, 0x94
496 };
497 
498 const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kCallCode(kCallRawCode);
499 
500 const uint8_t Arm64RelativePatcherTest::kNopRawCode[] = {
501     0x1f, 0x20, 0x03, 0xd5
502 };
503 
504 const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kNopCode(kNopRawCode);
505 
506 class Arm64RelativePatcherTestDefault : public Arm64RelativePatcherTest {
507  public:
508   Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
509 };
510 
511 class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
512  public:
513   Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
514 };
515 
516 TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
517   const LinkerPatch patches[] = {
518       LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
519   };
520   AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
521   Link();
522 
523   const std::vector<uint8_t> expected_code = RawCode({kBlPlus0});
524   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
525 }
526 
527 TEST_F(Arm64RelativePatcherTestDefault, CallOther) {
528   const LinkerPatch method1_patches[] = {
529       LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
530   };
531   AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
532   const LinkerPatch method2_patches[] = {
533       LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
534   };
535   AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
536   Link();
537 
538   uint32_t method1_offset = GetMethodOffset(1u);
539   uint32_t method2_offset = GetMethodOffset(2u);
540   uint32_t diff_after = method2_offset - method1_offset;
541   CHECK_ALIGNED(diff_after, 4u);
542   ASSERT_LT(diff_after >> 2, 1u << 8);  // Simple encoding, (diff_after >> 2) fits into 8 bits.
543   const std::vector<uint8_t> method1_expected_code = RawCode({kBlPlus0 + (diff_after >> 2)});
544   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
545   uint32_t diff_before = method1_offset - method2_offset;
546   CHECK_ALIGNED(diff_before, 4u);
547   ASSERT_GE(diff_before, -1u << 27);
548   auto method2_expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff_before >> 2) & 0x03ffffffu));
549   EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
550 }
551 
552 TEST_F(Arm64RelativePatcherTestDefault, CallTrampoline) {
553   const LinkerPatch patches[] = {
554       LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
555   };
556   AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
557   Link();
558 
559   uint32_t method1_offset = GetMethodOffset(1u);
560   uint32_t diff = kTrampolineOffset - method1_offset;
561   ASSERT_EQ(diff & 1u, 0u);
562   ASSERT_GE(diff, -1u << 9);  // Simple encoding, -256 <= (diff >> 1) < 0 (checked as unsigned).
563   auto expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff >> 2) & 0x03ffffffu));
564   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
565 }
566 
567 TEST_F(Arm64RelativePatcherTestDefault, CallTrampolineTooFar) {
568   constexpr uint32_t missing_method_index = 1024u;
569   auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
570   constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
571   ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
572   ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
573   const LinkerPatch last_method_patches[] = {
574       LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, missing_method_index),
575   };
576 
577   constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
578   uint32_t last_method_idx = Create2MethodsWithGap(
579       kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
580       ArrayRef<const LinkerPatch>(last_method_patches),
581       just_over_max_negative_disp - bl_offset_in_last_method);
582   uint32_t method1_offset = GetMethodOffset(1u);
583   uint32_t last_method_offset = GetMethodOffset(last_method_idx);
584   ASSERT_EQ(method1_offset,
585             last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);
586   ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first);
587 
588   // Check linked code.
589   uint32_t thunk_offset =
590       CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
591   uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
592   CHECK_ALIGNED(diff, 4u);
593   ASSERT_LT(diff, 128 * MB);
594   auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
595   EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
596                                 ArrayRef<const uint8_t>(expected_code)));
597   EXPECT_TRUE(CheckThunk(thunk_offset));
598 }
599 
600 TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) {
601   auto method1_raw_code = GenNopsAndBl(1u, kBlPlus0);
602   constexpr uint32_t bl_offset_in_method1 = 1u * 4u;  // After NOPs.
603   ArrayRef<const uint8_t> method1_code(method1_raw_code);
604   ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
605   uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
606   const LinkerPatch method1_patches[] = {
607       LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
608   };
609 
610   constexpr uint32_t max_positive_disp = 128 * MB - 4u;
611   uint32_t last_method_idx = Create2MethodsWithGap(method1_code,
612                                                    ArrayRef<const LinkerPatch>(method1_patches),
613                                                    kNopCode,
614                                                    ArrayRef<const LinkerPatch>(),
615                                                    bl_offset_in_method1 + max_positive_disp);
616   ASSERT_EQ(expected_last_method_idx, last_method_idx);
617 
618   uint32_t method1_offset = GetMethodOffset(1u);
619   uint32_t last_method_offset = GetMethodOffset(last_method_idx);
620   ASSERT_EQ(method1_offset + bl_offset_in_method1 + max_positive_disp, last_method_offset);
621 
622   // Check linked code.
623   auto expected_code = GenNopsAndBl(1u, kBlPlusMax);
624   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
625 }
626 
627 TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarBefore) {
628   auto last_method_raw_code = GenNopsAndBl(0u, kBlPlus0);
629   constexpr uint32_t bl_offset_in_last_method = 0u * 4u;  // After NOPs.
630   ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
631   ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
632   const LinkerPatch last_method_patches[] = {
633       LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
634   };
635 
636   constexpr uint32_t max_negative_disp = 128 * MB;
637   uint32_t last_method_idx = Create2MethodsWithGap(kNopCode,
638                                                    ArrayRef<const LinkerPatch>(),
639                                                    last_method_code,
640                                                    ArrayRef<const LinkerPatch>(last_method_patches),
641                                                    max_negative_disp - bl_offset_in_last_method);
642   uint32_t method1_offset = GetMethodOffset(1u);
643   uint32_t last_method_offset = GetMethodOffset(last_method_idx);
644   ASSERT_EQ(method1_offset, last_method_offset + bl_offset_in_last_method - max_negative_disp);
645 
646   // Check linked code.
647   auto expected_code = GenNopsAndBl(0u, kBlMinusMax);
648   EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
649                                 ArrayRef<const uint8_t>(expected_code)));
650 }
651 
652 TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
653   auto method1_raw_code = GenNopsAndBl(0u, kBlPlus0);
654   constexpr uint32_t bl_offset_in_method1 = 0u * 4u;  // After NOPs.
655   ArrayRef<const uint8_t> method1_code(method1_raw_code);
656   ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
657   uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
658   const LinkerPatch method1_patches[] = {
659       LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
660   };
661 
662   constexpr uint32_t just_over_max_positive_disp = 128 * MB;
663   uint32_t last_method_idx = Create2MethodsWithGap(
664       method1_code,
665       ArrayRef<const LinkerPatch>(method1_patches),
666       kNopCode,
667       ArrayRef<const LinkerPatch>(),
668       bl_offset_in_method1 + just_over_max_positive_disp);
669   ASSERT_EQ(expected_last_method_idx, last_method_idx);
670 
671   uint32_t method1_offset = GetMethodOffset(1u);
672   uint32_t last_method_offset = GetMethodOffset(last_method_idx);
673   ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
674   uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
675   uint32_t thunk_size = MethodCallThunkSize();
676   uint32_t thunk_offset =
677       RoundDown(last_method_header_offset - thunk_size, GetInstructionSetAlignment(kArm64));
678   DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
679             last_method_header_offset);
680   uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
681   CHECK_ALIGNED(diff, 4u);
682   ASSERT_LT(diff, 128 * MB);
683   auto expected_code = GenNopsAndBl(0u, kBlPlus0 | (diff >> 2));
684   EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
685   CheckThunk(thunk_offset);
686 }
687 
688 TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) {
689   auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
690   constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
691   ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
692   ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
693   const LinkerPatch last_method_patches[] = {
694       LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
695   };
696 
697   constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
698   uint32_t last_method_idx = Create2MethodsWithGap(
699       kNopCode, ArrayRef<const LinkerPatch>(), last_method_code,
700       ArrayRef<const LinkerPatch>(last_method_patches),
701       just_over_max_negative_disp - bl_offset_in_last_method);
702   uint32_t method1_offset = GetMethodOffset(1u);
703   uint32_t last_method_offset = GetMethodOffset(last_method_idx);
704   ASSERT_EQ(method1_offset,
705             last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);
706 
707   // Check linked code.
708   uint32_t thunk_offset =
709       CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
710   uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
711   CHECK_ALIGNED(diff, 4u);
712   ASSERT_LT(diff, 128 * MB);
713   auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
714   EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
715                                 ArrayRef<const uint8_t>(expected_code)));
716   EXPECT_TRUE(CheckThunk(thunk_offset));
717 }
718 
719 TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference1) {
720   TestNopsAdrpLdr(0u, 0x12345678u, 0x1234u);
721 }
722 
723 TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference2) {
724   TestNopsAdrpLdr(0u, -0x12345678u, 0x4444u);
725 }
726 
727 TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference3) {
728   TestNopsAdrpLdr(0u, 0x12345000u, 0x3ffcu);
729 }
730 
731 TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference4) {
732   TestNopsAdrpLdr(0u, 0x12345000u, 0x4000u);
733 }
734 
735 TEST_F(Arm64RelativePatcherTestDefault, StringReference1) {
736   TestNopsAdrpAdd(0u, 0x12345678u);
737 }
738 
739 TEST_F(Arm64RelativePatcherTestDefault, StringReference2) {
740   TestNopsAdrpAdd(0u, -0x12345678u);
741 }
742 
743 TEST_F(Arm64RelativePatcherTestDefault, StringReference3) {
744   TestNopsAdrpAdd(0u, 0x12345000u);
745 }
746 
747 TEST_F(Arm64RelativePatcherTestDefault, StringReference4) {
748   TestNopsAdrpAdd(0u, 0x12345ffcu);
749 }
750 
751 #define TEST_FOR_OFFSETS(test, disp1, disp2) \
752   test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \
753   test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2)
754 
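// The adrp_offsets 0xff4u-0x1000u place the ADRP near the end of a 4KiB page. The 0xff8u and
// 0xffcu cases (last two words of a page) exercise the ADRP erratum workaround (Cortex-A53
// erratum 843419): the "default" variant inserts a fixup thunk, while "denver64" does not need one.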
755 #define DEFAULT_LDUR_LDR_TEST(adrp_offset, disp) \
756   TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## Ldur ## disp) { \
757     bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
758     TestAdrpLdurLdr(adrp_offset, has_thunk, 0x12345678u, disp); \
759   }
760 
761 TEST_FOR_OFFSETS(DEFAULT_LDUR_LDR_TEST, 0x1234, 0x1238)
762 
763 #define DENVER64_LDUR_LDR_TEST(adrp_offset, disp) \
764   TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference ## adrp_offset ## Ldur ## disp) { \
765     TestAdrpLdurLdr(adrp_offset, false, 0x12345678u, disp); \
766   }
767 
768 TEST_FOR_OFFSETS(DENVER64_LDUR_LDR_TEST, 0x1234, 0x1238)
769 
770 // LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
771 #define LDRW_PCREL_LDR_TEST(adrp_offset, disp) \
772   TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WPcRel ## disp) { \
773     TestAdrpLdrPcRelLdr(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u, 0x1234u); \
774   }
775 
776 TEST_FOR_OFFSETS(LDRW_PCREL_LDR_TEST, 0x1234, 0x1238)
777 
778 // LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
779 #define LDRX_PCREL_LDR_TEST(adrp_offset, disp) \
780   TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XPcRel ## disp) { \
781     bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
782     bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
783     TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \
784   }
785 
786 TEST_FOR_OFFSETS(LDRX_PCREL_LDR_TEST, 0x1234, 0x1238)
787 
788 // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
789 #define LDRW_SPREL_LDR_TEST(adrp_offset, disp) \
790   TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WSpRel ## disp) { \
791     TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
792   }
793 
794 TEST_FOR_OFFSETS(LDRW_SPREL_LDR_TEST, 0, 4)
795 
796 #define LDRX_SPREL_LDR_TEST(adrp_offset, disp) \
797   TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XSpRel ## disp) { \
798     TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
799   }
800 
801 TEST_FOR_OFFSETS(LDRX_SPREL_LDR_TEST, 0, 8)
802 
803 #define DEFAULT_LDUR_ADD_TEST(adrp_offset, disp) \
804   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## Ldur ## disp) { \
805     bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
806     TestAdrpLdurAdd(adrp_offset, has_thunk, disp); \
807   }
808 
809 TEST_FOR_OFFSETS(DEFAULT_LDUR_ADD_TEST, 0x12345678, 0xffffc840)
810 
811 #define DENVER64_LDUR_ADD_TEST(adrp_offset, disp) \
812   TEST_F(Arm64RelativePatcherTestDenver64, StringReference ## adrp_offset ## Ldur ## disp) { \
813     TestAdrpLdurAdd(adrp_offset, false, disp); \
814   }
815 
816 TEST_FOR_OFFSETS(DENVER64_LDUR_ADD_TEST, 0x12345678, 0xffffc840)
817 
818 #define DEFAULT_SUBX3X2_ADD_TEST(adrp_offset, disp) \
819   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubX3X2 ## disp) { \
820     /* SUB unrelated to "ADRP x0, addr". */ \
821     uint32_t sub = kSubXInsn | (100 << 10) | (2u << 5) | 3u;  /* SUB x3, x2, #100 */ \
822     TestAdrpInsn2Add(sub, adrp_offset, false, disp); \
823   }
824 
825 TEST_FOR_OFFSETS(DEFAULT_SUBX3X2_ADD_TEST, 0x12345678, 0xffffc840)
826 
827 #define DEFAULT_SUBSX3X0_ADD_TEST(adrp_offset, disp) \
828   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## SubsX3X0 ## disp) { \
829     /* SUBS that uses the result of "ADRP x0, addr". */ \
830     uint32_t subs = kSubsXInsn | (100 << 10) | (0u << 5) | 3u;  /* SUBS x3, x0, #100 */ \
831     TestAdrpInsn2Add(subs, adrp_offset, false, disp); \
832   }
833 
834 TEST_FOR_OFFSETS(DEFAULT_SUBSX3X0_ADD_TEST, 0x12345678, 0xffffc840)
835 
836 #define DEFAULT_ADDX0X0_ADD_TEST(adrp_offset, disp) \
837   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddX0X0 ## disp) { \
838     /* ADD that uses the result register of "ADRP x0, addr" as both source and destination. */ \
839     uint32_t add = kAddXInsn | (100 << 10) | (0u << 5) | 0u;  /* ADD x0, x0, #100 */ \
840     TestAdrpInsn2Add(add, adrp_offset, false, disp); \
841   }
842 
843 TEST_FOR_OFFSETS(DEFAULT_ADDX0X0_ADD_TEST, 0x12345678, 0xffffc840)
844 
845 #define DEFAULT_ADDSX0X2_ADD_TEST(adrp_offset, disp) \
846   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## AddsX0X2 ## disp) { \
847     /* ADDS that does not use the result of "ADRP x0, addr" but overwrites that register. */ \
848     uint32_t adds = kAddsXInsn | (100 << 10) | (2u << 5) | 0u;  /* ADDS x0, x2, #100 */ \
849     bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu); \
850     TestAdrpInsn2Add(adds, adrp_offset, has_thunk, disp); \
851   }
852 
853 TEST_FOR_OFFSETS(DEFAULT_ADDSX0X2_ADD_TEST, 0x12345678, 0xffffc840)
854 
855 // LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
856 #define LDRW_PCREL_ADD_TEST(adrp_offset, disp) \
857   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WPcRel ## disp) { \
858     TestAdrpLdrPcRelAdd(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u); \
859   }
860 
861 TEST_FOR_OFFSETS(LDRW_PCREL_ADD_TEST, 0x1234, 0x1238)
862 
863 // LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
864 #define LDRX_PCREL_ADD_TEST(adrp_offset, disp) \
865   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XPcRel ## disp) { \
866     bool unaligned = !IsAligned<8u>((adrp_offset) + 4u + static_cast<uint32_t>(disp)); \
867     bool has_thunk = ((adrp_offset) == 0xff8u || (adrp_offset) == 0xffcu) && unaligned; \
868     TestAdrpLdrPcRelAdd(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u); \
869   }
870 
871 TEST_FOR_OFFSETS(LDRX_PCREL_ADD_TEST, 0x1234, 0x1238)
872 
873 // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
874 #define LDRW_SPREL_ADD_TEST(adrp_offset, disp) \
875   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## WSpRel ## disp) { \
876     TestAdrpLdrSpRelAdd(kLdrWSpRelInsn, (disp) >> 2, adrp_offset, false, 0x12345678u); \
877   }
878 
879 TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4)
880 
881 #define LDRX_SPREL_ADD_TEST(adrp_offset, disp) \
882   TEST_F(Arm64RelativePatcherTestDefault, StringReference ## adrp_offset ## XSpRel ## disp) { \
883     TestAdrpLdrSpRelAdd(kLdrXSpRelInsn, (disp) >> 3, adrp_offset, false, 0x12345678u); \
884   }
885 
886 TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8)
887 
888 void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg) {
889   uint32_t valid_regs[] = {
890       0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
891       10, 11, 12, 13, 14, 15,         18, 19,  // IP0 and IP1 are reserved.
892       20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
893       // LR and SP/ZR are reserved.
894   };
895   DCHECK_ALIGNED(offset, 4u);
896   DCHECK_LT(offset, 16 * KB);
897   constexpr size_t kMethodCodeSize = 8u;
898   constexpr size_t kLiteralOffset = 0u;
899   uint32_t method_idx = 0u;
900   for (uint32_t base_reg : valid_regs) {
901     for (uint32_t holder_reg : valid_regs) {
902       uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | root_reg;
903       const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr});
904       ASSERT_EQ(kMethodCodeSize, raw_code.size());
905       ArrayRef<const uint8_t> code(raw_code);
906       uint32_t encoded_data =
907           Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg);
908       const LinkerPatch patches[] = {
909           LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data),
910       };
911       ++method_idx;
912       AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
913     }
914   }
915   Link();
916 
917   // All thunks are at the end.
918   uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
919   method_idx = 0u;
920   for (uint32_t base_reg : valid_regs) {
921     for (uint32_t holder_reg : valid_regs) {
922       ++method_idx;
923       uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
924       uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
925       uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | root_reg;
926       const std::vector<uint8_t> expected_code = RawCode({cbnz, ldr});
927       ASSERT_EQ(kMethodCodeSize, expected_code.size());
928       ASSERT_TRUE(
929           CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));
930 
931       std::vector<uint8_t> expected_thunk = CompileBakerOffsetThunk(base_reg, holder_reg);
932       ASSERT_GT(output_.size(), thunk_offset);
933       ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
934       ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset,
935                                              expected_thunk.size());
936       if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
937         DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
938         ASSERT_TRUE(false);
939       }
940 
941       size_t gray_check_offset = thunk_offset;
942       if (holder_reg == base_reg) {
943         // Verify that the null-check CBZ uses the correct register, i.e. holder_reg.
944         ASSERT_GE(output_.size() - gray_check_offset, 4u);
945         ASSERT_EQ(0x34000000 | holder_reg, GetOutputInsn(thunk_offset) & 0xff00001f);
946         gray_check_offset += 4u;
947       }
948       // Verify that the lock word for gray bit check is loaded from the holder address.
949       static constexpr size_t kGrayCheckInsns = 5;
950       ASSERT_GE(output_.size() - gray_check_offset, 4u * kGrayCheckInsns);
951       const uint32_t load_lock_word =
952           kLdrWInsn |
953           (mirror::Object::MonitorOffset().Uint32Value() << (10 - 2)) |
954           (holder_reg << 5) |
955           /* ip0 */ 16;
956       EXPECT_EQ(load_lock_word, GetOutputInsn(gray_check_offset));
957       // Verify the gray bit check.
958       const uint32_t check_gray_bit_without_offset =
959           0x37000000 | (LockWord::kReadBarrierStateShift << 19) | /* ip0 */ 16;
960       EXPECT_EQ(check_gray_bit_without_offset, GetOutputInsn(gray_check_offset + 4u) & 0xfff8001f);
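      // (0x37000000 is the TBNZ opcode; the 0xfff8001f mask checks the opcode, the tested bit
      // number and Rt while ignoring the imm14 branch offset.)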
961       // Verify the fake dependency.
962       const uint32_t fake_dependency =
963           0x8b408000 |              // ADD Xd, Xn, Xm, LSR 32
964           (/* ip0 */ 16 << 16) |    // Xm = ip0
965           (base_reg << 5) |         // Xn = base_reg
966           base_reg;                 // Xd = base_reg
967       EXPECT_EQ(fake_dependency, GetOutputInsn(gray_check_offset + 12u));
968       // Do not check the rest of the implementation.
969 
970       // The next thunk follows on the next aligned offset.
971       thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
972     }
973   }
974 }
975 
976 #define TEST_BAKER_FIELD(offset, root_reg)    \
977   TEST_F(Arm64RelativePatcherTestDefault,     \
978     BakerOffset##offset##_##root_reg) {       \
979     TestBakerField(offset, root_reg);         \
980   }
981 
982 TEST_BAKER_FIELD(/* offset */ 0, /* root_reg */ 0)
983 TEST_BAKER_FIELD(/* offset */ 8, /* root_reg */ 15)
984 TEST_BAKER_FIELD(/* offset */ 0x3ffc, /* root_reg */ 29)
985 
986 TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
987   // One thunk in the middle with maximum distance branches to it from both sides.
988   // Use offset = 0, base_reg = 0, root_reg = 0, the LDR is simply `kLdrWInsn`.
989   constexpr uint32_t kLiteralOffset1 = 4;
990   const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
991   ArrayRef<const uint8_t> code1(raw_code1);
992   uint32_t encoded_data =
993       Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
994   const LinkerPatch patches1[] = {
995       LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
996   };
997   AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(patches1));
998 
999   // Allow thunk at 1MiB offset from the start of the method above. Literal offset being 4
1000   // allows the branch to reach that thunk.
1001   size_t filler1_size =
1002       1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
1003   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
1004   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
1005   AddCompiledMethod(MethodRef(2u), filler1_code);
1006 
1007   // Enforce thunk reservation with a tiny method.
1008   AddCompiledMethod(MethodRef(3u), kNopCode);
1009 
1010   // Allow reaching the thunk from the very beginning of a method 1MiB away. Backward branch
1011   // reaches the full 1MiB. Things to subtract:
1012   //   - thunk size and method 3 pre-header, rounded up (padding in between if needed)
1013   //   - method 3 code and method 4 pre-header, rounded up (padding in between if needed)
1014   //   - method 4 header (let there be no padding between method 4 code and method 5 pre-header).
1015   size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0).size();
1016   size_t filler2_size =
1017       1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64Alignment)
1018              - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
1019              - sizeof(OatQuickMethodHeader);
1020   std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 4u);
1021   ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
1022   AddCompiledMethod(MethodRef(4u), filler2_code);
1023 
1024   constexpr uint32_t kLiteralOffset2 = 0;
1025   const std::vector<uint8_t> raw_code2 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn});
1026   ArrayRef<const uint8_t> code2(raw_code2);
1027   const LinkerPatch patches2[] = {
1028       LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset2, encoded_data),
1029   };
1030   AddCompiledMethod(MethodRef(5u), code2, ArrayRef<const LinkerPatch>(patches2));
1031 
1032   Link();
1033 
1034   uint32_t first_method_offset = GetMethodOffset(1u);
1035   uint32_t last_method_offset = GetMethodOffset(5u);
1036   EXPECT_EQ(2 * MB, last_method_offset - first_method_offset);
1037 
1038   const uint32_t cbnz_max_forward = kCbnzIP1Plus0Insn | 0x007fffe0;
1039   const uint32_t cbnz_max_backward = kCbnzIP1Plus0Insn | 0x00800000;
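  // (imm19 = 0x3ffff and 0x40000 in bits 5-23: branches of +1MiB - 4 and -1MiB, respectively.)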
1040   const std::vector<uint8_t> expected_code1 = RawCode({kNopInsn, cbnz_max_forward, kLdrWInsn});
1041   const std::vector<uint8_t> expected_code2 = RawCode({cbnz_max_backward, kLdrWInsn});
1042   ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
1043   ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef<const uint8_t>(expected_code2)));
1044 }
1045 
1046 TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) {
1047   // Based on the first part of BakerOffsetThunkInTheMiddle but the CBNZ is one instruction
1048   // earlier, so the thunk is emitted before the filler.
1049   // Use offset = 0, base_reg = 0, root_reg = 0, the LDR is simply `kLdrWInsn`.
1050   constexpr uint32_t kLiteralOffset1 = 0;
1051   const std::vector<uint8_t> raw_code1 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn, kNopInsn});
1052   ArrayRef<const uint8_t> code1(raw_code1);
1053   uint32_t encoded_data =
1054       Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
1055   const LinkerPatch patches1[] = {
1056       LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
1057   };
1058   AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(patches1));
1059 
1060   // Allow a thunk at 1MiB offset from the start of the method above. With the literal offset
1061   // at 0, the CBNZ cannot reach that far forward, so the thunk is emitted before the filler.
1062   size_t filler1_size =
1063       1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
1064   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
1065   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
1066   AddCompiledMethod(MethodRef(2u), filler1_code);
1067 
1068   Link();
1069 
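  // The thunk is emitted right after the method code, at the next kArm64Alignment boundary, so the
  // CBNZ at kLiteralOffset1 branches forward by the rounded-up code size minus its own offset.
  // Shifting the byte offset left by (5 - 2) divides it by 4 and places it in the imm19 field at bit 5.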
1070   const uint32_t cbnz_offset = RoundUp(raw_code1.size(), kArm64Alignment) - kLiteralOffset1;
1071   const uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
1072   const std::vector<uint8_t> expected_code1 = RawCode({cbnz, kLdrWInsn, kNopInsn});
1073   ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
1074 }
1075 
1076 TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFromLast) {
1077   // Based on the BakerOffsetThunkInTheMiddle but the CBNZ in the last method is preceded
1078   // by NOP and cannot reach the thunk in the middle, so we emit an extra thunk at the end.
1079   // Use offset = 0, base_reg = 0, root_reg = 0, the LDR is simply `kLdrWInsn`.
1080   constexpr uint32_t kLiteralOffset1 = 4;
1081   const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
1082   ArrayRef<const uint8_t> code1(raw_code1);
1083   uint32_t encoded_data =
1084       Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(/* base_reg */ 0, /* holder_reg */ 0);
1085   const LinkerPatch patches1[] = {
1086       LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data),
1087   };
1088   AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(patches1));
1089 
1090   // Allow thunk at 1MiB offset from the start of the method above. Literal offset being 4
1091   // allows the branch to reach that thunk.
1092   size_t filler1_size =
1093       1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment);
1094   std::vector<uint8_t> raw_filler1_code = GenNops(filler1_size / 4u);
1095   ArrayRef<const uint8_t> filler1_code(raw_filler1_code);
1096   AddCompiledMethod(MethodRef(2u), filler1_code);
1097 
1098   // Enforce thunk reservation with a tiny method.
1099   AddCompiledMethod(MethodRef(3u), kNopCode);
1100 
1101   // If not for the extra NOP, this would allow reaching the thunk from the very beginning of
1102   // method 5, which is 1MiB past the thunk; a backward branch reaches the full 1MiB. Subtract:
1103   //   - thunk size and method 3 pre-header, rounded up (padding in between if needed)
1104   //   - method 3 code and method 4 pre-header, rounded up (padding in between if needed)
1105   //   - method 5 pre-header (assuming no padding between method 4 code and the method 5 pre-header).
1106   size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0).size();
1107   size_t filler2_size =
1108       1 * MB - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArm64Alignment)
1109              - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
1110              - sizeof(OatQuickMethodHeader);
1111   std::vector<uint8_t> raw_filler2_code = GenNops(filler2_size / 4u);
1112   ArrayRef<const uint8_t> filler2_code(raw_filler2_code);
1113   AddCompiledMethod(MethodRef(4u), filler2_code);
1114 
1115   // Extra NOP compared to BakerOffsetThunkInTheMiddle.
1116   constexpr uint32_t kLiteralOffset2 = 4;
1117   const std::vector<uint8_t> raw_code2 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
1118   ArrayRef<const uint8_t> code2(raw_code2);
1119   const LinkerPatch patches2[] = {
1120       LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset2, encoded_data),
1121   };
1122   AddCompiledMethod(MethodRef(5u), code2, ArrayRef<const LinkerPatch>(patches2));
1123 
1124   Link();
1125 
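  // Method 1 reaches the thunk in the middle at the maximum forward branch distance. Because of
  // the extra NOP, the CBNZ in method 5 cannot reach that thunk and is patched to branch to the
  // extra thunk emitted right after method 5's code (at the next kArm64Alignment boundary).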
1126   const uint32_t cbnz_max_forward = kCbnzIP1Plus0Insn | 0x007fffe0;
1127   const uint32_t cbnz_last_offset = RoundUp(raw_code2.size(), kArm64Alignment) - kLiteralOffset2;
1128   const uint32_t cbnz_last = kCbnzIP1Plus0Insn | (cbnz_last_offset << (5 - 2));
1129   const std::vector<uint8_t> expected_code1 = RawCode({kNopInsn, cbnz_max_forward, kLdrWInsn});
1130   const std::vector<uint8_t> expected_code2 = RawCode({kNopInsn, cbnz_last, kLdrWInsn});
1131   ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef<const uint8_t>(expected_code1)));
1132   ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef<const uint8_t>(expected_code2)));
1133 }
1134 
1135 TEST_F(Arm64RelativePatcherTestDefault, BakerRootGcRoot) {
1136   uint32_t valid_regs[] = {
1137       0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
1138       10, 11, 12, 13, 14, 15,         18, 19,  // IP0 and IP1 are reserved.
1139       20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
1140       // LR and SP/ZR are reserved.
1141   };
1142   constexpr size_t kMethodCodeSize = 8u;
1143   constexpr size_t kLiteralOffset = 4u;
1144   uint32_t method_idx = 0u;
1145   for (uint32_t root_reg : valid_regs) {
1146     ++method_idx;
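    // LDR w<root_reg>, [x0, #8]: the unsigned 12-bit offset sits in bits 10-21 and is scaled by
    // 4 for a 32-bit load, so byte offset 8 is encoded as 2.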
1147     uint32_t ldr = kLdrWInsn | (/* offset */ 8 << (10 - 2)) | (/* base_reg */ 0 << 5) | root_reg;
1148     const std::vector<uint8_t> raw_code = RawCode({ldr, kCbnzIP1Plus0Insn});
1149     ASSERT_EQ(kMethodCodeSize, raw_code.size());
1150     ArrayRef<const uint8_t> code(raw_code);
1151     const LinkerPatch patches[] = {
1152         LinkerPatch::BakerReadBarrierBranchPatch(
1153             kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg)),
1154     };
1155     AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
1156   }
1157   Link();
1158 
1159   // All thunks are at the end.
1160   uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
1161   method_idx = 0u;
1162   for (uint32_t root_reg : valid_regs) {
1163     ++method_idx;
1164     uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
1165     uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
1166     uint32_t ldr = kLdrWInsn | (/* offset */ 8 << (10 - 2)) | (/* base_reg */ 0 << 5) | root_reg;
1167     const std::vector<uint8_t> expected_code = RawCode({ldr, cbnz});
1168     ASSERT_EQ(kMethodCodeSize, expected_code.size());
1169     EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));
1170 
1171     std::vector<uint8_t> expected_thunk = CompileBakerGcRootThunk(root_reg);
1172     ASSERT_GT(output_.size(), thunk_offset);
1173     ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
1174     ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset,
1175                                            expected_thunk.size());
1176     if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
1177       DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
1178       ASSERT_TRUE(false);
1179     }
1180 
1181     // Verify that the fast-path null-check CBZ uses the correct register, i.e. root_reg.
1182     ASSERT_GE(output_.size() - thunk_offset, 4u);
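    // 0x34000000 is CBZ (32-bit); the mask 0xff00001f compares only the opcode bits and the Rt
    // field, ignoring the branch target.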
1183     ASSERT_EQ(0x34000000 | root_reg, GetOutputInsn(thunk_offset) & 0xff00001f);
1184     // Do not check the rest of the implementation.
1185 
1186     // The next thunk follows on the next aligned offset.
1187     thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
1188   }
1189 }
1190 
1191 TEST_F(Arm64RelativePatcherTestDefault, BakerAndMethodCallInteraction) {
1192   // During development, there was a `DCHECK_LE(MaxNextOffset(), next_thunk.MaxNextOffset());`
1193   // in `ArmBaseRelativePatcher::ThunkData::MakeSpaceBefore()` which does not necessarily
1194   // hold when we're reserving thunks of different sizes. This test exposes the situation
1195   // by using Baker thunks and a method call thunk.
1196 
1197   // Add a method call patch that can reach up to method 1 offset + 128MiB.
1198   uint32_t method_idx = 0u;
1199   constexpr size_t kMethodCallLiteralOffset = 4u;
1200   constexpr uint32_t kMissingMethodIdx = 2u;
1201   const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kBlPlus0});
1202   const LinkerPatch method1_patches[] = {
1203       LinkerPatch::RelativeCodePatch(kMethodCallLiteralOffset, nullptr, 2u),
1204   };
1205   ArrayRef<const uint8_t> code1(raw_code1);
1206   ++method_idx;
1207   AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(method1_patches));
1208 
1209   // Skip kMissingMethodIdx.
1210   ++method_idx;
1211   ASSERT_EQ(kMissingMethodIdx, method_idx);
1212   // Add a method sized so that the code for the next method starts 1MiB after the code
1213   // for method 1.
1214   size_t filler_size =
1215       1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArm64Alignment)
1216              - sizeof(OatQuickMethodHeader);
1217   std::vector<uint8_t> filler_code = GenNops(filler_size / 4u);
1218   ++method_idx;
1219   AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(filler_code));
1220   // Add 126 methods with 1MiB code+header, making the code for the next method start 1MiB
1221   // before the currently scheduled MaxNextOffset() for the method call thunk.
1222   for (uint32_t i = 0; i != 126; ++i) {
1223     filler_size = 1 * MB - sizeof(OatQuickMethodHeader);
1224     filler_code = GenNops(filler_size / 4u);
1225     ++method_idx;
1226     AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(filler_code));
1227   }
1228 
1229   // Add 2 Baker GC root patches to the last method: one that would allow its thunk at
1230   // 1MiB + kArm64Alignment, i.e. kArm64Alignment after the method call thunk, and a
1231   // second one that needs its thunk kArm64Alignment after that. Given that the GC root
1232   // thunk is larger than the space required by the method call thunk plus kArm64Alignment,
1233   // this pushes the first GC root thunk's pending MaxNextOffset() before the method call
1234   // thunk's pending MaxNextOffset(), which then needs to be adjusted.
1235   ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArm64Alignment) + kArm64Alignment,
1236             CompileBakerGcRootThunk(/* root_reg */ 0).size());
1237   static_assert(kArm64Alignment == 16, "Code below assumes kArm64Alignment == 16");
1238   constexpr size_t kBakerLiteralOffset1 = 4u + kArm64Alignment;
1239   constexpr size_t kBakerLiteralOffset2 = 4u + 2 * kArm64Alignment;
1240   // Use offset = 0, base_reg = 0, the LDR is simply `kLdrWInsn | root_reg`.
1241   const uint32_t ldr1 = kLdrWInsn | /* root_reg */ 1;
1242   const uint32_t ldr2 = kLdrWInsn | /* root_reg */ 2;
1243   const std::vector<uint8_t> last_method_raw_code = RawCode({
1244       kNopInsn, kNopInsn, kNopInsn, kNopInsn,   // Padding before first GC root read barrier.
1245       ldr1, kCbnzIP1Plus0Insn,                  // First GC root LDR with read barrier.
1246       kNopInsn, kNopInsn,                       // Padding before second GC root read barrier.
1247       ldr2, kCbnzIP1Plus0Insn,                  // Second GC root LDR with read barrier.
1248   });
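  // The padding above places the first CBNZ at byte offset 20 and the second at byte offset 36,
  // matching kBakerLiteralOffset1 and kBakerLiteralOffset2 used by the patches below.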
1249   uint32_t encoded_data1 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1);
1250   uint32_t encoded_data2 = Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2);
1251   const LinkerPatch last_method_patches[] = {
1252       LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1),
1253       LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2),
1254   };
1255   ++method_idx;
1256   AddCompiledMethod(MethodRef(method_idx),
1257                     ArrayRef<const uint8_t>(last_method_raw_code),
1258                     ArrayRef<const LinkerPatch>(last_method_patches));
1259 
1260   // The main purpose of the test is to check that Link() does not cause a crash.
1261   Link();
1262 
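  // Method 3 places the next method 1MiB after method 1's code, and the 126 fillers add 1MiB
  // each, so the last method starts 127MiB after method 1.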
1263   ASSERT_EQ(127 * MB, GetMethodOffset(method_idx) - GetMethodOffset(1u));
1264 }
1265 
1266 }  // namespace linker
1267 }  // namespace art
1268