/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include "base/stl_util.h"
#include "compiled_method.h"
#include "linker/output_stream.h"
#include "oat.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

class ArmBaseRelativePatcher::ThunkData {
 public:
  ThunkData(std::vector<uint8_t> code, uint32_t max_next_offset)
      : code_(code),
        offsets_(),
        max_next_offset_(max_next_offset),
        pending_offset_(0u) {
    DCHECK(NeedsNextThunk());  // The data is constructed only when we expect to need the thunk.
  }

  ThunkData(ThunkData&& src) = default;

  size_t CodeSize() const {
    return code_.size();
  }

  ArrayRef<const uint8_t> GetCode() const {
    return ArrayRef<const uint8_t>(code_);
  }

  bool NeedsNextThunk() const {
    return max_next_offset_ != 0u;
  }

  uint32_t MaxNextOffset() const {
    DCHECK(NeedsNextThunk());
    return max_next_offset_;
  }

  void ClearMaxNextOffset() {
    DCHECK(NeedsNextThunk());
    max_next_offset_ = 0u;
  }

  void SetMaxNextOffset(uint32_t max_next_offset) {
    DCHECK(!NeedsNextThunk());
    max_next_offset_ = max_next_offset;
  }

  // Adjust the MaxNextOffset() down if needed to fit the code before the next thunk.
  // Returns true if it was adjusted, false if the old value was kept.
  bool MakeSpaceBefore(const ThunkData& next_thunk, size_t alignment) {
    DCHECK(NeedsNextThunk());
    DCHECK(next_thunk.NeedsNextThunk());
    DCHECK_ALIGNED_PARAM(MaxNextOffset(), alignment);
    DCHECK_ALIGNED_PARAM(next_thunk.MaxNextOffset(), alignment);
    if (next_thunk.MaxNextOffset() - CodeSize() < MaxNextOffset()) {
      max_next_offset_ = RoundDown(next_thunk.MaxNextOffset() - CodeSize(), alignment);
      return true;
    } else {
      return false;
    }
  }

  uint32_t ReserveOffset(size_t offset) {
    DCHECK(NeedsNextThunk());
    DCHECK_LE(offset, max_next_offset_);
    max_next_offset_ = 0u;  // The reserved offset should satisfy all pending references.
    offsets_.push_back(offset);
    return offset + CodeSize();
  }

  bool HasReservedOffset() const {
    return !offsets_.empty();
  }

  uint32_t LastReservedOffset() const {
    DCHECK(HasReservedOffset());
    return offsets_.back();
  }

  bool HasPendingOffset() const {
    return pending_offset_ != offsets_.size();
  }

  uint32_t GetPendingOffset() const {
    DCHECK(HasPendingOffset());
    return offsets_[pending_offset_];
  }

  void MarkPendingOffsetAsWritten() {
    DCHECK(HasPendingOffset());
    ++pending_offset_;
  }

  bool HasWrittenOffset() const {
    return pending_offset_ != 0u;
  }

  uint32_t LastWrittenOffset() const {
    DCHECK(HasWrittenOffset());
    return offsets_[pending_offset_ - 1u];
  }

 private:
  std::vector<uint8_t> code_;       // The code of the thunk.
  std::vector<uint32_t> offsets_;   // Offsets at which the thunk needs to be written.
  uint32_t max_next_offset_;        // The maximum offset at which the next thunk can be placed.
  uint32_t pending_offset_;         // The index of the next offset to write.
};
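
// A sketch of the typical ThunkData life cycle implied by the methods above
// (illustrative offsets, not taken from a real compilation):
//
//   ThunkData data(CompileThunk(key), /* max_next_offset */ 0x3000u);
//   uint32_t end = data.ReserveOffset(0x2f80u);   // Records 0x2f80, clears the deadline.
//   data.SetMaxNextOffset(0x5000u);               // A later out-of-range patch re-arms it.
//   uint32_t where = data.GetPendingOffset();     // 0x2f80, still waiting to be written.
//   data.MarkPendingOffsetAsWritten();            // Written; LastWrittenOffset() is 0x2f80.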

class ArmBaseRelativePatcher::PendingThunkComparator {
 public:
  bool operator()(const ThunkData* lhs, const ThunkData* rhs) const {
    DCHECK(lhs->HasPendingOffset());
    DCHECK(rhs->HasPendingOffset());
    // The top of the heap is defined to contain the highest element and we want to pick
    // the thunk with the smallest pending offset, so use the reverse ordering, i.e. ">".
    return lhs->GetPendingOffset() > rhs->GetPendingOffset();
  }
};
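
// Because the standard heap functions maintain a max-heap, the reversed comparison above
// turns `pending_thunks_` into a min-heap keyed on the pending offset. A minimal sketch
// of the intended use (hypothetical container, mirroring WriteThunks() below):
//
//   std::vector<ThunkData*> heap = /* thunks with pending offsets */;
//   std::make_heap(heap.begin(), heap.end(), PendingThunkComparator());
//   ThunkData* next = heap.front();  // The smallest GetPendingOffset() of all entries.
//   std::pop_heap(heap.begin(), heap.end(), PendingThunkComparator());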

uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method,
                                              MethodReference method_ref) {
  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  // For multi-oat compilations (boot image), ReserveSpaceEnd() is called for each oat file.
  // Since we do not know here whether this is the last file or whether the next opportunity
  // to place a thunk will come soon enough, we need to reserve all needed thunks now. Code
  // in subsequent oat files can still call back to them.
  if (!unprocessed_method_call_patches_.empty()) {
    ResolveMethodCalls(offset, MethodReference(nullptr, DexFile::kDexNoIndex));
  }
  for (ThunkData* data : unreserved_thunks_) {
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = data->ReserveOffset(thunk_offset);
  }
  unreserved_thunks_.clear();
  // We also need to delay initializing `pending_thunks_` until the first call to WriteThunks().
  // Check that `pending_thunks_.capacity()` indicates that no WriteThunks() has taken place.
  DCHECK_EQ(pending_thunks_.capacity(), 0u);
  return offset;
}

uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (pending_thunks_.capacity() == 0u) {
    if (thunks_.empty()) {
      return offset;
    }
    // First call to WriteThunks(), prepare the thunks for writing.
    pending_thunks_.reserve(thunks_.size());
    for (auto& entry : thunks_) {
      ThunkData* data = &entry.second;
      if (data->HasPendingOffset()) {
        pending_thunks_.push_back(data);
      }
    }
    std::make_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  while (!pending_thunks_.empty() &&
         pending_thunks_.front()->GetPendingOffset() == aligned_offset) {
    // Write alignment bytes and code.
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && UNLIKELY(!WriteCodeAlignment(out, aligned_code_delta))) {
      return 0u;
    }
    if (UNLIKELY(!WriteThunk(out, pending_thunks_.front()->GetCode()))) {
      return 0u;
    }
    offset = aligned_offset + pending_thunks_.front()->CodeSize();
    // Mark the thunk as written at the pending offset and update the `pending_thunks_` heap.
    std::pop_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    pending_thunks_.back()->MarkPendingOffsetAsWritten();
    if (pending_thunks_.back()->HasPendingOffset()) {
      std::push_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    } else {
      pending_thunks_.pop_back();
    }
    aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  }
  DCHECK(pending_thunks_.empty() || pending_thunks_.front()->GetPendingOffset() > aligned_offset);
  return offset;
}
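
// A sketch of the calling pattern this function expects (hypothetical driver loop; the
// real driver lives in the OAT writer, not in this file):
//
//   for (/* each compiled method, in reservation order */) {
//     offset = patcher->WriteThunks(out, offset);  // Emit any thunks due at this offset.
//     if (offset == 0u) { /* write error */ }
//     offset = WriteMethodCode(out, offset);       // Hypothetical helper.
//   }
//   offset = patcher->WriteThunks(out, offset);    // Trailing thunks after the last method.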

ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
                                               InstructionSet instruction_set)
    : provider_(provider),
      instruction_set_(instruction_set),
      thunks_(),
      unprocessed_method_call_patches_(),
      method_call_thunk_(nullptr),
      pending_thunks_() {
}

ArmBaseRelativePatcher::~ArmBaseRelativePatcher() {
  // All work done by member destructors.
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      MethodReference method_ref,
                                                      uint32_t max_extra_space) {
  // Adjust code size for extra space required by the subclass.
  uint32_t max_code_size = compiled_method->GetQuickCode().size() + max_extra_space;
  uint32_t code_offset;
  uint32_t next_aligned_offset;
  while (true) {
    code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
    next_aligned_offset = compiled_method->AlignCode(code_offset + max_code_size);
    if (unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
      break;
    }
    ThunkData* thunk = unreserved_thunks_.front();
    if (thunk == method_call_thunk_) {
      ResolveMethodCalls(code_offset, method_ref);
      // This may have changed `method_call_thunk_` data, so re-check if we need to reserve.
      if (unreserved_thunks_.empty() ||
          unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
        break;
      }
      // We need to process the new `front()` whether it's still the `method_call_thunk_` or not.
      thunk = unreserved_thunks_.front();
    }
    unreserved_thunks_.pop_front();
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = thunk->ReserveOffset(thunk_offset);
    if (thunk == method_call_thunk_) {
      // All remaining method call patches will be handled by this thunk.
      DCHECK(!unprocessed_method_call_patches_.empty());
      DCHECK_LE(thunk_offset - unprocessed_method_call_patches_.front().GetPatchOffset(),
                MaxPositiveDisplacement(GetMethodCallKey()));
      unprocessed_method_call_patches_.clear();
    }
  }

  // Process patches and check that adding thunks for the current method did not push any
  // thunks (previously existing or newly added) before `next_aligned_offset`. This is
  // essentially a check that we never compile a method that's too big. The calls or branches
  // from the method should be able to reach beyond the end of the method and over any pending
  // thunks. (The number of different thunks should be relatively low and their code short.)
  ProcessPatches(compiled_method, code_offset);
  CHECK(unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset);

  return offset;
}

uint32_t ArmBaseRelativePatcher::CalculateMethodCallDisplacement(uint32_t patch_offset,
                                                                 uint32_t target_offset) {
  DCHECK(method_call_thunk_ != nullptr);
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
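  // A worked example with illustrative bounds in the spirit of a Thumb2 BL (not necessarily
  // the exact values used by the subclasses): say max_positive_displacement = 0x00fffffe and
  // max_negative_displacement = 0x01000000, so -max_negative_displacement wraps around to
  // 0xff000000. A too-far forward displacement such as 0x02000000 satisfies both comparisons
  // and enters the branch; an in-range backward displacement such as -4 == 0xfffffffc fails
  // `displacement < 0xff000000` and correctly skips it.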
  if (displacement > max_positive_displacement && displacement < -max_negative_displacement) {
    // Unwritten thunks have higher offsets, check if it's within range.
    DCHECK(!method_call_thunk_->HasPendingOffset() ||
           method_call_thunk_->GetPendingOffset() > patch_offset);
    if (method_call_thunk_->HasPendingOffset() &&
        method_call_thunk_->GetPendingOffset() - patch_offset <= max_positive_displacement) {
      displacement = method_call_thunk_->GetPendingOffset() - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK(method_call_thunk_->HasWrittenOffset());
      DCHECK_LT(method_call_thunk_->LastWrittenOffset(), patch_offset);
      displacement = method_call_thunk_->LastWrittenOffset() - patch_offset;
      DCHECK_GE(displacement, -max_negative_displacement);
    }
  }
  return displacement;
}

uint32_t ArmBaseRelativePatcher::GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset) {
  auto it = thunks_.find(key);
  CHECK(it != thunks_.end());
  const ThunkData& data = it->second;
  if (data.HasWrittenOffset()) {
    uint32_t offset = data.LastWrittenOffset();
    DCHECK_LT(offset, patch_offset);
    if (patch_offset - offset <= MaxNegativeDisplacement(key)) {
      return offset;
    }
  }
  DCHECK(data.HasPendingOffset());
  uint32_t offset = data.GetPendingOffset();
  DCHECK_GT(offset, patch_offset);
  DCHECK_LE(offset - patch_offset, MaxPositiveDisplacement(key));
  return offset;
}

ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetMethodCallKey() {
  return ThunkKey(ThunkType::kMethodCall);
}

ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetBakerThunkKey(
    const LinkerPatch& patch) {
  DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kBakerReadBarrierBranch);
  return ThunkKey(ThunkType::kBakerReadBarrier,
                  patch.GetBakerCustomValue1(),
                  patch.GetBakerCustomValue2());
}

void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_method,
                                            uint32_t code_offset) {
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    uint32_t patch_offset = code_offset + patch.LiteralOffset();
    ThunkKey key(static_cast<ThunkType>(-1));
    ThunkData* old_data = nullptr;
    if (patch.GetType() == LinkerPatch::Type::kCallRelative) {
      key = GetMethodCallKey();
      unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod());
      if (method_call_thunk_ == nullptr) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.Put(key, ThunkData(CompileThunk(key), max_next_offset));
        method_call_thunk_ = &it->second;
        AddUnreservedThunk(method_call_thunk_);
      } else {
        old_data = method_call_thunk_;
      }
    } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
      key = GetBakerThunkKey(patch);
      auto lb = thunks_.lower_bound(key);
      if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.PutBefore(lb, key, ThunkData(CompileThunk(key), max_next_offset));
        AddUnreservedThunk(&it->second);
      } else {
        old_data = &lb->second;
      }
    }
    if (old_data != nullptr) {
      // Shared path where an old thunk may need an update.
      DCHECK(key.GetType() != static_cast<ThunkType>(-1));
      DCHECK(!old_data->HasReservedOffset() || old_data->LastReservedOffset() < patch_offset);
      if (old_data->NeedsNextThunk()) {
        // Patches for a method are ordered by literal offset, so if we still need to place
        // this thunk for a previous patch, that thunk shall be in range for this patch.
        DCHECK_LE(old_data->MaxNextOffset(), CalculateMaxNextOffset(patch_offset, key));
      } else {
        if (!old_data->HasReservedOffset() ||
            patch_offset - old_data->LastReservedOffset() > MaxNegativeDisplacement(key)) {
          old_data->SetMaxNextOffset(CalculateMaxNextOffset(patch_offset, key));
          AddUnreservedThunk(old_data);
        }
      }
    }
  }
}

void ArmBaseRelativePatcher::AddUnreservedThunk(ThunkData* data) {
  DCHECK(data->NeedsNextThunk());
  size_t index = unreserved_thunks_.size();
  while (index != 0u && data->MaxNextOffset() < unreserved_thunks_[index - 1u]->MaxNextOffset()) {
    --index;
  }
  unreserved_thunks_.insert(unreserved_thunks_.begin() + index, data);
  // We may need to update the max next offset(s) if the thunk code would not fit.
  size_t alignment = GetInstructionSetAlignment(instruction_set_);
  if (index + 1u != unreserved_thunks_.size()) {
    // Note: Ignore the return value as we need to process previous thunks regardless.
    data->MakeSpaceBefore(*unreserved_thunks_[index + 1u], alignment);
  }
  // Make space for previous thunks. Once we find a pending thunk that does
  // not need an adjustment, we can stop.
  while (index != 0u && unreserved_thunks_[index - 1u]->MakeSpaceBefore(*data, alignment)) {
    --index;
    data = unreserved_thunks_[index];
  }
}
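
// A worked example of the adjustment cascade above (illustrative numbers: alignment 8,
// every thunk 16 bytes of code): with unreserved_thunks_ = { A(max 96), B(max 104) },
// inserting C(max 100) places it between A and B. C.MakeSpaceBefore(B, 8) lowers C's
// deadline to RoundDown(104 - 16, 8) = 88 so that C's code fits before B, and then
// A.MakeSpaceBefore(C, 8) lowers A's to RoundDown(88 - 16, 8) = 72. The backward loop
// stops as soon as a predecessor already leaves enough room.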

void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset,
                                                MethodReference method_ref) {
  DCHECK(!unreserved_thunks_.empty());
  DCHECK(!unprocessed_method_call_patches_.empty());
  DCHECK(method_call_thunk_ != nullptr);
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // Process as many patches as possible, stop only on unresolved targets or calls too far back.
  while (!unprocessed_method_call_patches_.empty()) {
    MethodReference target_method = unprocessed_method_call_patches_.front().GetTargetMethod();
    uint32_t patch_offset = unprocessed_method_call_patches_.front().GetPatchOffset();
    DCHECK(!method_call_thunk_->HasReservedOffset() ||
           method_call_thunk_->LastReservedOffset() <= patch_offset);
    if (!method_call_thunk_->HasReservedOffset() ||
        patch_offset - method_call_thunk_->LastReservedOffset() > max_negative_displacement) {
      // No previous thunk in range, check if we can reach the target directly.
      if (target_method.dex_file == method_ref.dex_file &&
          target_method.dex_method_index == method_ref.dex_method_index) {
        DCHECK_GT(quick_code_offset, patch_offset);
        if (quick_code_offset - patch_offset > max_positive_displacement) {
          break;
        }
      } else {
        auto result = provider_->FindMethodOffset(target_method);
        if (!result.first) {
          break;
        }
        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
        if (target_offset >= patch_offset) {
          DCHECK_LE(target_offset - patch_offset, max_positive_displacement);
        } else if (patch_offset - target_offset > max_negative_displacement) {
          break;
        }
      }
    }
    unprocessed_method_call_patches_.pop_front();
  }
  if (!unprocessed_method_call_patches_.empty()) {
    // Try to adjust the max next offset in `method_call_thunk_`. Do this conservatively only if
    // the thunk shall be at the end of the `unreserved_thunks_` to avoid dealing with overlaps.
    uint32_t new_max_next_offset =
        unprocessed_method_call_patches_.front().GetPatchOffset() + max_positive_displacement;
    if (new_max_next_offset >
        unreserved_thunks_.back()->MaxNextOffset() + unreserved_thunks_.back()->CodeSize()) {
      method_call_thunk_->ClearMaxNextOffset();
      method_call_thunk_->SetMaxNextOffset(new_max_next_offset);
      if (method_call_thunk_ != unreserved_thunks_.back()) {
        RemoveElement(unreserved_thunks_, method_call_thunk_);
        unreserved_thunks_.push_back(method_call_thunk_);
      }
    }
  } else {
    // We have resolved all method calls, we do not need a new thunk anymore.
    method_call_thunk_->ClearMaxNextOffset();
    RemoveElement(unreserved_thunks_, method_call_thunk_);
  }
}

inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_offset,
                                                               const ThunkKey& key) {
  return RoundDown(patch_offset + MaxPositiveDisplacement(key),
                   GetInstructionSetAlignment(instruction_set_));
}
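
// For example (illustrative values, not taken from the subclasses): with a patch at
// offset 0x1234, a maximum positive displacement of 0x00fffffe and an instruction set
// alignment of 8, the next thunk may start no later than
// RoundDown(0x1234 + 0x00fffffe, 8) = RoundDown(0x1001232, 8) = 0x1001230.
// Rounding down keeps the aligned thunk start within branch range of the patch.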

}  // namespace linker
}  // namespace art