1 /*
2 * Copyright (C) 2023 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "berberis/runtime_primitives/translation_cache.h"
18
19 #include <atomic>
20 #include <map>
21 #include <mutex> // std::lock_guard, std::mutex
22
23 #include "berberis/base/bit_util.h"
24 #include "berberis/base/checks.h"
25 #include "berberis/base/config.h"
26 #include "berberis/base/forever_alloc.h"
27 #include "berberis/guest_state/guest_addr.h"
28 #include "berberis/runtime_primitives/host_code.h"
29 #include "berberis/runtime_primitives/runtime_library.h"
30
31 namespace berberis {
32
GetInstance()33 TranslationCache* TranslationCache::GetInstance() {
34 static auto* g_translation_cache = NewForever<TranslationCache>();
35 return g_translation_cache;
36 }
37
AddAndLockForTranslation(GuestAddr pc,uint32_t counter_threshold)38 GuestCodeEntry* TranslationCache::AddAndLockForTranslation(GuestAddr pc,
39 uint32_t counter_threshold) {
40 // Make sure host code is updated under the mutex, so that it's in sync with
41 // the set of the translating regions (e.g as invalidation observes it).
42 std::lock_guard<std::mutex> lock(mutex_);
43
44 auto* host_code_ptr = GetHostCodePtrWritable(pc);
45 bool added;
46 auto* entry = AddUnsafe(pc,
47 host_code_ptr,
48 {kEntryNotTranslated, 0}, // TODO(b/232598137): set true host_size?
49 1, // Non-zero size simplifies invalidation.
50 GuestCodeEntry::Kind::kInterpreted,
51 &added);
52 CHECK(entry);
53
54 // Must not be translated yet.
55 if (entry->host_code->load() != kEntryNotTranslated) {
56 return nullptr;
57 }
58
59 // Check the threshold.
60 if (entry->invocation_counter < counter_threshold) {
61 ++entry->invocation_counter;
62 return nullptr;
63 }
64
65 LockForTranslationUnsafe(entry);
66 return entry;
67 }
68
LockForGearUpTranslation(GuestAddr pc)69 GuestCodeEntry* TranslationCache::LockForGearUpTranslation(GuestAddr pc) {
70 std::lock_guard<std::mutex> lock(mutex_);
71
72 auto* entry = LookupGuestCodeEntryUnsafe(pc);
73 if (!entry) {
74 // Entry could have been invalidated and erased.
75 return nullptr;
76 }
77
78 // This method should be called for lite-translated region, but we cannot
79 // guarantee they stay as such before we lock the mutex.
80 if (entry->kind != GuestCodeEntry::Kind::kLiteTranslated) {
81 return nullptr;
82 }
83
84 LockForTranslationUnsafe(entry);
85 return entry;
86 }
87
LockForTranslationUnsafe(GuestCodeEntry * entry)88 void TranslationCache::LockForTranslationUnsafe(GuestCodeEntry* entry) {
89 entry->host_code->store(kEntryTranslating);
90 entry->kind = GuestCodeEntry::Kind::kUnderProcessing;
91
92 bool inserted = translating_.insert(entry).second;
93 CHECK(inserted);
94 }
95
// Publishes the translation result for `entry` and removes it from the
// translating set. If the entry was invalidated while translation was in
// flight, the result is discarded and the entry is erased instead.
void TranslationCache::SetTranslatedAndUnlock(GuestAddr pc,
                                              GuestCodeEntry* entry,
                                              uint32_t guest_size,
                                              GuestCodeEntry::Kind kind,
                                              HostCodePiece code) {
  // These kinds are set by other flows (locking/wrapping), never as the
  // result of a finished translation.
  CHECK(kind != GuestCodeEntry::Kind::kUnderProcessing);
  CHECK(kind != GuestCodeEntry::Kind::kGuestWrapped);
  CHECK(kind != GuestCodeEntry::Kind::kHostWrapped);
  // Make sure host code is updated under the mutex, so that it's in sync with
  // the set of the translating regions (e.g as invalidation observes it).
  std::lock_guard<std::mutex> lock(mutex_);

  auto current = entry->host_code->load();

  // Might have been invalidated while translating.
  if (current == kEntryInvalidating) {
    // ATTENTION: all transitions from kEntryInvalidating are protected by mutex!
    entry->host_code->store(kEntryNotTranslated);
    // InvalidateEntriesBeingTranslatedUnsafe deferred the erase to here.
    guest_entries_.erase(pc);
    return;
  }

  // Must be translating
  CHECK_EQ(current, kEntryTranslating);
  CHECK(entry->kind == GuestCodeEntry::Kind::kUnderProcessing);

  // ATTENTION: all transitions from kEntryTranslating are protected by mutex!
  entry->host_code->store(code.code);

  CHECK_GT(guest_size, 0);
  entry->host_size = code.size;
  entry->guest_size = guest_size;
  entry->kind = kind;

  // The entry is no longer in flight; it must have been registered exactly once.
  size_t num_erased = translating_.erase(entry);
  CHECK_EQ(num_erased, 1);

  // Track the largest region ever translated; InvalidateGuestRange uses this
  // to bound its backward search for overlapping entries.
  if (max_guest_size_ < guest_size) {
    max_guest_size_ = guest_size;
  }
}
137
AddAndLockForWrapping(GuestAddr pc)138 GuestCodeEntry* TranslationCache::AddAndLockForWrapping(GuestAddr pc) {
139 // This should be relatively rare, don't need a fast pass.
140 std::lock_guard<std::mutex> lock(mutex_);
141
142 // ATTENTION: kEntryWrapping is a locked state, can return the entry.
143 bool locked;
144 auto* entry = AddUnsafe(pc,
145 GetHostCodePtrWritable(pc),
146 {kEntryWrapping, 0}, // TODO(b/232598137): set true host_size?
147 1, // Non-zero size simplifies invalidation.
148 GuestCodeEntry::Kind::kUnderProcessing,
149 &locked);
150 return locked ? entry : nullptr;
151 }
152
// Publishes the wrapper code for `entry` and moves it to its terminal wrapped
// kind. If the entry was invalidated while wrapping was in flight, the result
// is discarded and the entry is erased instead.
void TranslationCache::SetWrappedAndUnlock(GuestAddr pc,
                                           GuestCodeEntry* entry,
                                           bool is_host_func,
                                           HostCodePiece code) {
  std::lock_guard<std::mutex> lock(mutex_);

  auto current = entry->host_code->load();

  // Might have been invalidated while wrapping.
  if (current == kEntryInvalidating) {
    // ATTENTION: all transitions from kEntryInvalidating are protected by mutex!
    entry->host_code->store(kEntryNotTranslated);
    // InvalidateGuestRange deferred the erase of a wrapping entry to here.
    guest_entries_.erase(pc);
    return;
  }

  // Must be wrapping.
  CHECK_EQ(current, kEntryWrapping);
  CHECK(entry->kind == GuestCodeEntry::Kind::kUnderProcessing);

  // ATTENTION: all transitions from kEntryWrapping are protected by mutex!
  entry->host_code->store(code.code);

  entry->host_size = code.size;
  entry->kind =
      is_host_func ? GuestCodeEntry::Kind::kHostWrapped : GuestCodeEntry::Kind::kGuestWrapped;
  // entry->guest_size remains from 'wrapping'.
  CHECK_EQ(entry->guest_size, 1);
}
182
IsHostFunctionWrapped(GuestAddr pc) const183 bool TranslationCache::IsHostFunctionWrapped(GuestAddr pc) const {
184 std::lock_guard<std::mutex> lock(mutex_);
185 if (auto* entry = LookupGuestCodeEntryUnsafe(pc)) {
186 return entry->kind == GuestCodeEntry::Kind::kHostWrapped;
187 }
188 return false;
189 }
190
AddUnsafe(GuestAddr pc,std::atomic<HostCodeAddr> * host_code_ptr,HostCodePiece host_code_piece,uint32_t guest_size,GuestCodeEntry::Kind kind,bool * added)191 GuestCodeEntry* TranslationCache::AddUnsafe(GuestAddr pc,
192 std::atomic<HostCodeAddr>* host_code_ptr,
193 HostCodePiece host_code_piece,
194 uint32_t guest_size,
195 GuestCodeEntry::Kind kind,
196 bool* added) {
197 auto [it, inserted] = guest_entries_.emplace(
198 std::pair{pc, GuestCodeEntry{host_code_ptr, host_code_piece.size, guest_size, kind, 0}});
199
200 if (inserted) {
201 host_code_ptr->store(host_code_piece.code);
202 }
203
204 *added = inserted;
205 return &it->second;
206 }
207
ProfilerLookupGuestCodeEntryByGuestPC(GuestAddr pc)208 GuestCodeEntry* TranslationCache::ProfilerLookupGuestCodeEntryByGuestPC(GuestAddr pc) {
209 std::lock_guard<std::mutex> lock(mutex_);
210 return LookupGuestCodeEntryUnsafe(pc);
211 }
212
GetInvocationCounter(GuestAddr pc) const213 uint32_t TranslationCache::GetInvocationCounter(GuestAddr pc) const {
214 std::lock_guard<std::mutex> lock(mutex_);
215 auto* entry = LookupGuestCodeEntryUnsafe(pc);
216 if (entry == nullptr) {
217 return 0;
218 }
219 return entry->invocation_counter;
220 }
221
LookupGuestCodeEntryUnsafe(GuestAddr pc)222 GuestCodeEntry* TranslationCache::LookupGuestCodeEntryUnsafe(GuestAddr pc) {
223 auto it = guest_entries_.find(pc);
224 if (it != std::end(guest_entries_)) {
225 return &it->second;
226 }
227
228 return nullptr;
229 }
230
LookupGuestCodeEntryUnsafe(GuestAddr pc) const231 const GuestCodeEntry* TranslationCache::LookupGuestCodeEntryUnsafe(GuestAddr pc) const {
232 return const_cast<TranslationCache*>(this)->LookupGuestCodeEntryUnsafe(pc);
233 }
234
SlowLookupGuestCodeEntryPCByHostPC(HostCode pc)235 GuestAddr TranslationCache::SlowLookupGuestCodeEntryPCByHostPC(HostCode pc) {
236 std::lock_guard<std::mutex> lock(mutex_);
237 const auto pc_addr = AsHostCodeAddr(pc);
238
239 for (auto& it : guest_entries_) {
240 auto* entry = &it.second;
241 auto host_code = entry->host_code->load();
242 if (host_code <= pc_addr && pc_addr < host_code + entry->host_size) {
243 return it.first;
244 }
245 }
246 return 0;
247 }
248
// Moves every in-flight translation to the kEntryInvalidating state and
// clears the translating set. Caller must hold mutex_. The entries themselves
// are not erased here: SetTranslatedAndUnlock erases them when the translator
// finishes and observes kEntryInvalidating.
void TranslationCache::InvalidateEntriesBeingTranslatedUnsafe() {
  for (GuestCodeEntry* entry : translating_) {
    CHECK(entry->kind == GuestCodeEntry::Kind::kUnderProcessing);
    CHECK_EQ(entry->host_code->load(), kEntryTranslating);
    entry->host_code->store(kEntryInvalidating);
    entry->host_size = 0;  // TODO(b/232598137): set true host_size?
    // entry->guest_size and entry->kind remain from 'translating'.
    // The entry will be erased on SetTranslatedAndUnlock.
  }
  translating_.clear();
}
260
// Invalidates every cached entry whose guest code range overlaps [start, end),
// plus every entry currently being translated.
void TranslationCache::InvalidateGuestRange(GuestAddr start, GuestAddr end) {
  std::lock_guard<std::mutex> lock(mutex_);

  // Also invalidate all entries being translated, since they may possibly overlap with the
  // start/end invalidation range. Technically, in the current implementation where we only
  // translate regions that are a linear range of addresses, we would not need to invalidate the
  // Translating entries that come after the end of the region being invalidated. But whether this
  // would be beneficial is unclear and unlikely, and furthermore we may change the "linear" aspect
  // later e.g. to follow static jumps.
  InvalidateEntriesBeingTranslatedUnsafe();

  // An entry starting before `start` can still overlap the range, but only if
  // it begins within max_guest_size_ bytes of it - start the scan there.
  std::map<GuestAddr, GuestCodeEntry>::iterator first;
  if (start <= max_guest_size_) {
    first = guest_entries_.begin();
  } else {
    first = guest_entries_.upper_bound(start - max_guest_size_);
  }

  while (first != guest_entries_.end()) {
    // Advance before possibly erasing `curr` so the iterator stays valid.
    auto curr = first++;
    auto guest_pc = curr->first;
    GuestCodeEntry* entry = &curr->second;

    CHECK_GT(entry->guest_size, 0);
    // Ends before the invalidated range - no overlap, keep scanning.
    if (guest_pc + entry->guest_size <= start) {
      continue;
    }
    // Starts at/after the range end - no further entry can overlap (map is
    // ordered by guest pc).
    if (guest_pc >= end) {
      break;
    }

    HostCodeAddr current = entry->host_code->load();

    if (current == kEntryInvalidating) {
      // Translating but invalidated entry is handled in SetTranslatedAndUnlock.
    } else if (current == kEntryWrapping) {
      // Wrapping entry range is known in advance, so we don't have it in translating_.
      entry->host_code->store(kEntryInvalidating);
      // Wrapping but invalidated entry is handled in SetWrappedAndUnlock.
    } else {
      entry->host_code->store(kEntryNotTranslated);
      guest_entries_.erase(curr);
    }
  }
}
306
// Forces lite-translated entries within `range` of `target` to become
// eligible for heavy translation by setting their invocation counters to the
// gear-switch threshold.
void TranslationCache::TriggerGearShift(GuestAddr target, size_t range) {
  std::lock_guard<std::mutex> lock(mutex_);
  // Clamp the scan start so it does not underflow below address zero.
  GuestAddr start = (target > range) ? target - range : kNullGuestAddr;

  for (auto it = guest_entries_.lower_bound(start); it != guest_entries_.end(); ++it) {
    auto& [guest_pc, entry] = *it;
    CHECK_GT(entry.guest_size, 0);
    // Past the upper end of the [target - range, target + range] window.
    if ((guest_pc > target) && ((guest_pc - target) > range)) {
      break;
    }
    if (entry.kind == GuestCodeEntry::Kind::kLiteTranslated) {
      // Lite translator may update the counter non-atomically for efficiency, but here
      // we can be more strict.
      // NOTE(review): this reinterprets a plain uint32_t field as std::atomic;
      // it relies on the two types having identical layout - presumably
      // guaranteed elsewhere in the project. Confirm before touching.
      auto* counter = bit_cast<std::atomic<uint32_t>*>(&entry.invocation_counter);
      *counter = config::kGearSwitchThreshold;
    }
  }
}
325
326 } // namespace berberis
327