/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "transaction.h"

#include <android-base/logging.h>

#include "aot_class_linker.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "dex/descriptors_names.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc_root-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"

#include <list>

namespace art {

// TODO: remove (only used for debugging purposes).
static constexpr bool kEnableTransactionStats = false;

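// A `Transaction` records every heap, intern table and dex cache modification
// made while it is active so that the whole set can be reverted by Rollback()
// if the transaction is aborted. In strict mode, static field reads and writes
// are restricted to the `root_` class (see the *Constraint() methods below).
//
// Exactly one of `arena_stack` and `arena_pool` must be non-null: either the
// caller provides an existing `ArenaStack`, or the transaction creates its own
// from the given pool (see the DCHECK_NE in the constructor).
//
// Illustrative usage sketch only; the actual driver lives elsewhere in the
// compiler/runtime and `klass` is a hypothetical `mirror::Class*` here:
//
//   Transaction transaction(/*strict=*/ true,
//                           klass,
//                           /*arena_stack=*/ nullptr,
//                           Runtime::Current()->GetArenaPool());
//   // ... run the class initializer, which calls the Record*() methods ...
//   if (transaction.IsAborted()) {
//     transaction.Rollback();
//   }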
Transaction::Transaction(bool strict,
                         mirror::Class* root,
                         ArenaStack* arena_stack,
                         ArenaPool* arena_pool)
    : arena_stack_(std::nullopt),
      allocator_(arena_stack != nullptr ? arena_stack : &arena_stack_.emplace(arena_pool)),
      object_logs_(std::less<mirror::Object*>(), allocator_.Adapter(kArenaAllocTransaction)),
      array_logs_(std::less<mirror::Array*>(), allocator_.Adapter(kArenaAllocTransaction)),
      intern_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_method_type_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      aborted_(false),
      rolling_back_(false),
      heap_(Runtime::Current()->GetHeap()),
      strict_(strict),
      root_(root),
      assert_no_new_records_reason_(nullptr) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK_NE(arena_stack != nullptr, arena_pool != nullptr);
}

Transaction::~Transaction() {
  if (kEnableTransactionStats) {
    size_t objects_count = object_logs_.size();
    size_t field_values_count = 0;
    for (const auto& it : object_logs_) {
      field_values_count += it.second.Size();
    }
    size_t array_count = array_logs_.size();
    size_t array_values_count = 0;
    for (const auto& it : array_logs_) {
      array_values_count += it.second.Size();
    }
    size_t intern_string_count =
        std::distance(intern_string_logs_.begin(), intern_string_logs_.end());
    size_t resolve_string_count =
        std::distance(resolve_string_logs_.begin(), resolve_string_logs_.end());
    size_t resolve_method_type_count =
        std::distance(resolve_method_type_logs_.begin(), resolve_method_type_logs_.end());
    LOG(INFO) << "Transaction::~Transaction"
              << ": objects_count=" << objects_count
              << ", field_values_count=" << field_values_count
              << ", array_count=" << array_count
              << ", array_values_count=" << array_values_count
              << ", intern_string_count=" << intern_string_count
              << ", resolve_string_count=" << resolve_string_count
              << ", resolve_method_type_count=" << resolve_method_type_count;
  }
}

void Transaction::Abort(const std::string& abort_message) {
  // We may abort more than once if the exception thrown at the time of the
  // previous abort has been caught during execution of a class initializer.
  // We just keep the message of the first abort because it will cause the
  // transaction to be rolled back anyway.
  if (!aborted_) {
    aborted_ = true;
    abort_message_ = abort_message;
  }
}

void Transaction::ThrowAbortError(Thread* self, const std::string* abort_message) {
  const bool rethrow = (abort_message == nullptr);
  if (kIsDebugBuild && rethrow) {
    CHECK(IsAborted()) << "Rethrow " << DescriptorToDot(Transaction::kAbortExceptionDescriptor)
                       << " while transaction is not aborted";
  }
  if (rethrow) {
    // Rethrow an exception with the earlier abort message stored in the transaction.
    self->ThrowNewWrappedException(Transaction::kAbortExceptionDescriptor,
                                   GetAbortMessage().c_str());
  } else {
    // Throw an exception with the given abort message.
    self->ThrowNewWrappedException(Transaction::kAbortExceptionDescriptor,
                                   abort_message->c_str());
  }
}

const std::string& Transaction::GetAbortMessage() const {
  return abort_message_;
}

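// Constraint checks. Each *Constraint() method below returns true when the
// corresponding access violates the transaction's constraints and must be
// rejected by the caller, and false when the access is allowed. For example,
// in strict mode a class initializer may access its own statics (`root_`),
// but writing another class' static field fails WriteConstraint().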
bool Transaction::WriteConstraint(ObjPtr<mirror::Object> obj) const {
  DCHECK(obj != nullptr);

  // Prevent changes in boot image spaces for app or boot image extension.
  // For boot image there are no boot image spaces and this condition evaluates to false.
  if (heap_->ObjectIsInBootImageSpace(obj)) {
    return true;
  }

  // For apps, also prevent writing to other classes.
  return IsStrict() &&
         obj->IsClass() &&  // no constraint updating instances or arrays
         obj != root_;  // modifying other classes' static field, fail
}

bool Transaction::WriteValueConstraint(ObjPtr<mirror::Object> value) const {
  if (value == nullptr) {
    return false;  // We can always store null values.
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (IsStrict()) {
    // TODO: Should we restrict writes the same way as for boot image extension?
    return false;
  } else if (heap->GetBootImageSpaces().empty()) {
    return false;  // No constraints for boot image.
  } else {
    // Boot image extension.
    ObjPtr<mirror::Class> klass = value->IsClass() ? value->AsClass() : value->GetClass();
    return !AotClassLinker::CanReferenceInBootImageExtension(klass, heap);
  }
}

bool Transaction::ReadConstraint(ObjPtr<mirror::Object> obj) const {
  // Read constraints are checked only for static field reads as there are
  // no constraints on reading instance fields and array elements.
  DCHECK(obj->IsClass());
  if (IsStrict()) {
    return obj != root_;  // fail if not self-updating
  } else {
    // For boot image and boot image extension, allow reading any field.
    return false;
  }
}

inline Transaction::ObjectLog& Transaction::GetOrCreateObjectLog(mirror::Object* obj) {
  return object_logs_.GetOrCreate(obj, [&]() { return ObjectLog(&allocator_); });
}

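// Field write records. Each RecordWriteField*() method below is expected to
// be called just before the field at `field_offset` is overwritten, with the
// value currently stored in that field, so that Rollback() can write it back.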
void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
                                          MemberOffset field_offset,
                                          uint8_t value,
                                          bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogBooleanValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldByte(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       int8_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogByteValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldChar(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       uint16_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogCharValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldShort(mirror::Object* obj,
                                        MemberOffset field_offset,
                                        int16_t value,
                                        bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogShortValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteField32(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint32_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.Log32BitsValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteField64(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint64_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.Log64BitsValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldReference(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            mirror::Object* value,
                                            bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogReferenceValue(field_offset, value, is_volatile);
}

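// Array element writes are recorded only for primitive arrays; object arrays
// are handled through RecordWriteFieldReference() instead (see the kPrimNot
// case in ArrayLog::UndoArrayWrite()).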
void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  DCHECK(!array->IsObjectArray());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ArrayLog& array_log = array_logs_.GetOrCreate(array, [&]() { return ArrayLog(&allocator_); });
  array_log.LogValue(index, value);
}

void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
                                      dex::StringIndex string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  resolve_string_logs_.emplace_front(dex_cache, string_idx);
}

void Transaction::RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache,
                                          dex::ProtoIndex proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx.index_, dex_cache->GetDexFile()->NumProtoIds());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  resolve_method_type_logs_.emplace_front(dex_cache, proto_idx);
}

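// Intern table modifications are recorded as insert/remove operations on
// strong or weak strings so that InternStringLog::Undo() can apply the
// inverse operation during rollback.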
void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::LogInternedString(InternStringLog&& log) {
  Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  intern_string_logs_.push_front(std::move(log));
}

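// Rollback() undoes every recorded modification and clears all logs. It must
// run outside of active transaction mode (see the CHECK below) and holds the
// intern table lock because intern table changes are reverted as well.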
void Transaction::Rollback() {
  Thread* self = Thread::Current();
  self->AssertNoPendingException();
  MutexLock mu(self, *Locks::intern_table_lock_);
  rolling_back_ = true;
  CHECK(!Runtime::Current()->IsActiveTransaction());
  UndoObjectModifications();
  UndoArrayModifications();
  UndoInternStringTableModifications();
  UndoResolveStringModifications();
  UndoResolveMethodTypeModifications();
  rolling_back_ = false;
}

void Transaction::UndoObjectModifications() {
  // TODO: we may not need to restore objects allocated during this transaction. Or we could
  // directly remove them from the heap.
  for (const auto& it : object_logs_) {
    it.second.Undo(it.first);
  }
  object_logs_.clear();
}

void Transaction::UndoArrayModifications() {
  // TODO: we may not need to restore arrays allocated during this transaction. Or we could
  // directly remove them from the heap.
  for (const auto& it : array_logs_) {
    it.second.Undo(it.first);
  }
  array_logs_.clear();
}

void Transaction::UndoInternStringTableModifications() {
  InternTable* const intern_table = Runtime::Current()->GetInternTable();
  // We want to undo each operation from the most recent to the oldest. The list was filled so
  // that the most recent operation is at the beginning, so we just have to iterate over it.
  for (const InternStringLog& string_log : intern_string_logs_) {
    string_log.Undo(intern_table);
  }
  intern_string_logs_.clear();
}

void Transaction::UndoResolveStringModifications() {
  for (ResolveStringLog& string_log : resolve_string_logs_) {
    string_log.Undo();
  }
  resolve_string_logs_.clear();
}

void Transaction::UndoResolveMethodTypeModifications() {
  for (ResolveMethodTypeLog& method_type_log : resolve_method_type_logs_) {
    method_type_log.Undo();
  }
  resolve_method_type_logs_.clear();
}

void Transaction::VisitRoots(RootVisitor* visitor) {
  // Transactions are used for single-threaded initialization.
  // This is the only function that should be called from a different thread,
  // namely the GC thread, and it is called with the mutator lock held exclusively,
  // so the data structures in the `Transaction` are protected from concurrent use.
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(Thread::Current()));

  visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&root_), RootInfo(kRootUnknown));
  {
    // Create a separate `ArenaStack` for this thread.
    ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
    VisitObjectLogs(visitor, &arena_stack);
    VisitArrayLogs(visitor, &arena_stack);
  }
  VisitInternStringLogs(visitor);
  VisitResolveStringLogs(visitor);
  VisitResolveMethodTypeLogs(visitor);
}

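// Re-keys `container` entries whose key object has been moved by the GC: each
// node is extracted, its key updated to the new address, and re-inserted, so
// the logged data attached to the key is preserved.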
template <typename MovingRoots, typename Container>
void UpdateKeys(const MovingRoots& moving_roots, Container& container) {
  for (const auto& pair : moving_roots) {
    auto* old_root = pair.first;
    auto* new_root = pair.second;
    auto node = container.extract(old_root);
    CHECK(!node.empty());
    node.key() = new_root;
    bool inserted = container.insert(std::move(node)).inserted;
    CHECK(inserted);
  }
}

void Transaction::VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ObjectPair = std::pair<mirror::Object*, mirror::Object*>;
  ScopedArenaForwardList<ObjectPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  // Visit roots.
  for (auto& it : object_logs_) {
    it.second.VisitRoots(visitor);
    mirror::Object* old_root = it.first;
    mirror::Object* new_root = old_root;
    visitor->VisitRoot(&new_root, RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update object logs with moving roots.
  UpdateKeys(moving_roots, object_logs_);
}

void Transaction::VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ArrayPair = std::pair<mirror::Array*, mirror::Array*>;
  ScopedArenaForwardList<ArrayPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  for (auto& it : array_logs_) {
    mirror::Array* old_root = it.first;
    CHECK(!old_root->IsObjectArray());
    mirror::Array* new_root = old_root;
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&new_root), RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update array logs with moving roots.
  UpdateKeys(moving_roots, array_logs_);
}

void Transaction::VisitInternStringLogs(RootVisitor* visitor) {
  for (InternStringLog& log : intern_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveStringLogs(RootVisitor* visitor) {
  for (ResolveStringLog& log : resolve_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveMethodTypeLogs(RootVisitor* visitor) {
  for (ResolveMethodTypeLog& log : resolve_method_type_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) {
  LogValue(ObjectLog::kBoolean, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogByteValue(MemberOffset offset, int8_t value, bool is_volatile) {
  LogValue(ObjectLog::kByte, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile) {
  LogValue(ObjectLog::kChar, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogShortValue(MemberOffset offset, int16_t value, bool is_volatile) {
  LogValue(ObjectLog::kShort, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile) {
  LogValue(ObjectLog::k32Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile) {
  LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
                                               mirror::Object* obj,
                                               bool is_volatile) {
  LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}

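// Only the first value recorded for a given field offset is kept; later
// writes to the same field do not update the log entry, so Undo() restores
// the earliest recorded value (the pre-transaction value, given the calling
// convention of the RecordWriteField*() methods above).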
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
                                      MemberOffset offset,
                                      uint64_t value,
                                      bool is_volatile) {
  auto it = field_values_.find(offset.Uint32Value());
  if (it == field_values_.end()) {
    ObjectLog::FieldValue field_value;
    field_value.value = value;
    field_value.is_volatile = is_volatile;
    field_value.kind = kind;
    field_values_.emplace(offset.Uint32Value(), std::move(field_value));
  }
}

void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
  for (auto& it : field_values_) {
    // The garbage collector needs to access the object's class and the array's length,
    // so we do not roll back these values.
    MemberOffset field_offset(it.first);
    if (field_offset.Uint32Value() == mirror::Class::ClassOffset().Uint32Value()) {
      // Skip Object::class field.
      continue;
    }
    if (obj->IsArrayInstance() &&
        field_offset.Uint32Value() == mirror::Array::LengthOffset().Uint32Value()) {
      // Skip Array::length field.
      continue;
    }
    const FieldValue& field_value = it.second;
    UndoFieldWrite(obj, field_offset, field_value);
  }
}

void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            const FieldValue& field_value) const {
  // TODO: We may want to abort a transaction while still being in transaction mode. In this case,
  // we'd need to disable the check.
  constexpr bool kCheckTransaction = false;
  switch (field_value.kind) {
    case kBoolean:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      } else {
        obj->SetFieldBoolean<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      }
      break;
    case kByte:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldByteVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      } else {
        obj->SetFieldByte<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      }
      break;
    case kChar:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldCharVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      } else {
        obj->SetFieldChar<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      }
      break;
    case kShort:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldShortVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      } else {
        obj->SetFieldShort<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      }
      break;
    case k32Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField32Volatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      } else {
        obj->SetField32<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      }
      break;
    case k64Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField64Volatile<false, kCheckTransaction>(field_offset, field_value.value);
      } else {
        obj->SetField64<false, kCheckTransaction>(field_offset, field_value.value);
      }
      break;
    case kReference:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldObjectVolatile<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      } else {
        obj->SetFieldObject<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      }
      break;
    default:
      LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
      UNREACHABLE();
  }
}

void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
  for (auto& it : field_values_) {
    FieldValue& field_value = it.second;
    if (field_value.kind == ObjectLog::kReference) {
      visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
                                  RootInfo(kRootUnknown));
    }
  }
}

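// Undoing an intern table operation applies its inverse: a recorded insertion
// is undone by removing the string (using its stored hash code), and a
// recorded removal is undone by re-inserting it.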
void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  DCHECK(intern_table != nullptr);
  ObjPtr<mirror::String> s = str_.Read();
  uint32_t hash = static_cast<uint32_t>(s->GetStoredHashCode());
  switch (string_op_) {
    case InternStringLog::kInsert: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->RemoveStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->RemoveWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    case InternStringLog::kRemove: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->InsertStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->InsertWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown interned string op";
      UNREACHABLE();
  }
}

void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
  str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}

void Transaction::ResolveStringLog::Undo() const {
  dex_cache_.Read()->ClearString(string_idx_);
}

Transaction::ResolveStringLog::ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache,
                                                dex::StringIndex string_idx)
    : dex_cache_(dex_cache),
      string_idx_(string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx_.index_, dex_cache->GetDexFile()->NumStringIds());
}

void Transaction::ResolveStringLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

void Transaction::ResolveMethodTypeLog::Undo() const {
  dex_cache_.Read()->ClearMethodType(proto_idx_);
}

Transaction::ResolveMethodTypeLog::ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache,
                                                        dex::ProtoIndex proto_idx)
    : dex_cache_(dex_cache),
      proto_idx_(proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx_.index_, dex_cache->GetDexFile()->NumProtoIds());
}

void Transaction::ResolveMethodTypeLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

Transaction::InternStringLog::InternStringLog(ObjPtr<mirror::String> s,
                                              StringKind kind,
                                              StringOp op)
    : str_(s),
      string_kind_(kind),
      string_op_(op) {
  DCHECK(s != nullptr);
}

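// Array logs store element values as raw 64-bit integers; UndoArrayWrite()
// casts them back according to the array's primitive component type.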
void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
  // Add a mapping if there is none yet.
  array_values_.FindOrAdd(index, value);
}

void Transaction::ArrayLog::Undo(mirror::Array* array) const {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
  for (auto it : array_values_) {
    UndoArrayWrite(array, type, it.first, it.second);
  }
}

void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
                                           Primitive::Type array_type,
                                           size_t index,
                                           uint64_t value) const {
  // TODO: We may want to abort a transaction while still being in transaction mode. In this case,
  // we'd need to disable the check.
  constexpr bool kCheckTransaction = false;
  switch (array_type) {
    case Primitive::kPrimBoolean:
      array->AsBooleanArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint8_t>(value));
      break;
    case Primitive::kPrimByte:
      array->AsByteArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int8_t>(value));
      break;
    case Primitive::kPrimChar:
      array->AsCharArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint16_t>(value));
      break;
    case Primitive::kPrimShort:
      array->AsShortArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int16_t>(value));
      break;
    case Primitive::kPrimInt:
      array->AsIntArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int32_t>(value));
      break;
    case Primitive::kPrimFloat:
      array->AsFloatArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<float>(value));
      break;
    case Primitive::kPrimLong:
      array->AsLongArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int64_t>(value));
      break;
    case Primitive::kPrimDouble:
      array->AsDoubleArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<double>(value));
      break;
    case Primitive::kPrimNot:
      LOG(FATAL) << "ObjectArray should be treated as Object";
      UNREACHABLE();
    default:
      LOG(FATAL) << "Unsupported type " << array_type;
      UNREACHABLE();
  }
}

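// Debug-only guard: while the assertion installed below is active, any
// Record*() call on the transaction will fail its
// DCHECK(assert_no_new_records_reason_ == nullptr) and report `reason`.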
Transaction* ScopedAssertNoNewTransactionRecords::InstallAssertion(const char* reason) {
  Transaction* transaction = nullptr;
  if (kIsDebugBuild && Runtime::Current()->IsActiveTransaction()) {
    transaction = Runtime::Current()->GetTransaction();
    if (transaction != nullptr) {
      CHECK(transaction->assert_no_new_records_reason_ == nullptr)
          << "old: " << transaction->assert_no_new_records_reason_ << " new: " << reason;
      transaction->assert_no_new_records_reason_ = reason;
    }
  }
  return transaction;
}

void ScopedAssertNoNewTransactionRecords::RemoveAssertion(Transaction* transaction) {
  if (kIsDebugBuild) {
    CHECK(Runtime::Current()->GetTransaction() == transaction);
    CHECK(transaction->assert_no_new_records_reason_ != nullptr);
    transaction->assert_no_new_records_reason_ = nullptr;
  }
}

}  // namespace art