/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "transaction.h"

#include <list>

#include <android-base/logging.h>

#include "aot_class_linker.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "dex/descriptors_names.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc_root-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"

namespace art {

// TODO: Remove (only used for debugging purposes).
static constexpr bool kEnableTransactionStats = false;

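// A `Transaction` buffers every modification performed while an AOT class initializer runs so
// that the changes can be undone by `Rollback()` if initialization has to be abandoned. Exactly
// one of `arena_stack` / `arena_pool` must be non-null: either the caller supplies an existing
// `ArenaStack`, or the transaction creates its own from the given pool. All logs below are
// backed by that arena.
//
// Rough sketch of the intended flow (the actual wiring lives outside this file; `klass` and
// `arena_pool` below are placeholders):
//
//   Transaction transaction(/*strict=*/ true, klass, /*arena_stack=*/ nullptr, arena_pool);
//   // ... run the class initializer; field/array/intern/resolution writes are recorded ...
//   if (transaction.IsAborted()) {
//     transaction.Rollback();
//   }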
Transaction::Transaction(bool strict,
                         mirror::Class* root,
                         ArenaStack* arena_stack,
                         ArenaPool* arena_pool)
    : arena_stack_(std::nullopt),
      allocator_(arena_stack != nullptr ? arena_stack : &arena_stack_.emplace(arena_pool)),
      object_logs_(std::less<mirror::Object*>(), allocator_.Adapter(kArenaAllocTransaction)),
      array_logs_(std::less<mirror::Array*>(), allocator_.Adapter(kArenaAllocTransaction)),
      intern_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_method_type_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      aborted_(false),
      rolling_back_(false),
      heap_(Runtime::Current()->GetHeap()),
      strict_(strict),
      root_(root),
      assert_no_new_records_reason_(nullptr) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK_NE(arena_stack != nullptr, arena_pool != nullptr);
}

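// The destructor only reports bookkeeping statistics, and only when `kEnableTransactionStats`
// is enabled; the logs themselves are arena-backed and need no explicit cleanup here.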
Transaction::~Transaction() {
  if (kEnableTransactionStats) {
    size_t objects_count = object_logs_.size();
    size_t field_values_count = 0;
    for (const auto& it : object_logs_) {
      field_values_count += it.second.Size();
    }
    size_t array_count = array_logs_.size();
    size_t array_values_count = 0;
    for (const auto& it : array_logs_) {
      array_values_count += it.second.Size();
    }
    size_t intern_string_count =
        std::distance(intern_string_logs_.begin(), intern_string_logs_.end());
    size_t resolve_string_count =
        std::distance(resolve_string_logs_.begin(), resolve_string_logs_.end());
    size_t resolve_method_type_count =
        std::distance(resolve_method_type_logs_.begin(), resolve_method_type_logs_.end());
    LOG(INFO) << "Transaction::~Transaction"
              << ": objects_count=" << objects_count
              << ", field_values_count=" << field_values_count
              << ", array_count=" << array_count
              << ", array_values_count=" << array_values_count
              << ", intern_string_count=" << intern_string_count
              << ", resolve_string_count=" << resolve_string_count
              << ", resolve_method_type_count=" << resolve_method_type_count;
  }
}

void Transaction::Abort(const std::string& abort_message) {
  // We may abort more than once if the exception thrown at the time of the
  // previous abort has been caught during execution of a class initializer.
  // We just keep the message of the first abort because it will cause the
  // transaction to be rolled back anyway.
  if (!aborted_) {
    aborted_ = true;
    abort_message_ = abort_message;
  }
}

void Transaction::ThrowAbortError(Thread* self, const std::string* abort_message) {
  const bool rethrow = (abort_message == nullptr);
  if (kIsDebugBuild && rethrow) {
    CHECK(IsAborted()) << "Rethrow " << DescriptorToDot(Transaction::kAbortExceptionDescriptor)
                       << " while transaction is not aborted";
  }
  if (rethrow) {
    // Rethrow an exception with the earlier abort message stored in the transaction.
    self->ThrowNewWrappedException(Transaction::kAbortExceptionDescriptor,
                                   GetAbortMessage().c_str());
  } else {
    // Throw an exception with the given abort message.
    self->ThrowNewWrappedException(Transaction::kAbortExceptionDescriptor,
                                   abort_message->c_str());
  }
}

const std::string& Transaction::GetAbortMessage() const {
  return abort_message_;
}

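// Returns true if writing to `obj` must be rejected: writes into boot image spaces are never
// allowed, and a strict (app) transaction additionally rejects writes to any class other than
// the transaction's root class.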
bool Transaction::WriteConstraint(ObjPtr<mirror::Object> obj) const {
  DCHECK(obj != nullptr);

  // Prevent changes in boot image spaces for app or boot image extension.
  // For boot image there are no boot image spaces and this condition evaluates to false.
  if (heap_->ObjectIsInBootImageSpace(obj)) {
    return true;
  }

  // For apps, also prevent writing to other classes.
  return IsStrict() &&
         obj->IsClass() &&  // no constraint updating instances or arrays
         obj != root_;  // modifying other classes' static field, fail
}

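// Returns true if storing `value` must be rejected. Only boot image extension compilation
// constrains stored values: the value's class must be referenceable from the extension. Strict
// (app) transactions and plain boot image compilation accept any value.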
bool Transaction::WriteValueConstraint(ObjPtr<mirror::Object> value) const {
  if (value == nullptr) {
    return false;  // We can always store null values.
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (IsStrict()) {
    // TODO: Should we restrict writes the same way as for boot image extension?
    return false;
  } else if (heap->GetBootImageSpaces().empty()) {
    return false;  // No constraints for boot image.
  } else {
    // Boot image extension.
    ObjPtr<mirror::Class> klass = value->IsClass() ? value->AsClass() : value->GetClass();
    return !AotClassLinker::CanReferenceInBootImageExtension(klass, heap);
  }
}

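// Returns true if reading a static field of `obj` must be rejected. Only strict (app)
// transactions constrain reads: they may read statics only from the transaction's root class.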
bool Transaction::ReadConstraint(ObjPtr<mirror::Object> obj) const {
  // Read constraints are checked only for static field reads as there are
  // no constraints on reading instance fields and array elements.
  DCHECK(obj->IsClass());
  if (IsStrict()) {
    return obj != root_;  // fail if not self-updating
  } else {
    // For boot image and boot image extension, allow reading any field.
    return false;
  }
}

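// Lazily creates the per-object log, backed by the transaction's arena allocator, on the first
// recorded write to `obj`.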
inline Transaction::ObjectLog& Transaction::GetOrCreateObjectLog(mirror::Object* obj) {
  return object_logs_.GetOrCreate(obj, [&]() { return ObjectLog(&allocator_); });
}

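// The RecordWriteField* helpers below log the value a field holds just before a transactional
// write (that is the value `ObjectLog::Undo()` later writes back), keyed by field offset. Only
// the first record per offset is kept (see `ObjectLog::LogValue()`), so rollback restores the
// pre-transaction value even if the field is written multiple times.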
void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
                                          MemberOffset field_offset,
                                          uint8_t value,
                                          bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogBooleanValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldByte(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       int8_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogByteValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldChar(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       uint16_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogCharValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldShort(mirror::Object* obj,
                                        MemberOffset field_offset,
                                        int16_t value,
                                        bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogShortValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteField32(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint32_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.Log32BitsValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteField64(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint64_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.Log64BitsValue(field_offset, value, is_volatile);
}

void Transaction::RecordWriteFieldReference(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            mirror::Object* value,
                                            bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ObjectLog& object_log = GetOrCreateObjectLog(obj);
  object_log.LogReferenceValue(field_offset, value, is_volatile);
}

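// Only primitive (non-reference) arrays are recorded here; writes to object arrays must be
// recorded as reference writes instead (the `Primitive::kPrimNot` case in `UndoArrayWrite()`
// is fatal).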
void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  DCHECK(!array->IsObjectArray());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  ArrayLog& array_log = array_logs_.GetOrCreate(array, [&]() { return ArrayLog(&allocator_); });
  array_log.LogValue(index, value);
}

void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
                                      dex::StringIndex string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  resolve_string_logs_.emplace_front(dex_cache, string_idx);
}

void Transaction::RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache,
                                          dex::ProtoIndex proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx.index_, dex_cache->GetDexFile()->NumProtoIds());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  resolve_method_type_logs_.emplace_front(dex_cache, proto_idx);
}

void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::LogInternedString(InternStringLog&& log) {
  Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  intern_string_logs_.push_front(std::move(log));
}

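// Undoes every recorded modification. This must run after the transaction is no longer active
// (the CHECK below) and with no pending exception; intern table changes are undone under the
// intern table lock.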
void Transaction::Rollback() {
  Thread* self = Thread::Current();
  self->AssertNoPendingException();
  MutexLock mu(self, *Locks::intern_table_lock_);
  rolling_back_ = true;
  CHECK(!Runtime::Current()->IsActiveTransaction());
  UndoObjectModifications();
  UndoArrayModifications();
  UndoInternStringTableModifications();
  UndoResolveStringModifications();
  UndoResolveMethodTypeModifications();
  rolling_back_ = false;
}

void Transaction::UndoObjectModifications() {
  // TODO: We may not need to restore objects allocated during this transaction. Or we could
  // directly remove them from the heap.
  for (const auto& it : object_logs_) {
    it.second.Undo(it.first);
  }
  object_logs_.clear();
}

void Transaction::UndoArrayModifications() {
  // TODO: We may not need to restore arrays allocated during this transaction. Or we could
  // directly remove them from the heap.
  for (const auto& it : array_logs_) {
    it.second.Undo(it.first);
  }
  array_logs_.clear();
}

void Transaction::UndoInternStringTableModifications() {
  InternTable* const intern_table = Runtime::Current()->GetInternTable();
  // We want to undo each operation from the most recent to the oldest. The list was filled with
  // the most recent operation at the front, so we only need to iterate over it in order.
  for (const InternStringLog& string_log : intern_string_logs_) {
    string_log.Undo(intern_table);
  }
  intern_string_logs_.clear();
}

void Transaction::UndoResolveStringModifications() {
  for (ResolveStringLog& string_log : resolve_string_logs_) {
    string_log.Undo();
  }
  resolve_string_logs_.clear();
}

void Transaction::UndoResolveMethodTypeModifications() {
  for (ResolveMethodTypeLog& method_type_log : resolve_method_type_logs_) {
    method_type_log.Undo();
  }
  resolve_method_type_logs_.clear();
}

void Transaction::VisitRoots(RootVisitor* visitor) {
  // Transactions are used for single-threaded initialization.
  // This is the only function that should be called from a different thread,
  // namely the GC thread, and it is called with the mutator lock held exclusively,
  // so the data structures in the `Transaction` are protected from concurrent use.
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(Thread::Current()));

  visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&root_), RootInfo(kRootUnknown));
  {
    // Create a separate `ArenaStack` for this thread.
    ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
    VisitObjectLogs(visitor, &arena_stack);
    VisitArrayLogs(visitor, &arena_stack);
  }
  VisitInternStringLogs(visitor);
  VisitResolveStringLogs(visitor);
  VisitResolveMethodTypeLogs(visitor);
}

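// The object and array logs are keyed by raw object pointers. If the GC moves a root, its map
// entry must be re-keyed: extract the node for the old pointer, update the key, and re-insert
// it under the new pointer.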
template <typename MovingRoots, typename Container>
void UpdateKeys(const MovingRoots& moving_roots, Container& container) {
  for (const auto& pair : moving_roots) {
    auto* old_root = pair.first;
    auto* new_root = pair.second;
    auto node = container.extract(old_root);
    CHECK(!node.empty());
    node.key() = new_root;
    bool inserted = container.insert(std::move(node)).inserted;
    CHECK(inserted);
  }
}

void Transaction::VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ObjectPair = std::pair<mirror::Object*, mirror::Object*>;
  ScopedArenaForwardList<ObjectPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  // Visit roots.
  for (auto& it : object_logs_) {
    it.second.VisitRoots(visitor);
    mirror::Object* old_root = it.first;
    mirror::Object* new_root = old_root;
    visitor->VisitRoot(&new_root, RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update object logs with moving roots.
  UpdateKeys(moving_roots, object_logs_);
}

void Transaction::VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ArrayPair = std::pair<mirror::Array*, mirror::Array*>;
  ScopedArenaForwardList<ArrayPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  for (auto& it : array_logs_) {
    mirror::Array* old_root = it.first;
    mirror::Array* new_root = old_root;
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&new_root), RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update array logs with moving roots.
  UpdateKeys(moving_roots, array_logs_);
}

void Transaction::VisitInternStringLogs(RootVisitor* visitor) {
  for (InternStringLog& log : intern_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveStringLogs(RootVisitor* visitor) {
  for (ResolveStringLog& log : resolve_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveMethodTypeLogs(RootVisitor* visitor) {
  for (ResolveMethodTypeLog& log : resolve_method_type_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) {
  LogValue(ObjectLog::kBoolean, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogByteValue(MemberOffset offset, int8_t value, bool is_volatile) {
  LogValue(ObjectLog::kByte, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile) {
  LogValue(ObjectLog::kChar, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogShortValue(MemberOffset offset, int16_t value, bool is_volatile) {
  LogValue(ObjectLog::kShort, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile) {
  LogValue(ObjectLog::k32Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile) {
  LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
                                               mirror::Object* obj,
                                               bool is_volatile) {
  LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}

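// Records the given pre-write value for `offset` only if no value has been recorded for that
// offset yet, so the oldest (pre-transaction) value is the one restored on rollback.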
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
                                      MemberOffset offset,
                                      uint64_t value,
                                      bool is_volatile) {
  auto it = field_values_.find(offset.Uint32Value());
  if (it == field_values_.end()) {
    ObjectLog::FieldValue field_value;
    field_value.value = value;
    field_value.is_volatile = is_volatile;
    field_value.kind = kind;
    field_values_.emplace(offset.Uint32Value(), std::move(field_value));
  }
}

void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
  for (auto& it : field_values_) {
    // The garbage collector needs to access the object's class and the array's length,
    // so we do not roll back these values.
    MemberOffset field_offset(it.first);
    if (field_offset.Uint32Value() == mirror::Class::ClassOffset().Uint32Value()) {
      // Skip Object::class field.
      continue;
    }
    if (obj->IsArrayInstance() &&
        field_offset.Uint32Value() == mirror::Array::LengthOffset().Uint32Value()) {
      // Skip Array::length field.
      continue;
    }
    const FieldValue& field_value = it.second;
    UndoFieldWrite(obj, field_offset, field_value);
  }
}

void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            const FieldValue& field_value) const {
  // TODO: We may want to abort a transaction while still being in transaction mode. In this
  // case, we'd need to disable the check.
  constexpr bool kCheckTransaction = false;
  switch (field_value.kind) {
    case kBoolean:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      } else {
        obj->SetFieldBoolean<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      }
      break;
    case kByte:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldByteVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      } else {
        obj->SetFieldByte<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      }
      break;
    case kChar:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldCharVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      } else {
        obj->SetFieldChar<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      }
      break;
    case kShort:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldShortVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      } else {
        obj->SetFieldShort<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      }
      break;
    case k32Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField32Volatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      } else {
        obj->SetField32<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      }
      break;
    case k64Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField64Volatile<false, kCheckTransaction>(field_offset, field_value.value);
      } else {
        obj->SetField64<false, kCheckTransaction>(field_offset, field_value.value);
      }
      break;
    case kReference:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldObjectVolatile<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      } else {
        obj->SetFieldObject<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      }
      break;
    default:
      LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
      UNREACHABLE();
  }
}

void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
  for (auto& it : field_values_) {
    FieldValue& field_value = it.second;
    if (field_value.kind == ObjectLog::kReference) {
      visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
                                  RootInfo(kRootUnknown));
    }
  }
}

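// Reverses an intern table operation: an insertion recorded during the transaction is undone by
// removing the string, and a removal is undone by re-inserting it, in the same (strong or weak)
// table it was recorded for.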
void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  DCHECK(intern_table != nullptr);
  ObjPtr<mirror::String> s = str_.Read();
  uint32_t hash = static_cast<uint32_t>(s->GetStoredHashCode());
  switch (string_op_) {
    case InternStringLog::kInsert: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->RemoveStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->RemoveWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    case InternStringLog::kRemove: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->InsertStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->InsertWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown interned string op";
      UNREACHABLE();
  }
}

void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
  str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}

void Transaction::ResolveStringLog::Undo() const {
  dex_cache_.Read()->ClearString(string_idx_);
}

Transaction::ResolveStringLog::ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache,
                                                dex::StringIndex string_idx)
    : dex_cache_(dex_cache),
      string_idx_(string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx_.index_, dex_cache->GetDexFile()->NumStringIds());
}

void Transaction::ResolveStringLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

void Transaction::ResolveMethodTypeLog::Undo() const {
  dex_cache_.Read()->ClearMethodType(proto_idx_);
}

Transaction::ResolveMethodTypeLog::ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache,
                                                        dex::ProtoIndex proto_idx)
    : dex_cache_(dex_cache),
      proto_idx_(proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx_.index_, dex_cache->GetDexFile()->NumProtoIds());
}

void Transaction::ResolveMethodTypeLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

Transaction::InternStringLog::InternStringLog(ObjPtr<mirror::String> s,
                                              StringKind kind,
                                              StringOp op)
    : str_(s),
      string_kind_(kind),
      string_op_(op) {
  DCHECK(s != nullptr);
}

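// As with object fields, only the first value logged for a given array index is kept
// (`FindOrAdd`), so rollback restores the element's pre-transaction value.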
void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
  // Add a mapping if there is none yet.
  array_values_.FindOrAdd(index, value);
}

void Transaction::ArrayLog::Undo(mirror::Array* array) const {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
  for (auto it : array_values_) {
    UndoArrayWrite(array, type, it.first, it.second);
  }
}

void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
                                           Primitive::Type array_type,
                                           size_t index,
                                           uint64_t value) const {
  // TODO: We may want to abort a transaction while still being in transaction mode. In this
  // case, we'd need to disable the check.
  constexpr bool kCheckTransaction = false;
  switch (array_type) {
    case Primitive::kPrimBoolean:
      array->AsBooleanArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint8_t>(value));
      break;
    case Primitive::kPrimByte:
      array->AsByteArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int8_t>(value));
      break;
    case Primitive::kPrimChar:
      array->AsCharArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint16_t>(value));
      break;
    case Primitive::kPrimShort:
      array->AsShortArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int16_t>(value));
      break;
    case Primitive::kPrimInt:
      array->AsIntArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int32_t>(value));
      break;
    case Primitive::kPrimFloat:
      array->AsFloatArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<float>(value));
      break;
    case Primitive::kPrimLong:
      array->AsLongArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int64_t>(value));
      break;
    case Primitive::kPrimDouble:
      array->AsDoubleArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<double>(value));
      break;
    case Primitive::kPrimNot:
      LOG(FATAL) << "ObjectArray should be treated as Object";
      UNREACHABLE();
    default:
      LOG(FATAL) << "Unsupported type " << array_type;
      UNREACHABLE();
  }
}

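// In debug builds, installs `reason` on the currently active transaction (if any). While it is
// installed, every Record* call above trips its
// `DCHECK(assert_no_new_records_reason_ == nullptr)` check; `RemoveAssertion()` clears it again.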
Transaction* ScopedAssertNoNewTransactionRecords::InstallAssertion(const char* reason) {
  Transaction* transaction = nullptr;
  if (kIsDebugBuild && Runtime::Current()->IsActiveTransaction()) {
    transaction = Runtime::Current()->GetTransaction();
    if (transaction != nullptr) {
      CHECK(transaction->assert_no_new_records_reason_ == nullptr)
          << "old: " << transaction->assert_no_new_records_reason_ << " new: " << reason;
      transaction->assert_no_new_records_reason_ = reason;
    }
  }
  return transaction;
}

void ScopedAssertNoNewTransactionRecords::RemoveAssertion(Transaction* transaction) {
  if (kIsDebugBuild) {
    CHECK(Runtime::Current()->GetTransaction() == transaction);
    CHECK(transaction->assert_no_new_records_reason_ != nullptr);
    transaction->assert_no_new_records_reason_ = nullptr;
  }
}

}  // namespace art