/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_TRANSACTION_H_
#define ART_RUNTIME_TRANSACTION_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "base/value_object.h"
#include "dex/dex_file_types.h"
#include "dex/primitive.h"
#include "gc_root.h"
#include "offsets.h"

#include <list>
#include <map>

namespace art {
namespace gc {
class Heap;
}  // namespace gc
namespace mirror {
class Array;
class Class;
class DexCache;
class Object;
class String;
}  // namespace mirror
class InternTable;
template<class MirrorType> class ObjPtr;

class Transaction final {
 public:
  static constexpr const char* kAbortExceptionDescriptor = "Ldalvik/system/TransactionAbortError;";

  Transaction(bool strict, mirror::Class* root);
  ~Transaction();

  void Abort(const std::string& abort_message)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowAbortError(Thread* self, const std::string* abort_message)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsAborted() REQUIRES(!log_lock_);

  // Returns true while the transaction is rolling back. The transaction sets this flag when it
  // starts rolling back, because nested transactions must be disabled while memory is being
  // restored.
  bool IsRollingBack();

  // Returns true if the transaction is in strict mode. In strict mode, all accesses of static
  // fields are constrained: one class's <clinit> is not allowed to read or modify another
  // class's static fields; such an access aborts the transaction.
  bool IsStrict() {
    return strict_;
  }

  // Record object field changes.
  void RecordWriteFieldBoolean(mirror::Object* obj,
                               MemberOffset field_offset,
                               uint8_t value,
                               bool is_volatile)
      REQUIRES(!log_lock_);
  void RecordWriteFieldByte(mirror::Object* obj,
                            MemberOffset field_offset,
                            int8_t value,
                            bool is_volatile)
      REQUIRES(!log_lock_);
  void RecordWriteFieldChar(mirror::Object* obj,
                            MemberOffset field_offset,
                            uint16_t value,
                            bool is_volatile)
      REQUIRES(!log_lock_);
  void RecordWriteFieldShort(mirror::Object* obj,
                             MemberOffset field_offset,
                             int16_t value,
                             bool is_volatile)
      REQUIRES(!log_lock_);
  void RecordWriteField32(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint32_t value,
                          bool is_volatile)
      REQUIRES(!log_lock_);
  void RecordWriteField64(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint64_t value,
                          bool is_volatile)
      REQUIRES(!log_lock_);
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 mirror::Object* value,
                                 bool is_volatile)
      REQUIRES(!log_lock_);
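  // Illustrative sketch (hypothetical helper, not part of this header): a transaction-aware
  // setter is expected to record the field's current value *before* writing, so that Rollback()
  // can restore it later:
  //
  //   void SetField32Transactional(Transaction* t,
  //                                mirror::Object* obj,
  //                                MemberOffset offset,
  //                                uint32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_) {
  //     t->RecordWriteField32(obj, offset, obj->GetField32(offset), /* is_volatile= */ false);
  //     obj->SetField32</* kTransactionActive= */ false>(offset, new_value);
  //   }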
  // Record an array element change.
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record intern string table changes.
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES(!log_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES(!log_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES(!log_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES(!log_lock_);

  // Record a string resolution in a dex cache.
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!log_lock_);

  // Abort the transaction by undoing all recorded changes.
  void Rollback()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!log_lock_);

  void VisitRoots(RootVisitor* visitor)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool ReadConstraint(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteConstraint(Thread* self, ObjPtr<mirror::Object> obj)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteValueConstraint(Thread* self, ObjPtr<mirror::Object> value)
      REQUIRES(!log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  class ObjectLog : public ValueObject {
   public:
    void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
    void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
    void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile);
    void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile);
    void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
    void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
    void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);

    void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return field_values_.size();
    }

    ObjectLog() = default;
    ObjectLog(ObjectLog&& log) = default;

   private:
    enum FieldValueKind {
      kBoolean,
      kByte,
      kChar,
      kShort,
      k32Bits,
      k64Bits,
      kReference
    };
    struct FieldValue : public ValueObject {
      // TODO: use JValue instead?
      uint64_t value;
      FieldValueKind kind;
      bool is_volatile;

      FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
      FieldValue(FieldValue&& log) = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(FieldValue);
    };

    void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
    void UndoFieldWrite(mirror::Object* obj,
                        MemberOffset field_offset,
                        const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);
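    // Illustrative sketch of the logging policy (inferred from the map-based storage; see
    // transaction.cc for the authoritative logic): only the first write to a given offset needs
    // to be logged, since rollback restores the pre-transaction value:
    //
    //   auto it = field_values_.find(offset.Uint32Value());
    //   if (it == field_values_.end()) {
    //     FieldValue field_value;
    //     field_value.value = value;
    //     field_value.kind = kind;
    //     field_value.is_volatile = is_volatile;
    //     field_values_.emplace(offset.Uint32Value(), std::move(field_value));
    //   }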
    // Maps a field's offset to its logged value.
    std::map<uint32_t, FieldValue> field_values_;

    DISALLOW_COPY_AND_ASSIGN(ObjectLog);
  };

  class ArrayLog : public ValueObject {
   public:
    void LogValue(size_t index, uint64_t value);

    void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return array_values_.size();
    }

    ArrayLog() = default;
    ArrayLog(ArrayLog&& log) = default;

   private:
    void UndoArrayWrite(mirror::Array* array,
                        Primitive::Type array_type,
                        size_t index,
                        uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Maps an array index to its logged value.
    // TODO: use JValue instead?
    std::map<size_t, uint64_t> array_values_;

    DISALLOW_COPY_AND_ASSIGN(ArrayLog);
  };

  class InternStringLog : public ValueObject {
   public:
    enum StringKind {
      kStrongString,
      kWeakString
    };
    enum StringOp {
      kInsert,
      kRemove
    };
    InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);

    void Undo(InternTable* intern_table) const
        REQUIRES_SHARED(Locks::mutator_lock_)
        REQUIRES(Locks::intern_table_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    InternStringLog() = default;
    InternStringLog(InternStringLog&& log) = default;

   private:
    mutable GcRoot<mirror::String> str_;
    const StringKind string_kind_;
    const StringOp string_op_;

    DISALLOW_COPY_AND_ASSIGN(InternStringLog);
  };

  class ResolveStringLog : public ValueObject {
   public:
    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::StringIndex string_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
  };

  void LogInternedString(InternStringLog&& log)
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES(!log_lock_);

  void UndoObjectModifications()
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoArrayModifications()
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoInternStringTableModifications()
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveStringModifications()
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitObjectLogs(RootVisitor* visitor)
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitArrayLogs(RootVisitor* visitor)
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitInternStringLogs(RootVisitor* visitor)
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveStringLogs(RootVisitor* visitor)
      REQUIRES(log_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::string& GetAbortMessage() REQUIRES(!log_lock_);
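  // Lock ordering note: log_lock_ is declared ACQUIRED_AFTER Locks::intern_table_lock_ below,
  // so code that needs both locks must take the intern table lock first. A minimal sketch,
  // assuming a current Thread* self:
  //
  //   MutexLock intern_mu(self, *Locks::intern_table_lock_);
  //   MutexLock log_mu(self, log_lock_);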
  Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_);
  std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_);
  std::map<mirror::Array*, ArrayLog> array_logs_ GUARDED_BY(log_lock_);
  std::list<InternStringLog> intern_string_logs_ GUARDED_BY(log_lock_);
  std::list<ResolveStringLog> resolve_string_logs_ GUARDED_BY(log_lock_);
  bool aborted_ GUARDED_BY(log_lock_);
  bool rolling_back_;  // Accessed from a single thread only, so there is no race.
  gc::Heap* const heap_;
  const bool strict_;
  std::string abort_message_ GUARDED_BY(log_lock_);
  mirror::Class* root_ GUARDED_BY(log_lock_);
  const char* assert_no_new_records_reason_ GUARDED_BY(log_lock_);

  friend class ScopedAssertNoNewTransactionRecords;

  DISALLOW_COPY_AND_ASSIGN(Transaction);
};

// Debug-build RAII helper: asserts that no new transaction records are created while an
// instance is in scope.
class ScopedAssertNoNewTransactionRecords {
 public:
  explicit ScopedAssertNoNewTransactionRecords(const char* reason)
      : transaction_(kIsDebugBuild ? InstallAssertion(reason) : nullptr) {}

  ~ScopedAssertNoNewTransactionRecords() {
    if (kIsDebugBuild && transaction_ != nullptr) {
      RemoveAssertion(transaction_);
    }
  }

 private:
  static Transaction* InstallAssertion(const char* reason);
  static void RemoveAssertion(Transaction* transaction);

  Transaction* transaction_;
};

}  // namespace art

#endif  // ART_RUNTIME_TRANSACTION_H_