/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_TRANSACTION_H_
#define ART_RUNTIME_TRANSACTION_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "base/scoped_arena_containers.h"
#include "base/value_object.h"
#include "dex/dex_file_types.h"
#include "dex/primitive.h"
#include "gc_root.h"
#include "offsets.h"

#include <functional>
#include <list>
#include <map>
#include <optional>
#include <string>

namespace art {
namespace gc {
class Heap;
}  // namespace gc
namespace mirror {
class Array;
class Class;
class DexCache;
class Object;
class String;
}  // namespace mirror
class InternTable;
template<class MirrorType> class ObjPtr;

class Transaction final {
 public:
  static constexpr const char* kAbortExceptionDescriptor = "Ldalvik/system/TransactionAbortError;";

  Transaction(bool strict, mirror::Class* root, ArenaStack* arena_stack, ArenaPool* arena_pool);
  ~Transaction();
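
  // Illustrative usage sketch (not part of the API; `klass`, `stack`, and
  // `pool` are assumed to be provided by the caller, which in practice is
  // the runtime's transaction machinery):
  //
  //   Transaction transaction(/*strict=*/ true, klass, stack, pool);
  //   // ... interpret a class initializer, calling the RecordWrite*()
  //   // methods below before each store so original values are logged ...
  //   if (transaction.IsAborted()) {
  //     transaction.Rollback();  // Restores all logged values.
  //   }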

  ArenaStack* GetArenaStack() {
    return allocator_.GetArenaStack();
  }

  // Mark the transaction as aborted and record the abort message.
  void Abort(const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Throw a `dalvik.system.TransactionAbortError` carrying the given abort
  // message (the message may be null).
  void ThrowAbortError(Thread* self, const std::string* abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsAborted() const {
    return aborted_;
  }

  // Whether the transaction is currently rolling back. The transaction sets
  // this flag when it starts rolling back, because nested transactions must
  // be disabled while memory is being restored.
  bool IsRollingBack() const {
    return rolling_back_;
  }

  // Whether the transaction is in strict mode. In strict mode, all accesses
  // of static fields are constrained: one class's <clinit> is not allowed to
  // read or modify another class's static fields, unless the transaction is
  // aborted.
  bool IsStrict() const {
    return strict_;
  }
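
  // For example (hypothetical classes, for illustration only): under strict
  // mode, if `A.<clinit>` reads `B.someStaticField`, the read constraint
  // check fails and the transaction is aborted, so A's initialization can be
  // performed at runtime instead of being recorded at compile time.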

  // Record object field changes.
  void RecordWriteFieldBoolean(mirror::Object* obj,
                               MemberOffset field_offset,
                               uint8_t value,
                               bool is_volatile);
  void RecordWriteFieldByte(mirror::Object* obj,
                            MemberOffset field_offset,
                            int8_t value,
                            bool is_volatile);
  void RecordWriteFieldChar(mirror::Object* obj,
                            MemberOffset field_offset,
                            uint16_t value,
                            bool is_volatile);
  void RecordWriteFieldShort(mirror::Object* obj,
                             MemberOffset field_offset,
                             int16_t value,
                             bool is_volatile);
  void RecordWriteField32(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint32_t value,
                          bool is_volatile);
  void RecordWriteField64(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint64_t value,
                          bool is_volatile);
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 mirror::Object* value,
                                 bool is_volatile);
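
  // Call pattern sketch (hedged; `transaction`, `obj`, `offset`, and
  // `is_volatile` are assumed to come from the interpreter). The value
  // passed in is the field's *current* value, captured before the store,
  // so that Rollback() can restore it:
  //
  //   transaction->RecordWriteField32(obj, offset, obj->GetField32(offset),
  //                                   is_volatile);
  //   // ... then perform the actual store into `obj` at `offset` ...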

  // Record array change.
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record intern string table changes.
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
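
  // (On Rollback(), these log entries are undone in the opposite sense: a
  // recorded insertion is removed from the intern table again, and a
  // recorded removal is re-inserted.)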

  // Record resolve string.
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record resolve method type.
  void RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache, dex::ProtoIndex proto_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Abort transaction by undoing all recorded changes.
  void Rollback()
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constraint checks used during transactional interpretation: each returns
  // whether the given access is disallowed under this transaction (e.g. in
  // strict mode), in which case the caller is expected to abort it.
  bool ReadConstraint(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteConstraint(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteValueConstraint(ObjPtr<mirror::Object> value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  class ObjectLog : public ValueObject {
   public:
    void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
    void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
    void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile);
    void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile);
    void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
    void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
    void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);

    void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return field_values_.size();
    }

    explicit ObjectLog(ScopedArenaAllocator* allocator)
        : field_values_(std::less<uint32_t>(), allocator->Adapter(kArenaAllocTransaction)) {}
    ObjectLog(ObjectLog&& log) = default;

   private:
    enum FieldValueKind {
      kBoolean,
      kByte,
      kChar,
      kShort,
      k32Bits,
      k64Bits,
      kReference
    };
    struct FieldValue : public ValueObject {
      // TODO: Use JValue instead?
      uint64_t value;
      FieldValueKind kind;
      bool is_volatile;

      FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
      FieldValue(FieldValue&& log) = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(FieldValue);
    };

    void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
    void UndoFieldWrite(mirror::Object* obj,
                        MemberOffset field_offset,
                        const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Maps a field's offset to the value it held before the transactional write.
    ScopedArenaSafeMap<uint32_t, FieldValue> field_values_;

    DISALLOW_COPY_AND_ASSIGN(ObjectLog);
  };

  class ArrayLog : public ValueObject {
   public:
    void LogValue(size_t index, uint64_t value);

    void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return array_values_.size();
    }

    explicit ArrayLog(ScopedArenaAllocator* allocator)
        : array_values_(std::less<size_t>(), allocator->Adapter(kArenaAllocTransaction)) {}

    ArrayLog(ArrayLog&& log) = default;

   private:
    void UndoArrayWrite(mirror::Array* array,
                        Primitive::Type array_type,
                        size_t index,
                        uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Maps an array index to the raw bits of the element's previous value.
    // TODO: Use JValue instead?
    ScopedArenaSafeMap<size_t, uint64_t> array_values_;

    DISALLOW_COPY_AND_ASSIGN(ArrayLog);
  };

  class InternStringLog : public ValueObject {
   public:
    enum StringKind {
      kStrongString,
      kWeakString
    };
    enum StringOp {
      kInsert,
      kRemove
    };
    InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);

    void Undo(InternTable* intern_table) const
        REQUIRES_SHARED(Locks::mutator_lock_)
        REQUIRES(Locks::intern_table_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    InternStringLog() = default;
    InternStringLog(InternStringLog&& log) = default;

   private:
    mutable GcRoot<mirror::String> str_;
    const StringKind string_kind_;
    const StringOp string_op_;

    DISALLOW_COPY_AND_ASSIGN(InternStringLog);
  };

  class ResolveStringLog : public ValueObject {
   public:
    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::StringIndex string_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
  };

  class ResolveMethodTypeLog : public ValueObject {
   public:
    ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache, dex::ProtoIndex proto_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::ProtoIndex proto_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveMethodTypeLog);
  };

  void LogInternedString(InternStringLog&& log)
      REQUIRES(Locks::intern_table_lock_);

  void UndoObjectModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoArrayModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoInternStringTableModifications()
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveStringModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveMethodTypeModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitInternStringLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveStringLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveMethodTypeLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::string& GetAbortMessage() const;

  ObjectLog& GetOrCreateObjectLog(mirror::Object* obj);

  // The top-level transaction creates an `ArenaStack` which is then
  // passed down to nested transactions.
  std::optional<ArenaStack> arena_stack_;
  // The allocator uses the `ArenaStack` from the top-level transaction.
  ScopedArenaAllocator allocator_;

  ScopedArenaSafeMap<mirror::Object*, ObjectLog> object_logs_;
  ScopedArenaSafeMap<mirror::Array*, ArrayLog> array_logs_;
  ScopedArenaForwardList<InternStringLog> intern_string_logs_;
  ScopedArenaForwardList<ResolveStringLog> resolve_string_logs_;
  ScopedArenaForwardList<ResolveMethodTypeLog> resolve_method_type_logs_;
  bool aborted_;
  bool rolling_back_;  // Single thread, no race.
  gc::Heap* const heap_;
  const bool strict_;
  std::string abort_message_;
  mirror::Class* root_;
  const char* assert_no_new_records_reason_;

  friend class ScopedAssertNoNewTransactionRecords;

  DISALLOW_COPY_AND_ASSIGN(Transaction);
};

class ScopedAssertNoNewTransactionRecords {
 public:
  explicit ScopedAssertNoNewTransactionRecords(const char* reason)
    : transaction_(kIsDebugBuild ? InstallAssertion(reason) : nullptr) {}

  ~ScopedAssertNoNewTransactionRecords() {
    if (kIsDebugBuild && transaction_ != nullptr) {
      RemoveAssertion(transaction_);
    }
  }

 private:
  static Transaction* InstallAssertion(const char* reason);
  static void RemoveAssertion(Transaction* transaction);

  Transaction* transaction_;
};
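
// Usage sketch (illustrative): guard a scope in which no new transaction
// records are expected. In debug builds, a RecordWrite*() call made while
// the guard is alive is expected to trip an assertion naming `reason`:
//
//   {
//     ScopedAssertNoNewTransactionRecords sanr("visiting roots");
//     // ... code that must not log new transaction records ...
//   }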

}  // namespace art

#endif  // ART_RUNTIME_TRANSACTION_H_