/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_TRANSACTION_H_
#define ART_RUNTIME_TRANSACTION_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "base/scoped_arena_containers.h"
#include "base/value_object.h"
#include "dex/dex_file_types.h"
#include "dex/primitive.h"
#include "gc_root.h"
#include "offsets.h"

#include <list>
#include <map>
#include <optional>

namespace art {
namespace gc {
class Heap;
}  // namespace gc
namespace mirror {
class Array;
class Class;
class DexCache;
class Object;
class String;
}  // namespace mirror
class InternTable;
template<class MirrorType> class ObjPtr;

class Transaction final {
 public:
  static constexpr const char* kAbortExceptionDescriptor = "Ldalvik/system/TransactionAbortError;";

  Transaction(bool strict, mirror::Class* root, ArenaStack* arena_stack, ArenaPool* arena_pool);
  ~Transaction();

  ArenaStack* GetArenaStack() {
    return allocator_.GetArenaStack();
  }

  void Abort(const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowAbortError(Thread* self, const std::string* abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsAborted() const {
    return aborted_;
  }

  // Whether the transaction is currently rolling back. A transaction sets this flag when it
  // starts rolling back, because nested transactional recording must be disabled while memory
  // is being restored to its previous state.
  bool IsRollingBack() const {
    return rolling_back_;
  }

  // Whether the transaction runs in strict mode. In strict mode, access to static fields is
  // constrained: one class's <clinit> is not allowed to read or modify another class's static
  // fields, unless the transaction has been aborted.
  bool IsStrict() const {
    return strict_;
  }

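  // Illustrative sketch (an assumption, not part of the runtime): before performing a static
  // field write inside a <clinit>, a caller would consult the constraint check declared below
  // and abort on violation, e.g.:
  //
  //   if (transaction->WriteConstraint(obj)) {
  //     transaction->Abort("illegal write to another class's static field");
  //   }
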
  // Record object field changes.
  void RecordWriteFieldBoolean(mirror::Object* obj,
                               MemberOffset field_offset,
                               uint8_t value,
                               bool is_volatile);
  void RecordWriteFieldByte(mirror::Object* obj,
                            MemberOffset field_offset,
                            int8_t value,
                            bool is_volatile);
  void RecordWriteFieldChar(mirror::Object* obj,
                            MemberOffset field_offset,
                            uint16_t value,
                            bool is_volatile);
  void RecordWriteFieldShort(mirror::Object* obj,
                             MemberOffset field_offset,
                             int16_t value,
                             bool is_volatile);
  void RecordWriteField32(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint32_t value,
                          bool is_volatile);
  void RecordWriteField64(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint64_t value,
                          bool is_volatile);
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 mirror::Object* value,
                                 bool is_volatile);

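  // Illustrative sketch (an assumption): callers record the *old* value before mutating the
  // field, so that Rollback() can restore it, e.g.:
  //
  //   transaction->RecordWriteField32(obj, offset, obj->GetField32(offset),
  //                                   /*is_volatile=*/ false);
  //   obj->SetField32(offset, new_value);
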
  // Record array change.
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record intern string table changes.
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);

  // Record resolve string.
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record resolve method type.
  void RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache, dex::ProtoIndex proto_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Abort transaction by undoing all recorded changes.
  void Rollback()
      REQUIRES_SHARED(Locks::mutator_lock_);

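  // Note (assumed call sequence): Rollback() undoes the recorded changes category by
  // category via the private Undo*Modifications() helpers declared below, restoring object
  // fields, array elements, intern table entries, and dex cache resolutions.
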
  void VisitRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constraint checks; each returns true if the corresponding access is disallowed under
  // this transaction.
  bool ReadConstraint(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteConstraint(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteValueConstraint(ObjPtr<mirror::Object> value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  class ObjectLog : public ValueObject {
   public:
    void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
    void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
    void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile);
    void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile);
    void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
    void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
    void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);

    void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return field_values_.size();
    }

    explicit ObjectLog(ScopedArenaAllocator* allocator)
        : field_values_(std::less<uint32_t>(), allocator->Adapter(kArenaAllocTransaction)) {}
    ObjectLog(ObjectLog&& log) = default;

   private:
    enum FieldValueKind {
      kBoolean,
      kByte,
      kChar,
      kShort,
      k32Bits,
      k64Bits,
      kReference
    };
    struct FieldValue : public ValueObject {
      // TODO: use JValue instead?
      uint64_t value;
      FieldValueKind kind;
      bool is_volatile;

      FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
      FieldValue(FieldValue&& log) = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(FieldValue);
    };

    void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
    void UndoFieldWrite(mirror::Object* obj,
                        MemberOffset field_offset,
                        const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Maps a field's offset to its old value.
    ScopedArenaSafeMap<uint32_t, FieldValue> field_values_;

    DISALLOW_COPY_AND_ASSIGN(ObjectLog);
  };

  class ArrayLog : public ValueObject {
   public:
    void LogValue(size_t index, uint64_t value);

    void Undo(mirror::Array* array) const REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return array_values_.size();
    }

    explicit ArrayLog(ScopedArenaAllocator* allocator)
        : array_values_(std::less<size_t>(), allocator->Adapter(kArenaAllocTransaction)) {}

    ArrayLog(ArrayLog&& log) = default;

   private:
    void UndoArrayWrite(mirror::Array* array,
                        Primitive::Type array_type,
                        size_t index,
                        uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Maps an array index to its old value.
    // TODO: use JValue instead?
    ScopedArenaSafeMap<size_t, uint64_t> array_values_;

    DISALLOW_COPY_AND_ASSIGN(ArrayLog);
  };

  class InternStringLog : public ValueObject {
   public:
    enum StringKind {
      kStrongString,
      kWeakString
    };
    enum StringOp {
      kInsert,
      kRemove
    };
    InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);

    void Undo(InternTable* intern_table) const
        REQUIRES_SHARED(Locks::mutator_lock_)
        REQUIRES(Locks::intern_table_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    // Only the move constructor is supported.
    InternStringLog() = delete;
    InternStringLog(const InternStringLog& log) = delete;
    InternStringLog& operator=(const InternStringLog& log) = delete;
    InternStringLog(InternStringLog&& log) = default;
    InternStringLog& operator=(InternStringLog&& log) = delete;

   private:
    mutable GcRoot<mirror::String> str_;
    const StringKind string_kind_;
    const StringOp string_op_;
  };

  class ResolveStringLog : public ValueObject {
   public:
    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::StringIndex string_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
  };

  class ResolveMethodTypeLog : public ValueObject {
   public:
    ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache, dex::ProtoIndex proto_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::ProtoIndex proto_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveMethodTypeLog);
  };

  void LogInternedString(InternStringLog&& log)
      REQUIRES(Locks::intern_table_lock_);

  void UndoObjectModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoArrayModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoInternStringTableModifications()
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveStringModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveMethodTypeModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitInternStringLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveStringLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveMethodTypeLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::string& GetAbortMessage() const;

  ObjectLog& GetOrCreateObjectLog(mirror::Object* obj);

  // The top-level transaction creates an `ArenaStack` which is then
  // passed down to nested transactions.
  std::optional<ArenaStack> arena_stack_;
  // The allocator uses the `ArenaStack` from the top-level transaction.
  ScopedArenaAllocator allocator_;

  ScopedArenaSafeMap<mirror::Object*, ObjectLog> object_logs_;
  ScopedArenaSafeMap<mirror::Array*, ArrayLog> array_logs_;
  ScopedArenaForwardList<InternStringLog> intern_string_logs_;
  ScopedArenaForwardList<ResolveStringLog> resolve_string_logs_;
  ScopedArenaForwardList<ResolveMethodTypeLog> resolve_method_type_logs_;
  bool aborted_;
  bool rolling_back_;  // Single thread, no race.
  gc::Heap* const heap_;
  const bool strict_;
  std::string abort_message_;
  mirror::Class* root_;
  const char* assert_no_new_records_reason_;

  friend class ScopedAssertNoNewTransactionRecords;

  DISALLOW_COPY_AND_ASSIGN(Transaction);
};

class ScopedAssertNoNewTransactionRecords {
 public:
  explicit ScopedAssertNoNewTransactionRecords(const char* reason)
    : transaction_(kIsDebugBuild ? InstallAssertion(reason) : nullptr) {}

  ~ScopedAssertNoNewTransactionRecords() {
    if (kIsDebugBuild && transaction_ != nullptr) {
      RemoveAssertion(transaction_);
    }
  }

 private:
  static Transaction* InstallAssertion(const char* reason);
  static void RemoveAssertion(Transaction* transaction);

  Transaction* transaction_;
};
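
// Illustrative usage (an assumption, not from this file): in debug builds, guard a region
// where no transaction records are expected:
//
//   {
//     ScopedAssertNoNewTransactionRecords sanntr("visiting roots");
//     // Any new transaction record created here would trip a debug assertion.
//   }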

}  // namespace art

#endif  // ART_RUNTIME_TRANSACTION_H_