/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_

#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "cutils/atomic-inline.h"
#include "offsets.h"

namespace art {

class ImageWriter;
struct ObjectOffsets;
class Thread;

namespace mirror {

class ArtField;
class ArtMethod;
class Array;
class Class;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<uint8_t> BooleanArray;
typedef PrimitiveArray<int8_t> ByteArray;
typedef PrimitiveArray<uint16_t> CharArray;
typedef PrimitiveArray<double> DoubleArray;
typedef PrimitiveArray<float> FloatArray;
typedef PrimitiveArray<int32_t> IntArray;
typedef PrimitiveArray<int64_t> LongArray;
typedef PrimitiveArray<int16_t> ShortArray;
class String;
class Throwable;

// Classes shared with the managed side of the world need to be packed so that they don't have
// extra platform specific padding.
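// (PACKED(4) presumably expands to compiler packing/alignment attributes defined in
// "base/macros.h", included above, so a mirror class keeps the same field offsets as its
// managed counterpart.)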
#define MANAGED PACKED(4)

// Fields within mirror objects aren't accessed directly so that the appropriate amount of
// handshaking is done with GC (for example, read and write barriers). This macro is used to
// compute an offset for the Set/Get methods defined in Object that can safely access fields.
#define OFFSET_OF_OBJECT_MEMBER(type, field) \
    MemberOffset(OFFSETOF_MEMBER(type, field))
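// For example, OFFSET_OF_OBJECT_MEMBER(Object, monitor_) wraps the byte offset of monitor_
// in a MemberOffset, which the GetField/SetField helpers below consume (see ClassOffset()
// and MonitorOffset() for the two uses in this class).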

const bool kCheckFieldAssignments = false;

// C++ mirror of java.lang.Object
class MANAGED Object {
 public:
  static MemberOffset ClassOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
  }

  Class* GetClass() const;

  void SetClass(Class* new_klass);

  // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
  // invoke-interface to detect incompatible interface types.
  bool VerifierInstanceOf(const Class* klass) const
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool InstanceOf(const Class* klass) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  int32_t IdentityHashCode() const {
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we'll need to use the Object's internal concept of identity
    UNIMPLEMENTED(FATAL);
#endif
    return reinterpret_cast<int32_t>(this);
  }

  static MemberOffset MonitorOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
  }

  volatile int32_t* GetRawLockWordAddress() {
    byte* raw_addr = reinterpret_cast<byte*>(this) +
        OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value();
    int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
    return const_cast<volatile int32_t*>(word_addr);
  }

  uint32_t GetThinLockId();

  void MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCK_FUNCTION(monitor_lock_);

  bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(monitor_lock_);

  void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
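  // Sketch of the intended pairing, mirroring java.lang.Object semantics: a caller brackets a
  // critical section with MonitorEnter(self) / MonitorExit(self), and Wait(), Notify() and
  // NotifyAll() expect the calling thread to already hold this object's monitor.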

  bool IsClass() const;

  Class* AsClass();

  const Class* AsClass() const;

  bool IsObjectArray() const;

  template<class T>
  ObjectArray<T>* AsObjectArray();

  template<class T>
  const ObjectArray<T>* AsObjectArray() const;

  bool IsArrayInstance() const;

  Array* AsArray();

  const Array* AsArray() const;

  BooleanArray* AsBooleanArray();
  ByteArray* AsByteArray();
  CharArray* AsCharArray();
  ShortArray* AsShortArray();
  IntArray* AsIntArray();
  LongArray* AsLongArray();

  String* AsString();

  Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsArtMethod() const;

  ArtMethod* AsArtMethod();

  const ArtMethod* AsArtMethod() const;

  bool IsArtField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ArtField* AsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const ArtField* AsArtField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsReferenceInstance() const;

  bool IsWeakReferenceInstance() const;

  bool IsSoftReferenceInstance() const;

  bool IsFinalizerReferenceInstance() const;

  bool IsPhantomReferenceInstance() const;

  // Accessors for Java type fields
  template<class T>
  T GetFieldObject(MemberOffset field_offset, bool is_volatile) const {
    T result = reinterpret_cast<T>(GetField32(field_offset, is_volatile));
    VerifyObject(result);
    return result;
  }

  void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile,
                      bool this_is_valid = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    VerifyObject(new_value);
    SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
    if (new_value != NULL) {
      CheckFieldAssignment(field_offset, new_value);
      WriteBarrierField(this, field_offset, new_value);
    }
  }
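  // Illustrative use of the pair above (Foo and ref_ are hypothetical names, not real mirror
  // types):
  //   Object* ref = GetFieldObject<Object*>(OFFSET_OF_OBJECT_MEMBER(Foo, ref_), false);
  //   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Foo, ref_), new_ref, false);
  // Reference stores go through SetFieldObject so the assignment check and write barrier run.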

  uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const {
    VerifyObject(this);
    const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
    const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
    if (UNLIKELY(is_volatile)) {
      return android_atomic_acquire_load(word_addr);
    } else {
      return *word_addr;
    }
  }

  void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile,
                  bool this_is_valid = true) {
    if (this_is_valid) {
      VerifyObject(this);
    }
    byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
    uint32_t* word_addr = reinterpret_cast<uint32_t*>(raw_addr);
    if (UNLIKELY(is_volatile)) {
      /*
       * TODO: add an android_atomic_synchronization_store() function and
       * use it in the 32-bit volatile set handlers.  On some platforms we
       * can use a fast atomic instruction and avoid the barriers.
       */
      ANDROID_MEMBAR_STORE();
      *word_addr = new_value;
      ANDROID_MEMBAR_FULL();
    } else {
      *word_addr = new_value;
    }
  }
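  // As written above, a volatile read uses an acquire load and a volatile write is bracketed
  // by a store barrier before and a full barrier after, giving 32-bit fields Java-volatile-like
  // ordering.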

  uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const;

  void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile);

 protected:
  // Accessors for non-Java type fields
  template<class T>
  T GetFieldPtr(MemberOffset field_offset, bool is_volatile) const {
    return reinterpret_cast<T>(GetField32(field_offset, is_volatile));
  }

  template<typename T>
  void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile, bool this_is_valid = true) {
    SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
  }

 private:
  static void VerifyObject(const Object* obj) ALWAYS_INLINE;

  // Verify the type correctness of stores to fields.
  void CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CheckFieldAssignment(MemberOffset field_offset, const Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (kCheckFieldAssignments) {
      CheckFieldAssignmentImpl(field_offset, new_value);
    }
  }

  // Write barrier called post update to a reference bearing field.
  static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);

  Class* klass_;

  uint32_t monitor_;

  friend class art::ImageWriter;
  friend struct art::ObjectOffsets;  // for verifying offset information
  DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_OBJECT_H_