/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <ctime>

#include "object.h"

#include "array-inl.h"
#include "art_field-inl.h"
#include "art_field.h"
#include "class-inl.h"
#include "class.h"
#include "class_linker-inl.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap-inl.h"
#include "handle_scope-inl.h"
#include "iftable-inl.h"
#include "monitor.h"
#include "object-inl.h"
#include "object-refvisitor-inl.h"
#include "object_array-inl.h"
#include "runtime.h"
#include "throwable.h"
#include "well_known_classes.h"

namespace art {
namespace mirror {

Atomic<uint32_t> Object::hash_code_seed(987654321U + std::time(nullptr));

class CopyReferenceFieldsWithReadBarrierVisitor {
 public:
  explicit CopyReferenceFieldsWithReadBarrierVisitor(ObjPtr<Object> dest_obj)
      : dest_obj_(dest_obj) {}

  void operator()(ObjPtr<Object> obj, MemberOffset offset, bool /* is_static */) const
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    // GetFieldObject() contains a RB.
    ObjPtr<Object> ref = obj->GetFieldObject<Object>(offset);
    // No WB here as a large object space does not have card table
    // coverage. Instead, cards will be marked separately.
    dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    // Copy java.lang.ref.Reference.referent which isn't visited in
    // Object::VisitReferences().
    DCHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

  // Unused since we don't copy class native roots.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
      const {}
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

 private:
  const ObjPtr<Object> dest_obj_;
};

ObjPtr<Object> Object::CopyObject(ObjPtr<mirror::Object> dest,
                                  ObjPtr<mirror::Object> src,
                                  size_t num_bytes) {
  // Copy instance data.  Don't assume memcpy copies by words (b/32012820).
  {
    const size_t offset = sizeof(Object);
    uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src.Ptr()) + offset;
    uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest.Ptr()) + offset;
    num_bytes -= offset;
    DCHECK_ALIGNED(src_bytes, sizeof(uintptr_t));
    DCHECK_ALIGNED(dst_bytes, sizeof(uintptr_t));
    // Use word sized copies to begin.
    while (num_bytes >= sizeof(uintptr_t)) {
      reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->store(
          reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->load(std::memory_order_relaxed),
          std::memory_order_relaxed);
      src_bytes += sizeof(uintptr_t);
      dst_bytes += sizeof(uintptr_t);
      num_bytes -= sizeof(uintptr_t);
    }
    // Copy possible 32 bit word.
    if (sizeof(uintptr_t) != sizeof(uint32_t) && num_bytes >= sizeof(uint32_t)) {
      reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->store(
          reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->load(std::memory_order_relaxed),
          std::memory_order_relaxed);
      src_bytes += sizeof(uint32_t);
      dst_bytes += sizeof(uint32_t);
      num_bytes -= sizeof(uint32_t);
    }
    // Copy remaining bytes; avoid going past the end of num_bytes since there may be a redzone
    // there.
    while (num_bytes > 0) {
      reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->store(
          reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->load(std::memory_order_relaxed),
          std::memory_order_relaxed);
      src_bytes += sizeof(uint8_t);
      dst_bytes += sizeof(uint8_t);
      num_bytes -= sizeof(uint8_t);
    }
  }

  if (kUseReadBarrier) {
    // We need a RB here. After copying the whole object above, copy reference fields one by one
    // again with a RB to make sure there are no from-space refs. TODO: Optimize this later?
    CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
    src->VisitReferences(visitor, visitor);
  }
  // Perform write barriers on copied object references.
  ObjPtr<Class> c = src->GetClass();
  if (c->IsArrayClass()) {
    if (!c->GetComponentType()->IsPrimitive()) {
      ObjPtr<ObjectArray<Object>> array = dest->AsObjectArray<Object>();
      WriteBarrier::ForArrayWrite(dest, 0, array->GetLength());
    }
  } else {
    WriteBarrier::ForEveryFieldWrite(dest);
  }
  return dest;
}

// An allocation pre-fence visitor that copies the object.
class CopyObjectVisitor {
 public:
  CopyObjectVisitor(Handle<Object>* orig, size_t num_bytes)
      : orig_(orig), num_bytes_(num_bytes) {}

  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    Object::CopyObject(obj, orig_->Get(), num_bytes_);
  }

 private:
  Handle<Object>* const orig_;
  const size_t num_bytes_;
  DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
};

ObjPtr<Object> Object::Clone(Thread* self) {
  CHECK(!IsClass()) << "Can't clone classes.";
  // Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
  // be wrong.
  gc::Heap* heap = Runtime::Current()->GetHeap();
  size_t num_bytes = SizeOf();
  StackHandleScope<1> hs(self);
  Handle<Object> this_object(hs.NewHandle(this));
  ObjPtr<Object> copy;
  CopyObjectVisitor visitor(&this_object, num_bytes);
  if (heap->IsMovableObject(this)) {
    copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
  } else {
    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
  }
  if (this_object->GetClass()->IsFinalizable()) {
    heap->AddFinalizerReference(self, &copy);
  }
  return copy;
}

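// Advances hash_code_seed with a linear congruential step (the multiplier/increment pair
// 1103515245 / 12345 is the classic ANSI C rand() recurrence) and retries whenever the masked
// result is zero, so a zero value is never handed out as an identity hash code.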
uint32_t Object::GenerateIdentityHashCode() {
  uint32_t expected_value, new_value;
  do {
    expected_value = hash_code_seed.load(std::memory_order_relaxed);
    new_value = expected_value * 1103515245 + 12345;
  } while (!hash_code_seed.CompareAndSetWeakRelaxed(expected_value, new_value) ||
      (expected_value & LockWord::kHashMask) == 0);
  return expected_value & LockWord::kHashMask;
}

void Object::SetHashCodeSeed(uint32_t new_seed) {
  hash_code_seed.store(new_seed, std::memory_order_relaxed);
}

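// Returns this object's identity hash code, generating and installing one on first use:
// in the lock word when the object is unlocked or already hashed, or in the inflated
// monitor when the object is thin- or fat-locked.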
int32_t Object::IdentityHashCode() {
  ObjPtr<Object> current_this = this;  // The this pointer may get invalidated by thread suspension.
  while (true) {
    LockWord lw = current_this->GetLockWord(false);
    switch (lw.GetState()) {
      case LockWord::kUnlocked: {
        // Try to compare and swap in a new hash. If the CAS succeeds, return the new hash value.
        LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
        DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
        // Use a strong CAS to prevent spurious failures since these can make the boot image
        // non-deterministic.
        if (current_this->CasLockWord(lw, hash_word, CASMode::kStrong, std::memory_order_relaxed)) {
          return hash_word.GetHashCode();
        }
        break;
      }
      case LockWord::kThinLocked: {
        // Inflate the thin lock to a monitor and stick the hash code inside the monitor. May
        // fail spuriously.
        Thread* self = Thread::Current();
        StackHandleScope<1> hs(self);
        Handle<mirror::Object> h_this(hs.NewHandle(current_this));
        Monitor::InflateThinLocked(self, h_this, lw, GenerateIdentityHashCode());
        // A GC may have occurred when we switched to kBlocked.
        current_this = h_this.Get();
        break;
      }
      case LockWord::kFatLocked: {
        // Already inflated, return the hash stored in the monitor.
        Monitor* monitor = lw.FatLockMonitor();
        DCHECK(monitor != nullptr);
        return monitor->GetHashCode();
      }
      case LockWord::kHashCode: {
        return lw.GetHashCode();
      }
      default: {
        LOG(FATAL) << "Invalid state during hashcode " << lw.GetState();
        UNREACHABLE();
      }
    }
  }
}

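// Debug validation of a reference-field store: looks up the field declared at field_offset
// and CHECKs that new_value's class is assignable to the field's resolved type. Does nothing
// until the runtime has started and object validation is enabled.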
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value) {
  ObjPtr<Class> c = GetClass();
  Runtime* runtime = Runtime::Current();
  if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
      !runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
    return;
  }
  for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
    for (ArtField& field : cur->GetIFields()) {
      if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
        CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
        // TODO: resolve the field type for moving GC.
        ObjPtr<mirror::Class> field_type =
            kMovingCollector ? field.LookupResolvedType() : field.ResolveType();
        if (field_type != nullptr) {
          CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
        }
        return;
      }
    }
  }
  if (c->IsArrayClass()) {
    // Bounds and assign-ability done in the array setter.
    return;
  }
  if (IsClass()) {
    for (ArtField& field : AsClass()->GetSFields()) {
      if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
        CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
        // TODO: resolve the field type for moving GC.
        ObjPtr<mirror::Class> field_type =
            kMovingCollector ? field.LookupResolvedType() : field.ResolveType();
        if (field_type != nullptr) {
          CHECK(field_type->IsAssignableFrom(new_value->GetClass()));
        }
        return;
      }
    }
  }
  LOG(FATAL) << "Failed to find field for assignment to " << reinterpret_cast<void*>(this)
             << " of type " << c->PrettyDescriptor() << " at offset " << field_offset;
  UNREACHABLE();
}

ArtField* Object::FindFieldByOffset(MemberOffset offset) {
  return IsClass() ? ArtField::FindStaticFieldWithOffset(AsClass(), offset.Uint32Value())
      : ArtField::FindInstanceFieldWithOffset(GetClass(), offset.Uint32Value());
}

std::string Object::PrettyTypeOf(ObjPtr<mirror::Object> obj) {
  return (obj == nullptr) ? "null" : obj->PrettyTypeOf();
}

std::string Object::PrettyTypeOf() {
  // From-space version is the same as the to-space version since the dex file never changes.
  // Avoiding the read barrier here is important to prevent recursive AssertToSpaceInvariant
  // issues.
  ObjPtr<mirror::Class> klass = GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>();
  if (klass == nullptr) {
    return "(raw)";
  }
  std::string temp;
  std::string result(PrettyDescriptor(klass->GetDescriptor(&temp)));
  if (klass->IsClassClass()) {
    result += "<" + PrettyDescriptor(AsClass()->GetDescriptor(&temp)) + ">";
  }
  return result;
}

}  // namespace mirror
}  // namespace art