/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table-inl.h"

#include <memory>

#include "class_linker.h"
#include "dex/utf.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "oat/image-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread-inl.h"

namespace art HIDDEN {

InternTable::InternTable()
    : log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

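// Visits the strong roots of the intern table for the GC. With kVisitRootFlagAllRoots every
// strong intern is visited; with kVisitRootFlagNewRoots only the roots logged since logging
// was enabled are visited, and any root the GC moved is patched back into the owning table.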
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        DCHECK(new_ref != nullptr);
        uint32_t hash = static_cast<uint32_t>(old_ref->GetStoredHashCode());
        DCHECK_EQ(hash, static_cast<uint32_t>(new_ref->GetStoredHashCode()));
        DCHECK(new_ref->Equals(old_ref));
        bool found = false;
        for (Table::InternalTable& table : strong_interns_.tables_) {
          auto it = table.set_.FindWithHash(GcRoot<mirror::String>(old_ref), hash);
          if (it != table.set_.end()) {
            *it = GcRoot<mirror::String>(new_ref);
            found = true;
            break;
          }
        }
        DCHECK(found);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

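// The Lookup* helpers below only search the tables and never insert. The non-Locked variants
// take the intern table lock themselves; the *Locked variants expect the caller to hold it.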
ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  MutexLock mu(self, *Locks::intern_table_lock_);
  return weak_interns_.Find(s, hash);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(s, hash);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
                                                 uint32_t utf16_length,
                                                 const char* utf8_data) {
  uint32_t hash = Utf8String::Hash(utf16_length, utf8_data);
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(Utf8String(utf16_length, utf8_data), hash);
}

ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return weak_interns_.Find(s, hash);
}

ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return strong_interns_.Find(s, hash);
}

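// Pushes a new, empty table onto both the strong and the weak intern tables. Subsequent
// insertions go into the new back table; earlier tables are treated as frozen (see the note
// in Table::Remove for the exceptions).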
void InternTable::AddNewTable() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.AddNewTable();
  strong_interns_.AddNewTable();
}

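// Inserts `s` into the strong intern table. Records the insertion for rollback if a
// transaction is active, and logs the new root when root logging is enabled so that
// kVisitRootFlagNewRoots can report it.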
ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s, uint32_t hash) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->GetClassLinker()->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s, hash);
  return s;
}

ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s, uint32_t hash) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->GetClassLinker()->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s, hash);
  return s;
}

void InternTable::RemoveStrong(ObjPtr<mirror::String> s, uint32_t hash) {
  strong_interns_.Remove(s, hash);
}

void InternTable::RemoveWeak(ObjPtr<mirror::String> s, uint32_t hash) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->GetClassLinker()->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s, hash);
}

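// Wakes up threads blocked in WaitUntilAccessible() waiting for weak intern access to be
// re-enabled.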
void InternTable::BroadcastForNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}

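// Blocks until weak root access is re-enabled after a GC disabled it. Temporarily releases
// the intern table lock and suspends the thread state while waiting, then re-acquires the
// lock before returning.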
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while ((!gUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
           (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}

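// Slow path shared by the Intern* entry points: looks for a match in the strong table, waits
// for weak root access if the GC has disabled it, then checks the weak table and either
// promotes a weak match to strong or inserts a new entry. The
// `num_searched_strong_frozen_tables` hint skips frozen strong tables the caller has already
// searched.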
ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
                                           uint32_t hash,
                                           bool is_strong,
                                           size_t num_searched_strong_frozen_tables) {
  DCHECK(s != nullptr);
  DCHECK_EQ(hash, static_cast<uint32_t>(s->GetStoredHashCode()));
  DCHECK_IMPLIES(hash == 0u, s->ComputeHashCode() == 0);
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    // Check the strong table for a match.
    ObjPtr<mirror::String> strong =
        strong_interns_.Find(s, hash, num_searched_strong_frozen_tables);
    if (strong != nullptr) {
      return strong;
    }
    if (gUseReadBarrier ? self->GetWeakRefAccessEnabled()
                        : weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
      break;
    }
    num_searched_strong_frozen_tables = strong_interns_.tables_.size() - 1u;
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!gUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  ObjPtr<mirror::String> weak = weak_interns_.Find(s, hash);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak, hash);
      return InsertStrong(weak, hash);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s, hash) : InsertWeak(s, hash);
}

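// Interns a string from modified-UTF-8 data, first attempting a lock-protected lookup to
// avoid allocating when the string is already strongly interned. `utf8_data` holds at least
// `utf16_length` bytes, so a zero byte at that index implies a pure-ASCII string. A typical
// caller (hypothetical sketch; an attached thread holding the mutator lock is assumed):
//
//   ObjPtr<mirror::String> s =
//       Runtime::Current()->GetInternTable()->InternStrong(3u, "abc");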
ObjPtr<mirror::String> InternTable::InternStrong(uint32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  uint32_t hash = Utf8String::Hash(utf16_length, utf8_data);
  Thread* self = Thread::Current();
  ObjPtr<mirror::String> s;
  size_t num_searched_strong_frozen_tables;
  {
    // Try to avoid allocation. If we need to allocate, release the mutex before the allocation.
    MutexLock mu(self, *Locks::intern_table_lock_);
    DCHECK(!strong_interns_.tables_.empty());
    num_searched_strong_frozen_tables = strong_interns_.tables_.size() - 1u;
    s = strong_interns_.Find(Utf8String(utf16_length, utf8_data), hash);
  }
  if (s != nullptr) {
    return s;
  }
  bool is_ascii = (utf8_data[utf16_length] == 0);
  int32_t utf8_length = utf16_length + (LIKELY(is_ascii) ? 0 : strlen(utf8_data + utf16_length));
  DCHECK_EQ(static_cast<size_t>(utf8_length), strlen(utf8_data));
  s = mirror::String::AllocFromModifiedUtf8(self, utf16_length, utf8_data, utf8_length);
  if (UNLIKELY(s == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  s->SetHashCode(static_cast<int32_t>(hash));
  return Insert(s, hash, /*is_strong=*/ true, num_searched_strong_frozen_tables);
}

ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  ObjPtr<mirror::String> s = mirror::String::AllocFromModifiedUtf8(self, utf8_data);
  if (UNLIKELY(s == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  return InternStrong(s);
}

ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return Insert(s, hash, /*is_strong=*/ true);
}

ObjPtr<mirror::String> InternTable::InternWeak(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  ObjPtr<mirror::String> s = mirror::String::AllocFromModifiedUtf8(self, utf8_data);
  if (UNLIKELY(s == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  return InternWeak(s);
}

ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return Insert(s, hash, /*is_strong=*/ false);
}

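// Called during system-weak sweeping: drops weak interns that the GC no longer marks and
// updates the remaining entries to their moved locations (see Table::SweepWeaks below).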
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}

void InternTable::Table::Remove(ObjPtr<mirror::String> s, uint32_t hash) {
  // Note: We can remove weak interns even from frozen tables when promoting to strong interns.
  // We can remove strong interns only for a transaction rollback.
  for (InternalTable& table : tables_) {
    auto it = table.set_.FindWithHash(GcRoot<mirror::String>(s), hash);
    if (it != table.set_.end()) {
      table.set_.erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}

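// Looks up `s` across the internal tables. The first `num_searched_frozen_tables` tables are
// only DCHECK-verified to not contain the string, since the caller already searched them;
// the rest are searched starting from the most recent table.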
FLATTEN
ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s,
                                                uint32_t hash,
                                                size_t num_searched_frozen_tables) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  auto mid = tables_.begin() + num_searched_frozen_tables;
  for (Table::InternalTable& table : MakeIterationRange(tables_.begin(), mid)) {
    DCHECK(table.set_.FindWithHash(GcRoot<mirror::String>(s), hash) == table.set_.end());
  }
  // Search from the last table, assuming that apps search for their own
  // strings more often than for boot image strings.
  for (Table::InternalTable& table : ReverseRange(MakeIterationRange(mid, tables_.end()))) {
    auto it = table.set_.FindWithHash(GcRoot<mirror::String>(s), hash);
    if (it != table.set_.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

FLATTEN
ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string, uint32_t hash) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  // Search from the last table, assuming that apps search for their own
  // strings more often than for boot image strings.
  for (InternalTable& table : ReverseRange(tables_)) {
    auto it = table.set_.FindWithHash(string, hash);
    if (it != table.set_.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

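// Appends a fresh internal table that inherits the load-factor bounds of the previous back
// table; Insert() below only ever writes into this newest table.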
void InternTable::Table::AddNewTable() {
  // Propagate the min/max load factor from the old active set.
  DCHECK(!tables_.empty());
  const UnorderedSet& last_set = tables_.back().set_;
  InternalTable new_table;
  new_table.set_.SetLoadFactor(last_set.GetMinLoadFactor(), last_set.GetMaxLoadFactor());
  tables_.push_back(std::move(new_table));
}

void InternTable::Table::Insert(ObjPtr<mirror::String> s, uint32_t hash) {
  // Always insert into the last table; the image tables come before it, and we avoid
  // inserting into those to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().set_.PutWithHash(GcRoot<mirror::String>(s), hash);
}

void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (InternalTable& table : tables_) {
    for (auto& intern : table.set_) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}

void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (InternalTable& table : tables_) {
    SweepWeaks(&table.set_, visitor);
  }
}

void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      it = set->erase(it);
    } else {
      // Don't use AsString as it does an IsString check in debug builds which, in
      // the case of userfaultfd GC, may be called before the object's content is
      // there yet.
      *it = GcRoot<mirror::String>(ObjPtr<mirror::String>::DownCast(new_object));
      ++it;
    }
  }
}

size_t InternTable::Table::Size() const {
  return std::accumulate(tables_.begin(),
                         tables_.end(),
                         0U,
                         [](size_t sum, const InternalTable& table) {
                           return sum + table.Size();
                         });
}

void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

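// Updates the weak root state under the intern table lock (non-read-barrier configurations
// only) and, when weak root access becomes possible again, wakes up any waiters.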
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!gUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}

InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  InternalTable initial_table;
  initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                                   runtime->GetHashTableMaxLoadFactor());
  tables_.push_back(std::move(initial_table));
}

}  // namespace art