/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table-inl.h"

#include <memory>
#include <numeric>  // For std::accumulate() in InternTable::Table::Size().

#include "dex/utf.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "image-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "thread-inl.h"

namespace art {

InternTable::InternTable()
    : log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

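// Visits GC roots held by the intern table. With kVisitRootFlagAllRoots every strong
// intern is reported. With kVisitRootFlagNewRoots only roots logged since logging was
// enabled are visited; if a concurrent moving GC relocated such a root, the stale
// entry in the strong tables is patched to the new address so lookups keep working.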
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        DCHECK(new_ref != nullptr);
        uint32_t hash = static_cast<uint32_t>(old_ref->GetStoredHashCode());
        DCHECK_EQ(hash, static_cast<uint32_t>(new_ref->GetStoredHashCode()));
        DCHECK(new_ref->Equals(old_ref));
        bool found = false;
        for (Table::InternalTable& table : strong_interns_.tables_) {
          auto it = table.set_.FindWithHash(GcRoot<mirror::String>(old_ref), hash);
          if (it != table.set_.end()) {
            *it = GcRoot<mirror::String>(new_ref);
            found = true;
            break;
          }
        }
        DCHECK(found);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

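// Lookup helpers. The overloads taking a Thread* acquire the intern table lock
// themselves; the *Locked variants expect the caller to already hold
// Locks::intern_table_lock_.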
ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  MutexLock mu(self, *Locks::intern_table_lock_);
  return weak_interns_.Find(s, hash);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(s, hash);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
                                                 uint32_t utf16_length,
                                                 const char* utf8_data) {
  uint32_t hash = Utf8String::Hash(utf16_length, utf8_data);
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(Utf8String(utf16_length, utf8_data), hash);
}

ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return weak_interns_.Find(s, hash);
}

ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return strong_interns_.Find(s, hash);
}

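// Starts a fresh active set in both the strong and the weak table; the previously
// active sets become frozen and are never inserted into again.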
void InternTable::AddNewTable() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.AddNewTable();
  strong_interns_.AddNewTable();
}

ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s, uint32_t hash) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s, hash);
  return s;
}

ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s, uint32_t hash) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s, hash);
  return s;
}

void InternTable::RemoveStrong(ObjPtr<mirror::String> s, uint32_t hash) {
  strong_interns_.Remove(s, hash);
}

void InternTable::RemoveWeak(ObjPtr<mirror::String> s, uint32_t hash) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s, hash);
}

void InternTable::BroadcastForNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}

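// Blocks until weak root access is re-enabled after a GC disables it. The intern table
// lock is dropped while waiting, so callers must re-validate any table state afterwards.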
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
           (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}

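// Slow path shared by all interning entry points. Loops because the thread may have to
// wait for weak root access (disabled during parts of the GC), during which another
// thread could intern the same string: each iteration re-checks the strong table,
// skipping already-searched frozen tables. Once weak access is allowed, a weak intern
// is promoted to strong if requested, or a new entry is inserted.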
ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
                                           uint32_t hash,
                                           bool is_strong,
                                           size_t num_searched_strong_frozen_tables) {
  DCHECK(s != nullptr);
  DCHECK_EQ(hash, static_cast<uint32_t>(s->GetStoredHashCode()));
  DCHECK_IMPLIES(hash == 0u, s->ComputeHashCode() == 0);
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    // Check the strong table for a match.
    ObjPtr<mirror::String> strong =
        strong_interns_.Find(s, hash, num_searched_strong_frozen_tables);
    if (strong != nullptr) {
      return strong;
    }
    if (kUseReadBarrier ? self->GetWeakRefAccessEnabled()
                        : weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
      break;
    }
    num_searched_strong_frozen_tables = strong_interns_.tables_.size() - 1u;
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  ObjPtr<mirror::String> weak = weak_interns_.Find(s, hash);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak, hash);
      return InsertStrong(weak, hash);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s, hash) : InsertWeak(s, hash);
}

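// Interns a strong string from modified-UTF-8 data. Fast path: a lock-protected lookup
// that avoids allocating; only on a miss is the mirror::String allocated (outside the
// lock) before falling through to Insert(), which can skip the frozen tables that were
// already searched here.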
ObjPtr<mirror::String> InternTable::InternStrong(uint32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  uint32_t hash = Utf8String::Hash(utf16_length, utf8_data);
  Thread* self = Thread::Current();
  ObjPtr<mirror::String> s;
  size_t num_searched_strong_frozen_tables;
  {
    // Try to avoid allocation. If we need to allocate, release the mutex before the allocation.
    MutexLock mu(self, *Locks::intern_table_lock_);
    DCHECK(!strong_interns_.tables_.empty());
    num_searched_strong_frozen_tables = strong_interns_.tables_.size() - 1u;
    s = strong_interns_.Find(Utf8String(utf16_length, utf8_data), hash);
  }
  if (s != nullptr) {
    return s;
  }
  // For a pure-ASCII string the modified-UTF-8 encoding has exactly `utf16_length` bytes,
  // so `utf8_data[utf16_length]` is the NUL terminator if and only if the string is ASCII.
  bool is_ascii = (utf8_data[utf16_length] == 0);
  int32_t utf8_length = utf16_length + (LIKELY(is_ascii) ? 0 : strlen(utf8_data + utf16_length));
  DCHECK_EQ(static_cast<size_t>(utf8_length), strlen(utf8_data));
  s = mirror::String::AllocFromModifiedUtf8(self, utf16_length, utf8_data, utf8_length);
  if (UNLIKELY(s == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  s->SetHashCode(static_cast<int32_t>(hash));
  return Insert(s, hash, /*is_strong=*/ true, num_searched_strong_frozen_tables);
}

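// Minimal usage sketch (an assumption for illustration: the caller is attached to the
// runtime and holds the mutator lock, e.g. via ScopedObjectAccess):
//
//   ObjPtr<mirror::String> s =
//       Runtime::Current()->GetInternTable()->InternStrong("example");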
ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  ObjPtr<mirror::String> s = mirror::String::AllocFromModifiedUtf8(self, utf8_data);
  if (UNLIKELY(s == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  return InternStrong(s);
}

ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return Insert(s, hash, /*is_strong=*/ true);
}

ObjPtr<mirror::String> InternTable::InternWeak(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  ObjPtr<mirror::String> s = mirror::String::AllocFromModifiedUtf8(self, utf8_data);
  if (UNLIKELY(s == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  return InternWeak(s);
}

ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
  DCHECK(s != nullptr);
  // `String::GetHashCode()` ensures that the stored hash is calculated.
  uint32_t hash = static_cast<uint32_t>(s->GetHashCode());
  return Insert(s, hash, /*is_strong=*/ false);
}

void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}

void InternTable::Table::Remove(ObjPtr<mirror::String> s, uint32_t hash) {
  // Note: We can remove weak interns even from frozen tables when promoting to strong interns.
  // We can remove strong interns only for a transaction rollback.
  for (InternalTable& table : tables_) {
    auto it = table.set_.FindWithHash(GcRoot<mirror::String>(s), hash);
    if (it != table.set_.end()) {
      table.set_.erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}

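// Looks up a string across the stack of hash sets. The first
// num_searched_frozen_tables sets are frozen and known not to contain the string, so
// in debug builds they are only re-checked with DCHECKs.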
FLATTEN
ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s,
                                                uint32_t hash,
                                                size_t num_searched_frozen_tables) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  auto mid = tables_.begin() + num_searched_frozen_tables;
  for (Table::InternalTable& table : MakeIterationRange(tables_.begin(), mid)) {
    DCHECK(table.set_.FindWithHash(GcRoot<mirror::String>(s), hash) == table.set_.end());
  }
  // Search from the last table backwards, assuming that apps look up their own strings
  // more often than boot image strings.
  for (Table::InternalTable& table : ReverseRange(MakeIterationRange(mid, tables_.end()))) {
    auto it = table.set_.FindWithHash(GcRoot<mirror::String>(s), hash);
    if (it != table.set_.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

FLATTEN
ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string, uint32_t hash) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  // Search from the last table backwards, assuming that apps look up their own strings
  // more often than boot image strings.
  for (InternalTable& table : ReverseRange(tables_)) {
    auto it = table.set_.FindWithHash(string, hash);
    if (it != table.set_.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

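// Pushes a fresh active set onto the stack of sets. Insert() below only ever writes to
// the newest set, which keeps the older (typically image-backed) sets clean.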
void InternTable::Table::AddNewTable() {
  // Propagate the min/max load factor from the old active set.
  DCHECK(!tables_.empty());
  const UnorderedSet& last_set = tables_.back().set_;
  InternalTable new_table;
  new_table.set_.SetLoadFactor(last_set.GetMinLoadFactor(), last_set.GetMaxLoadFactor());
  tables_.push_back(std::move(new_table));
}

void InternTable::Table::Insert(ObjPtr<mirror::String> s, uint32_t hash) {
  // Always insert into the last table; the image tables come before it and we avoid
  // inserting into those to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().set_.PutWithHash(GcRoot<mirror::String>(s), hash);
}

void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (InternalTable& table : tables_) {
    for (auto& intern : table.set_) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}

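// GC callback: drops weak interns whose referents are no longer marked and updates the
// roots of referents that were moved.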
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (InternalTable& table : tables_) {
    SweepWeaks(&table.set_, visitor);
  }
}

void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      it = set->erase(it);
    } else {
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}

size_t InternTable::Table::Size() const {
  return std::accumulate(tables_.begin(),
                         tables_.end(),
                         0U,
                         [](size_t sum, const InternalTable& table) {
                           return sum + table.Size();
                         });
}

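// Weak root state changes are only used without a read barrier (see the CHECK in
// ChangeWeakRootStateLocked()). Re-enabling reads broadcasts on
// weak_intern_condition_, waking threads blocked in WaitUntilAccessible().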
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}

InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  InternalTable initial_table;
  initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                                   runtime->GetHashTableMaxLoadFactor());
  tables_.push_back(std::move(initial_table));
}

}  // namespace art