/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INTERN_TABLE_H_
#define ART_RUNTIME_INTERN_TABLE_H_

#include <unordered_set>

#include "base/allocator.h"
#include "base/hash_set.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

namespace art {

namespace gc {
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

enum VisitRootFlags : uint8_t;

namespace mirror {
class String;
}  // namespace mirror
class Transaction;

/**
 * Used to intern strings.
 *
 * There are actually two tables: one that holds strong references to its strings, and one that
 * holds weak references. The former is used for string literals, for which there is an effective
 * reference from the constant pool. The latter is used for strings interned at runtime via
 * String.intern. Some code (XML parsers being a prime example) relies on being able to intern
 * arbitrarily many strings for the duration of a parse without permanently increasing the memory
 * footprint.
 */
class InternTable {
 public:
  InternTable();

  // Interns a potentially new string in the 'strong' table. (See above.)
  mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Interns a potentially new string in the 'strong' table. (See above.)
  mirror::String* InternStrong(const char* utf8_data)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Interns a potentially new string in the 'strong' table. (See above.)
  mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Interns a potentially new string in the 'weak' table. (See above.)
  mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Total number of interned strings.
  size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
  // Total number of strongly live interned strings.
  size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
  // Total number of weakly live interned strings.
  size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);

  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) const;

  void DisallowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
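
  // Illustrative usage sketch (not from the original source). A caller that already holds
  // the mutator lock might intern strings roughly as follows; Runtime::Current(),
  // GetInternTable() and ScopedObjectAccess are assumed accessors from the surrounding
  // runtime, and |some_string| stands for an already-allocated mirror::String*:
  //
  //   ScopedObjectAccess soa(Thread::Current());
  //   InternTable* table = Runtime::Current()->GetInternTable();
  //   // Literals go into the strong table and stay live:
  //   mirror::String* literal = table->InternStrong("example");
  //   // Runtime-created strings can be interned weakly and may be swept once unreferenced:
  //   mirror::String* interned = table->InternWeak(some_string);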

  // Adds all of the resolved image strings from the image space into the intern table. The
  // advantage of doing this is preventing expensive DexFile::FindStringId calls.
  void AddImageStringsToTable(gc::space::ImageSpace* image_space)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
  // Copy the post zygote tables to the pre zygote tables to save memory by preventing dirty pages.
  void SwapPostZygoteWithPreZygote()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);

  // Add an intern table which was serialized to the image.
  void AddImageInternTable(gc::space::ImageSpace* image_space)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);

  // Read the intern table from memory. The elements aren't copied; the intern hash set data will
  // point to somewhere within ptr. Only reads the strong interns.
  size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Write the post zygote intern table to a pointer. Only writes the strong interns, since no
  // weak interns are expected when this is called from the image writer.
  size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::intern_table_lock_);

 private:
  class StringHashEquals {
   public:
    std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
    bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
        NO_THREAD_SAFETY_ANALYSIS;
  };
  class GcRootEmptyFn {
   public:
    void MakeEmpty(GcRoot<mirror::String>& item) const {
      item = GcRoot<mirror::String>();
    }
    bool IsEmpty(const GcRoot<mirror::String>& item) const {
      return item.IsNull();
    }
  };

  // Table which holds pre zygote and post zygote interned strings. There is one instance each
  // for the weak interns and the strong interns.
  class Table {
   public:
    mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void Remove(mirror::String* s)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void VisitRoots(RootVisitor* visitor)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void SweepWeaks(IsMarkedCallback* callback, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
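
    // Illustrative sketch of the intended effect of SwapPostZygoteWithPreZygote, inferred
    // from the comments in this file (not the actual implementation):
    //
    //   // At zygote creation the pre zygote table is still empty; everything interned so
    //   // far moves into it and is not written again afterwards:
    //   std::swap(pre_zygote_table_, post_zygote_table_);
    //   // New interns now land in the emptied post zygote table, so the pre zygote pages
    //   // stay clean and can be shared across forked processes.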

    // ReadIntoPreZygoteTable is called from ReadFromMemory, which happens during runtime
    // creation when we load the image intern table. Returns how many bytes were read.
    size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
    // The image writer calls WriteFromPostZygoteTable through WriteToMemory; it writes the
    // interns in the post zygote table. Returns how many bytes were written.
    size_t WriteFromPostZygoteTable(uint8_t* ptr)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

   private:
    typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
        TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;

    void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

    // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty
    // pages caused by modifying the zygote intern table hash table. The pre zygote table holds
    // the strings interned before we created the zygote space; the post zygote table holds the
    // strings interned after that.
    UnorderedSet pre_zygote_table_;
    UnorderedSet post_zygote_table_;
  };

  // Insert s if it is non-null; otherwise return null.
  mirror::String* Insert(mirror::String* s, bool is_strong)
      LOCKS_EXCLUDED(Locks::intern_table_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::String* LookupStrong(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* LookupWeak(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertStrong(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertWeak(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveStrong(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveWeak(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

  // Transaction rollback access.
  mirror::String* LookupStringFromImage(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertStrongFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertWeakFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveStrongFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveWeakFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  friend class Transaction;

  size_t ReadFromMemoryLocked(const uint8_t* ptr)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
  bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
  bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
  ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
  // Since this contains (strong) roots, they need a read barrier to
  // enable concurrent intern table (strong) root scan. Do not
  // directly access the strings in it. Use functions that contain
  // read barriers.
  Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
  std::vector<GcRoot<mirror::String>> new_strong_intern_roots_
      GUARDED_BY(Locks::intern_table_lock_);
  // Since this contains (weak) roots, they need a read barrier. Do
  // not directly access the strings in it. Use functions that contain
  // read barriers.
  Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_INTERN_TABLE_H_
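
// ---------------------------------------------------------------------------
// Illustrative appendix (not part of the original header or of ART): a minimal,
// self-contained sketch of the strong/weak two-table idea described in the class
// comment above, using std::shared_ptr/std::weak_ptr in place of ART's GC-managed
// references. ToyInternTable and everything below are hypothetical names.
// ---------------------------------------------------------------------------

#include <map>
#include <memory>
#include <string>

class ToyInternTable {
 public:
  // "Strong" interning: the table itself keeps the string alive, mirroring how
  // string literals are effectively referenced from the constant pool.
  std::shared_ptr<const std::string> InternStrong(const std::string& s) {
    auto it = strong_.find(s);
    if (it != strong_.end()) {
      return it->second;
    }
    auto value = std::make_shared<const std::string>(s);
    strong_.emplace(s, value);
    return value;
  }

  // "Weak" interning: the table only observes the string, so once every caller drops
  // its reference the entry can be reclaimed (here, lazily on the next lookup). This
  // is what lets e.g. an XML parser intern arbitrarily many strings during a parse
  // without permanently growing the table.
  std::shared_ptr<const std::string> InternWeak(const std::string& s) {
    auto it = weak_.find(s);
    if (it != weak_.end()) {
      if (auto alive = it->second.lock()) {
        return alive;  // Someone still references the interned copy; reuse it.
      }
      weak_.erase(it);  // The previous copy died; fall through and re-intern.
    }
    auto value = std::make_shared<const std::string>(s);
    weak_.emplace(s, value);
    return value;
  }

 private:
  std::map<std::string, std::shared_ptr<const std::string>> strong_;
  std::map<std::string, std::weak_ptr<const std::string>> weak_;
};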