// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

class SmallMapList;

// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.
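// For example, a property load that has already seen many different receiver
// maps probes this cache with the receiver's map and the property name and,
// on a hit, reuses the cached handler instead of taking the IC miss path.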


class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Object* value;
    Map* map;
  };

  void Initialize();
  // Access cache for entry hash(name, map).
  Object* Set(Name* name, Map* map, Object* handler);
  Object* Get(Name* name, Map* map);
  // Clear the lookup table (at mark-compact collection).
  void Clear();
  // Collect all maps that match the name.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Handle<Context> native_context, Zone* zone);
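
  // Rough usage sketch: on a megamorphic miss the runtime installs the
  // computed handler with Set(name, map, handler); later lookups either call
  // Get(name, map) from C++ or probe the same tables directly from generated
  // code.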

  enum Table { kPrimary, kSecondary };

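  // Raw addresses of the key/map/value columns of the first entry in each
  // table; these are handed to generated code (e.g. as external references)
  // so the probe stubs can address the tables directly.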
  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return nullptr;
  }

  Isolate* isolate() { return isolate_; }
  Code::Kind ic_kind() const { return ic_kind_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
  // Some magic numbers used in the primary and secondary hash computations.
  static const int kPrimaryMagic = 0x3d532433;
  static const int kSecondaryMagic = 0xb16ca6e5;

  static int PrimaryOffsetForTesting(Name* name, Map* map) {
    return PrimaryOffset(name, map);
  }

  static int SecondaryOffsetForTesting(Name* name, int seed) {
    return SecondaryOffset(name, seed);
  }

  // The constructor is made public only for the purposes of testing.
  StubCache(Isolate* isolate, Code::Kind ic_kind);

 private:
  // The stub cache has a primary and secondary level.  The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is clear and simple: any existing entry in the primary
  // cache is moved to the secondary cache, and secondary cache entries are
  // overwritten.
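  //
  // Sketch of the update performed by Set() (illustrative only; the actual
  // implementation lives in stub-cache.cc):
  //
  //   Entry* primary = entry(primary_, PrimaryOffset(name, map));
  //   if (*primary holds a live entry for a different name)
  //     copy *primary into its slot in secondary_;      // demote
  //   *primary = {name, handler, map};                  // overwrite primary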

  // Hash algorithm for the primary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
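  // For instance, assuming Name::kHashShift == 2, the mask below is
  // ((1 << 11) - 1) << 2 == 0x1ffc, so the returned offset is a multiple of
  // 4 that selects one of the kPrimaryTableSize buckets.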
  static int PrimaryOffset(Name* name, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4Gb (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // Base the offset on a simple combination of name and map.
    uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }

  // Hash algorithm for the secondary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code.  We generate a hash code that already
  // ends in Name::kHashShift zeros.  Then we multiply it so it is a
  // multiple of sizeof(Entry).  This makes it easier to avoid mistakes
  // in the hashed offset computations.
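  // For example, on a 64-bit build (assuming Name::kHashShift == 2),
  // sizeof(Entry) is 24 and the multiplier is 6, so a table offset of 4 * k
  // maps to byte offset 24 * k, i.e. the k-th Entry.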
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }

 private:
  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;
  Code::Kind ic_kind_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_