// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.

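// A minimal usage sketch (illustrative only, not part of the original file;
// it assumes an already compiled |handler| for the (receiver map, name)
// pair and a cache obtained via isolate->stub_cache()):
//
//   StubCache* cache = isolate->stub_cache();
//   cache->Set(name, receiver_map, handler);  // install the handler
//   Code* hit = cache->Get(name, receiver_map, handler->flags());
//   // |hit| is |handler| until the entry is evicted; a miss is expected
//   // to yield NULL.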

class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Code* value;
    Map* map;
  };

  void Initialize();
  // Access cache for entry hash(name, map).
  Code* Set(Name* name, Map* map, Code* code);
  Code* Get(Name* name, Map* map, Code::Flags flags);
  // Clear the lookup table (at mark-compact collection).
  void Clear();
  // Collect all maps that match the name and flags.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Code::Flags flags, Handle<Context> native_context,
                           Zone* zone);
  // Generate code for probing the stub cache table.
  // The arguments extra, extra2 and extra3 may be used to pass additional
  // scratch registers; set them to no_reg if they are not needed.
  void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
                     Code::Flags flags, Register receiver, Register name,
                     Register scratch, Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);
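  // Probe sketch (summarizing the per-architecture implementations; treat
  // the details as an assumption, not a contract): the generated code hashes
  // (name, map, flags), checks the matching primary table entry, then the
  // secondary one, and jumps to the cached handler on a hit; on a miss it
  // falls through so the caller can handle it.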

  enum Table { kPrimary, kSecondary };

  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
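  // That is, 2048 entries in the primary table and 512 in the secondary.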

  static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) {
    return PrimaryOffset(name, flags, map);
  }

  static int SecondaryOffsetForTesting(Name* name, Code::Flags flags,
                                       int seed) {
    return SecondaryOffset(name, flags, seed);
  }

  // The constructor is made public only for the purposes of testing.
  explicit StubCache(Isolate* isolate);

 private:
  // The stub cache has a primary and secondary level.  The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is fairly clear and simple:  Any existing entry in the
  // primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.

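  // A sketch of that update, assuming the shape of Set() in the matching
  // .cc file (illustrative, not the verbatim implementation):
  //
  //   int primary_offset = PrimaryOffset(name, flags, map);
  //   Entry* primary = entry(primary_, primary_offset);
  //   if (/* the primary slot already holds a live entry */) {
  //     // Demote it to the secondary table before overwriting it.
  //     int secondary_offset = SecondaryOffset(
  //         primary->key, primary->value->flags(), primary_offset);
  //     *entry(secondary_, secondary_offset) = *primary;
  //   }
  //   primary->key = name;
  //   primary->map = map;
  //   primary->value = code;
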
  // Hash algorithm for the primary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4Gb (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // We always set the in_loop bit to zero when generating the lookup code
    // so do it here too so the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    // Base the offset on a simple combination of name, flags, and map.
    uint32_t key = (map_low32bits + field) ^ iflags;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
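  // Worked example with made-up inputs, assuming kCacheIndexShift == 2: if
  // the map pointer's low 32 bits are 0xabcd1230, the hash field is
  // 0x00005678 and iflags is 0, then key == 0xabcd68a8.  The mask is
  // (kPrimaryTableSize - 1) << 2 == 0x1ffc, so the function returns
  // 0xabcd68a8 & 0x1ffc == 0x8a8, i.e. entry index 0x8a8 >> 2 == 554.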

  // Hash algorithm for the secondary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    // We always set the in_loop bit to zero when generating the lookup code
    // so do it here too so the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    uint32_t key = (seed - name_low32bits) + iflags;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }
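  // The |seed| is expected to be the primary-table offset computed for the
  // same lookup (that is how the C++ and generated probes chain the two
  // hashes), so a pair of entries that collides in the primary table is
  // unlikely to also collide here.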

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code.  We generate a hash code that already
  // ends in Name::kHashShift 0s.  Then we multiply it so it is a multiple
  // of sizeof(Entry).  This makes it easier to avoid making mistakes
  // in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
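  // For instance, assuming 64-bit pointers and Name::kHashShift == 2:
  // sizeof(Entry) == 3 * 8 == 24, so multiplier == 24 >> 2 == 6, and an
  // offset that is already a multiple of 1 << 2 becomes offset * 6, a
  // multiple of 24 == sizeof(Entry).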

 private:
  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_