// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

class SmallMapList;

// The stub cache is used for megamorphic property accesses.
// It maps (map, name) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.
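//
// Roughly (an informal sketch, not a spec): when an IC site has seen more
// receiver maps than it is willing to track, it goes megamorphic and probes
// this cache with the receiver's map and the property name (see Get/Set and
// GenerateProbe below). A hit yields a handler that re-checks its own
// assumptions, so a stale entry simply behaves like a miss.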


class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Object* value;
    Map* map;
  };

  void Initialize();
  // Access cache for entry hash(name, map).
  Object* Set(Name* name, Map* map, Object* handler);
  Object* Get(Name* name, Map* map);
  // Clear the lookup table (at mark-compact collection).
  void Clear();
  // Collect all maps that match the name.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Handle<Context> native_context, Zone* zone);
  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers; set them to no_reg if not needed.
  void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
                     Register scratch, Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);
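  // Typically the emitted probe jumps to the cached handler on a hit and
  // falls through on a miss, leaving miss handling to the caller (this
  // describes the usual per-architecture implementation, not this header).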

  enum Table { kPrimary, kSecondary };

  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }
  Code::Kind ic_kind() const { return ic_kind_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;
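  // Consequently, PrimaryOffset and SecondaryOffset below return values of
  // the form (table index) << kCacheIndexShift, i.e. with kCacheIndexShift
  // zero low bits; entry() turns such an offset into a byte offset with a
  // single multiply.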

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

  // Some magic numbers used in the primary and secondary hash computations.
  static const int kPrimaryMagic = 0x3d532433;
  static const int kSecondaryMagic = 0xb16b00b5;

  static int PrimaryOffsetForTesting(Name* name, Map* map) {
    return PrimaryOffset(name, map);
  }

  static int SecondaryOffsetForTesting(Name* name, int seed) {
    return SecondaryOffset(name, seed);
  }

  // The constructor is made public only for the purposes of testing.
  StubCache(Isolate* isolate, Code::Kind ic_kind);

 private:
  // The stub cache has a primary and secondary level.  The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is fairly clear and simple:  Any existing entry in the
  // primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.
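  //
  // Concretely (a sketch of the usual Set() behaviour, not defined in this
  // header): installing a handler computes the primary slot first; if that
  // slot already holds a useful entry, the old entry is retired into the
  // secondary table at SecondaryOffset(old name, primary offset), clobbering
  // the previous occupant, and the new entry then takes the primary slot.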

  // Hash algorithm for the primary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4Gb (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // Base the offset on a simple combination of name and map.
    uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
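  // Worked example with made-up inputs: if the map's low 32 bits are
  // 0x12340000 and the name's hash field is 0x0000abcd, then
  // key == 0x1234abcd ^ kPrimaryMagic == 0x2f678ffe, of which the mask keeps
  // kPrimaryTableBits bits starting at bit kCacheIndexShift.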

  // Hash algorithm for the secondary table.  This algorithm is replicated in
  // assembler for every architecture.  Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }
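  // Note that, unlike PrimaryOffset, this hash uses the name's pointer rather
  // than its hash field, and the map influences the result only through the
  // seed (in practice the primary-table offset for the same name and map).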

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code.  We generate a hash code that already
  // ends in Name::kHashShift 0s.  Then we multiply it so it is a multiple
  // of sizeof(Entry).  This makes it easier to avoid making mistakes
  // in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
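  // E.g. for offset == i << Name::kHashShift, offset * multiplier equals
  // i * sizeof(Entry) as long as sizeof(Entry) is a multiple of
  // 1 << Name::kHashShift, so the result points at the i-th Entry of table.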

 private:
  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;
  Code::Kind ic_kind_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_