// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ic/stub-cache.h"

#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/counters.h"
#include "src/heap/heap.h"
#include "src/ic/ic-inl.h"

namespace v8 {
namespace internal {

StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
  // Ensure the nullptr (aka Smi::kZero) which StubCache::Get() returns
  // when the entry is not found is not considered as a handler.
  DCHECK(!IC::IsHandler(nullptr));
}

void StubCache::Initialize() {
  DCHECK(base::bits::IsPowerOfTwo(kPrimaryTableSize));
  DCHECK(base::bits::IsPowerOfTwo(kSecondaryTableSize));
  Clear();
}

// Hash algorithm for the primary table.  This algorithm is replicated in
// assembler for every architecture.  Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
int StubCache::PrimaryOffset(Name* name, Map* map) {
  STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
  // Compute the hash of the name (use entire hash field).
  DCHECK(name->HasHashCode());
  uint32_t field = name->hash_field();
  // Using only the low bits in 64-bit mode is unlikely to increase the
  // risk of collision even if the heap is spread over an area larger than
  // 4Gb (and not at all if it isn't).
  uint32_t map_low32bits =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
  // Base the offset on a simple combination of name and map.
  uint32_t key = map_low32bits + field;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
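
// Illustrative sketch (editorial addition, not part of the original source).
// The constants below are hypothetical example values, not necessarily the
// real kCacheIndexShift / kPrimaryTableSize: assuming kCacheIndexShift == 2
// and kPrimaryTableSize == 2048, a name with hash_field() == 0x12345678 and
// a map whose low 32 address bits are 0x0000AB00 would be hashed as:
//
//   uint32_t key = 0x0000AB00 + 0x12345678;  // == 0x12350178
//   int offset = key & ((2048 - 1) << 2);    // == key & 0x1FFC == 0x178
//
// Only the low bits of the combined value select a slot, and the mask keeps
// the low kCacheIndexShift bits zero, so the returned offset is already
// scaled to entry boundaries as the comment above states.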

// Hash algorithm for the secondary table.  This algorithm is replicated in
// assembler for every architecture.  Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
int StubCache::SecondaryOffset(Name* name, int seed) {
  // Use the seed from the primary cache in the secondary cache.
  uint32_t name_low32bits =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
  uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
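
// Illustrative sketch (editorial addition, not part of the original source;
// kSecondaryTableSize == 512 and the value M used for kSecondaryMagic are
// purely hypothetical). Continuing the example above with seed == 0x178 and
// a name whose low 32 address bits are 0x00CAFE10:
//
//   uint32_t key = (0x178 - 0x00CAFE10) + M;
//   int offset = key & ((512 - 1) << 2);  // low bits pick one of 512 slots
//
// Because the seed passed in by callers is PrimaryOffset(name, map), the
// secondary slot for a (name, map) pair can be recomputed from the name and
// the primary offset alone; Set() below relies on this when it retires a
// displaced primary entry into the secondary table.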

#ifdef DEBUG
namespace {

bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
                           MaybeObject* handler) {
  // Validate that the name and handler do not move on scavenge, and that we
  // can use identity checks instead of structural equality checks.
  DCHECK(!Heap::InNewSpace(name));
  DCHECK(!Heap::InNewSpace(handler));
  DCHECK(name->IsUniqueName());
  DCHECK(name->HasHashCode());
  if (handler) DCHECK(IC::IsHandler(handler));
  return true;
}

}  // namespace
#endif

MaybeObject* StubCache::Set(Name* name, Map* map, MaybeObject* handler) {
  DCHECK(CommonStubCacheChecks(this, name, map, handler));

  // Compute the primary entry.
  int primary_offset = PrimaryOffset(name, map);
  Entry* primary = entry(primary_, primary_offset);
  MaybeObject* old_handler = primary->value;

  // If the primary entry has useful data in it, we retire it to the
  // secondary cache before overwriting it.
  if (old_handler != MaybeObject::FromObject(
                         isolate_->builtins()->builtin(Builtins::kIllegal))) {
    Map* old_map = primary->map;
    int seed = PrimaryOffset(primary->key, old_map);
    int secondary_offset = SecondaryOffset(primary->key, seed);
    Entry* secondary = entry(secondary_, secondary_offset);
    *secondary = *primary;
  }

  // Update primary cache.
  primary->key = name;
  primary->value = handler;
  primary->map = map;
  isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
  return handler;
}
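
// Usage sketch (editorial addition, not part of the original source; the
// locals Name* name, Map* receiver_map and MaybeObject* handler below are
// hypothetical, and the real call sites live in the IC machinery). Once an
// IC has computed a handler for a given receiver map and property name, it
// can publish it here and probe for it on later megamorphic accesses:
//
//   stub_cache->Set(name, receiver_map, handler);   // cache the handler
//   ...
//   MaybeObject* cached = stub_cache->Get(name, receiver_map);
//   if (cached != nullptr) {
//     // Dispatch through the cached handler.
//   }
//
// A miss returns nullptr (aka Smi::kZero), which the constructor above
// asserts is never itself a valid handler.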

MaybeObject* StubCache::Get(Name* name, Map* map) {
  DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
  // Probe the primary table first; on a miss, fall back to the secondary
  // table, which is seeded with the primary offset.
  int primary_offset = PrimaryOffset(name, map);
  Entry* primary = entry(primary_, primary_offset);
  if (primary->key == name && primary->map == map) {
    return primary->value;
  }
  int secondary_offset = SecondaryOffset(name, primary_offset);
  Entry* secondary = entry(secondary_, secondary_offset);
  if (secondary->key == name && secondary->map == map) {
    return secondary->value;
  }
  return nullptr;
}


void StubCache::Clear() {
  MaybeObject* empty = MaybeObject::FromObject(
      isolate_->builtins()->builtin(Builtins::kIllegal));
  Name* empty_string = ReadOnlyRoots(isolate()).empty_string();
  for (int i = 0; i < kPrimaryTableSize; i++) {
    primary_[i].key = empty_string;
    primary_[i].map = nullptr;
    primary_[i].value = empty;
  }
  for (int j = 0; j < kSecondaryTableSize; j++) {
    secondary_[j].key = empty_string;
    secondary_[j].map = nullptr;
    secondary_[j].value = empty;
  }
}
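
// Note (editorial addition, not part of the original source): a cleared entry
// holds the kIllegal builtin as its value, the empty string as its key and a
// nullptr map. Set() above therefore treats it as holding no useful data and
// never retires it to the secondary table, and Get() can never report it as a
// hit for a real lookup because the map identity check fails against nullptr.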

}  // namespace internal
}  // namespace v8