V8 Project — generated documentation
stub-cache.h (src/stub-cache.h)
Annotated source listing of this file follows.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_STUB_CACHE_H_
6 #define V8_STUB_CACHE_H_
7 
8 #include "src/macro-assembler.h"
9 
10 namespace v8 {
11 namespace internal {
12 
13 
14 // The stub cache is used for megamorphic property accesses.
15 // It maps (map, name, type) to property access handlers. The cache does not
16 // need explicit invalidation when a prototype chain is modified, since the
17 // handlers verify the chain.
18 
19 
21  public:
 // Raw address of the stub cache table field this reference points at
 // (created from &entry->key/map/value — see the *_reference helpers).
 22  Address address() const { return address_; }
23 
24  private:
26 
28 
29  friend class StubCache;
30 };
31 
32 
33 class StubCache {
34  public:
35  struct Entry {
38  Map* map;
39  };
40 
41  void Initialize();
42  // Access cache for entry hash(name, map).
43  Code* Set(Name* name, Map* map, Code* code);
45  // Clear the lookup table (@ mark compact collection).
46  void Clear();
47  // Collect all maps that match the name and flags.
48  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
49  Code::Flags flags, Handle<Context> native_context,
50  Zone* zone);
51  // Generate code for probing the stub cache table.
52  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
53  // registers. Set to no_reg if not needed.
54  // If leave_frame is true, then exit a frame before the tail call.
55  void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
56  Register receiver, Register name, Register scratch,
57  Register extra, Register extra2 = no_reg,
58  Register extra3 = no_reg);
59 
61 
63  return SCTableReference(
64  reinterpret_cast<Address>(&first_entry(table)->key));
65  }
66 
68  return SCTableReference(
69  reinterpret_cast<Address>(&first_entry(table)->map));
70  }
71 
73  return SCTableReference(
74  reinterpret_cast<Address>(&first_entry(table)->value));
75  }
76 
78  switch (table) {
80  return StubCache::primary_;
82  return StubCache::secondary_;
83  }
84  UNREACHABLE();
85  return NULL;
86  }
87 
 // Returns the isolate this stub cache belongs to (set at construction;
 // the constructor is private and only Isolate creates instances).
 88  Isolate* isolate() { return isolate_; }
89 
90  // Setting the entry size such that the index is shifted by Name::kHashShift
91  // is convenient; shifting down the length field (to extract the hash code)
92  // automatically discards the hash bit field.
93  static const int kCacheIndexShift = Name::kHashShift;
94 
95  private:
96  explicit StubCache(Isolate* isolate);
97 
98  // The stub cache has a primary and secondary level. The two levels have
99  // different hashing algorithms in order to avoid simultaneous collisions
100  // in both caches. Unlike a probing strategy (quadratic or otherwise) the
101  // update strategy on updates is fairly clear and simple: Any existing entry
102  // in the primary cache is moved to the secondary cache, and secondary cache
103  // entries are overwritten.
104 
105  // Hash algorithm for the primary table. This algorithm is replicated in
106  // assembler for every architecture. Returns an index into the table that
107  // is scaled by 1 << kCacheIndexShift.
110  // Compute the hash of the name (use entire hash field).
111  DCHECK(name->HasHashCode());
112  uint32_t field = name->hash_field();
113  // Using only the low bits in 64-bit mode is unlikely to increase the
114  // risk of collision even if the heap is spread over an area larger than
115  // 4Gb (and not at all if it isn't).
116  uint32_t map_low32bits =
117  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
118  // We always set the in_loop bit to zero when generating the lookup code
119  // so do it here too so the hash codes match.
120  uint32_t iflags =
121  (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
122  // Base the offset on a simple combination of name, flags, and map.
123  uint32_t key = (map_low32bits + field) ^ iflags;
124  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
125  }
126 
127  // Hash algorithm for the secondary table. This algorithm is replicated in
128  // assembler for every architecture. Returns an index into the table that
129  // is scaled by 1 << kCacheIndexShift.
130  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
131  // Use the seed from the primary cache in the secondary cache.
132  uint32_t name_low32bits =
133  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
134  // We always set the in_loop bit to zero when generating the lookup code
135  // so do it here too so the hash codes match.
136  uint32_t iflags =
137  (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
138  uint32_t key = (seed - name_low32bits) + iflags;
139  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
140  }
141 
142  // Compute the entry for a given offset in exactly the same way as
143  // we do in generated code. We generate an hash code that already
144  // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
145  // of sizeof(Entry). This makes it easier to avoid making mistakes
146  // in the hashed offset computations.
147  static Entry* entry(Entry* table, int offset) {
148  const int multiplier = sizeof(*table) >> Name::kHashShift;
149  return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
150  offset * multiplier);
151  }
152 
153  static const int kPrimaryTableBits = 11;
154  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
155  static const int kSecondaryTableBits = 9;
156  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
157 
158  private:
162 
163  friend class Isolate;
164  friend class SCTableReference;
165 
167 };
168 }
169 } // namespace v8::internal
170 
171 #endif // V8_STUB_CACHE_H_
static const int kFlagsNotUsedInLookup
Definition: objects.h:5448
static const int kHashShift
Definition: objects.h:8499
SCTableReference(Address address)
Definition: stub-cache.h:25
static const int kCacheIndexShift
Definition: stub-cache.h:93
static Entry * entry(Entry *table, int offset)
Definition: stub-cache.h:147
static const int kPrimaryTableBits
Definition: stub-cache.h:153
SCTableReference value_reference(StubCache::Table table)
Definition: stub-cache.h:72
DISALLOW_COPY_AND_ASSIGN(StubCache)
Entry secondary_[kSecondaryTableSize]
Definition: stub-cache.h:160
static const int kSecondaryTableBits
Definition: stub-cache.h:155
StubCache(Isolate *isolate)
Definition: stub-cache.cc:15
Code * Get(Name *name, Map *map, Code::Flags flags)
Definition: stub-cache.cc:77
SCTableReference key_reference(StubCache::Table table)
Definition: stub-cache.h:62
void GenerateProbe(MacroAssembler *masm, Code::Flags flags, bool leave_frame, Register receiver, Register name, Register scratch, Register extra, Register extra2=no_reg, Register extra3=no_reg)
SCTableReference map_reference(StubCache::Table table)
Definition: stub-cache.h:67
static int PrimaryOffset(Name *name, Code::Flags flags, Map *map)
Definition: stub-cache.h:108
static const int kSecondaryTableSize
Definition: stub-cache.h:156
StubCache::Entry * first_entry(StubCache::Table table)
Definition: stub-cache.h:77
static const int kPrimaryTableSize
Definition: stub-cache.h:154
static int SecondaryOffset(Name *name, Code::Flags flags, int seed)
Definition: stub-cache.h:130
Entry primary_[kPrimaryTableSize]
Definition: stub-cache.h:159
friend class SCTableReference
Definition: stub-cache.h:164
Code * Set(Name *name, Map *map, Code *code)
Definition: stub-cache.cc:48
void CollectMatchingMaps(SmallMapList *types, Handle< Name > name, Code::Flags flags, Handle< Context > native_context, Zone *zone)
Definition: stub-cache.cc:108
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf map
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in name
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
byte * Address
Definition: globals.h:101
const Register no_reg
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20