stub-cache-mips64.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
                       Code::Flags flags, bool leave_frame,
                       StubCache::Table table, Register receiver, Register name,
                       // Number of the cache entry, not scaled.
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
  uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
  uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ dsll(offset_scratch, offset, 1);
  __ Daddu(offset_scratch, offset_scratch, offset);

  // Calculate the base address of the entry.
  __ li(base_addr, Operand(key_offset));
  __ dsll(at, offset_scratch, kPointerSizeLog2);
  __ Daddu(base_addr, base_addr, at);

  // Check that the key in the entry matches the name.
  __ ld(at, MemOperand(base_addr, 0));
  __ Branch(&miss, ne, name, Operand(at));

  // Check the map matches.
  __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Branch(&miss, ne, at, Operand(scratch2));

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
  __ Branch(&miss, ne, flags_reg, Operand(flags));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);

  // Jump to the first instruction in the code stub.
  __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // Miss: fall through.
  __ bind(&miss);
}

void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
                              bool leave_frame, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  // DCHECK(sizeof(Entry) == 12);
  // DCHECK(sizeof(Entry) == 3 * kPointerSize);

  // Make sure the flags do not name a specific type.
  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));
  DCHECK(!extra.is(receiver));
  DCHECK(!extra.is(name));
  DCHECK(!extra.is(scratch));
  DCHECK(!extra2.is(receiver));
  DCHECK(!extra2.is(name));
  DCHECK(!extra2.is(scratch));
  DCHECK(!extra2.is(extra));

  // Check register validity.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Daddu(scratch, scratch, at);
  uint64_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ dsrl(scratch, scratch, kCacheIndexShift);
  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
  __ And(scratch, scratch, Operand(mask));

  // Probe the primary table.
  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
             scratch, extra, extra2, extra3);

  // Primary miss: Compute hash for secondary probe.
  __ dsrl(at, name, kCacheIndexShift);
  __ Dsubu(scratch, scratch, at);
  uint64_t mask2 = kSecondaryTableSize - 1;
  __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
  __ And(scratch, scratch, Operand(mask2));

  // Probe the secondary table.
  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
             scratch, extra, extra2, extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}

#undef __
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64
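
Editor's sketch (not part of the V8 source): the plain C++ below models the probe-offset arithmetic that the hand-written assembly above emits, for readers who find the MacroAssembler form hard to follow. Everything in it is illustrative: the names sketch::Entry, PrimaryOffset, SecondaryOffset and EntryAt are invented for this note, and the kCacheIndexShift value and table sizes are assumptions consistent with the comments above rather than the real declarations in stub-cache.h. Only the shift/xor/add/mask structure is taken from GenerateProbe and ProbeTable.

#include <cstdint>

namespace sketch {

// Assumed constants; the real values live in src/ic/stub-cache.h.
static const int kCacheIndexShift = 2;       // drop the low 2 bits ("always 01 for maps")
static const int kPrimaryTableSize = 2048;   // assumed power of two
static const int kSecondaryTableSize = 512;  // assumed power of two

// One cache entry is three machine words (name, code, map), which is why the
// assembly multiplies the entry index by 3 before scaling by kPointerSizeLog2.
struct Entry {
  uint64_t key;    // Name*
  uint64_t value;  // Code*
  uint64_t map;    // Map*
};

// Primary probe index: (name hash + map address) >> shift, xor'ed with the
// shifted code flags, masked to the table size (mirrors GenerateProbe).
inline uint64_t PrimaryOffset(uint64_t name_hash_field, uint64_t map_addr,
                              uint32_t flags) {
  uint64_t mask = kPrimaryTableSize - 1;
  uint64_t scratch = (name_hash_field + map_addr) >> kCacheIndexShift;
  return (scratch ^ ((flags >> kCacheIndexShift) & mask)) & mask;
}

// Secondary probe index: primary index minus the shifted name pointer, plus
// the shifted flags, masked to the secondary table size.
inline uint64_t SecondaryOffset(uint64_t primary, uint64_t name_addr,
                                uint32_t flags) {
  uint64_t mask2 = kSecondaryTableSize - 1;
  uint64_t scratch = primary - (name_addr >> kCacheIndexShift);
  return (scratch + ((flags >> kCacheIndexShift) & mask2)) & mask2;
}

// ProbeTable then addresses the entry at table_base + offset * sizeof(Entry).
inline const Entry* EntryAt(const Entry* table_base, uint64_t offset) {
  return table_base + offset;
}

}  // namespace sketch

Deriving the secondary index from the primary one gives the cache a cheap second chance: a (receiver map, name) pair that collides in the primary table usually lands in a different secondary slot before the generic miss handler runs.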