V8 Project
stub-cache-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
                       Code::Flags flags, bool leave_frame,
                       StubCache::Table table, Register receiver, Register name,
                       // Number of the cache entry, not scaled.
                       Register offset, Register scratch, Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  DCHECK(value_off_addr > key_off_addr);
  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
  DCHECK(map_off_addr > key_off_addr);
  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
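  // These deltas are used below as immediate offsets in single ldr
  // instructions, so they must be word-aligned and small.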

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ add(offset_scratch, offset, Operand(offset, LSL, 1));

  // Calculate the base address of the entry.
  __ mov(base_addr, Operand(key_offset));
  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
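  // base_addr now points at the key field of entry number 'offset':
  // key_offset + (offset * 3) * kPointerSize. The add above computed
  // offset * 3 as offset + (offset << 1).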

  // Check that the key in the entry matches the name.
  __ ldr(ip, MemOperand(base_addr, 0));
  __ cmp(name, ip);
  __ b(ne, &miss);

  // Check the map matches.
  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ cmp(ip, scratch2);
  __ b(ne, &miss);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
  // It's a nice optimization if this constant is encodable in the bic insn.
  uint32_t mask = Code::kFlagsNotUsedInLookup;
  DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
  __ bic(flags_reg, flags_reg, Operand(mask));
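  // flags_reg now holds only the flag bits that take part in the lookup,
  // ready to be compared against the target flags.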
  __ cmp(flags_reg, Operand(flags));
  __ b(ne, &miss);

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);

  // Jump to the first instruction in the code stub.
  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
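  // 'code' holds a tagged Code pointer; adding kHeaderSize - kHeapObjectTag
  // yields the address of the first instruction, and writing the result
  // directly to pc performs the tail call.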

  // Miss: fall through.
  __ bind(&miss);
}


void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
                              bool leave_frame, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  DCHECK(sizeof(Entry) == 12);
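  // An entry is three pointer-sized fields (name, code, map), i.e. 3 * 4
  // bytes on 32-bit ARM, which is what ProbeTable's multiply-by-3 relies on.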

  // Make sure the flags do not name a specific type.
  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));
  DCHECK(!extra.is(receiver));
  DCHECK(!extra.is(name));
  DCHECK(!extra.is(scratch));
  DCHECK(!extra2.is(receiver));
  DCHECK(!extra2.is(name));
  DCHECK(!extra2.is(scratch));
  DCHECK(!extra2.is(extra));

  // Check scratch, extra, extra2 and extra3 registers are valid.
  DCHECK(!scratch.is(no_reg));
  DCHECK(!extra.is(no_reg));
  DCHECK(!extra2.is(no_reg));
  DCHECK(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                      extra3);

  // Check that the receiver isn't a smi.
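  // A smi is not a heap object and has no map to load below, so it can
  // never match a stub cache entry.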
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ add(scratch, scratch, Operand(ip));
  uint32_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
  // Mask down the eor argument to the minimum to keep the immediate
  // ARM-encodable.
  __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
  __ and_(scratch, scratch, Operand(mask));
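  // In effect, the primary index is:
  //   (((hash_field + map) >> kCacheIndexShift)
  //    ^ (flags >> kCacheIndexShift)) & (kPrimaryTableSize - 1)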

  // Probe the primary table.
  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
             scratch, extra, extra2, extra3);

  // Primary miss: Compute hash for secondary probe.
  __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
  uint32_t mask2 = kSecondaryTableSize - 1;
  __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
  __ and_(scratch, scratch, Operand(mask2));
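  // In effect, the secondary index is:
  //   (primary_index - (name_pointer >> kCacheIndexShift)
  //    + (flags >> kCacheIndexShift)) & (kSecondaryTableSize - 1)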

  // Probe the secondary table.
  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
             scratch, extra, extra2, extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
                      extra3);
}


#undef __
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM