// Immediate operands exercised by the tests below (includes boundary values).
static const int32_t kImmediates[] = {
    kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f,
    kMaxInt};
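
// Adding two register parameters should select a single kIA32Add.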
m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
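
// A constant right operand is folded into immediate input 1 of kIA32Add.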
m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
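
// Int32Add is commutative, so a constant left operand is folded the same way.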
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
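
// Subtracting two register parameters selects kIA32Sub with a single output.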
m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->OutputCount());
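
// A constant subtrahend is folded into immediate input 1 of kIA32Sub.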
m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
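
// Floating-point conversions map directly to the SSE conversion instructions.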
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
  StreamBuilder m(this, kMachFloat64, kMachFloat32);
  m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}

TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
  StreamBuilder m(this, kMachFloat32, kMachFloat64);
  m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
  Stream s = m.Build();
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
  EXPECT_EQ(1U, s[0]->InputCount());
  EXPECT_EQ(1U, s[0]->OutputCount());
}
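
// Better left operand for commutative binops: in (p1 + p2) + p1, p1 is used
// again by the outer operation, so the selector should put p2 (whose only use
// is here) into input 0, the slot IA32 ties to the destination register.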
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* add = m.Int32Add(param1, param2);
m.Return(m.Int32Add(add, param1));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
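
// The same left-operand heuristic applies to Int32Mul, which selects kIA32Imul.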
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* mul = m.Int32Mul(param1, param2);
m.Return(m.Int32Mul(mul, param1));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
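
// Memory access tests. Tail of the stream-output helper that prints a
// MemoryAccess entry by its machine type: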
return os << ost.c_str();
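
// The tests below are parameterized over a kMemoryAccesses table that pairs
// each MachineType with its IA32 load and store opcodes.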
typedef InstructionSelectorTestWithParam<MemoryAccess>
    InstructionSelectorMemoryAccessTest;
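
// A load from a register base with a register index selects a single
// instruction with the type-specific load opcode, two inputs and one output.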
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
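
// With a constant base, the constant is folded into an immediate input unless
// it is zero, in which case only the index register remains.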
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
if (base == 0) {
  ASSERT_EQ(1U, s[0]->InputCount());
} else {
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
}
EXPECT_EQ(1U, s[0]->OutputCount());
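
// The same folding applies to a constant index.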
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, memacc.type, kMachPtr);
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
if (index == 0) {
  ASSERT_EQ(1U, s[0]->InputCount());
} else {
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
}
EXPECT_EQ(1U, s[0]->OutputCount());
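
// A store of register operands selects a single instruction with the
// type-specific store opcode, three inputs (base, index, value) and no outputs.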
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(0U, s[0]->OutputCount());
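
// A constant store base is folded into an immediate input unless it is zero.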
const MemoryAccess memacc = GetParam();
m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
if (base == 0) {
  ASSERT_EQ(2U, s[0]->InputCount());
} else {
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
}
EXPECT_EQ(0U, s[0]->OutputCount());
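
// Likewise for a constant store index.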
const MemoryAccess memacc = GetParam();
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
if (index == 0) {
  ASSERT_EQ(2U, s[0]->InputCount());
} else {
  ASSERT_EQ(3U, s[0]->InputCount());
  EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
}
EXPECT_EQ(0U, s[0]->OutputCount());
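
// Addressing-mode tests. Each test calls the fixture's Run(base, index, mode)
// helper, which builds two memory accesses from the given base and index and
// checks that both selected instructions use the expected addressing mode.
// Tail of that helper: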
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(mode, s[0]->addressing_mode());
EXPECT_EQ(mode, s[1]->addressing_mode());
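
// Fixture state used below: a zero constant, a null-pointer base, opaque
// base_reg/index_reg values that stay in registers, a non-zero constant, and
// the scale factors 1, 2, 4 and 8.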
zero = m->Int32Constant(0);
scales[0] = m->Int32Constant(1);
scales[1] = m->Int32Constant(2);
scales[2] = m->Int32Constant(4);
scales[3] = m->Int32Constant(8);
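
// Register base, zero index: plain [base] (kMode_MR).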
Node* base = base_reg;
Node* index = zero;
Run(base, index, kMode_MR);
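
// Register base with a non-zero constant index: [base + imm] (kMode_MRI).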
Node* base = base_reg;
Node* index = non_zero;
Run(base, index, kMode_MRI);
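
// Register base and register index: [base + index * 1] (kMode_MR1).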
Node* base = base_reg;
Node* index = index_reg;
Run(base, index, kMode_MR1);
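
// Index scaled by 1, 2, 4 or 8: [base + index * N] (kMode_MRn).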
AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
for (size_t i = 0; i < arraysize(scales); ++i) {
  Reset();
  Node* base = base_reg;
  Node* index = m->Int32Mul(index_reg, scales[i]);
  Run(base, index, expected[i]);
}
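
// Register base plus (index + constant): [base + index * 1 + imm] (kMode_MR1I).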
Node* base = base_reg;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_MR1I);
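
// Scaled index plus a constant displacement: kMode_MRnI.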
AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
for (size_t i = 0; i < arraysize(scales); ++i) {
  Reset();
  Node* base = base_reg;
  Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
  Run(base, index, expected[i]);
}
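
// No base (null pointer): a plain register index takes the base slot, so
// kMode_MR is selected rather than a scaled mode.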
Node* base = null_ptr;
Node* index = index_reg;
Run(base, index, kMode_MR);
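
// No base with a scaled index: kMode_M2/M4/M8; scale 1 again collapses to
// kMode_MR.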
AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
for (size_t i = 0; i < arraysize(scales); ++i) {
  Reset();
  Node* base = null_ptr;
  Node* index = m->Int32Mul(index_reg, scales[i]);
  Run(base, index, expected[i]);
}
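
// No base, register index plus constant: the index takes the base slot, giving
// kMode_MRI.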
Node* base = null_ptr;
Node* index = m->Int32Add(index_reg, non_zero);
Run(base, index, kMode_MRI);
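
// No base, scaled index plus constant: kMode_M2I/M4I/M8I; scale 1 collapses to
// kMode_MRI.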
AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
for (size_t i = 0; i < arraysize(scales); ++i) {
  Reset();
  Node* base = null_ptr;
  Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
  Run(base, index, expected[i]);
}
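
// Base and index both constant (every combination of the null/non-zero base
// and zero/non-zero index) selects the absolute kMode_MI.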
Node* bases[] = {null_ptr, non_zero};
Node* indices[] = {zero, non_zero};
for (size_t i = 0; i < arraysize(bases); ++i) {
  for (size_t j = 0; j < arraysize(indices); ++j) {
    Reset();
    Node* base = bases[i];
    Node* index = indices[j];
    Run(base, index, kMode_MI);
  }
}