V8 Project
instruction-selector-ia32-unittest.cc
Go to the documentation of this file.
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
7 namespace v8 {
8 namespace internal {
9 namespace compiler {
10 
11 namespace {
12 
13 // Immediates (random subset).
14 static const int32_t kImmediates[] = {
15  kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
16  6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
17 
18 } // namespace
19 
20 
21 TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
22  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
23  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
24  Stream s = m.Build();
25  ASSERT_EQ(1U, s.size());
26  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
27 }
28 
29 
30 TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
31  TRACED_FOREACH(int32_t, imm, kImmediates) {
32  {
33  StreamBuilder m(this, kMachInt32, kMachInt32);
34  m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
35  Stream s = m.Build();
36  ASSERT_EQ(1U, s.size());
37  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
38  ASSERT_EQ(2U, s[0]->InputCount());
39  EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
40  }
41  {
42  StreamBuilder m(this, kMachInt32, kMachInt32);
43  m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
44  Stream s = m.Build();
45  ASSERT_EQ(1U, s.size());
46  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
47  ASSERT_EQ(2U, s[0]->InputCount());
48  EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
49  }
50  }
51 }
52 
53 
54 TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
55  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
56  m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
57  Stream s = m.Build();
58  ASSERT_EQ(1U, s.size());
59  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
60  EXPECT_EQ(1U, s[0]->OutputCount());
61 }
62 
63 
64 TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
65  TRACED_FOREACH(int32_t, imm, kImmediates) {
66  StreamBuilder m(this, kMachInt32, kMachInt32);
67  m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
68  Stream s = m.Build();
69  ASSERT_EQ(1U, s.size());
70  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
71  ASSERT_EQ(2U, s[0]->InputCount());
72  EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
73  }
74 }
75 
76 
77 // -----------------------------------------------------------------------------
78 // Conversions.
79 
80 
81 TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
82  StreamBuilder m(this, kMachFloat32, kMachFloat64);
83  m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
84  Stream s = m.Build();
85  ASSERT_EQ(1U, s.size());
86  EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
87  EXPECT_EQ(1U, s[0]->InputCount());
88  EXPECT_EQ(1U, s[0]->OutputCount());
89 }
90 
91 
92 TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
93  StreamBuilder m(this, kMachFloat64, kMachFloat32);
94  m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
95  Stream s = m.Build();
96  ASSERT_EQ(1U, s.size());
97  EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
98  EXPECT_EQ(1U, s[0]->InputCount());
99  EXPECT_EQ(1U, s[0]->OutputCount());
100 }
101 
102 
103 // -----------------------------------------------------------------------------
104 // Better left operand for commutative binops
105 
106 TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
107  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
108  Node* param1 = m.Parameter(0);
109  Node* param2 = m.Parameter(1);
110  Node* add = m.Int32Add(param1, param2);
111  m.Return(m.Int32Add(add, param1));
112  Stream s = m.Build();
113  ASSERT_EQ(2U, s.size());
114  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
115  ASSERT_EQ(2U, s[0]->InputCount());
116  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
117  EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
118 }
119 
120 
121 TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
122  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
123  Node* param1 = m.Parameter(0);
124  Node* param2 = m.Parameter(1);
125  Node* mul = m.Int32Mul(param1, param2);
126  m.Return(m.Int32Mul(mul, param1));
127  Stream s = m.Build();
128  ASSERT_EQ(2U, s.size());
129  EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
130  ASSERT_EQ(2U, s[0]->InputCount());
131  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
132  EXPECT_EQ(param2->id(), s.ToVreg(s[0]->InputAt(0)));
133 }
134 
135 
136 // -----------------------------------------------------------------------------
137 // Loads and stores
138 
139 namespace {
140 
141 struct MemoryAccess {
145 };
146 
147 
// Pretty-printer used by the value-parameterized test infrastructure: a
// MemoryAccess is identified by its machine type.
// NOTE(review): this goes through v8's OStringStream rather than streaming
// directly — presumably because MachineType's operator<< is defined for
// v8's own OStream and not std::ostream; confirm before simplifying.
std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
  OStringStream ost;
  ost << memacc.type;
  return os << ost.c_str();
}
153 
154 
// Table of {type, load opcode, store opcode} triples covering every machine
// representation: sign/zero-extending byte and word loads, plain 32-bit
// moves, and the SSE moves for the two float widths.
static const MemoryAccess kMemoryAccesses[] = {
    {kMachInt8, kIA32Movsxbl, kIA32Movb},    // sign-extended byte load
    {kMachUint8, kIA32Movzxbl, kIA32Movb},   // zero-extended byte load
    {kMachInt16, kIA32Movsxwl, kIA32Movw},   // sign-extended word load
    {kMachUint16, kIA32Movzxwl, kIA32Movw},  // zero-extended word load
    {kMachInt32, kIA32Movl, kIA32Movl},
    {kMachUint32, kIA32Movl, kIA32Movl},
    {kMachFloat32, kIA32Movss, kIA32Movss},
    {kMachFloat64, kIA32Movsd, kIA32Movsd}};
164 
165 } // namespace
166 
167 
168 typedef InstructionSelectorTestWithParam<MemoryAccess>
170 
171 
172 TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
173  const MemoryAccess memacc = GetParam();
174  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
175  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
176  Stream s = m.Build();
177  ASSERT_EQ(1U, s.size());
178  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
179  EXPECT_EQ(2U, s[0]->InputCount());
180  EXPECT_EQ(1U, s[0]->OutputCount());
181 }
182 
183 
185  const MemoryAccess memacc = GetParam();
186  TRACED_FOREACH(int32_t, base, kImmediates) {
187  StreamBuilder m(this, memacc.type, kMachPtr);
188  m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
189  Stream s = m.Build();
190  ASSERT_EQ(1U, s.size());
191  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
192  if (base == 0) {
193  ASSERT_EQ(1U, s[0]->InputCount());
194  } else {
195  ASSERT_EQ(2U, s[0]->InputCount());
196  ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
197  EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
198  }
199  EXPECT_EQ(1U, s[0]->OutputCount());
200  }
201 }
202 
203 
204 TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
205  const MemoryAccess memacc = GetParam();
206  TRACED_FOREACH(int32_t, index, kImmediates) {
207  StreamBuilder m(this, memacc.type, kMachPtr);
208  m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
209  Stream s = m.Build();
210  ASSERT_EQ(1U, s.size());
211  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
212  if (index == 0) {
213  ASSERT_EQ(1U, s[0]->InputCount());
214  } else {
215  ASSERT_EQ(2U, s[0]->InputCount());
216  ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
217  EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
218  }
219  EXPECT_EQ(1U, s[0]->OutputCount());
220  }
221 }
222 
223 
224 TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
225  const MemoryAccess memacc = GetParam();
226  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
227  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
228  m.Return(m.Int32Constant(0));
229  Stream s = m.Build();
230  ASSERT_EQ(1U, s.size());
231  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
232  EXPECT_EQ(3U, s[0]->InputCount());
233  EXPECT_EQ(0U, s[0]->OutputCount());
234 }
235 
236 
237 TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
238  const MemoryAccess memacc = GetParam();
239  TRACED_FOREACH(int32_t, base, kImmediates) {
240  StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
241  m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
242  m.Return(m.Int32Constant(0));
243  Stream s = m.Build();
244  ASSERT_EQ(1U, s.size());
245  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
246  if (base == 0) {
247  ASSERT_EQ(2U, s[0]->InputCount());
248  } else {
249  ASSERT_EQ(3U, s[0]->InputCount());
250  ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
251  EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
252  }
253  EXPECT_EQ(0U, s[0]->OutputCount());
254  }
255 }
256 
257 
258 TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
259  const MemoryAccess memacc = GetParam();
260  TRACED_FOREACH(int32_t, index, kImmediates) {
261  StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
262  m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
263  m.Parameter(1));
264  m.Return(m.Int32Constant(0));
265  Stream s = m.Build();
266  ASSERT_EQ(1U, s.size());
267  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
268  if (index == 0) {
269  ASSERT_EQ(2U, s[0]->InputCount());
270  } else {
271  ASSERT_EQ(3U, s[0]->InputCount());
272  ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
273  EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
274  }
275  EXPECT_EQ(0U, s[0]->OutputCount());
276  }
277 }
278 
279 
280 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
282  ::testing::ValuesIn(kMemoryAccesses));
283 
284 
285 // -----------------------------------------------------------------------------
286 // AddressingMode for loads and stores.
287 
289  public:
292 
293  void Run(Node* base, Node* index, AddressingMode mode) {
294  Node* load = m->Load(kMachInt32, base, index);
295  m->Store(kMachInt32, base, index, load);
296  m->Return(m->Int32Constant(0));
297  Stream s = m->Build();
298  ASSERT_EQ(2U, s.size());
299  EXPECT_EQ(mode, s[0]->addressing_mode());
300  EXPECT_EQ(mode, s[1]->addressing_mode());
301  }
302 
303  Node* zero;
304  Node* null_ptr;
305  Node* non_zero;
306  Node* base_reg; // opaque value to generate base as register
307  Node* index_reg; // opaque value to generate index as register
308  Node* scales[4];
309  StreamBuilder* m;
310 
311  void Reset() {
312  delete m;
313  m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
314  zero = m->Int32Constant(0);
315  null_ptr = m->Int32Constant(0);
316  non_zero = m->Int32Constant(127);
317  base_reg = m->Parameter(0);
318  index_reg = m->Parameter(0);
319 
320  scales[0] = m->Int32Constant(1);
321  scales[1] = m->Int32Constant(2);
322  scales[2] = m->Int32Constant(4);
323  scales[3] = m->Int32Constant(8);
324  }
325 };
326 
327 
328 TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
329  Node* base = base_reg;
330  Node* index = zero;
331  Run(base, index, kMode_MR);
332 }
333 
334 
335 TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
336  Node* base = base_reg;
337  Node* index = non_zero;
338  Run(base, index, kMode_MRI);
339 }
340 
341 
342 TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
343  Node* base = base_reg;
344  Node* index = index_reg;
345  Run(base, index, kMode_MR1);
346 }
347 
348 
349 TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
350  AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
351  for (size_t i = 0; i < arraysize(scales); ++i) {
352  Reset();
353  Node* base = base_reg;
354  Node* index = m->Int32Mul(index_reg, scales[i]);
355  Run(base, index, expected[i]);
356  }
357 }
358 
359 
360 TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
361  Node* base = base_reg;
362  Node* index = m->Int32Add(index_reg, non_zero);
363  Run(base, index, kMode_MR1I);
364 }
365 
366 
367 TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
368  AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
369  for (size_t i = 0; i < arraysize(scales); ++i) {
370  Reset();
371  Node* base = base_reg;
372  Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
373  Run(base, index, expected[i]);
374  }
375 }
376 
377 
378 TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
379  Node* base = null_ptr;
380  Node* index = index_reg;
381  Run(base, index, kMode_MR);
382 }
383 
384 
385 TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
386  AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
387  for (size_t i = 0; i < arraysize(scales); ++i) {
388  Reset();
389  Node* base = null_ptr;
390  Node* index = m->Int32Mul(index_reg, scales[i]);
391  Run(base, index, expected[i]);
392  }
393 }
394 
395 
396 TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
397  Node* base = null_ptr;
398  Node* index = m->Int32Add(index_reg, non_zero);
399  Run(base, index, kMode_MRI);
400 }
401 
402 
403 TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
404  AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
405  for (size_t i = 0; i < arraysize(scales); ++i) {
406  Reset();
407  Node* base = null_ptr;
408  Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
409  Run(base, index, expected[i]);
410  }
411 }
412 
413 
414 TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
415  Node* bases[] = {null_ptr, non_zero};
416  Node* indices[] = {zero, non_zero};
417  for (size_t i = 0; i < arraysize(bases); ++i) {
418  for (size_t j = 0; j < arraysize(indices); ++j) {
419  Reset();
420  Node* base = bases[i];
421  Node* index = indices[j];
422  Run(base, index, kMode_MI);
423  }
424  }
425 }
426 
427 } // namespace compiler
428 } // namespace internal
429 } // namespace v8
const char * c_str() const
Definition: ostreams.h:84
void Run(Node *base, Node *index, AddressingMode mode)
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define arraysize(array)
Definition: macros.h:86
int int32_t
Definition: unicode.cc:24
std::ostream & operator<<(std::ostream &os, const MachineType &type)
static const MemoryAccess kMemoryAccesses[]
TEST_P(InstructionSelectorDPITest, Parameters)
InstructionSelectorTestWithParam< MemoryAccess > InstructionSelectorMemoryAccessTest
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest, ::testing::ValuesIn(kDPIs))
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter)
const int kMaxInt
Definition: globals.h:109
const int kMinInt
Definition: globals.h:110
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20