instruction-selector-x64.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* TempRegister(Register reg) {
    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                           Register::ToAllocationIndex(reg));
  }

  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      default:
        return false;
    }
  }

  bool CanBeImmediate64(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      case IrOpcode::kNumberConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new
        // generation.
        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
        return !isolate()->heap()->InNewSpace(*value.handle());
      }
      default:
        return false;
    }
  }

  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};

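// Matches the (base, index) inputs of loads and stores against the x64
// addressing modes, deciding which operands (base register, index register,
// immediate displacement) to emit and which AddressingMode to encode into the
// instruction.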
class AddressingModeMatcher {
 public:
  AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
      : base_operand_(NULL),
        index_operand_(NULL),
        displacement_operand_(NULL),
        mode_(kMode_None) {
    Int32Matcher index_imm(index);
    if (index_imm.HasValue()) {
      int32_t value = index_imm.Value();
      if (value == 0) {
        mode_ = kMode_MR;
      } else {
        mode_ = kMode_MRI;
        index_operand_ = g->UseImmediate(index);
      }
      base_operand_ = g->UseRegister(base);
    } else {
      // Compute base operand.
      Int64Matcher base_imm(base);
      if (!base_imm.HasValue() || base_imm.Value() != 0) {
        base_operand_ = g->UseRegister(base);
      }
      // Compute index and displacement.
      IndexAndDisplacementMatcher matcher(index);
      index_operand_ = g->UseRegister(matcher.index_node());
      if (matcher.displacement() != 0) {
        displacement_operand_ = g->TempImmediate(matcher.displacement());
      }
      // Compute mode with scale factor one.
      if (base_operand_ == NULL) {
        if (displacement_operand_ == NULL) {
          mode_ = kMode_M1;
        } else {
          mode_ = kMode_M1I;
        }
      } else {
        if (displacement_operand_ == NULL) {
          mode_ = kMode_MR1;
        } else {
          mode_ = kMode_MR1I;
        }
      }
      // Adjust mode to actual scale factor.
      mode_ = GetMode(mode_, matcher.power());
    }
    DCHECK_NE(kMode_None, mode_);
  }

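  // Converts a scale-factor-one mode to the mode for the actual scale factor.
  // This relies on the AddressingMode enum laying out the power-of-two scale
  // variants of each mode consecutively, so the scale's log2 can simply be
  // added to the base mode.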
  static AddressingMode GetMode(AddressingMode one, int power) {
    return static_cast<AddressingMode>(static_cast<int>(one) + power);
  }

  size_t SetInputs(InstructionOperand** inputs) {
    size_t input_count = 0;
    // Compute inputs and input_count.
    if (base_operand_ != NULL) {
      inputs[input_count++] = base_operand_;
    }
    if (index_operand_ != NULL) {
      inputs[input_count++] = index_operand_;
    }
    if (displacement_operand_ != NULL) {
      // Pure displacement mode not supported by x64.
      DCHECK_NE(input_count, 0);
      inputs[input_count++] = displacement_operand_;
    }
    DCHECK_NE(input_count, 0);
    return input_count;
  }

  static const int kMaxInputCount = 3;

  InstructionOperand* base_operand_;
  InstructionOperand* index_operand_;
  InstructionOperand* displacement_operand_;
  AddressingMode mode_;
};

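// Loads fold the addressing mode into the mov-family opcode; the width and
// sign/zero extension of the load are chosen from the representation and type
// of the loaded value.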
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  // TODO(titzer): signed/unsigned small loads
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }

  X64OperandGenerator g(this);
  AddressingModeMatcher matcher(&g, base, index);
  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
  size_t input_count = matcher.SetInputs(inputs);
  Emit(code, 1, outputs, input_count, inputs);
}

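// Stores of tagged values that require a full write barrier go through the
// fixed-register kX64StoreWriteBarrier stub; all other stores pick a mov of
// the appropriate width and fold the addressing mode into it, using an
// immediate for the stored value when possible.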
void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kX64Movss;
      break;
    case kRepFloat64:
      opcode = kX64Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kX64Movb;
      break;
    case kRepWord16:
      opcode = kX64Movw;
      break;
    case kRepWord32:
      opcode = kX64Movl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kX64Movq;
      break;
    default:
      UNREACHABLE();
      return;
  }

  InstructionOperand* val;
  if (g.CanBeImmediate(value)) {
    val = g.UseImmediate(value);
  } else {
    val = g.UseRegister(value);
  }

  AddressingModeMatcher matcher(&g, base, index);
  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
  size_t input_count = matcher.SetInputs(inputs);
  inputs[input_count++] = val;
  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}


// Shared routine for multiple binary operations.
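// x64 ALU instructions are two-address (dest = dest op src), hence the
// DefineSameAsFirst output constraint. A branch continuation appends the
// true/false target labels as extra inputs; a set continuation adds a second
// output that receives the materialized boolean.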
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.Use(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}

// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kX64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}

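// xor with an all-ones immediate is lowered to a one's-complement not.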
void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}


// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
static void VisitWord32Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
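    // Variable 32-bit shift counts are implicitly masked to 5 bits by the
    // hardware, so an explicit "count & 0x1F" is redundant and can be
    // stripped.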
    Int32BinopMatcher m(node);
    if (m.right().IsWord32And()) {
      Int32BinopMatcher mright(right);
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
static void VisitWord64Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
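    // Variable 64-bit shift counts are implicitly masked to 6 bits by the
    // hardware, so an explicit "count & 0x3F" is redundant and can be
    // stripped.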
    Int64BinopMatcher m(node);
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}

void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}

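// Subtraction from zero is lowered to a single neg instruction.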
void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}

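// Multiplication by an immediate uses the three-operand form of imul, which
// can write to any register; otherwise the two-address form is used and the
// output is constrained to the first input.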
static void VisitMul(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}

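// idiv/div read the dividend from rdx:rax and leave the quotient in rax and
// the remainder in rdx, so the inputs and output are fixed accordingly and
// rdx is reserved as a temp to mark it clobbered.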
static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}

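// Modulus reuses the same division opcodes but takes its result from the
// remainder register rdx; rax is only used internally and is reserved as a
// temp.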
static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  // TODO(turbofan): X64 SSE conversions should take an operand.
  Emit(kSSECvtss2sd, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

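// On x64 a 32-bit mov implicitly zero-extends into the full 64-bit register,
// so zero extension is just a 32-bit move.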
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  // TODO(turbofan): X64 SSE conversions should take an operand.
  Emit(kSSECvtsd2ss, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

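// Truncation to 32 bits is also a plain 32-bit mov, which drops the upper 32
// bits of the source value.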
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

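// Float64 modulus has no SSE instruction; it expands to a longer code
// sequence that needs rax as a scratch register.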
void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand* temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kX64Add32, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kX64Sub32, cont);
}

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}

// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}

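// A word test compares a value against zero. A sub feeding the test is turned
// into a cmp of its operands and an and into a test; otherwise the value is
// tested against an all-ones mask, which sets the zero flag iff the value is
// zero.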
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kX64Test32, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt64Sub:
      return VisitWordCompare(this, node, kX64Cmp, cont, false);
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kX64Test, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}

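// Lowers a call node: the CallBuffer computes the operands for register and
// stack arguments, stack arguments are pushed first, and the remaining
// operands are attached to the call instruction itself.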
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  X64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        call->InputAt(static_cast<int>(descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true);

  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    // TODO(titzer): handle pushing double parameters.
    Emit(kX64Push, NULL,
         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8