// Adds X64-specific methods for generating operands.
class X64OperandGenerator FINAL : public OperandGenerator {
 public:
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      default:
        return false;
    }
  }

  bool CanBeImmediate64(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates because the GC
        // does not scan code objects when collecting the new generation.
        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
        return !isolate()->heap()->InNewSpace(*value.handle());
      }
      default:
        return false;
    }
  }

  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};
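

// The AddressingModeMatcher below classifies a (base, index) input pair into
// one of the x64 addressing modes (e.g. [base], [base + disp],
// [base + index * scale + disp]) and records the operands that the chosen
// mode needs.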


class AddressingModeMatcher {
 public:
  AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index) {
    Int32Matcher index_imm(index);
    if (index_imm.HasValue()) {
      int32_t value = index_imm.Value();
      // An immediate index selects a [register + immediate] mode.
      // ...
    } else {
      Int64Matcher base_imm(base);
      if (!base_imm.HasValue() || base_imm.Value() != 0) {
        base_operand_ = g->UseRegister(base);
      }
      // ...
    }
  }

  // Adjusts a scale-1 mode to the actual scale factor (1 << power).
  AddressingMode GetMode(AddressingMode one, int power) {
    return static_cast<AddressingMode>(static_cast<int>(one) + power);
  }

  // Copies the matched operands into an instruction's input list.
  size_t SetInputs(InstructionOperand** inputs) {
    size_t input_count = 0;
    // ...
    return input_count;
  }

  static const int kMaxInputCount = 3;
  InstructionOperand* base_operand_;
  InstructionOperand* index_operand_;
  InstructionOperand* displacement_operand_;
  AddressingMode mode_;
};
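

// In VisitLoad below, the operation and the addressing mode are packed into a
// single InstructionCode: the ArchOpcode selects the move variant (e.g. a
// sign- or zero-extending byte load), and AddressingModeField::encode() adds
// the mode chosen by the AddressingModeMatcher.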


void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (rep) {
    // ...
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
      break;
    // ...
    default:
      UNREACHABLE();
      return;
  }

  X64OperandGenerator g(this);
  AddressingModeMatcher matcher(&g, base, index);
  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
  size_t input_count = matcher.SetInputs(inputs);
  Emit(code, 1, outputs, input_count, inputs);
}
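

// VisitStore below distinguishes two cases: a tagged store that needs a full
// write barrier is emitted as kX64StoreWriteBarrier with its operands pinned
// to fixed registers (rbx, rcx, rdx), while all other stores go through the
// regular addressing-mode path with the value as the last input.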


void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    // The write-barrier stub works on fixed registers.
    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
         temps);
    return;
  }

  ArchOpcode opcode;
  switch (RepresentationOf(store_rep.machine_type)) {
    // ... (one store opcode per representation, mirroring VisitLoad)
  }

  InstructionOperand* val;
  if (g.CanBeImmediate(value)) {
    val = g.UseImmediate(value);
  } else {
    val = g.UseRegister(value);
  }

  AddressingModeMatcher matcher(&g, base, index);
  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
  size_t input_count = matcher.SetInputs(inputs);
  inputs[input_count++] = val;
  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
}
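

// Most x64 ALU instructions are two-address: the destination is also the left
// operand. VisitBinop therefore defines its output same-as-first, and swaps
// commutative operands when the right one is about to die. The
// FlagsContinuation allows the same selection logic to either branch on the
// resulting flags or materialize them into a boolean register.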


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.Use(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.Use(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kX64And32);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    // xor(x, -1) is a bitwise negation; emit a one-operand not.
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}
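

// The shift helpers below exploit that x64 masks a variable shift count to
// the low 5 bits (32-bit shifts) or 6 bits (64-bit shifts), so an explicit
// 'count & 0x1F' or 'count & 0x3F' in the graph is redundant and can be
// stripped. A variable count must be placed in rcx, the only register x64
// shifts accept.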


// Shared routine for multiple 32-bit shift operations.
static void VisitWord32Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int32BinopMatcher m(node);
    if (m.right().IsWord32And()) {
      Int32BinopMatcher mright(right);
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
static void VisitWord64Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int64BinopMatcher m(node);
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kX64Add32);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    // 0 - x is a negation; emit a one-operand neg.
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub32);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kX64Sub);
  }
}


// Shared routine for multiple multiply operations.
static void VisitMul(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}
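

// Note the asymmetry above: the immediate form may define the result in a
// fresh register because multiply-by-immediate is a three-operand instruction
// on x64 (imul dst, src, imm), while the register form is two-address and
// must reuse its left operand.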


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


// Shared routine for multiple division operations.
static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
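

// x64 division pins its registers: the dividend lives in rdx:rax, the
// quotient is returned in rax, and rdx is clobbered by the remainder, which
// is why VisitDiv above reserves rdx as a temp.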


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


// Shared routine for multiple modulus operations.
static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rax)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
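

// The modulus visitors below reuse the division opcodes; the only difference
// from VisitDiv is that the result is taken from rdx (the remainder) rather
// than rax.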


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtss2sd, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSECvtsd2ss, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  // kSSEFloat64Mod needs rax as a scratch register.
  InstructionOperand* temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
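

// The overflow-checked visitors below forward the caller's FlagsContinuation
// to VisitBinop, so e.g. an Int32AddWithOverflow fuses into a single add32
// whose overflow flag directly feeds the branch or the boolean result.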


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kX64Add32, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kX64Sub32, cont);
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}
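

// A compare only sets flags. When the continuation is a branch, the
// comparison above becomes the block-ending control instruction and carries
// the branch targets as extra label inputs.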


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on the left or right side of the comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kX64Test32, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt64Sub:
      return VisitWordCompare(this, node, kX64Cmp, cont, false);
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kX64Test, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}
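

// Call selection is largely platform-independent: the CallBuffer computes the
// operand assignments, and the x64-specific part is mainly pushing the stack
// arguments in reverse order with kX64Push.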


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  X64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor = GetFrameStateDescriptor(
        call->InputAt(static_cast<int>(descriptor->InputCount())));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true);

  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    Emit(kX64Push, NULL,
         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}