// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    else
      return false;
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        return Assembler::IsImmLogical(static_cast<uint32_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kLoadStoreImm8:
        return IsLoadStoreImmediate(value, LSByte);
      case kLoadStoreImm16:
        return IsLoadStoreImmediate(value, LSHalfword);
      case kLoadStoreImm32:
        return IsLoadStoreImmediate(value, LSWord);
      default:
        return false;
    }
  }
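
  // Helper used by the load/store immediate modes above: an offset is
  // encodable if it is a scaled offset that fits the access size, or any
  // unscaled 9-bit signed offset.
  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    return Assembler::IsImmLSScaled(value, size) ||
           Assembler::IsImmLSUnscaled(value);
  }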
};


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node, ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}
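
// Shared routine for multiple binary operations.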
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}
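
// Convenience overload that uses a default (no-flags) FlagsContinuation.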
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}

void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
  MachineType typ = TypeOf(OpParameter<MachineType>(node));
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32: opcode = kArm64LdrS; immediate_mode = kLoadStoreImm32; break;
    case kRepFloat64: opcode = kArm64LdrD; immediate_mode = kLoadStoreImm64; break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32: opcode = kArm64LdrW; immediate_mode = kLoadStoreImm32; break;
    case kRepTagged:  // Fall through.
    case kRepWord64: opcode = kArm64Ldr; immediate_mode = kLoadStoreImm64; break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}

void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
         temps);
    return;
  }
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32: opcode = kArm64StrS; immediate_mode = kLoadStoreImm32; break;
    case kRepFloat64: opcode = kArm64StrD; immediate_mode = kLoadStoreImm64; break;
    case kRepBit:  // Fall through.
    case kRepWord8: opcode = kArm64Strb; immediate_mode = kLoadStoreImm8; break;
    case kRepWord16: opcode = kArm64Strh; immediate_mode = kLoadStoreImm16; break;
    case kRepWord32: opcode = kArm64StrW; immediate_mode = kLoadStoreImm32; break;
    case kRepTagged:  // Fall through.
    case kRepWord64: opcode = kArm64Str; immediate_mode = kLoadStoreImm64; break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}
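
// Shared routine for multiple logical operations: when one input is an Xor
// with -1 and can be covered, the inverted-operand instruction (bic/orn/eon)
// is selected instead of the plain opcode.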
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32: inv_opcode = kArm64Bic32; break;
    case kArm64And:   inv_opcode = kArm64Bic;   break;
    case kArm64Or32:  inv_opcode = kArm64Orn32; break;
    case kArm64Or:    inv_opcode = kArm64Orn;   break;
    case kArm64Eor32: inv_opcode = kArm64Eon32; break;
    case kArm64Eor:   inv_opcode = kArm64Eon;   break;
    default: UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}
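
// The And/Or/Xor visitors defer to VisitLogical so that the inverted-operand
// forms above can be selected when one input is covered.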
void InstructionSelector::VisitWord32And(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}

void InstructionSelector::VisitWord64And(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}

void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}

void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}

void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}

void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}

void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
}

void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitRRO(this, kArm64Shl, node, kShift64Imm);
}

void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
}

void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitRRO(this, kArm64Shr, node, kShift64Imm);
}

void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
}

void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Sar, node, kShift64Imm);
}

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}

void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
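
// The integer add/sub visitors below try to fuse a covered multiply into
// madd/msub (or select neg for Sub(0, x)) before falling back to VisitBinop.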
void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Madd32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
}

void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Madd, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}

void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArm64Msub32, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  // Select Neg(x) for Sub(0, x).
  if (m.left().Is(0)) {
    Emit(kArm64Neg32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
  }
}

void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    Emit(kArm64Msub, g.DefineAsRegister(node),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  // Select Neg(x) for Sub(0, x).
  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
  }
}

void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kArm64Mul32, node);
}

void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kArm64Mul, node);
}

void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}

void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}

void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}

void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}

void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}

void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}

void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}

void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}
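
// The conversion visitors map one-to-one onto Arm64 instructions with a
// register input and a register output.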
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToFloat32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
}
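
// Shared routine for multiple compare operations.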
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}
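
// Shared routine for multiple word compare operations.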
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, kArithmeticImm)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
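
// Test nodes are combined with the operation computing the tested value where
// possible (cmn for add, cmp for sub, tst for and); otherwise the value is
// tested against itself.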
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
    default:
      break;
  }
  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
               cont);
}

void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kArm64Tst, cont, true);
    default:
      break;
  }
  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
}

void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
}

void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp, cont, false);
}

void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
               g.UseRegister(right), cont);
}

void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  Arm64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, false);

  // Push the arguments to the stack.
  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
  int aligned_push_count = buffer.pushed_nodes.size();
  // Bump the stack pointer to reserve the argument slots.
  if (aligned_push_count > 0) {
    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
  }
  // Move arguments to the stack.
  {
    int slot = buffer.pushed_nodes.size() - 1;
    // Emit the uneven push, if any.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
      slot--;
    }
    // The remaining pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]));
    }
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}