    if (!m.HasValue()) return false;
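    // These ranges mirror the offset fields of the underlying ARM
    // addressing modes: VLDR/VSTR take a multiple of 4 in [-1020, 1020],
    // the word/byte forms a 12-bit offset in [-4095, 4095], and the
    // halfword forms (LDRH/LDRSH/STRH) an 8-bit offset in [-255, 255].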
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      case kArmVldrF64:
      case kArmVstrF64:
        return value >= -1020 && value <= 1020 && (value % 4) == 0;
      case kArmStoreWriteBarrier:
        return value >= -4095 && value <= 4095;
      case kArmLdrh:
      case kArmLdrsh:
      case kArmStrh:
        return value >= -255 && value <= 255;
      case kArchCallCodeObject:
      case kArchCallJSFunction:
      case kArchTruncateDoubleToI:
        return false;
    }
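// The TryMatch* helpers below try to absorb a shift node into the second
// operand of a data-processing instruction (ARM's flexible Operand2), so
// the shift costs no extra instruction. Note that the encodable immediate
// ranges differ per shift: ASR/LSR allow #1-32, LSL #0-31, ROR #1-31.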
  if (node->opcode() != IrOpcode::kWord32Ror) return false;
  Int32BinopMatcher m(node);
  if (m.right().IsInRange(1, 31)) {
  if (node->opcode() != IrOpcode::kWord32Sar) return false;
  Int32BinopMatcher m(node);
  if (m.right().IsInRange(1, 32)) {
  if (node->opcode() != IrOpcode::kWord32Shl) return false;
  Int32BinopMatcher m(node);
  if (m.right().IsInRange(0, 31)) {
  if (node->opcode() != IrOpcode::kWord32Shr) return false;
  Int32BinopMatcher m(node);
  if (m.right().IsInRange(1, 32)) {
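// TryMatchShift tries each of the four shift forms in turn.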
  return (
      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
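// Prefer encoding the operand as an immediate; failing that, try to fold a
// shift. The number of operand slots consumed is written to
// *input_count_return.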
static bool TryMatchImmediateOrShift(InstructionSelector* selector,
                                     InstructionCode* opcode_return,
                                     Node* node, size_t* input_count_return,
                                     InstructionOperand** inputs) {
  ArmOperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
    *input_count_return = 2;
    return true;
  }
  return false;
}
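// Shared code for binary operations with a reversible operand order (e.g.
// SUB vs. RSB): if only the left operand fits the immediate/shift encoding,
// the reversed opcode is selected instead.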
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[5];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
                                      m.left().node(), &input_count,
                                      &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) outputs[output_count++] = g.DefineAsRegister(cont->result());

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
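// Loads pick the opcode from the loaded representation, and use a
// register+immediate addressing mode whenever the index fits the opcode's
// offset field.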
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kArmVldrF32;
      break;
    case kRepFloat64:
      opcode = kArmVldrF64;
      break;
    case kRepWord8:
      opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
      break;
    case kRepWord16:
      opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
      break;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}
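// Stores that need a full write barrier are emitted as a single
// kArmStoreWriteBarrier pseudo-instruction with base, index and value
// pinned to fixed registers (r4-r6), presumably to match the record-write
// code it expands into.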
void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
         g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
         temps);
    return;
  }

  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kArmVstrF32;
      break;
    case kRepFloat64:
      opcode = kArmVstrF64;
      break;
    case kRepWord8:
      opcode = kArmStrb;
      break;
    case kRepWord16:
      opcode = kArmStrh;
      break;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}
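// x & ~y selects to BIC, which can also absorb a shifted right operand.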
static inline void EmitBic(InstructionSelector* selector, Node* node,
                           Node* left, Node* right) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmBic;
  InstructionOperand *value_operand, *shift_operand;
  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
                   value_operand, shift_operand);
    return;
  }
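// Word32And tries three ARM-specific patterns before falling back to a
// plain AND: x & ~y -> BIC, a low-bit mask (possibly of a shifted value) ->
// UBFX (unsigned bitfield extract), and a mask that clears a contiguous
// mid-field -> BFC. The bitfield forms are ARMv7-only.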
void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBic(this, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBic(this, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (IsSupported(ARMv7) && m.right().HasValue()) {
    uint32_t value = m.right().Value();
    uint32_t width = CountPopulation32(value);
    uint32_t msb = CountLeadingZeros32(value);
    if (width != 0 && msb + width == 32) {
      if (m.left().IsWord32Shr()) {
        Int32BinopMatcher mleft(m.left().node());
        if (mleft.right().IsInRange(0, 31)) {
          Emit(kArmUbfx, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
          return;
        }
      }
      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(width));
      return;
    }
    uint32_t lsb = CountTrailingZeros32(~value);
    width = CountPopulation32(~value);
    msb = CountLeadingZeros32(~value);
    if (msb + width + lsb == 32) {
      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(lsb), g.TempImmediate(width));
      return;
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArmOrr, kArmOrr);
}
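// x ^ -1 is ~x, which selects to MVN, optionally with a folded shift.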
void InstructionSelector::VisitWord32Xor(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    InstructionCode opcode = kArmMvn;
    InstructionOperand* value_operand;
    InstructionOperand* shift_operand;
    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
                      &shift_operand)) {
      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
      return;
    }
    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmEor, kArmEor);
}
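// Shared code for shifts: the matched shift becomes the Operand2 of a MOV,
// so a bare shift is a single instruction.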
template <typename TryMatchShift>
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              TryMatchShift try_match_shift,
                              FlagsContinuation* cont) {
  ArmOperandGenerator g(selector);
  InstructionCode opcode = kArmMov;
  InstructionOperand* inputs[4];
  size_t input_count = 2;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


template <typename TryMatchShift>
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              TryMatchShift try_match_shift) {
  FlagsContinuation cont;
  VisitShift(selector, node, try_match_shift, &cont);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, TryMatchLSL);
}
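// (x & mask) >> lsb turns into UBFX on ARMv7 when the mask bits above lsb
// form one contiguous field.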
void InstructionSelector::VisitWord32Shr(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
      m.right().IsInRange(0, 31)) {
    int32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
      uint32_t width = CountPopulation32(value);
      uint32_t msb = CountLeadingZeros32(value);
      if (msb + width + lsb == 32) {
        Emit(kArmUbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(width));
        return;
      }
    }
  }
  VisitShift(this, node, TryMatchLSR);
}
void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitShift(this, node, TryMatchASR);
}
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, TryMatchROR);
}
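// An add of a multiply fuses into MLA (multiply-accumulate).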
void InstructionSelector::VisitInt32Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}
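// A subtract of a multiply fuses into MLS (multiply-subtract) when the
// instruction is available.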
void InstructionSelector::VisitInt32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (IsSupported(MLS) && m.right().IsInt32Mul() &&
      CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}
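// Multiplication by 2^k +/- 1 strength-reduces to a single ADD or RSB with
// a shifted operand, e.g. x * 9 => ADD Rd, x, x LSL #3 and
// x * 7 => RSB Rd, x, x LSL #3.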
void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (IsPowerOfTwo32(value - 1)) {
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (value < kMaxInt && IsPowerOfTwo32(value + 1)) {
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
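// Integer division: emit SDIV/UDIV when the CPU has them; otherwise convert
// both operands to double, divide in VFP and convert back. This is exact,
// since a double's 53-bit mantissa represents every 32-bit integer.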
static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
                    InstructionOperand* result_operand,
                    InstructionOperand* left_operand,
                    InstructionOperand* right_operand) {
  ArmOperandGenerator g(selector);
  if (selector->IsSupported(SUDIV)) {
    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    return;
  }
  InstructionOperand* left_double_operand = g.TempDoubleRegister();
  InstructionOperand* right_double_operand = g.TempDoubleRegister();
  InstructionOperand* result_double_operand = g.TempDoubleRegister();
  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
                 right_double_operand);
  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}
static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
          g.UseRegister(m.right().node()));
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
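// Modulus is derived from the quotient: r = n - (n / d) * d, via MLS when
// available, else a MUL + SUB pair.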
static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* div_operand = g.TempRegister();
  InstructionOperand* result_operand = g.DefineAsRegister(node);
  InstructionOperand* left_operand = g.UseRegister(m.left().node());
  InstructionOperand* right_operand = g.UseRegister(m.right().node());
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (selector->IsSupported(MLS)) {
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
    return;
  }
  InstructionOperand* mul_operand = g.TempRegister();
  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
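// The conversions below map 1:1 onto VFP VCVT variants.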
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64F32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVcvtF32F64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64Add(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
    Float64BinopMatcher mleft(m.left().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()));
    return;
  }
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Float64BinopMatcher mright(m.right().node());
    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVaddF64, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
    Float64BinopMatcher mright(m.right().node());
    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
         g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVsubF64, node);
}
void InstructionSelector::VisitFloat64Mul(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.right().Is(-1.0)) {
    Emit(kArmVnegF64, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()));
    return;
  }
  VisitRRRFloat64(this, kArmVmulF64, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArmVdivF64, node);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
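// Calls: push stack arguments in reverse order, then emit a single call
// instruction whose opcode depends on whether the target is a code object
// or a JSFunction.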
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  ArmOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  InitializeCallBuffer(call, &buffer, true, false);

  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    Emit(kArmPush, NULL, g.UseRegister(*input));
  }

  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    call_instr->MarkAsControl();
  }
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kArmAdd, kArmAdd, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kArmSub, kArmRsb, cont);
}
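// Shared code for comparisons; like VisitBinop, but a comparison has no
// value output, and the continuation condition must be commuted when the
// operands are swapped to fit an immediate/shift encoding.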
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[5];
  size_t input_count = 0;
  InstructionOperand* outputs[1];
  size_t output_count = 0;

  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
                                      &input_count, &inputs[1])) {
    if (!commutative) cont->Commute();
    inputs[0] = g.UseRegister(m.right().node());
    input_count++;
  } else {
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseRegister(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (cont->IsSet()) outputs[output_count++] = g.DefineAsRegister(cont->result());

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}
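// A "test" of an arithmetic node re-selects it in a flag-setting form (CMN,
// CMP, TST, TEQ, or the flag-setting variant of the operation itself); any
// other value is tested via TST value, value.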
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArmCmn, cont, true);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArmCmp, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArmTst, cont, true);
    case IrOpcode::kWord32Or:
      return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
    case IrOpcode::kWord32Xor:
      return VisitWordCompare(this, node, kArmTeq, cont, true);
    case IrOpcode::kWord32Sar:
      return VisitShift(this, node, TryMatchASR, cont);
    case IrOpcode::kWord32Shl:
      return VisitShift(this, node, TryMatchLSL, cont);
    case IrOpcode::kWord32Shr:
      return VisitShift(this, node, TryMatchLSR, cont);
    case IrOpcode::kWord32Ror:
      return VisitShift(this, node, TryMatchROR, cont);
    default:
      break;
  }

  ArmOperandGenerator g(this);
  InstructionCode opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  if (cont->IsBranch()) {
    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
         g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
         g.UseRegister(node));
  }
}
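// Float64 comparisons emit VCMP; the flags continuation either branches on
// the result or materializes it into a register.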
void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArmCmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (cont->IsBranch()) {
    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
         g.UseRegister(m.right().node()), g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
         g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
  }
}