#define kScratchReg r9
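// Adds Arm-specific methods for converting InstructionOperands.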
class ArmOperandConverter FINAL : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}
  SwVfpRegister OutputFloat32Register(int index = 0) { return ToFloat32Register(instr_->OutputAt(index)); }
  SwVfpRegister InputFloat32Register(int index) { return ToFloat32Register(instr_->InputAt(index)); }
  SwVfpRegister ToFloat32Register(InstructionOperand* op) { return ToFloat64Register(op).low(); }
  LowDwVfpRegister OutputFloat64Register(int index = 0) { return ToFloat64Register(instr_->OutputAt(index)); }
  LowDwVfpRegister InputFloat64Register(int index) { return ToFloat64Register(instr_->InputAt(index)); }
  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) { return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()); }
  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }
  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }
  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset() {
    int index = 0;
    return InputOffset(&index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
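// Assembles an instruction after register allocation, producing machine code.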
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ b(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      break;
    }
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    }
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      break;
    case kArmVcmpF64:
      __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
                               i.InputFloat64Register(1));
      break;
    case kArmVaddF64:
      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      break;
    case kArmVsubF64:
      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      break;
    case kArmVmulF64:
      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      break;
    case kArmVmodF64: {
      // The floating-point modulo is implemented via a C call to fmod().
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputFloat64Register(0),
                              i.InputFloat64Register(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      __ MovFromFloatResult(i.OutputFloat64Register());
      break;
    }
    case kArmVsqrtF64:
      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVcvtF32F64: {
      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
      break;
    }
    case kArmVcvtF64F32: {
      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
      break;
    }
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      break;
    }
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrb: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      break;
    }
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      break;
    }
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      break;
    }
    case kArmVldr32:
      __ vldr(i.OutputFloat32Register(), i.InputOffset());
      break;
    case kArmVstr32: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat32Register(index), operand);
      break;
    }
    case kArmVldr64:
      __ vldr(i.OutputFloat64Register(), i.InputOffset());
      break;
    case kArmVstr64: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat64Register(index), operand);
      break;
    }
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      break;
    }
  }
}
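// Assembles branches after an instruction.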
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  // The switch over |condition| that emits the conditional branch to tlabel
  // is elided here.
  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
  __ bind(&done);
}
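// Assembles boolean materializations after an instruction.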
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  // Inside the switch over |condition|, each case materializes a 0 or 1 into
  // |reg| (unordered NaN cases eagerly, ordered cases via a final conditional
  // move):
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1));
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1));
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1));
  __ mov(reg, Operand(0));
  __ bind(&done);
}
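// Assembles a call to the lazy deoptimization entry for the given bailout id.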
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
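// Assembles the function prologue: C entry frames are built manually and save
// callee-saved registers, JSFunction calls use the standard JS prologue, and
// everything else uses the stub prologue; spill slots are reserved last.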
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    bool saved_pp;
    if (FLAG_enable_ool_constant_pool) {
      __ Push(lr, fp, pp);
      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      saved_pp = true;
    } else {
      __ Push(lr, fp);
      __ mov(fp, sp);
      saved_pp = false;
    }
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0 || saved_pp) {
      // Save callee-saved registers.
      int register_save_area_size = saved_pp ? kPointerSize : 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions without an explicit receiver.
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);
      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}
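// Assembles the return sequence: restores callee-saved registers if needed,
// tears down the frame, and drops JS parameters before returning.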
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first, then restore registers.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else {
    __ LeaveFrame(StackFrame::MANUAL);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}
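// Assembles a move between registers, stack slots and constants on behalf of
// the gap resolver.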
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      SwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat32Register(destination)
                              : kScratchDoubleReg.low();
      __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
      __ vmov(dst, ip);
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat64Register(destination)
                              : kScratchDoubleReg;
      __ vmov(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}
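// Assembles a swap of two operands on behalf of the gap resolver, using the
// scratch register and scratch double register as temporaries.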
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ vldr(temp_1, dst);
    __ ldr(temp_0, src);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  }
  else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    UNREACHABLE();
  }
}
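// On 32-bit ARM we do not insert nops for inlined Smi code.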
void CodeGenerator::AddNopForSmiCodeInlining() {}
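// Pads the code so that a lazy deoptimization call patched in later cannot
// overlap the previous lazy deoptimization site.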
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
}