class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}
  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }
  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_MRI:  // [base register + immediate offset]
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:  // [base register + register offset]
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
      default:
        UNREACHABLE();
        return MemOperand(no_reg);
    }
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }
  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) return Operand(ToRegister(op));
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) return Operand(ToRegister(op).W());
    return ToImmediate(op);
  }
  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
  }
  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // Spill slots are addressed off either the stack pointer or the frame
    // pointer, depending on where the linkage placed them.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};
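
// Illustrative usage sketch (not from the original file): a hypothetical
// opcode handler builds one converter per instruction and picks 32-bit (W)
// or 64-bit (X) views of each operand through it:
//
//   Arm64OperandConverter i(this, instr);
//   __ Add(i.OutputRegister32(),   // W view of the output register
//          i.InputRegister32(0),   // W view of input 0
//          i.InputOperand32(1));   // input 1 as register or immediate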
#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
  do {                                                                         \
    if (instr->InputAt(1)->IsRegister()) {                                     \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
                   i.InputRegister##width(1));                                 \
    } else {                                                                   \
      int64_t imm = i.InputOperand##width(1).immediate().value();              \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                          \
  } while (0)
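
// Sketch of how ASSEMBLE_SHIFT is expected to be invoked from the opcode
// switch below; the opcode names here are assumptions for illustration and
// are not taken from this fragment:
//
//   case kArm64Shl:
//     ASSEMBLE_SHIFT(Lsl, 64);
//     break;
//   case kArm64Shl32:
//     ASSEMBLE_SHIFT(Lsl, 32);
//     break;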
// Assembles the architecture-specific code for one instruction.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the callee's context matches the current context.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      AddSafepointAndDeopt(instr);
      break;
    }
      __ B(code_->GetLabel(i.InputBlock(0)));
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      // Integer ALU operations; each statement below is the body of its own
      // opcode case, and every 32-bit variant mirrors the 64-bit one using
      // the W register views. Adds is the flag-setting variant, used when the
      // instruction also feeds a condition.
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Adds(i.OutputRegister32(), i.InputRegister32(0),
              i.InputOperand32(1));
      __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      // Bic: bit clear (AND with the inverted second operand).
      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      // Madd/Msub: multiply-add and multiply-subtract with a third input.
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      // Mneg: multiply and negate the product.
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0),
              i.InputRegister32(1));
      // Signed and unsigned division.
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0),
              i.InputRegister32(1));
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0),
              i.InputRegister32(1));
      // Integer modulus: compute the quotient into a scratch register, then
      // remainder = dividend - quotient * divisor via Msub.
      {
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
        __ Msub(i.OutputRegister(), temp, i.InputRegister(1),
                i.InputRegister(0));
      }
      {
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireW();
        __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
        __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
                i.InputRegister32(0));
      }
      {
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
        __ Msub(i.OutputRegister(), temp, i.InputRegister(1),
                i.InputRegister(0));
      }
      {
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireW();
        __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
        __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
                i.InputRegister32(0));
      }
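
      // The Msub above realizes the usual remainder identity
      //   lhs % rhs == lhs - (lhs / rhs) * rhs,
      // e.g. 7 % 3 == 7 - (7 / 3) * 3 == 7 - 6 == 1, with the division
      // truncating toward zero for Sdiv and unsigned for Udiv.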
      // Bitwise NOT via Orn against the zero register, and integer negation.
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      // Bitwise OR, OR-NOT, XOR and XOR-NOT.
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      // Subtraction; the flag-setting Subs variant is used when the result
      // also feeds a condition.
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      __ Subs(i.OutputRegister32(), i.InputRegister32(0),
              i.InputOperand32(1));
      __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      // 32-bit register move, and sign extension of a word to 64 bits.
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      // Store a register into a stack slot at a fixed offset ("poke").
      __ Poke(i.InputRegister(0), operand);
    case kArm64PokePairZero: {
    case kArm64PokePair: {
      // Comparisons and tests only set the condition flags; Cmn compares
      // against the negated operand and Tst ANDs without keeping the result.
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      __ Cmn(i.InputRegister(0), i.InputOperand(1));
      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // Float64 modulus has no single ARM64 instruction; call the C library
      // fmod through an external reference, with the inputs and result passed
      // in FP registers per the native calling convention.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
      // Memory loads and stores of various widths: zero-extending (Ldrb,
      // Ldrh) and sign-extending (Ldrsb, Ldrsh) sub-word loads, word and
      // doubleword accesses, then single- and double-precision FP accesses.
      // Stores take the value to store from input 2.
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      __ Str(i.InputRegister(2), i.MemoryOperand());
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        // RecordWrite may clobber lr, so save it (paired with a scratch
        // register to keep csp 16-byte aligned).
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
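
      // The Add above turns the untagged offset in `index` into the absolute
      // address of the written slot (object + sign-extended offset), and
      // RecordWrite then records that store so the garbage collector can
      // later find heap pointers written into the object.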
// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // of the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value; the result register is always the
  // last output of the instruction.
  DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  // Call the lazy deoptimization entry for this bailout id.
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// Round an odd spill-slot count up to an even one.
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}
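
// For example, five 8-byte spill slots would leave csp off 16-byte alignment,
// so AlignedStackSlots(5) returns 6 and the claimed area stays a multiple of
// 16 bytes, as the ARM64 ABI requires of csp.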
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // C entry frames run on the C stack pointer and preserve the callee-saved
    // registers.
    __ SetStackPointer(csp);
    __ PushCalleeSavedRegisters();
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy-mode functions called without an explicit receiver get the
    // global proxy substituted for the undefined receiver.
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 skips the saved frame pointer and the return address.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }
  } else {
    // Stub frames.
    __ SetStackPointer(jssp);
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    // Claim the spill-slot area; csp must stay 16-byte aligned.
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first, then restore the callee-saved
      // registers.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      __ PopCalleeSavedRegisters();
    }
  } else {
    // For JS function calls, also drop the arguments pushed by the caller
    // before returning.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds; not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireS();
        __ Fmov(temp, src.ToFloat32());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireD();
        __ Fmov(temp, src.ToFloat64());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds; not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register or register-stack swap via an X scratch register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    // Memory-memory swap through two scratch registers.
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
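
// Each branch above swaps through scratch registers: three moves
// (temp = a; a = b; b = temp) when both operands are registers, and a
// load/store pair when one side lives in memory, since ARM64 has no single
// swap instruction.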
// Writing to the zero register has no architectural effect, so this emits a
// recognizable no-op used as a marker for smi code inlining.
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Make sure there is enough space after the previous lazy-deopt site for
    // the deoptimizer to patch in a call.
    intptr_t current_pc = masm()->pc_offset();
    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  MarkLazyDeoptSite();
}
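
// For example, if the previous lazy-deopt site ended 8 bytes before the
// current pc and the patch sequence needs 16 bytes, padding_size is 8 and two
// 4-byte nops are emitted (ARM64 instructions are a fixed kInstructionSize of
// 4 bytes each).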