  immediate.value = 0xbeefdeaddeefbeed;
  switch (constant.type()) {
    case Constant::kInt32:
    case Constant::kInt64:
      immediate.value = constant.ToInt64();
      // ...
    case Constant::kFloat32:
      // ...
    case Constant::kFloat64:
      // ...
    case Constant::kExternalReference:
      immediate.reference = constant.ToExternalReference();
      // ...
    case Constant::kHeapObject:
      immediate.handle = constant.ToHeapObject();
      // ...
  }
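  // ToImmediate: only the kInt32 case can be encoded as a 32-bit x64
  // Immediate; the remaining constant kinds presumably fall through to an
  // unreachable path.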
  switch (constant.type()) {
    case Constant::kInt32:
      // ...
    case Constant::kInt64:
    case Constant::kFloat32:
    case Constant::kFloat64:
    case Constant::kExternalReference:
    case Constant::kHeapObject:
      // ...
  }
  if (op->IsRegister()) {
    // ...
  } else if (op->IsDoubleRegister()) {
    // ...
  }
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  int scale = static_cast<int>(mode - one);
  DCHECK(scale >= 0 && scale < 4);
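  // MemoryOperand(): builds the x64 Operand for the instruction's addressing
  // mode; the returns below come from the different base/index/displacement
  // cases.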
  return Operand(base, index, scale, disp);
  return Operand(base, index, scale, disp);
  return Operand(index, scale, disp);
  return Operand(index, scale, disp);
  return instr->InputAt(index)->IsImmediate();
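// ASSEMBLE_BINOP: emits a two-operand ALU instruction. With an immediate
// input 1, input 0 (register or memory) is the destination; otherwise input 0
// is a register and input 1 may be a register or a memory operand.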
#define ASSEMBLE_BINOP(asm_instr)                             \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      RegisterOrOperand input = i.InputRegisterOrOperand(0);  \
      if (input.type == kRegister) {                          \
        __ asm_instr(input.reg, i.InputImmediate(1));         \
      } else {                                                \
        __ asm_instr(input.operand, i.InputImmediate(1));     \
      }                                                       \
    } else {                                                  \
      RegisterOrOperand input = i.InputRegisterOrOperand(1);  \
      if (input.type == kRegister) {                          \
        __ asm_instr(i.InputRegister(0), input.reg);          \
      } else {                                                \
        __ asm_instr(i.InputRegister(0), input.operand);      \
      }                                                       \
    }                                                         \
  } while (0)
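// ASSEMBLE_SHIFT: the shift count is either an immediate of the given bit
// width or, via the ##_cl instruction variant, implicitly the cl register.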
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1)));   \
    } else {                                                               \
      __ asm_instr##_cl(i.OutputRegister());                               \
    }                                                                      \
  } while (0)
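// AssembleArchInstruction: the main dispatch that lowers one TurboFan
// Instruction to x64 machine code, switching on its architecture-specific
// opcode.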
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);
  // ...
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      // ...
      Register reg = i.InputRegister(0);
      __ Call(Operand(reg, entry));
      // ...
      AddSafepointAndDeopt(instr);
      // ...
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // ...
        __ Assert(equal, kWrongFunctionContext);
      }
      // ...
      AddSafepointAndDeopt(instr);
      // ...
    }
    // ...
      __ jmp(code_->GetLabel(i.InputBlock(0)));
    // ...
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
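      // kX64Imul32 / kX64Imul: 32- and 64-bit multiply. The three-operand
      // imul form is used when the right-hand side is an immediate; otherwise
      // the two-operand form takes a register or memory operand.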
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ imull(i.OutputRegister(), input.operand, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg);
        } else {
          __ imull(i.OutputRegister(), input.operand);
        }
      }
      // ...
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ imulq(i.OutputRegister(), input.operand, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg);
        } else {
          __ imulq(i.OutputRegister(), input.operand);
        }
      }
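      // Division cases: x64 div/idiv take only the divisor as an explicit
      // operand; the dividend (and its sign/zero extension) lives implicitly
      // in rdx:rax.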
      __ idivl(i.InputRegister(1));
      // ...
      __ idivq(i.InputRegister(1));
      // ...
      __ divl(i.InputRegister(1));
      // ...
      __ divq(i.InputRegister(1));
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      // ...
      __ notq(output.operand);

      RegisterOrOperand output = i.OutputRegisterOrOperand();
      // ...
      __ notl(output.operand);

      RegisterOrOperand output = i.OutputRegisterOrOperand();
      // ...
      __ negq(output.operand);

      RegisterOrOperand output = i.OutputRegisterOrOperand();
      // ...
      __ negl(output.operand);
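    // SSE2 double-precision ops. kSSEFloat64Cmp compares via ucomisd with a
    // register or memory right-hand side; the addsd/subsd/mulsd/divsd lines
    // below each belong to their own arithmetic case, with input 0 serving as
    // the destination.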
    case kSSEFloat64Cmp: {
      RegisterOrOperand input = i.InputRegisterOrOperand(1);
      if (input.type == kDoubleRegister) {
        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
      } else {
        __ ucomisd(i.InputDoubleRegister(0), input.operand);
      }
      // ...
    }
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      // ...
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      // ...
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      // ...
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
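    // kSSEFloat64Mod: SSE2 has no remainder instruction, so both inputs are
    // spilled to the stack and loaded onto the x87 FPU (presumably for fprem);
    // the shrl/andl on rax below extract flags from the FPU status word when
    // sahf cannot be used.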
    case kSSEFloat64Mod: {
      // ...
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // ...
      __ shrl(rax, Immediate(8));
      __ andl(rax, Immediate(0xFF));
      // ...
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      // ...
    }
    case kSSEFloat64Sqrt: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
      }
      // ...
    }
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      // ...
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      // ...
    case kSSEFloat64ToInt32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2si(i.OutputRegister(), input.double_reg);
      } else {
        __ cvttsd2si(i.OutputRegister(), input.operand);
      }
    case kSSEFloat64ToUint32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2siq(i.OutputRegister(), input.double_reg);
      } else {
        __ cvttsd2siq(i.OutputRegister(), input.operand);
      }
      __ andl(i.OutputRegister(), i.OutputRegister());
      // ...
    }
    case kSSEInt32ToFloat64: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
      }
      // ...
    }
    case kSSEUint32ToFloat64: {
      // ...
      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
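    // Narrow memory accesses: loads sign- or zero-extend into a full register
    // (movsx*/movzx*), and stores write either an immediate or a register
    // value to the operand computed by MemoryOperand().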
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      // ...
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      // ...
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      // ...
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      // ...
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      // ...
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          RegisterOrOperand input = i.InputRegisterOrOperand(0);
          if (input.type == kRegister) {
            __ movl(i.OutputRegister(), input.reg);
          } else {
            __ movl(i.OutputRegister(), input.operand);
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        // ...
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      // ...
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ movsxlq(i.OutputRegister(), input.reg);
      } else {
        __ movsxlq(i.OutputRegister(), input.operand);
      }
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        // ...
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        // ...
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      // ...
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        // ...
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      __ pushq(i.InputImmediate(0));
      // ...
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      // ...
      __ pushq(input.operand);
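    // kX64StoreWriteBarrier: stores the value into object[index], computes
    // the slot address with leaq, and calls RecordWrite so the GC write
    // barrier sees the new pointer.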
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
      __ leaq(index, Operand(object, index, times_1, 0));
      // ...
      __ RecordWrite(object, index, value, mode);
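// AssembleArchBranch: emits the conditional branch for a block terminator.
// When the false block is next in assembly order, the unconditional jump to
// it is omitted and a near label is used instead.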
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       /* ... */) {
  X64OperandConverter i(this, instr);
  // ...
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  // ...
  if (!fallthru) __ jmp(flabel, flabel_distance);
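// AssembleArchBoolean: materializes a flags condition as 0 or 1 in the output
// register, with movzxbl widening the byte-sized result at the end.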
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        /* ... */) {
  X64OperandConverter i(this, instr);
  // ...
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  // ...
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // ...
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
      // ...
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // ...
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
      // ...
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // ...
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
  // ...
  __ movzxbl(reg, reg);
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  // ...
}
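// AssemblePrologue: frame setup. kCallAddress (C) frames save the
// callee-saved registers named by the descriptor and record the save-area
// size; JS function frames emit the standard Prologue() and, for sloppy-mode
// non-native code, patch an undefined receiver (presumably with the global
// proxy).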
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // ...
    const RegList saves = descriptor->CalleeSavedRegisters();
    // ...
    int register_save_area_size = 0;
    // ...
      if (!((1 << i) & saves)) continue;
      // ...
    frame()->SetRegisterSaveAreaSize(register_save_area_size);
    // ...
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(/* ... */);
    // ...
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      // ...
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      // ...
      __ movp(args.GetReceiverOperand(), rcx);
      // ...
    }
  }
  // ...
  frame()->SetRegisterSaveAreaSize(/* ... */);
  // ...
  if (stack_slots > 0) {
    // ...
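// AssembleReturn: undoes the prologue, restoring callee-saved registers for
// C frames and popping the JS parameter count's worth of arguments when
// returning from a JS function call.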
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // ...
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        // ...
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // ...
        if (!((1 << i) & saves)) continue;
        // ...
  // ...
  int pop_count = descriptor->IsJSFunctionCall()
                      ? static_cast<int>(descriptor->JSParameterCount())
                      : /* ... */;
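// AssembleMove: lowers one element of a parallel move, dispatching on the
// source and destination operand kinds. Constants are materialized by kind:
// Set for integer immediates, Move for external references and heap-object
// handles; float32/float64 constants and double operands are handled in the
// trailing branches.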
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // ...
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      // ...
    } else {
      // ...
      Operand dst = g.ToOperand(destination);
      // ...
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    Constant src = g.ToConstant(constant_source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : /* ... */;
      Immediate64 imm = g.ToImmediate64(constant_source);
      // ...
          __ Set(dst, imm.value);
      // ...
          __ Move(dst, imm.reference);
      // ...
          __ Move(dst, imm.handle);
      // ...
      if (destination->IsStackSlot()) {
        // ...
      }
    } else if (src.type() == Constant::kFloat32) {
      // ...
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        // ...
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        // ...
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      // ...
      if (destination->IsDoubleRegister()) {
        // ...
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        // ...
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      // ...
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      // ...
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      // ...
    } else {
      // ...
      Operand dst = g.ToOperand(destination);
      // ...
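// AssembleSwap: exchanges two operands in place. Register-register swaps use
// xchgq; the memory and XMM cases below presumably stage one side in a
// scratch register, since x64 has no memory-to-memory exchange.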
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // ...
  if (source->IsRegister() && destination->IsRegister()) {
    // ...
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    // ...
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // ...
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    // ...
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // ...
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    // ...
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // ...
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    // ...
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
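// EnsureSpaceForLazyDeopt: pads the code with nops until the distance from
// the last lazy-deopt site reaches the required space; stub code is exempt
// from the check.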
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  // ...
  if (!linkage()->info()->IsStub()) {
    // ...
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
      // ...
    }
  }
  MarkLazyDeoptSite();
}