// ia32-specific operand decoding; these are methods of IA32OperandConverter.
Operand ToOperand(InstructionOperand* op, int extra = 0) {
  if (op->IsRegister()) {
    DCHECK(extra == 0);
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    DCHECK(extra == 0);
    return Operand(ToDoubleRegister(op));
  }
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  // The linkage computes where all spill slots are located.
  FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
  return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
}

Operand HighOperand(InstructionOperand* op) {
  // Only double stack slots have a meaningful upper 32-bit half on ia32.
  DCHECK(op->IsDoubleStackSlot());
  return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
  Constant constant = ToConstant(operand);
  switch (constant.type()) {
    case Constant::kInt32:
      return Immediate(constant.ToInt32());
    case Constant::kFloat32:
      return Immediate(
          isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
    case Constant::kFloat64:
      return Immediate(
          isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
    case Constant::kExternalReference:
      return Immediate(constant.ToExternalReference());
    case Constant::kHeapObject:
      return Immediate(constant.ToHeapObject());
    case Constant::kInt64:
      break;  // 64-bit immediates are not representable on ia32.
  }
  UNREACHABLE();
  return Immediate(-1);
}
static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
  STATIC_ASSERT(0 == static_cast<int>(times_1));
  STATIC_ASSERT(1 == static_cast<int>(times_2));
  STATIC_ASSERT(2 == static_cast<int>(times_4));
  STATIC_ASSERT(3 == static_cast<int>(times_8));
  int scale = static_cast<int>(mode - one);
  DCHECK(scale >= 0 && scale < 4);
  return static_cast<ScaleFactor>(scale);
}
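// The scaled addressing modes of a family (e.g. kMode_MR1..kMode_MR8) are
// declared consecutively, so "mode - one" is exactly log2 of the scale; the
// STATIC_ASSERTs pin times_1..times_8 to the encodings 0..3 that the ia32
// SIB byte expects.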
Operand MemoryOperand(int* offset) {
  AddressingMode mode = AddressingModeField::decode(instr_->opcode());
  switch (mode) {
    case kMode_MR1:
    case kMode_MR2:
    case kMode_MR4:
    case kMode_MR8: {
      Register base = InputRegister(NextOffset(offset));
      Register index = InputRegister(NextOffset(offset));
      ScaleFactor scale = ScaleFor(kMode_MR1, mode);
      int32_t disp = 0;
      return Operand(base, index, scale, disp);
    }
    case kMode_MR1I:
    case kMode_MR2I:
    case kMode_MR4I:
    case kMode_MR8I: {
      Register base = InputRegister(NextOffset(offset));
      Register index = InputRegister(NextOffset(offset));
      ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
      int32_t disp = InputInt32(NextOffset(offset));
      return Operand(base, index, scale, disp);
    }
    case kMode_M1:
    case kMode_M2:
    case kMode_M4:
    case kMode_M8: {
      Register index = InputRegister(NextOffset(offset));
      ScaleFactor scale = ScaleFor(kMode_M1, mode);
      int32_t disp = 0;
      return Operand(index, scale, disp);
    }
    case kMode_M1I:
    case kMode_M2I:
    case kMode_M4I:
    case kMode_M8I: {
      Register index = InputRegister(NextOffset(offset));
      ScaleFactor scale = ScaleFor(kMode_M1I, mode);
      int32_t disp = InputInt32(NextOffset(offset));
      return Operand(index, scale, disp);
    }
    default:
      // (the base-only and displacement-only modes are elided in this excerpt)
      UNREACHABLE();
      return Operand(no_reg, 0);
  }
}

Operand MemoryOperand() {
  int first_input = 0;
  return MemoryOperand(&first_input);
}
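// MemoryOperand() consumes the instruction's inputs in lock-step with the
// addressing mode: each NextOffset(offset) call returns the current input
// index and advances it, so base, index and displacement are read from
// consecutive inputs.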
static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}
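// HasImmediateInput is what lets the opcode cases below choose between the
// "op reg/mem, imm" and "op reg, reg/mem" encodings of each instruction.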
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  IA32OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code()->GetLabel(i.InputBlock(0)));
      break;
    // (the kArchNop and kArchRet cases are elided in this excerpt)
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
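    // The two-operand integer instructions below all follow the same shape:
    // use the immediate encoding when input 1 is an immediate, otherwise the
    // register/memory encoding.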
    case kIA32Add:
      if (HasImmediateInput(instr, 1)) {
        __ add(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ add(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32And:
      if (HasImmediateInput(instr, 1)) {
        __ and_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ and_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Cmp:
      if (HasImmediateInput(instr, 1)) {
        __ cmp(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ cmp(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Test:
      if (HasImmediateInput(instr, 1)) {
        __ test(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ test(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Imul:
      if (HasImmediateInput(instr, 1)) {
        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
      } else {
        __ imul(i.OutputRegister(), i.InputOperand(1));
      }
      break;
    case kIA32Idiv:
      __ cdq();  // Sign-extend eax into edx:eax for the signed divide.
      __ idiv(i.InputOperand(1));
      break;
    case kIA32Udiv:
      __ xor_(edx, edx);  // Zero the high half for the unsigned divide.
      __ div(i.InputOperand(1));
      break;
    case kIA32Not:
      __ not_(i.OutputOperand());
      break;
    case kIA32Neg:
      __ neg(i.OutputOperand());
      break;
    case kIA32Or:
      if (HasImmediateInput(instr, 1)) {
        __ or_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ or_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Xor:
      if (HasImmediateInput(instr, 1)) {
        __ xor_(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ xor_(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Sub:
      if (HasImmediateInput(instr, 1)) {
        __ sub(i.InputOperand(0), i.InputImmediate(1));
      } else {
        __ sub(i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kIA32Shl:
      if (HasImmediateInput(instr, 1)) {
        __ shl(i.OutputRegister(), i.InputInt5(1));
      } else {
        __ shl_cl(i.OutputRegister());
      }
      break;
    case kIA32Shr:
      if (HasImmediateInput(instr, 1)) {
        __ shr(i.OutputRegister(), i.InputInt5(1));
      } else {
        __ shr_cl(i.OutputRegister());
      }
      break;
    case kIA32Sar:
      if (HasImmediateInput(instr, 1)) {
        __ sar(i.OutputRegister(), i.InputInt5(1));
      } else {
        __ sar_cl(i.OutputRegister());
      }
      break;
    case kIA32Ror:
      if (HasImmediateInput(instr, 1)) {
        __ ror(i.OutputRegister(), i.InputInt5(1));
      } else {
        __ ror_cl(i.OutputRegister());
      }
      break;
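    // The _cl variants exist because ia32 encodes a variable shift or rotate
    // count only in the cl register; the immediate forms use InputInt5 since
    // the hardware truncates the count to 5 bits.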
    case kSSEFloat64Cmp:
      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
      break;
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mod: {
      // SSE2 has no remainder instruction, so the inputs take a round trip
      // through the x87 stack and fprem.
      __ sub(esp, Immediate(kDoubleSize));
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(esp, 0));
      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(esp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      __ fprem();
      __ fnstsw_ax();
      __ sahf();
      __ j(parity_even, &mod_loop);
      // Move the result to the output register and clean up.
      __ fstp(1);
      __ fstp_d(Operand(esp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
      break;
    }
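    // In the fprem loop above, fnstsw_ax copies the FPU status word into ax
    // and sahf moves ah into EFLAGS, so the C2 "reduction incomplete" flag
    // lands in PF and parity_even re-enters the loop.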
    case kSSEFloat64Sqrt:
      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSECvtss2sd:
      __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kSSECvtsd2ss:
      __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kSSEFloat64ToInt32:
      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      break;
    case kSSEFloat64ToUint32: {
      XMMRegister scratch = xmm0;
      __ Move(scratch, -2147483648.0);
      __ addsd(scratch, i.InputOperand(0));
      __ cvttsd2si(i.OutputRegister(), scratch);
      __ add(i.OutputRegister(), Immediate(0x80000000));
      break;
    }
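    // cvttsd2si only produces a *signed* int32, so the input is biased down
    // by 2^31 before converting and the result is shifted back up by adding
    // 0x80000000, covering the full uint32 range.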
    case kSSEInt32ToFloat64:
      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
      break;
    case kSSEUint32ToFloat64:
      __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    case kIA32Movsxbl:
      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movzxbl:
      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_b(operand, i.InputInt8(index));
      } else {
        __ mov_b(operand, i.InputRegister(index));
      }
      break;
    }
    case kIA32Movsxwl:
      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movzxwl:
      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
      break;
    case kIA32Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ mov_w(operand, i.InputInt16(index));
      } else {
        __ mov_w(operand, i.InputRegister(index));
      }
      break;
    }
    case kIA32Movl:
      if (instr->HasOutput()) {
        __ mov(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ mov(operand, i.InputImmediate(index));
        } else {
          __ mov(operand, i.InputRegister(index));
        }
      }
      break;
    case kIA32Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kIA32Movss:
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movss(operand, i.InputDoubleRegister(index));
      }
      break;
    case kIA32Push:
      if (HasImmediateInput(instr, 0)) {
        __ push(i.InputImmediate(0));
      } else {
        __ push(i.InputOperand(0));
      }
      break;
    case kIA32StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ mov(Operand(object, index, times_1, 0), value);
      __ lea(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}
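// The write barrier case stores the value, then turns the index into the
// field's address with lea, because RecordWrite expects an address register
// when informing the incremental GC about the updated slot.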
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
    // (the per-condition jumps to tlabel/flabel are elided in this excerpt)
    default:
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // No fallthru to flabel.
  __ bind(&done);
}
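// When the false block is the next one in assembly order, no jump to it is
// emitted at all: flabel then aliases the local done label, and only the
// true-branch jcc (plus fallthrough) remains.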
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  IA32OperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = no_condition;
  switch (condition) {
    case kUnorderedEqual:
      // NaN operands compare unordered: materialize their result directly.
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    // (the signed comparison cases, which only set cc, are elided in this
    // excerpt)
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ mov(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
      // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    default:
      break;
  }
  __ bind(&check);
  if (reg.is_byte_register()) {
    // setcc for byte registers (al, bl, cl, dl).
    __ setcc(cc, reg);
    __ movzx_b(reg, reg);
  } else {
    // Emit a branch to set a register to either 1 or 0.
    Label set;
    __ j(cc, &set, Label::kNear);
    __ mov(reg, Immediate(0));
    __ jmp(&done, Label::kNear);
    __ bind(&set);
    __ mov(reg, Immediate(1));
  }
  __ bind(&done);
}
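// setcc can only address the low byte of eax, ebx, ecx and edx on ia32,
// hence the is_byte_register() split: all other registers materialize the
// 0/1 result with an explicit branch-and-mov sequence.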
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  Frame* frame = code_->frame();
  int stack_slots = frame->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    // Assemble a prologue similar to the cdecl calling convention.
    __ push(ebp);
    __ mov(ebp, esp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ push(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit
    // receiver object).
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for the return address and the saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);
      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
      __ bind(&ok);
    }
  } else {
    __ StubPrologue();
    frame->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ sub(esp, Immediate(stack_slots * kPointerSize));
  }
}
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(esp, Immediate(stack_slots * kPointerSize));
      }
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ pop(Register::from_code(i));
        }
      }
      __ pop(ebp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
      __ pop(ebp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
    __ pop(ebp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}
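// JS function frames are callee-cleanup on ia32: the final ret pops
// JSParameterCount stack slots together with the return address, while
// kCallAddress (C-like) frames use ret(0) and leave argument cleanup to the
// caller.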
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ mov(dst, src);
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Memory-to-memory moves go through the stack, since mov cannot take
      // two memory operands.
      Operand dst = g.ToOperand(destination);
      __ push(src);
      __ pop(dst);
    }
  } else if (source->IsConstant()) {
    Constant src_constant = g.ToConstant(source);
    if (src_constant.type() == Constant::kHeapObject) {
      Handle<HeapObject> src = src_constant.ToHeapObject();
      if (destination->IsRegister()) {
        Register dst = g.ToRegister(destination);
        __ LoadHeapObject(dst, src);
      } else {
        DCHECK(destination->IsStackSlot());
        Operand dst = g.ToOperand(destination);
        AllowDeferredHandleDereference embedding_raw_address;
        if (isolate()->heap()->InNewSpace(*src)) {
          __ PushHeapObject(src);
          __ pop(dst);
        } else {
          __ mov(dst, src);
        }
      }
    } else if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mov(dst, g.ToImmediate(source));
    } else if (destination->IsStackSlot()) {
      Operand dst = g.ToOperand(destination);
      __ mov(dst, g.ToImmediate(source));
    } else if (src_constant.type() == Constant::kFloat32) {
      // Reinterpret the float bits as an int32 immediate.
      Immediate src(bit_cast<int32_t>(src_constant.ToFloat32()));
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        // There is no mov from an immediate to an XMM register: bounce the
        // bits through the stack.
        __ push(Immediate(src));
        __ movss(dst, Operand(esp, 0));
        __ add(esp, Immediate(kDoubleSize / 2));
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        Operand dst = g.ToOperand(destination);
        __ mov(dst, src);
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src_constant.type());
      double v = src_constant.ToFloat64();
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = g.ToDoubleRegister(destination);
        __ Move(dst, v);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        // Store the 64-bit image as two 32-bit halves; HighOperand addresses
        // the upper word of the double stack slot.
        Operand dst0 = g.ToOperand(destination);
        Operand dst1 = g.HighOperand(destination);
        __ mov(dst0, Immediate(lower));
        __ mov(dst1, Immediate(upper));
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // Slot-to-slot moves go through xmm0, the fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  IA32OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = g.ToRegister(source);
    Register dst = g.ToRegister(destination);
    __ xchg(dst, src);
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    // Register-memory.
    __ xchg(g.ToRegister(source), g.ToOperand(destination));
  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ push(dst);
    __ push(src);
    __ pop(dst);
    __ pop(src);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap via the scratch register xmm0.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.
    XMMRegister reg = g.ToDoubleRegister(source);
    Operand other = g.ToOperand(destination);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory swap.
    Operand src0 = g.ToOperand(source);
    Operand src1 = g.HighOperand(source);
    Operand dst0 = g.ToOperand(destination);
    Operand dst1 = g.HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save the destination in the scratch register.
    __ push(src0);         // Then use the stack to copy source to destination.
    __ pop(dst0);
    __ push(src1);
    __ pop(dst1);
    __ movsd(src0, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
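// Both double-swap cases lean on xmm0 as scratch; this works because the
// ia32 register allocator treats xmm0 as a fixed, non-allocatable register.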
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}
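// Lazy deoptimization patches a call over the code at the deopt site, so the
// Nop padding guarantees that two consecutive patch sites can never overlap.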