code-generator-x64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/code-generator.h"
6 
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/scopes.h"
12 #include "src/x64/assembler-x64.h"
13 #include "src/x64/macro-assembler-x64.h"
14 
15 namespace v8 {
16 namespace internal {
17 namespace compiler {
18 
19 #define __ masm()->
20 
21 
22 // TODO(turbofan): Cleanup these hacks.
23 enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
24 
25 
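// Immediate64 describes a 64-bit constant operand that can be a raw value,
// a heap object handle, or an external reference, discriminated by |type|.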
26 struct Immediate64 {
27  uint64_t value;
28  Handle<Object> handle;
29  ExternalReference reference;
30  Immediate64Type type;
31 };
32 
33 
34 enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
35 
36 
37 struct RegisterOrOperand {
38  RegisterOrOperand() : operand(no_reg, 0) {}
39  Register reg;
40  DoubleRegister double_reg;
41  Operand operand;
42  RegisterOrOperandType type;
43 };
44 
45 
46 // Adds X64 specific methods for decoding operands.
47 class X64OperandConverter : public InstructionOperandConverter {
48  public:
49  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
50  : InstructionOperandConverter(gen, instr) {}
51 
52  RegisterOrOperand InputRegisterOrOperand(int index) {
53  return ToRegisterOrOperand(instr_->InputAt(index));
54  }
55 
56  Immediate InputImmediate(int index) {
57  return ToImmediate(instr_->InputAt(index));
58  }
59 
60  RegisterOrOperand OutputRegisterOrOperand() {
61  return ToRegisterOrOperand(instr_->Output());
62  }
63 
64  Immediate64 InputImmediate64(int index) {
65  return ToImmediate64(instr_->InputAt(index));
66  }
67 
68  Immediate64 ToImmediate64(InstructionOperand* operand) {
69  Constant constant = ToConstant(operand);
70  Immediate64 immediate;
71  immediate.value = 0xbeefdeaddeefbeed;
72  immediate.type = kImm64Value;
73  switch (constant.type()) {
74  case Constant::kInt32:
75  case Constant::kInt64:
76  immediate.value = constant.ToInt64();
77  return immediate;
78  case Constant::kFloat32:
79  immediate.type = kImm64Handle;
80  immediate.handle =
81  isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED);
82  return immediate;
83  case Constant::kFloat64:
84  immediate.type = kImm64Handle;
85  immediate.handle =
86  isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
87  return immediate;
88  case Constant::kExternalReference:
89  immediate.type = kImm64Reference;
90  immediate.reference = constant.ToExternalReference();
91  return immediate;
92  case Constant::kHeapObject:
93  immediate.type = kImm64Handle;
94  immediate.handle = constant.ToHeapObject();
95  return immediate;
96  }
97  UNREACHABLE();
98  return immediate;
99  }
100 
101  Immediate ToImmediate(InstructionOperand* operand) {
102  Constant constant = ToConstant(operand);
103  switch (constant.type()) {
104  case Constant::kInt32:
105  return Immediate(constant.ToInt32());
106  case Constant::kInt64:
107  case Constant::kFloat32:
108  case Constant::kFloat64:
109  case Constant::kExternalReference:
110  case Constant::kHeapObject:
111  break;
112  }
113  UNREACHABLE();
114  return Immediate(-1);
115  }
116 
117  Operand ToOperand(InstructionOperand* op, int extra = 0) {
118  RegisterOrOperand result = ToRegisterOrOperand(op, extra);
119  DCHECK_EQ(kOperand, result.type);
120  return result.operand;
121  }
122 
123  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
124  RegisterOrOperand result;
125  if (op->IsRegister()) {
126  DCHECK(extra == 0);
127  result.type = kRegister;
128  result.reg = ToRegister(op);
129  return result;
130  } else if (op->IsDoubleRegister()) {
131  DCHECK(extra == 0);
132  DCHECK(extra == 0);
133  result.type = kDoubleRegister;
134  result.double_reg = ToDoubleRegister(op);
135  return result;
136  }
137 
138  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
139 
140  result.type = kOperand;
141  // The linkage computes where all spill slots are located.
142  FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
143  result.operand =
144  Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
145  return result;
146  }
147 
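// Consumes the next instruction input while decoding a memory operand:
// returns the current offset and advances it by one.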
148  static int NextOffset(int* offset) {
149  int i = *offset;
150  (*offset)++;
151  return i;
152  }
153 
154  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
155  STATIC_ASSERT(0 == static_cast<int>(times_1));
156  STATIC_ASSERT(1 == static_cast<int>(times_2));
157  STATIC_ASSERT(2 == static_cast<int>(times_4));
158  STATIC_ASSERT(3 == static_cast<int>(times_8));
159  int scale = static_cast<int>(mode - one);
160  DCHECK(scale >= 0 && scale < 4);
161  return static_cast<ScaleFactor>(scale);
162  }
163 
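// Decodes the instruction's addressing mode into an x64 Operand. MR* modes
// take a base register (plus an optional scaled index), M* modes take only a
// scaled index, and the *I variants consume an extra 32-bit displacement input.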
164  Operand MemoryOperand(int* offset) {
165  AddressingMode mode = AddressingModeField::decode(instr_->opcode());
166  switch (mode) {
167  case kMode_MR: {
168  Register base = InputRegister(NextOffset(offset));
169  int32_t disp = 0;
170  return Operand(base, disp);
171  }
172  case kMode_MRI: {
173  Register base = InputRegister(NextOffset(offset));
174  int32_t disp = InputInt32(NextOffset(offset));
175  return Operand(base, disp);
176  }
177  case kMode_MR1:
178  case kMode_MR2:
179  case kMode_MR4:
180  case kMode_MR8: {
181  Register base = InputRegister(NextOffset(offset));
182  Register index = InputRegister(NextOffset(offset));
183  ScaleFactor scale = ScaleFor(kMode_MR1, mode);
184  int32_t disp = 0;
185  return Operand(base, index, scale, disp);
186  }
187  case kMode_MR1I:
188  case kMode_MR2I:
189  case kMode_MR4I:
190  case kMode_MR8I: {
191  Register base = InputRegister(NextOffset(offset));
192  Register index = InputRegister(NextOffset(offset));
193  ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
194  int32_t disp = InputInt32(NextOffset(offset));
195  return Operand(base, index, scale, disp);
196  }
197  case kMode_M1:
198  case kMode_M2:
199  case kMode_M4:
200  case kMode_M8: {
201  Register index = InputRegister(NextOffset(offset));
202  ScaleFactor scale = ScaleFor(kMode_M1, mode);
203  int32_t disp = 0;
204  return Operand(index, scale, disp);
205  }
206  case kMode_M1I:
207  case kMode_M2I:
208  case kMode_M4I:
209  case kMode_M8I: {
210  Register index = InputRegister(NextOffset(offset));
211  ScaleFactor scale = ScaleFor(kMode_M1I, mode);
212  int32_t disp = InputInt32(NextOffset(offset));
213  return Operand(index, scale, disp);
214  }
215  case kMode_None:
216  UNREACHABLE();
217  return Operand(no_reg, 0);
218  }
219  UNREACHABLE();
220  return Operand(no_reg, 0);
221  }
222 
223  Operand MemoryOperand() {
224  int first_input = 0;
225  return MemoryOperand(&first_input);
226  }
227 };
228 
229 
230 static bool HasImmediateInput(Instruction* instr, int index) {
231  return instr->InputAt(index)->IsImmediate();
232 }
233 
234 
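// Emits a two-operand ALU instruction, picking the reg/imm, mem/imm, reg/reg
// or reg/mem form depending on whether input 1 is an immediate and on where
// the register allocator placed the operands.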
235 #define ASSEMBLE_BINOP(asm_instr) \
236  do { \
237  if (HasImmediateInput(instr, 1)) { \
238  RegisterOrOperand input = i.InputRegisterOrOperand(0); \
239  if (input.type == kRegister) { \
240  __ asm_instr(input.reg, i.InputImmediate(1)); \
241  } else { \
242  __ asm_instr(input.operand, i.InputImmediate(1)); \
243  } \
244  } else { \
245  RegisterOrOperand input = i.InputRegisterOrOperand(1); \
246  if (input.type == kRegister) { \
247  __ asm_instr(i.InputRegister(0), input.reg); \
248  } else { \
249  __ asm_instr(i.InputRegister(0), input.operand); \
250  } \
251  } \
252  } while (0)
253 
254 
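// Emits a shift or rotate. An immediate count is masked to |width| bits
// (5 for 32-bit, 6 for 64-bit operations); otherwise the count is taken
// from the CL register, as required by the x64 encoding.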
255 #define ASSEMBLE_SHIFT(asm_instr, width) \
256  do { \
257  if (HasImmediateInput(instr, 1)) { \
258  __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
259  } else { \
260  __ asm_instr##_cl(i.OutputRegister()); \
261  } \
262  } while (0)
263 
264 
265 // Assembles an instruction after register allocation, producing machine code.
266 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
267  X64OperandConverter i(this, instr);
268 
269  switch (ArchOpcodeField::decode(instr->opcode())) {
270  case kArchCallCodeObject: {
271  EnsureSpaceForLazyDeopt();
272  if (HasImmediateInput(instr, 0)) {
273  Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
274  __ Call(code, RelocInfo::CODE_TARGET);
275  } else {
276  Register reg = i.InputRegister(0);
277  int entry = Code::kHeaderSize - kHeapObjectTag;
278  __ Call(Operand(reg, entry));
279  }
280  AddSafepointAndDeopt(instr);
281  break;
282  }
283  case kArchCallJSFunction: {
284  EnsureSpaceForLazyDeopt();
285  Register func = i.InputRegister(0);
286  if (FLAG_debug_code) {
287  // Check the function's context matches the context argument.
288  __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
289  __ Assert(equal, kWrongFunctionContext);
290  }
291  __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
292  AddSafepointAndDeopt(instr);
293  break;
294  }
295  case kArchJmp:
296  __ jmp(code_->GetLabel(i.InputBlock(0)));
297  break;
298  case kArchNop:
299  // don't emit code for nops.
300  break;
301  case kArchRet:
302  AssembleReturn();
303  break;
304  case kArchTruncateDoubleToI:
305  __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
306  break;
307  case kX64Add32:
308  ASSEMBLE_BINOP(addl);
309  break;
310  case kX64Add:
311  ASSEMBLE_BINOP(addq);
312  break;
313  case kX64Sub32:
314  ASSEMBLE_BINOP(subl);
315  break;
316  case kX64Sub:
317  ASSEMBLE_BINOP(subq);
318  break;
319  case kX64And32:
320  ASSEMBLE_BINOP(andl);
321  break;
322  case kX64And:
323  ASSEMBLE_BINOP(andq);
324  break;
325  case kX64Cmp32:
326  ASSEMBLE_BINOP(cmpl);
327  break;
328  case kX64Cmp:
329  ASSEMBLE_BINOP(cmpq);
330  break;
331  case kX64Test32:
332  ASSEMBLE_BINOP(testl);
333  break;
334  case kX64Test:
335  ASSEMBLE_BINOP(testq);
336  break;
337  case kX64Imul32:
338  if (HasImmediateInput(instr, 1)) {
339  RegisterOrOperand input = i.InputRegisterOrOperand(0);
340  if (input.type == kRegister) {
341  __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
342  } else {
343  __ imull(i.OutputRegister(), input.operand, i.InputImmediate(1));
344  }
345  } else {
346  RegisterOrOperand input = i.InputRegisterOrOperand(1);
347  if (input.type == kRegister) {
348  __ imull(i.OutputRegister(), input.reg);
349  } else {
350  __ imull(i.OutputRegister(), input.operand);
351  }
352  }
353  break;
354  case kX64Imul:
355  if (HasImmediateInput(instr, 1)) {
356  RegisterOrOperand input = i.InputRegisterOrOperand(0);
357  if (input.type == kRegister) {
358  __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
359  } else {
360  __ imulq(i.OutputRegister(), input.operand, i.InputImmediate(1));
361  }
362  } else {
363  RegisterOrOperand input = i.InputRegisterOrOperand(1);
364  if (input.type == kRegister) {
365  __ imulq(i.OutputRegister(), input.reg);
366  } else {
367  __ imulq(i.OutputRegister(), input.operand);
368  }
369  }
370  break;
371  case kX64Idiv32:
372  __ cdq();
373  __ idivl(i.InputRegister(1));
374  break;
375  case kX64Idiv:
376  __ cqo();
377  __ idivq(i.InputRegister(1));
378  break;
379  case kX64Udiv32:
380  __ xorl(rdx, rdx);
381  __ divl(i.InputRegister(1));
382  break;
383  case kX64Udiv:
384  __ xorq(rdx, rdx);
385  __ divq(i.InputRegister(1));
386  break;
387  case kX64Not: {
388  RegisterOrOperand output = i.OutputRegisterOrOperand();
389  if (output.type == kRegister) {
390  __ notq(output.reg);
391  } else {
392  __ notq(output.operand);
393  }
394  break;
395  }
396  case kX64Not32: {
397  RegisterOrOperand output = i.OutputRegisterOrOperand();
398  if (output.type == kRegister) {
399  __ notl(output.reg);
400  } else {
401  __ notl(output.operand);
402  }
403  break;
404  }
405  case kX64Neg: {
406  RegisterOrOperand output = i.OutputRegisterOrOperand();
407  if (output.type == kRegister) {
408  __ negq(output.reg);
409  } else {
410  __ negq(output.operand);
411  }
412  break;
413  }
414  case kX64Neg32: {
415  RegisterOrOperand output = i.OutputRegisterOrOperand();
416  if (output.type == kRegister) {
417  __ negl(output.reg);
418  } else {
419  __ negl(output.operand);
420  }
421  break;
422  }
423  case kX64Or32:
424  ASSEMBLE_BINOP(orl);
425  break;
426  case kX64Or:
427  ASSEMBLE_BINOP(orq);
428  break;
429  case kX64Xor32:
430  ASSEMBLE_BINOP(xorl);
431  break;
432  case kX64Xor:
433  ASSEMBLE_BINOP(xorq);
434  break;
435  case kX64Shl32:
436  ASSEMBLE_SHIFT(shll, 5);
437  break;
438  case kX64Shl:
439  ASSEMBLE_SHIFT(shlq, 6);
440  break;
441  case kX64Shr32:
442  ASSEMBLE_SHIFT(shrl, 5);
443  break;
444  case kX64Shr:
445  ASSEMBLE_SHIFT(shrq, 6);
446  break;
447  case kX64Sar32:
448  ASSEMBLE_SHIFT(sarl, 5);
449  break;
450  case kX64Sar:
451  ASSEMBLE_SHIFT(sarq, 6);
452  break;
453  case kX64Ror32:
454  ASSEMBLE_SHIFT(rorl, 5);
455  break;
456  case kX64Ror:
457  ASSEMBLE_SHIFT(rorq, 6);
458  break;
459  case kSSEFloat64Cmp: {
460  RegisterOrOperand input = i.InputRegisterOrOperand(1);
461  if (input.type == kDoubleRegister) {
462  __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
463  } else {
464  __ ucomisd(i.InputDoubleRegister(0), input.operand);
465  }
466  break;
467  }
468  case kSSEFloat64Add:
469  __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
470  break;
471  case kSSEFloat64Sub:
472  __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
473  break;
474  case kSSEFloat64Mul:
475  __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
476  break;
477  case kSSEFloat64Div:
478  __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
479  break;
480  case kSSEFloat64Mod: {
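// SSE has no floating-point remainder instruction, so the remainder is
// computed on the x87 stack with fprem, spilling the inputs through a
// temporary stack slot.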
481  __ subq(rsp, Immediate(kDoubleSize));
482  // Move values to st(0) and st(1).
483  __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
484  __ fld_d(Operand(rsp, 0));
485  __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
486  __ fld_d(Operand(rsp, 0));
487  // Loop while fprem isn't done.
488  Label mod_loop;
489  __ bind(&mod_loop);
490  // This instruction traps on all kinds of inputs, but we are assuming the
491  // floating point control word is set to ignore them all.
492  __ fprem();
493  // The following 2 instructions implicitly use rax.
494  __ fnstsw_ax();
495  if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
496  __ sahf();
497  } else {
498  __ shrl(rax, Immediate(8));
499  __ andl(rax, Immediate(0xFF));
500  __ pushq(rax);
501  __ popfq();
502  }
503  __ j(parity_even, &mod_loop);
504  // Move output to stack and clean up.
505  __ fstp(1);
506  __ fstp_d(Operand(rsp, 0));
507  __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
508  __ addq(rsp, Immediate(kDoubleSize));
509  break;
510  }
511  case kSSEFloat64Sqrt: {
512  RegisterOrOperand input = i.InputRegisterOrOperand(0);
513  if (input.type == kDoubleRegister) {
514  __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
515  } else {
516  __ sqrtsd(i.OutputDoubleRegister(), input.operand);
517  }
518  break;
519  }
520  case kSSECvtss2sd:
521  __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
522  break;
523  case kSSECvtsd2ss:
524  __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
525  break;
526  case kSSEFloat64ToInt32: {
527  RegisterOrOperand input = i.InputRegisterOrOperand(0);
528  if (input.type == kDoubleRegister) {
529  __ cvttsd2si(i.OutputRegister(), input.double_reg);
530  } else {
531  __ cvttsd2si(i.OutputRegister(), input.operand);
532  }
533  break;
534  }
535  case kSSEFloat64ToUint32: {
536  RegisterOrOperand input = i.InputRegisterOrOperand(0);
537  if (input.type == kDoubleRegister) {
538  __ cvttsd2siq(i.OutputRegister(), input.double_reg);
539  } else {
540  __ cvttsd2siq(i.OutputRegister(), input.operand);
541  }
542  __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits.
543  // TODO(turbofan): generated code should not look at the upper 32 bits
544  // of the result, but those bits could escape to the outside world.
545  break;
546  }
547  case kSSEInt32ToFloat64: {
548  RegisterOrOperand input = i.InputRegisterOrOperand(0);
549  if (input.type == kRegister) {
550  __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
551  } else {
552  __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
553  }
554  break;
555  }
556  case kSSEUint32ToFloat64: {
557  // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
558  __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
559  break;
560  }
561  case kX64Movsxbl:
562  __ movsxbl(i.OutputRegister(), i.MemoryOperand());
563  break;
564  case kX64Movzxbl:
565  __ movzxbl(i.OutputRegister(), i.MemoryOperand());
566  break;
567  case kX64Movb: {
568  int index = 0;
569  Operand operand = i.MemoryOperand(&index);
570  if (HasImmediateInput(instr, index)) {
571  __ movb(operand, Immediate(i.InputInt8(index)));
572  } else {
573  __ movb(operand, i.InputRegister(index));
574  }
575  break;
576  }
577  case kX64Movsxwl:
578  __ movsxwl(i.OutputRegister(), i.MemoryOperand());
579  break;
580  case kX64Movzxwl:
581  __ movzxwl(i.OutputRegister(), i.MemoryOperand());
582  break;
583  case kX64Movw: {
584  int index = 0;
585  Operand operand = i.MemoryOperand(&index);
586  if (HasImmediateInput(instr, index)) {
587  __ movw(operand, Immediate(i.InputInt16(index)));
588  } else {
589  __ movw(operand, i.InputRegister(index));
590  }
591  break;
592  }
593  case kX64Movl:
594  if (instr->HasOutput()) {
595  if (instr->addressing_mode() == kMode_None) {
596  RegisterOrOperand input = i.InputRegisterOrOperand(0);
597  if (input.type == kRegister) {
598  __ movl(i.OutputRegister(), input.reg);
599  } else {
600  __ movl(i.OutputRegister(), input.operand);
601  }
602  } else {
603  __ movl(i.OutputRegister(), i.MemoryOperand());
604  }
605  } else {
606  int index = 0;
607  Operand operand = i.MemoryOperand(&index);
608  if (HasImmediateInput(instr, index)) {
609  __ movl(operand, i.InputImmediate(index));
610  } else {
611  __ movl(operand, i.InputRegister(index));
612  }
613  }
614  break;
615  case kX64Movsxlq: {
616  RegisterOrOperand input = i.InputRegisterOrOperand(0);
617  if (input.type == kRegister) {
618  __ movsxlq(i.OutputRegister(), input.reg);
619  } else {
620  __ movsxlq(i.OutputRegister(), input.operand);
621  }
622  break;
623  }
624  case kX64Movq:
625  if (instr->HasOutput()) {
626  __ movq(i.OutputRegister(), i.MemoryOperand());
627  } else {
628  int index = 0;
629  Operand operand = i.MemoryOperand(&index);
630  if (HasImmediateInput(instr, index)) {
631  __ movq(operand, i.InputImmediate(index));
632  } else {
633  __ movq(operand, i.InputRegister(index));
634  }
635  }
636  break;
637  case kX64Movss:
638  if (instr->HasOutput()) {
639  __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
640  } else {
641  int index = 0;
642  Operand operand = i.MemoryOperand(&index);
643  __ movss(operand, i.InputDoubleRegister(index));
644  }
645  break;
646  case kX64Movsd:
647  if (instr->HasOutput()) {
648  __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
649  } else {
650  int index = 0;
651  Operand operand = i.MemoryOperand(&index);
652  __ movsd(operand, i.InputDoubleRegister(index));
653  }
654  break;
655  case kX64Push:
656  if (HasImmediateInput(instr, 0)) {
657  __ pushq(i.InputImmediate(0));
658  } else {
659  RegisterOrOperand input = i.InputRegisterOrOperand(0);
660  if (input.type == kRegister) {
661  __ pushq(input.reg);
662  } else {
663  __ pushq(input.operand);
664  }
665  }
666  break;
667  case kX64StoreWriteBarrier: {
668  Register object = i.InputRegister(0);
669  Register index = i.InputRegister(1);
670  Register value = i.InputRegister(2);
671  __ movsxlq(index, index);
672  __ movq(Operand(object, index, times_1, 0), value);
673  __ leaq(index, Operand(object, index, times_1, 0));
674  SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
675  ? kSaveFPRegs
676  : kDontSaveFPRegs;
677  __ RecordWrite(object, index, value, mode);
678  break;
679  }
680  }
681 }
682 
683 
684 // Assembles branches after this instruction.
685 void CodeGenerator::AssembleArchBranch(Instruction* instr,
686  FlagsCondition condition) {
687  X64OperandConverter i(this, instr);
688  Label done;
689 
690  // Emit a branch. The true and false targets are always the last two inputs
691  // to the instruction.
692  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
693  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
694  bool fallthru = IsNextInAssemblyOrder(fblock);
695  Label* tlabel = code()->GetLabel(tblock);
696  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
697  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
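// For floating-point comparisons the parity flag is set when the operands
// are unordered (one of them is NaN), so the kUnordered* conditions branch
// on parity before testing the ordered condition.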
698  switch (condition) {
699  case kUnorderedEqual:
700  __ j(parity_even, flabel, flabel_distance);
701  // Fall through.
702  case kEqual:
703  __ j(equal, tlabel);
704  break;
705  case kUnorderedNotEqual:
706  __ j(parity_even, tlabel);
707  // Fall through.
708  case kNotEqual:
709  __ j(not_equal, tlabel);
710  break;
711  case kSignedLessThan:
712  __ j(less, tlabel);
713  break;
714  case kSignedGreaterThanOrEqual:
715  __ j(greater_equal, tlabel);
716  break;
717  case kSignedLessThanOrEqual:
718  __ j(less_equal, tlabel);
719  break;
720  case kSignedGreaterThan:
721  __ j(greater, tlabel);
722  break;
723  case kUnorderedLessThan:
724  __ j(parity_even, flabel, flabel_distance);
725  // Fall through.
726  case kUnsignedLessThan:
727  __ j(below, tlabel);
728  break;
729  case kUnorderedGreaterThanOrEqual:
730  __ j(parity_even, tlabel);
731  // Fall through.
732  case kUnsignedGreaterThanOrEqual:
733  __ j(above_equal, tlabel);
734  break;
735  case kUnorderedLessThanOrEqual:
736  __ j(parity_even, flabel, flabel_distance);
737  // Fall through.
738  case kUnsignedLessThanOrEqual:
739  __ j(below_equal, tlabel);
740  break;
741  case kUnorderedGreaterThan:
742  __ j(parity_even, tlabel);
743  // Fall through.
744  case kUnsignedGreaterThan:
745  __ j(above, tlabel);
746  break;
747  case kOverflow:
748  __ j(overflow, tlabel);
749  break;
750  case kNotOverflow:
751  __ j(no_overflow, tlabel);
752  break;
753  }
754  if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
755  __ bind(&done);
756 }
757 
758 
759 // Assembles boolean materializations after this instruction.
760 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
761  FlagsCondition condition) {
762  X64OperandConverter i(this, instr);
763  Label done;
764 
765  // Materialize a full 64-bit 1 or 0 value. The result register is always the
766  // last output of the instruction.
767  Label check;
768  DCHECK_NE(0, instr->OutputCount());
769  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
770  Condition cc = no_condition;
771  switch (condition) {
772  case kUnorderedEqual:
773  __ j(parity_odd, &check, Label::kNear);
774  __ movl(reg, Immediate(0));
775  __ jmp(&done, Label::kNear);
776  // Fall through.
777  case kEqual:
778  cc = equal;
779  break;
780  case kUnorderedNotEqual:
781  __ j(parity_odd, &check, Label::kNear);
782  __ movl(reg, Immediate(1));
783  __ jmp(&done, Label::kNear);
784  // Fall through.
785  case kNotEqual:
786  cc = not_equal;
787  break;
788  case kSignedLessThan:
789  cc = less;
790  break;
791  case kSignedGreaterThanOrEqual:
792  cc = greater_equal;
793  break;
794  case kSignedLessThanOrEqual:
795  cc = less_equal;
796  break;
797  case kSignedGreaterThan:
798  cc = greater;
799  break;
800  case kUnorderedLessThan:
801  __ j(parity_odd, &check, Label::kNear);
802  __ movl(reg, Immediate(0));
803  __ jmp(&done, Label::kNear);
804  // Fall through.
805  case kUnsignedLessThan:
806  cc = below;
807  break;
808  case kUnorderedGreaterThanOrEqual:
809  __ j(parity_odd, &check, Label::kNear);
810  __ movl(reg, Immediate(1));
811  __ jmp(&done, Label::kNear);
812  // Fall through.
813  case kUnsignedGreaterThanOrEqual:
814  cc = above_equal;
815  break;
816  case kUnorderedLessThanOrEqual:
817  __ j(parity_odd, &check, Label::kNear);
818  __ movl(reg, Immediate(0));
819  __ jmp(&done, Label::kNear);
820  // Fall through.
821  case kUnsignedLessThanOrEqual:
822  cc = below_equal;
823  break;
824  case kUnorderedGreaterThan:
825  __ j(parity_odd, &check, Label::kNear);
826  __ movl(reg, Immediate(1));
827  __ jmp(&done, Label::kNear);
828  // Fall through.
829  case kUnsignedGreaterThan:
830  cc = above;
831  break;
832  case kOverflow:
833  cc = overflow;
834  break;
835  case kNotOverflow:
836  cc = no_overflow;
837  break;
838  }
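// setcc writes only the low byte of the result register, so zero-extend it
// afterwards to produce a clean 0 or 1 in the full register.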
839  __ bind(&check);
840  __ setcc(cc, reg);
841  __ movzxbl(reg, reg);
842  __ bind(&done);
843 }
844 
845 
846 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
847  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
848  isolate(), deoptimization_id, Deoptimizer::LAZY);
849  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
850 }
851 
852 
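// Frame setup differs by call kind: C entry frames push rbp and any
// callee-saved registers, JS function frames use the standard Prologue(),
// and stub frames use StubPrologue(). Spill slots are reserved afterwards.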
853 void CodeGenerator::AssemblePrologue() {
854  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
855  int stack_slots = frame()->GetSpillSlotCount();
856  if (descriptor->kind() == CallDescriptor::kCallAddress) {
857  __ pushq(rbp);
858  __ movq(rbp, rsp);
859  const RegList saves = descriptor->CalleeSavedRegisters();
860  if (saves != 0) { // Save callee-saved registers.
861  int register_save_area_size = 0;
862  for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
863  if (!((1 << i) & saves)) continue;
864  __ pushq(Register::from_code(i));
865  register_save_area_size += kPointerSize;
866  }
867  frame()->SetRegisterSaveAreaSize(register_save_area_size);
868  }
869  } else if (descriptor->IsJSFunctionCall()) {
870  CompilationInfo* info = linkage()->info();
871  __ Prologue(info->IsCodePreAgingActive());
872  frame()->SetRegisterSaveAreaSize(
873  StandardFrameConstants::kFixedFrameSizeFromFp);
874 
875  // Sloppy mode functions and builtins need to replace the receiver with the
876  // global proxy when called as functions (without an explicit receiver
877  // object).
878  // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
879  if (info->strict_mode() == SLOPPY && !info->is_native()) {
880  Label ok;
881  StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
882  __ movp(rcx, args.GetReceiverOperand());
883  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
884  __ j(not_equal, &ok, Label::kNear);
885  __ movp(rcx, GlobalObjectOperand());
887  __ movp(args.GetReceiverOperand(), rcx);
888  __ bind(&ok);
889  }
890 
891  } else {
892  __ StubPrologue();
893  frame()->SetRegisterSaveAreaSize(
894  StandardFrameConstants::kFixedFrameSizeFromFp);
895  }
896  if (stack_slots > 0) {
897  __ subq(rsp, Immediate(stack_slots * kPointerSize));
898  }
899 }
900 
901 
902 void CodeGenerator::AssembleReturn() {
903  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
904  if (descriptor->kind() == CallDescriptor::kCallAddress) {
905  if (frame()->GetRegisterSaveAreaSize() > 0) {
906  // Remove this frame's spill slots first.
907  int stack_slots = frame()->GetSpillSlotCount();
908  if (stack_slots > 0) {
909  __ addq(rsp, Immediate(stack_slots * kPointerSize));
910  }
911  const RegList saves = descriptor->CalleeSavedRegisters();
912  // Restore registers.
913  if (saves != 0) {
914  for (int i = 0; i < Register::kNumRegisters; i++) {
915  if (!((1 << i) & saves)) continue;
916  __ popq(Register::from_code(i));
917  }
918  }
919  __ popq(rbp); // Pop caller's frame pointer.
920  __ ret(0);
921  } else {
922  // No saved registers.
923  __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
924  __ popq(rbp); // Pop caller's frame pointer.
925  __ ret(0);
926  }
927  } else {
928  __ movq(rsp, rbp); // Move stack pointer back to frame pointer.
929  __ popq(rbp); // Pop caller's frame pointer.
930  int pop_count = descriptor->IsJSFunctionCall()
931  ? static_cast<int>(descriptor->JSParameterCount())
932  : 0;
933  __ ret(pop_count * kPointerSize);
934  }
935 }
936 
937 
938 void CodeGenerator::AssembleMove(InstructionOperand* source,
939  InstructionOperand* destination) {
940  X64OperandConverter g(this, NULL);
941  // Dispatch on the source and destination operand kinds. Not all
942  // combinations are possible.
943  if (source->IsRegister()) {
944  DCHECK(destination->IsRegister() || destination->IsStackSlot());
945  Register src = g.ToRegister(source);
946  if (destination->IsRegister()) {
947  __ movq(g.ToRegister(destination), src);
948  } else {
949  __ movq(g.ToOperand(destination), src);
950  }
951  } else if (source->IsStackSlot()) {
952  DCHECK(destination->IsRegister() || destination->IsStackSlot());
953  Operand src = g.ToOperand(source);
954  if (destination->IsRegister()) {
955  Register dst = g.ToRegister(destination);
956  __ movq(dst, src);
957  } else {
958  // Spill on demand to use a temporary register for memory-to-memory
959  // moves.
960  Register tmp = kScratchRegister;
961  Operand dst = g.ToOperand(destination);
962  __ movq(tmp, src);
963  __ movq(dst, tmp);
964  }
965  } else if (source->IsConstant()) {
966  ConstantOperand* constant_source = ConstantOperand::cast(source);
967  Constant src = g.ToConstant(constant_source);
968  if (destination->IsRegister() || destination->IsStackSlot()) {
969  Register dst = destination->IsRegister() ? g.ToRegister(destination)
970  : kScratchRegister;
971  Immediate64 imm = g.ToImmediate64(constant_source);
972  switch (imm.type) {
973  case kImm64Value:
974  __ Set(dst, imm.value);
975  break;
976  case kImm64Reference:
977  __ Move(dst, imm.reference);
978  break;
979  case kImm64Handle:
980  __ Move(dst, imm.handle);
981  break;
982  }
983  if (destination->IsStackSlot()) {
984  __ movq(g.ToOperand(destination), kScratchRegister);
985  }
986  } else if (src.type() == Constant::kFloat32) {
987  // TODO(turbofan): Can we do better here?
988  __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32())));
989  if (destination->IsDoubleRegister()) {
990  XMMRegister dst = g.ToDoubleRegister(destination);
991  __ movq(dst, kScratchRegister);
992  } else {
993  DCHECK(destination->IsDoubleStackSlot());
994  Operand dst = g.ToOperand(destination);
995  __ movl(dst, kScratchRegister);
996  }
997  } else {
998  DCHECK_EQ(Constant::kFloat64, src.type());
999  __ movq(kScratchRegister, bit_cast<int64_t>(src.ToFloat64()));
1000  if (destination->IsDoubleRegister()) {
1001  __ movq(g.ToDoubleRegister(destination), kScratchRegister);
1002  } else {
1003  DCHECK(destination->IsDoubleStackSlot());
1004  __ movq(g.ToOperand(destination), kScratchRegister);
1005  }
1006  }
1007  } else if (source->IsDoubleRegister()) {
1008  XMMRegister src = g.ToDoubleRegister(source);
1009  if (destination->IsDoubleRegister()) {
1010  XMMRegister dst = g.ToDoubleRegister(destination);
1011  __ movsd(dst, src);
1012  } else {
1013  DCHECK(destination->IsDoubleStackSlot());
1014  Operand dst = g.ToOperand(destination);
1015  __ movsd(dst, src);
1016  }
1017  } else if (source->IsDoubleStackSlot()) {
1018  DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
1019  Operand src = g.ToOperand(source);
1020  if (destination->IsDoubleRegister()) {
1021  XMMRegister dst = g.ToDoubleRegister(destination);
1022  __ movsd(dst, src);
1023  } else {
1024  // We rely on having xmm0 available as a fixed scratch register.
1025  Operand dst = g.ToOperand(destination);
1026  __ movsd(xmm0, src);
1027  __ movsd(dst, xmm0);
1028  }
1029  } else {
1030  UNREACHABLE();
1031  }
1032 }
1033 
1034 
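// Swaps use xchgq whenever a general-purpose register is involved;
// memory-to-memory and XMM swaps go through the dedicated scratch
// register and xmm0 respectively.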
1035 void CodeGenerator::AssembleSwap(InstructionOperand* source,
1036  InstructionOperand* destination) {
1037  X64OperandConverter g(this, NULL);
1038  // Dispatch on the source and destination operand kinds. Not all
1039  // combinations are possible.
1040  if (source->IsRegister() && destination->IsRegister()) {
1041  // Register-register.
1042  __ xchgq(g.ToRegister(source), g.ToRegister(destination));
1043  } else if (source->IsRegister() && destination->IsStackSlot()) {
1044  Register src = g.ToRegister(source);
1045  Operand dst = g.ToOperand(destination);
1046  __ xchgq(src, dst);
1047  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
1048  (source->IsDoubleStackSlot() &&
1049  destination->IsDoubleStackSlot())) {
1050  // Memory-memory.
1051  Register tmp = kScratchRegister;
1052  Operand src = g.ToOperand(source);
1053  Operand dst = g.ToOperand(destination);
1054  __ movq(tmp, dst);
1055  __ xchgq(tmp, src);
1056  __ movq(dst, tmp);
1057  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
1058  // XMM register-register swap. We rely on having xmm0
1059  // available as a fixed scratch register.
1060  XMMRegister src = g.ToDoubleRegister(source);
1061  XMMRegister dst = g.ToDoubleRegister(destination);
1062  __ movsd(xmm0, src);
1063  __ movsd(src, dst);
1064  __ movsd(dst, xmm0);
1065  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
1066  // XMM register-memory swap. We rely on having xmm0
1067  // available as a fixed scratch register.
1068  XMMRegister src = g.ToDoubleRegister(source);
1069  Operand dst = g.ToOperand(destination);
1070  __ movsd(xmm0, src);
1071  __ movsd(src, dst);
1072  __ movsd(dst, xmm0);
1073  } else {
1074  // No other combinations are possible.
1075  UNREACHABLE();
1076  }
1077 }
1078 
1079 
1080 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
1081 
1082 
1083 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1084  int space_needed = Deoptimizer::patch_size();
1085  if (!linkage()->info()->IsStub()) {
1086  // Ensure that we have enough space after the previous lazy-bailout
1087  // instruction for patching the code here.
1088  int current_pc = masm()->pc_offset();
1089  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1090  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1091  __ Nop(padding_size);
1092  }
1093  }
1094  MarkLazyDeoptSite();
1095 }
1096 
1097 #undef __
1098 
1099 } // namespace compiler
1100 } // namespace internal
1101 } // namespace v8