V8 Project
code-generator-arm64.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/macro-assembler-arm64.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Register InputRegister32(int index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputRegister64(int index) { return InputRegister(index); }

  Operand InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }

  Operand InputOperand64(int index) { return InputOperand(index); }

  Operand InputOperand32(int index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

  MemOperand MemoryOperand(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          SXTW);
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand() {
    int index = 0;
    return MemoryOperand(&index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};
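
// Note on the converter above: the 32-bit accessors return the W view of the
// same register that the 64-bit accessors return as an X register, so the
// *32 opcodes below operate on w-registers while their 64-bit counterparts
// operate on x-registers. Also note that ToImmediate() materializes float
// constants as (tenured) heap numbers, so such "immediates" are tagged
// objects rather than raw bit patterns.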


#define ASSEMBLE_SHIFT(asm_instr, width)                                      \
  do {                                                                        \
    if (instr->InputAt(1)->IsRegister()) {                                    \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),      \
                   i.InputRegister##width(1));                                \
    } else {                                                                  \
      int64_t imm = i.InputOperand##width(1).immediate().value();             \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
    }                                                                         \
  } while (0);
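
// For illustration, ASSEMBLE_SHIFT(Lsl, 32) expands to roughly:
//
//   if (instr->InputAt(1)->IsRegister()) {
//     __ Lsl(i.OutputRegister32(), i.InputRegister32(0),
//            i.InputRegister32(1));
//   } else {
//     int64_t imm = i.InputOperand32(1).immediate().value();
//     __ Lsl(i.OutputRegister32(), i.InputRegister32(0), imm);
//   }
//
// i.e. the shift amount comes from a register when the second input is a
// register and from the encoded immediate otherwise.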


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
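        // The tagged Code pointer plus (Code::kHeaderSize - kHeapObjectTag)
        // yields the untagged address of the first instruction, which is then
        // called directly.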
        __ Call(target);
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ B(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Bic:
      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Bic32:
      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Madd:
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Madd32:
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Msub:
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Msub32:
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Mneg:
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mneg32:
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
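    // ARM64 has no remainder instruction, so the four modulo cases below
    // compute dividend - (dividend / divisor) * divisor. For example, a
    // signed 64-bit "x % y" is emitted as:
    //   sdiv temp, x, y
    //   msub result, temp, y, x  // result = x - temp * y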
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    // TODO(dcarney): use mvn instr??
    case kArm64Not:
      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
      break;
    case kArm64Neg:
      __ Neg(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Neg32:
      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Orn:
      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Orn32:
      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Eor:
      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Eor32:
      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Eon:
      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Eon32:
      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Sub:
      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                i.InputOperand32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
      }
      break;
    case kArm64Shl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Shl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Shr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Shr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Sar:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Sar32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
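    // The kArm64Claim/Poke/PokePair cases below deal with the stack area for
    // outgoing arguments: Claim reserves a number of pointer-sized slots, and
    // the Poke variants store registers into those slots (PokePair presumably
    // as a single paired store, with kArm64PokePairZero pairing the value
    // with xzr so that a zero slot is written alongside it).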
    case kArm64Claim: {
      int words = MiscField::decode(instr->opcode());
      __ Claim(words);
      break;
    }
    case kArm64Poke: {
      int slot = MiscField::decode(instr->opcode());
      Operand operand(slot * kPointerSize);
      __ Poke(i.InputRegister(0), operand);
      break;
    }
    case kArm64PokePairZero: {
      // TODO(dcarney): test slot offset and register order.
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
      break;
    }
    case kArm64PokePair: {
      int slot = MiscField::decode(instr->opcode()) - 1;
      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
      break;
    }
    case kArm64Cmp:
      __ Cmp(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float64Cmp:
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
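    // The conversion cases below use Fcvt for float32<->float64, Fcvtzs and
    // Fcvtzu for double-to-integer conversion (truncating toward zero), and
    // Scvtf/Ucvtf for signed/unsigned 32-bit integer to double.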
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrS:
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      break;
    case kArm64StrS:
      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
      break;
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
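    // The write-barrier case below stores the value and then calls
    // RecordWrite to record the write for the garbage collector. RecordWrite
    // clobbers lr, so when code is running on csp (a C frame) lr is saved
    // first; it is pushed together with a scratch register because csp must
    // stay 16-byte aligned. lr_status tells RecordWrite whether lr has
    // already been saved.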
    case kArm64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ Add(index, object, Operand(index, SXTW));
      __ Str(value, MemOperand(index));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      // TODO(dcarney): we shouldn't test write barriers from c calls.
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      UseScratchRegisterScope scope(masm());
      Register temp = no_reg;
      if (csp.is(masm()->StackPointer())) {
        temp = scope.AcquireX();
        lr_status = kLRHasBeenSaved;
        __ Push(lr, temp);  // Need to push a pair
      }
      __ RecordWrite(object, index, value, lr_status, mode);
      if (csp.is(masm()->StackPointer())) {
        __ Pop(temp, lr);
      }
      break;
    }
  }
}


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
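  // For floating-point comparisons, Fcmp sets the V flag when either operand
  // is NaN (an "unordered" result). The kUnordered* cases below therefore
  // test vs/vc first to send NaN outcomes to the appropriate target before
  // testing the ordinary condition.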
  switch (condition) {
    case kUnorderedEqual:
      __ B(vs, flabel);
    // Fall through.
    case kEqual:
      __ B(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ B(ne, tlabel);
      break;
    case kSignedLessThan:
      __ B(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ B(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ B(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ B(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ B(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ B(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ B(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ B(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ B(hi, tlabel);
      break;
    case kOverflow:
      __ B(vs, tlabel);
      break;
    case kNotOverflow:
      __ B(vc, tlabel);
      break;
  }
  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
  __ Bind(&done);
}


// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = nv;
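  // As in AssembleArchBranch, each kUnordered* case first tests vc: for an
  // ordered result control falls through to the check label and the register
  // is set from the condition code via Cset; for an unordered (NaN) result
  // the register is written with the fixed 0 or 1 directly and control skips
  // past the Cset.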
  switch (condition) {
    case kUnorderedEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ B(vc, &check);
      __ Mov(reg, 0);
      __ B(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ B(vc, &check);
      __ Mov(reg, 1);
      __ B(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
  __ bind(&check);
  __ Cset(reg, cc);
  __ Bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


// TODO(dcarney): increase stack slots in frame once before first use.
static int AlignedStackSlots(int stack_slots) {
  if (stack_slots & 1) stack_slots++;
  return stack_slots;
}
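

// The prologue and return sequences below work with two stack pointers: csp
// for frames using the C calling convention (kCallAddress) and jssp for
// JavaScript and stub frames. csp must stay 16-byte aligned, which is why
// AlignedStackSlots() above rounds the spill-slot count up to an even number
// of 8-byte slots before csp is adjusted.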
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
    // TODO(dcarney): correct callee saved registers.
    __ PushCalleeSavedRegisters();
    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ SetStackPointer(jssp);
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
      __ Bind(&ok);
    }

  } else {
    __ SetStackPointer(jssp);
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    Register sp = __ StackPointer();
    if (!sp.Is(csp)) {
      __ Sub(sp, sp, stack_slots * kPointerSize);
    }
    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
      }
      // Restore registers.
      // TODO(dcarney): correct callee saved registers.
      __ PopCalleeSavedRegisters();
    }
    __ Mov(csp, fp);
    __ Pop(fp, lr);
    __ Ret();
  } else {
    __ Mov(jssp, fp);
    __ Pop(fp, lr);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}
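

// AssembleMove and AssembleSwap below are invoked by the gap resolver to
// implement parallel moves between register-allocator operands. Constants and
// stack-slot-to-stack-slot transfers that cannot be expressed as a single
// ARM64 instruction are routed through scratch registers.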
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
        __ LoadObject(dst, src.ToHeapObject());
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireS();
        __ Fmov(temp, src.ToFloat32());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireD();
        __ Fmov(temp, src.ToFloat64());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    CPURegister temp_0 = scope.AcquireX();
    CPURegister temp_1 = scope.AcquireX();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


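// Writing to xzr discards the result, so "movz xzr, #0" behaves as a nop; it
// is presumably emitted as a recognizable marker/padding instruction for the
// inlined smi-check machinery.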
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
905 
906 
907 void CodeGenerator::EnsureSpaceForLazyDeopt() {
908  int space_needed = Deoptimizer::patch_size();
909  if (!linkage()->info()->IsStub()) {
910  // Ensure that we have enough space after the previous lazy-bailout
911  // instruction for patching the code here.
912  intptr_t current_pc = masm()->pc_offset();
913 
914  if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
915  intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
916  DCHECK((padding_size % kInstructionSize) == 0);
917  InstructionAccurateScope instruction_accurate(
918  masm(), padding_size / kInstructionSize);
919 
920  while (padding_size > 0) {
921  __ nop();
922  padding_size -= kInstructionSize;
923  }
924  }
925  }
926  MarkLazyDeoptSite();
927 }

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8