code-generator-arm.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r9


// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter FINAL : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  SwVfpRegister OutputFloat32Register(int index = 0) {
    return ToFloat32Register(instr_->OutputAt(index));
  }

  SwVfpRegister InputFloat32Register(int index) {
    return ToFloat32Register(instr_->InputAt(index));
  }

  SwVfpRegister ToFloat32Register(InstructionOperand* op) {
    return ToFloat64Register(op).low();
  }

  LowDwVfpRegister OutputFloat64Register(int index = 0) {
    return ToFloat64Register(instr_->OutputAt(index));
  }

  LowDwVfpRegister InputFloat64Register(int index) {
    return ToFloat64Register(instr_->InputAt(index));
  }

  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
  }

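  // Translates the instruction's flags mode into the ARM S bit: instructions
  // that feed a branch or materialize a boolean must set the condition flags.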
  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

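  // Builds the ARM "Operand2" (flexible second operand) from the instruction's
  // addressing mode: an immediate, a plain register, or a shifted register.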
  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }

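  // Builds a MemOperand for load/store instructions from the addressing mode,
  // advancing *first_index past the inputs it consumes.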
  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset() {
    int index = 0;
    return InputOffset(&index);
  }

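  // Resolves a spilled operand to its stack slot, addressed relative to sp or
  // fp as computed by the linkage.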
  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchJmp:
      __ b(code_->GetLabel(i.InputBlock(0)));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // don't emit code for nops.
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
                               i.InputFloat64Register(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputFloat64Register(0),
                              i.InputFloat64Register(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputFloat64Register());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVsqrtF64:
      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVcvtF32F64: {
      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64F32: {
      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStrb: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF32: {
      __ vldr(i.OutputFloat32Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVstrF32: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat32Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF64:
      __ vldr(i.OutputFloat64Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVstrF64: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat64Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
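    // Store {value} into {object} at {index} and record the write so the
    // garbage collector's write barrier sees it.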
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LRHasBeenSaved lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  switch (condition) {
    case kUnorderedEqual:
      __ b(vs, flabel);
    // Fall through.
    case kEqual:
      __ b(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ b(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ b(ne, tlabel);
      break;
    case kSignedLessThan:
      __ b(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ b(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ b(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ b(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ b(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ b(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ b(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ b(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ b(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ b(hi, tlabel);
      break;
    case kOverflow:
      __ b(vs, tlabel);
      break;
    case kNotOverflow:
      __ b(vc, tlabel);
      break;
  }
  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  switch (condition) {
    case kUnorderedEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
  __ bind(&check);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
  __ bind(&done);
}


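// Emits a call to the lazy deoptimization entry for the given bailout id.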
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


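// Builds the stack frame on entry: pushes lr/fp (and pp with an out-of-line
// constant pool), saves callee-saved registers for C entry frames, and
// reserves the spill slot area.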
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    bool saved_pp;
    if (FLAG_enable_ool_constant_pool) {
      __ Push(lr, fp, pp);
      // Adjust FP to point to saved FP.
      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      saved_pp = true;
    } else {
      __ Push(lr, fp);
      __ mov(fp, sp);
      saved_pp = false;
    }
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0 || saved_pp) {
      // Save callee-saved registers.
      int register_save_area_size = saved_pp ? kPointerSize : 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);
      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ bind(&ok);
    }

  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


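// Tears down the frame built by AssemblePrologue and returns to the caller,
// dropping any JS parameters that were pushed by the caller.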
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else {
    __ LeaveFrame(StackFrame::MANUAL);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      SwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat32Register(destination)
                              : kScratchDoubleReg.low();
      // TODO(turbofan): Can we do better here?
      __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
      __ vmov(dst, ip);
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat64Register(destination)
                              : kScratchDoubleReg;
      __ vmov(dst, src.ToFloat64());
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
}


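// Pads the code with nops, if necessary, so that the deoptimizer can later
// patch a call at the previous lazy-bailout site without clobbering the code
// that follows.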
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8