lithium-codegen-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/arm64/lithium-codegen-arm64.h"
8 #include "src/arm64/lithium-gap-resolver-arm64.h"
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/hydrogen-osr.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
15 
16 namespace v8 {
17 namespace internal {
18 
19 
20 class SafepointGenerator FINAL : public CallWrapper {
21  public:
22  SafepointGenerator(LCodeGen* codegen,
23  LPointerMap* pointers,
24  Safepoint::DeoptMode mode)
25  : codegen_(codegen),
26  pointers_(pointers),
27  deopt_mode_(mode) { }
28  virtual ~SafepointGenerator() { }
29 
30  virtual void BeforeCall(int call_size) const { }
31 
32  virtual void AfterCall() const {
33  codegen_->RecordSafepoint(pointers_, deopt_mode_);
34  }
35 
36  private:
37  LCodeGen* codegen_;
38  LPointerMap* pointers_;
39  Safepoint::DeoptMode deopt_mode_;
40 };
41 
42 
43 #define __ masm()->
44 
45 // Emit code to branch if the given condition holds.
46 // The code generated here doesn't modify the flags and they must have
47 // been set by some prior instructions.
48 //
49 // The EmitInverted function simply inverts the condition.
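// (EmitInverted is used by EmitBranchGeneric, below, when the true target is
// the fall-through block, so only the false target needs an explicit branch.)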
50 class BranchOnCondition : public BranchGenerator {
51  public:
52  BranchOnCondition(LCodeGen* codegen, Condition cond)
53  : BranchGenerator(codegen),
54  cond_(cond) { }
55 
56  virtual void Emit(Label* label) const {
57  __ B(cond_, label);
58  }
59 
60  virtual void EmitInverted(Label* label) const {
61  if (cond_ != al) {
62  __ B(NegateCondition(cond_), label);
63  }
64  }
65 
66  private:
67  Condition cond_;
68 };
69 
70 
71 // Emit code to compare lhs and rhs and branch if the condition holds.
72 // This uses MacroAssembler's CompareAndBranch function so it will handle
73 // converting the comparison to Cbz/Cbnz if the right-hand side is 0.
74 //
75 // EmitInverted still compares the two operands but inverts the condition.
76 class CompareAndBranch : public BranchGenerator {
77  public:
78  CompareAndBranch(LCodeGen* codegen,
79  Condition cond,
80  const Register& lhs,
81  const Operand& rhs)
82  : BranchGenerator(codegen),
83  cond_(cond),
84  lhs_(lhs),
85  rhs_(rhs) { }
86 
87  virtual void Emit(Label* label) const {
88  __ CompareAndBranch(lhs_, rhs_, cond_, label);
89  }
90 
91  virtual void EmitInverted(Label* label) const {
92  __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
93  }
94 
95  private:
96  Condition cond_;
97  const Register& lhs_;
98  const Operand& rhs_;
99 };
100 
101 
102 // Test the input with the given mask and branch if the condition holds.
103 // If the condition is 'eq' or 'ne' this will use MacroAssembler's
104 // TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
105 // conversion to Tbz/Tbnz when possible.
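// (Tbz and Tbnz test a single bit, so that conversion only applies when the
// mask has exactly one bit set; otherwise Tst followed by a conditional branch
// is emitted, as in the default cases below.)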
106 class TestAndBranch : public BranchGenerator {
107  public:
108  TestAndBranch(LCodeGen* codegen,
109  Condition cond,
110  const Register& value,
111  uint64_t mask)
112  : BranchGenerator(codegen),
113  cond_(cond),
114  value_(value),
115  mask_(mask) { }
116 
117  virtual void Emit(Label* label) const {
118  switch (cond_) {
119  case eq:
120  __ TestAndBranchIfAllClear(value_, mask_, label);
121  break;
122  case ne:
123  __ TestAndBranchIfAnySet(value_, mask_, label);
124  break;
125  default:
126  __ Tst(value_, mask_);
127  __ B(cond_, label);
128  }
129  }
130 
131  virtual void EmitInverted(Label* label) const {
132  // The inverse of "all clear" is "any set" and vice versa.
133  switch (cond_) {
134  case eq:
135  __ TestAndBranchIfAnySet(value_, mask_, label);
136  break;
137  case ne:
138  __ TestAndBranchIfAllClear(value_, mask_, label);
139  break;
140  default:
141  __ Tst(value_, mask_);
142  __ B(NegateCondition(cond_), label);
143  }
144  }
145 
146  private:
147  Condition cond_;
148  const Register& value_;
149  uint64_t mask_;
150 };
151 
152 
153 // Test the input and branch if it is non-zero and not a NaN.
154 class BranchIfNonZeroNumber : public BranchGenerator {
155  public:
156  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
157  const FPRegister& scratch)
158  : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
159 
160  virtual void Emit(Label* label) const {
161  __ Fabs(scratch_, value_);
162  // Compare with 0.0. Because scratch_ is positive, the result can be one of
163  // nZCv (equal), nzCv (greater) or nzCV (unordered).
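  // Only the 'greater' case satisfies gt (Z clear and N == V), so the branch
  // below is not taken for zero or NaN inputs.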
164  __ Fcmp(scratch_, 0.0);
165  __ B(gt, label);
166  }
167 
168  virtual void EmitInverted(Label* label) const {
169  __ Fabs(scratch_, value_);
170  __ Fcmp(scratch_, 0.0);
171  __ B(le, label);
172  }
173 
174  private:
175  const FPRegister& value_;
176  const FPRegister& scratch_;
177 };
178 
179 
180 // Test the input and branch if it is a heap number.
181 class BranchIfHeapNumber : public BranchGenerator {
182  public:
183  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
184  : BranchGenerator(codegen), value_(value) { }
185 
186  virtual void Emit(Label* label) const {
187  __ JumpIfHeapNumber(value_, label);
188  }
189 
190  virtual void EmitInverted(Label* label) const {
191  __ JumpIfNotHeapNumber(value_, label);
192  }
193 
194  private:
195  const Register& value_;
196 };
197 
198 
199 // Test the input and branch if it is the specified root value.
200 class BranchIfRoot : public BranchGenerator {
201  public:
202  BranchIfRoot(LCodeGen* codegen, const Register& value,
203  Heap::RootListIndex index)
204  : BranchGenerator(codegen), value_(value), index_(index) { }
205 
206  virtual void Emit(Label* label) const {
207  __ JumpIfRoot(value_, index_, label);
208  }
209 
210  virtual void EmitInverted(Label* label) const {
211  __ JumpIfNotRoot(value_, index_, label);
212  }
213 
214  private:
215  const Register& value_;
216  const Heap::RootListIndex index_;
217 };
218 
219 
220 void LCodeGen::WriteTranslation(LEnvironment* environment,
221  Translation* translation) {
222  if (environment == NULL) return;
223 
224  // The translation includes one command per value in the environment.
225  int translation_size = environment->translation_size();
226  // The output frame height does not include the parameters.
227  int height = translation_size - environment->parameter_count();
228 
229  WriteTranslation(environment->outer(), translation);
230  bool has_closure_id = !info()->closure().is_null() &&
231  !info()->closure().is_identical_to(environment->closure());
232  int closure_id = has_closure_id
233  ? DefineDeoptimizationLiteral(environment->closure())
234  : Translation::kSelfLiteralId;
235 
236  switch (environment->frame_type()) {
237  case JS_FUNCTION:
238  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
239  break;
240  case JS_CONSTRUCT:
241  translation->BeginConstructStubFrame(closure_id, translation_size);
242  break;
243  case JS_GETTER:
244  DCHECK(translation_size == 1);
245  DCHECK(height == 0);
246  translation->BeginGetterStubFrame(closure_id);
247  break;
248  case JS_SETTER:
249  DCHECK(translation_size == 2);
250  DCHECK(height == 0);
251  translation->BeginSetterStubFrame(closure_id);
252  break;
253  case STUB:
254  translation->BeginCompiledStubFrame();
255  break;
256  case ARGUMENTS_ADAPTOR:
257  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
258  break;
259  default:
260  UNREACHABLE();
261  }
262 
263  int object_index = 0;
264  int dematerialized_index = 0;
265  for (int i = 0; i < translation_size; ++i) {
266  LOperand* value = environment->values()->at(i);
267 
268  AddToTranslation(environment,
269  translation,
270  value,
271  environment->HasTaggedValueAt(i),
272  environment->HasUint32ValueAt(i),
273  &object_index,
274  &dematerialized_index);
275  }
276 }
277 
278 
279 void LCodeGen::AddToTranslation(LEnvironment* environment,
280  Translation* translation,
281  LOperand* op,
282  bool is_tagged,
283  bool is_uint32,
284  int* object_index_pointer,
285  int* dematerialized_index_pointer) {
286  if (op == LEnvironment::materialization_marker()) {
287  int object_index = (*object_index_pointer)++;
288  if (environment->ObjectIsDuplicateAt(object_index)) {
289  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
290  translation->DuplicateObject(dupe_of);
291  return;
292  }
293  int object_length = environment->ObjectLengthAt(object_index);
294  if (environment->ObjectIsArgumentsAt(object_index)) {
295  translation->BeginArgumentsObject(object_length);
296  } else {
297  translation->BeginCapturedObject(object_length);
298  }
299  int dematerialized_index = *dematerialized_index_pointer;
300  int env_offset = environment->translation_size() + dematerialized_index;
301  *dematerialized_index_pointer += object_length;
302  for (int i = 0; i < object_length; ++i) {
303  LOperand* value = environment->values()->at(env_offset + i);
304  AddToTranslation(environment,
305  translation,
306  value,
307  environment->HasTaggedValueAt(env_offset + i),
308  environment->HasUint32ValueAt(env_offset + i),
309  object_index_pointer,
310  dematerialized_index_pointer);
311  }
312  return;
313  }
314 
315  if (op->IsStackSlot()) {
316  if (is_tagged) {
317  translation->StoreStackSlot(op->index());
318  } else if (is_uint32) {
319  translation->StoreUint32StackSlot(op->index());
320  } else {
321  translation->StoreInt32StackSlot(op->index());
322  }
323  } else if (op->IsDoubleStackSlot()) {
324  translation->StoreDoubleStackSlot(op->index());
325  } else if (op->IsRegister()) {
326  Register reg = ToRegister(op);
327  if (is_tagged) {
328  translation->StoreRegister(reg);
329  } else if (is_uint32) {
330  translation->StoreUint32Register(reg);
331  } else {
332  translation->StoreInt32Register(reg);
333  }
334  } else if (op->IsDoubleRegister()) {
335  DoubleRegister reg = ToDoubleRegister(op);
336  translation->StoreDoubleRegister(reg);
337  } else if (op->IsConstantOperand()) {
338  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
339  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
340  translation->StoreLiteral(src_index);
341  } else {
342  UNREACHABLE();
343  }
344 }
345 
346 
347 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
348  int result = deoptimization_literals_.length();
349  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
350  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
351  }
352  deoptimization_literals_.Add(literal, zone());
353  return result;
354 }
355 
356 
357 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
358  Safepoint::DeoptMode mode) {
359  environment->set_has_been_used();
360  if (!environment->HasBeenRegistered()) {
361  int frame_count = 0;
362  int jsframe_count = 0;
363  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
364  ++frame_count;
365  if (e->frame_type() == JS_FUNCTION) {
366  ++jsframe_count;
367  }
368  }
369  Translation translation(&translations_, frame_count, jsframe_count, zone());
370  WriteTranslation(environment, &translation);
371  int deoptimization_index = deoptimizations_.length();
372  int pc_offset = masm()->pc_offset();
373  environment->Register(deoptimization_index,
374  translation.index(),
375  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
376  deoptimizations_.Add(environment, zone());
377  }
378 }
379 
380 
381 void LCodeGen::CallCode(Handle<Code> code,
382  RelocInfo::Mode mode,
383  LInstruction* instr) {
384  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
385 }
386 
387 
388 void LCodeGen::CallCodeGeneric(Handle<Code> code,
389  RelocInfo::Mode mode,
390  LInstruction* instr,
391  SafepointMode safepoint_mode) {
392  DCHECK(instr != NULL);
393 
395  __ Call(code, mode);
396  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
397 
398  if ((code->kind() == Code::BINARY_OP_IC) ||
399  (code->kind() == Code::COMPARE_IC)) {
400  // Signal that we don't inline smi code before these stubs in the
401  // optimizing code generator.
402  InlineSmiCheckInfo::EmitNotInlined(masm());
403  }
404 }
405 
406 
407 void LCodeGen::DoCallFunction(LCallFunction* instr) {
408  DCHECK(ToRegister(instr->context()).is(cp));
409  DCHECK(ToRegister(instr->function()).Is(x1));
410  DCHECK(ToRegister(instr->result()).Is(x0));
411 
412  int arity = instr->arity();
413  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
414  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
415  after_push_argument_ = false;
416 }
417 
418 
419 void LCodeGen::DoCallNew(LCallNew* instr) {
420  DCHECK(ToRegister(instr->context()).is(cp));
421  DCHECK(instr->IsMarkedAsCall());
422  DCHECK(ToRegister(instr->constructor()).is(x1));
423 
424  __ Mov(x0, instr->arity());
425  // No cell in x2 for construct type feedback in optimized code.
426  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
427 
428  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
429  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
430  after_push_argument_ = false;
431 
432  DCHECK(ToRegister(instr->result()).is(x0));
433 }
434 
435 
436 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
437  DCHECK(instr->IsMarkedAsCall());
438  DCHECK(ToRegister(instr->context()).is(cp));
439  DCHECK(ToRegister(instr->constructor()).is(x1));
440 
441  __ Mov(x0, Operand(instr->arity()));
442  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
443 
444  ElementsKind kind = instr->hydrogen()->elements_kind();
445  AllocationSiteOverrideMode override_mode =
446  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
447  ? DISABLE_ALLOCATION_SITES
448  : DONT_OVERRIDE;
449 
450  if (instr->arity() == 0) {
451  ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
452  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
453  } else if (instr->arity() == 1) {
454  Label done;
455  if (IsFastPackedElementsKind(kind)) {
456  Label packed_case;
457 
458  // We might need to create a holey array; look at the first argument.
459  __ Peek(x10, 0);
460  __ Cbz(x10, &packed_case);
461 
462  ElementsKind holey_kind = GetHoleyElementsKind(kind);
463  ArraySingleArgumentConstructorStub stub(isolate(),
464  holey_kind,
465  override_mode);
466  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
467  __ B(&done);
468  __ Bind(&packed_case);
469  }
470 
471  ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
472  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
473  __ Bind(&done);
474  } else {
475  ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
476  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
477  }
478  after_push_argument_ = false;
479 
480  DCHECK(ToRegister(instr->result()).is(x0));
481 }
482 
483 
484 void LCodeGen::CallRuntime(const Runtime::Function* function,
485  int num_arguments,
486  LInstruction* instr,
487  SaveFPRegsMode save_doubles) {
488  DCHECK(instr != NULL);
489 
490  __ CallRuntime(function, num_arguments, save_doubles);
491 
492  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
493 }
494 
495 
496 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
497  if (context->IsRegister()) {
498  __ Mov(cp, ToRegister(context));
499  } else if (context->IsStackSlot()) {
500  __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
501  } else if (context->IsConstantOperand()) {
502  HConstant* constant =
503  chunk_->LookupConstant(LConstantOperand::cast(context));
504  __ LoadHeapObject(cp,
505  Handle<HeapObject>::cast(constant->handle(isolate())));
506  } else {
507  UNREACHABLE();
508  }
509 }
510 
511 
512 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
513  int argc,
514  LInstruction* instr,
515  LOperand* context) {
516  LoadContextFromDeferred(context);
517  __ CallRuntimeSaveDoubles(id);
518  RecordSafepointWithRegisters(
519  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
520 }
521 
522 
523 void LCodeGen::RecordAndWritePosition(int position) {
524  if (position == RelocInfo::kNoPosition) return;
525  masm()->positions_recorder()->RecordPosition(position);
526  masm()->positions_recorder()->WriteRecordedPositions();
527 }
528 
529 
530 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
531  SafepointMode safepoint_mode) {
532  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
533  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
534  } else {
535  DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
536  RecordSafepointWithRegisters(
537  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
538  }
539 }
540 
541 
542 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
543  Safepoint::Kind kind,
544  int arguments,
545  Safepoint::DeoptMode deopt_mode) {
546  DCHECK(expected_safepoint_kind_ == kind);
547 
548  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
549  Safepoint safepoint = safepoints_.DefineSafepoint(
550  masm(), kind, arguments, deopt_mode);
551 
552  for (int i = 0; i < operands->length(); i++) {
553  LOperand* pointer = operands->at(i);
554  if (pointer->IsStackSlot()) {
555  safepoint.DefinePointerSlot(pointer->index(), zone());
556  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
557  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
558  }
559  }
560 
561  if (kind & Safepoint::kWithRegisters) {
562  // Register cp always contains a pointer to the context.
563  safepoint.DefinePointerRegister(cp, zone());
564  }
565 }
566 
567 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
568  Safepoint::DeoptMode deopt_mode) {
569  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
570 }
571 
572 
573 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
574  LPointerMap empty_pointers(zone());
575  RecordSafepoint(&empty_pointers, deopt_mode);
576 }
577 
578 
579 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
580  int arguments,
581  Safepoint::DeoptMode deopt_mode) {
582  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
583 }
584 
585 
586 bool LCodeGen::GenerateCode() {
587  LPhase phase("Z_Code generation", chunk());
588  DCHECK(is_unused());
589  status_ = GENERATING;
590 
591  // Open a frame scope to indicate that there is a frame on the stack. The
592  // NONE indicates that the scope shouldn't actually generate code to set up
593  // the frame (that is done in GeneratePrologue).
594  FrameScope frame_scope(masm_, StackFrame::NONE);
595 
596  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
597  GenerateJumpTable() && GenerateSafepointTable();
598 }
599 
600 
601 void LCodeGen::SaveCallerDoubles() {
602  DCHECK(info()->saves_caller_doubles());
603  DCHECK(NeedsEagerFrame());
604  Comment(";;; Save clobbered callee double registers");
605  BitVector* doubles = chunk()->allocated_double_registers();
606  BitVector::Iterator iterator(doubles);
607  int count = 0;
608  while (!iterator.Done()) {
609  // TODO(all): Is this supposed to save just the callee-saved doubles? It
610  // looks like it's saving all of them.
611  FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
612  __ Poke(value, count * kDoubleSize);
613  iterator.Advance();
614  count++;
615  }
616 }
617 
618 
619 void LCodeGen::RestoreCallerDoubles() {
620  DCHECK(info()->saves_caller_doubles());
621  DCHECK(NeedsEagerFrame());
622  Comment(";;; Restore clobbered callee double registers");
623  BitVector* doubles = chunk()->allocated_double_registers();
624  BitVector::Iterator iterator(doubles);
625  int count = 0;
626  while (!iterator.Done()) {
627  // TODO(all): Is this supposed to restore just the callee-saved doubles? It
628  // looks like it's restoring all of them.
629  FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
630  __ Peek(value, count * kDoubleSize);
631  iterator.Advance();
632  count++;
633  }
634 }
635 
636 
637 bool LCodeGen::GeneratePrologue() {
638  DCHECK(is_generating());
639 
640  if (info()->IsOptimizing()) {
641  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
642 
643  // TODO(all): Add support for stop_t FLAG in DEBUG mode.
644 
645  // Sloppy mode functions and builtins need to replace the receiver with the
646  // global proxy when called as functions (without an explicit receiver
647  // object).
648  if (info_->this_has_uses() &&
649  info_->strict_mode() == SLOPPY &&
650  !info_->is_native()) {
651  Label ok;
652  int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
653  __ Peek(x10, receiver_offset);
654  __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
655 
656  __ Ldr(x10, GlobalObjectMemOperand());
657  __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
658  __ Poke(x10, receiver_offset);
659 
660  __ Bind(&ok);
661  }
662  }
663 
664  DCHECK(__ StackPointer().Is(jssp));
665  info()->set_prologue_offset(masm_->pc_offset());
666  if (NeedsEagerFrame()) {
667  if (info()->IsStub()) {
668  __ StubPrologue();
669  } else {
670  __ Prologue(info()->IsCodePreAgingActive());
671  }
672  frame_is_built_ = true;
673  info_->AddNoFrameRange(0, masm_->pc_offset());
674  }
675 
676  // Reserve space for the stack slots needed by the code.
677  int slots = GetStackSlotCount();
678  if (slots > 0) {
679  __ Claim(slots, kPointerSize);
680  }
681 
682  if (info()->saves_caller_doubles()) {
683  SaveCallerDoubles();
684  }
685 
686  // Allocate a local context if needed.
687  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
688  if (heap_slots > 0) {
689  Comment(";;; Allocate local context");
690  bool need_write_barrier = true;
691  // Argument to NewContext is the function, which is in x1.
692  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
693  FastNewContextStub stub(isolate(), heap_slots);
694  __ CallStub(&stub);
695  // Result of FastNewContextStub is always in new space.
696  need_write_barrier = false;
697  } else {
698  __ Push(x1);
699  __ CallRuntime(Runtime::kNewFunctionContext, 1);
700  }
701  RecordSafepoint(Safepoint::kNoLazyDeopt);
702  // Context is returned in x0. It replaces the context passed to us. It's
703  // saved in the stack and kept live in cp.
704  __ Mov(cp, x0);
705  __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
706  // Copy any necessary parameters into the context.
707  int num_parameters = scope()->num_parameters();
708  for (int i = 0; i < num_parameters; i++) {
709  Variable* var = scope()->parameter(i);
710  if (var->IsContextSlot()) {
711  Register value = x0;
712  Register scratch = x3;
713 
714  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
715  (num_parameters - 1 - i) * kPointerSize;
716  // Load parameter from stack.
717  __ Ldr(value, MemOperand(fp, parameter_offset));
718  // Store it in the context.
719  MemOperand target = ContextMemOperand(cp, var->index());
720  __ Str(value, target);
721  // Update the write barrier. This clobbers value and scratch.
722  if (need_write_barrier) {
723  __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
724  GetLinkRegisterState(), kSaveFPRegs);
725  } else if (FLAG_debug_code) {
726  Label done;
727  __ JumpIfInNewSpace(cp, &done);
728  __ Abort(kExpectedNewSpaceObject);
729  __ bind(&done);
730  }
731  }
732  }
733  Comment(";;; End allocate local context");
734  }
735 
736  // Trace the call.
737  if (FLAG_trace && info()->IsOptimizing()) {
738  // We have not executed any compiled code yet, so cp still holds the
739  // incoming context.
740  __ CallRuntime(Runtime::kTraceEnter, 0);
741  }
742 
743  return !is_aborted();
744 }
745 
746 
747 void LCodeGen::GenerateOsrPrologue() {
748  // Generate the OSR entry prologue at the first unknown OSR value, or if there
749  // are none, at the OSR entrypoint instruction.
750  if (osr_pc_offset_ >= 0) return;
751 
752  osr_pc_offset_ = masm()->pc_offset();
753 
754  // Adjust the frame size, subsuming the unoptimized frame into the
755  // optimized frame.
756  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
757  DCHECK(slots >= 0);
758  __ Claim(slots);
759 }
760 
761 
762 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
763  if (instr->IsCall()) {
764  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
765  }
766  if (!instr->IsLazyBailout() && !instr->IsGap()) {
767  safepoints_.BumpLastLazySafepointIndex();
768  }
769 }
770 
771 
772 bool LCodeGen::GenerateDeferredCode() {
773  DCHECK(is_generating());
774  if (deferred_.length() > 0) {
775  for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
776  LDeferredCode* code = deferred_[i];
777 
778  HValue* value =
779  instructions_->at(code->instruction_index())->hydrogen_value();
780  RecordAndWritePosition(
781  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
782 
783  Comment(";;; <@%d,#%d> "
784  "-------------------- Deferred %s --------------------",
785  code->instruction_index(),
786  code->instr()->hydrogen_value()->id(),
787  code->instr()->Mnemonic());
788 
789  __ Bind(code->entry());
790 
791  if (NeedsDeferredFrame()) {
792  Comment(";;; Build frame");
793  DCHECK(!frame_is_built_);
794  DCHECK(info()->IsStub());
795  frame_is_built_ = true;
796  __ Push(lr, fp, cp);
797  __ Mov(fp, Smi::FromInt(StackFrame::STUB));
798  __ Push(fp);
799  __ Add(fp, __ StackPointer(),
800  StandardFrameConstants::kFixedFrameSizeFromFp);
801  Comment(";;; Deferred code");
802  }
803 
804  code->Generate();
805 
806  if (NeedsDeferredFrame()) {
807  Comment(";;; Destroy frame");
808  DCHECK(frame_is_built_);
809  __ Pop(xzr, cp, fp, lr);
810  frame_is_built_ = false;
811  }
812 
813  __ B(code->exit());
814  }
815  }
816 
817  // Force constant pool emission at the end of the deferred code to make
818  // sure that no constant pools are emitted after deferred code because
819  // deferred code generation is the last step which generates code. The two
820  // following steps will only output data used by Crankshaft.
821  masm()->CheckConstPool(true, false);
822 
823  return !is_aborted();
824 }
825 
826 
827 bool LCodeGen::GenerateJumpTable() {
828  Label needs_frame, restore_caller_doubles, call_deopt_entry;
829 
830  if (jump_table_.length() > 0) {
831  Comment(";;; -------------------- Jump table --------------------");
832  Address base = jump_table_[0]->address;
833 
834  UseScratchRegisterScope temps(masm());
835  Register entry_offset = temps.AcquireX();
836 
837  int length = jump_table_.length();
838  for (int i = 0; i < length; i++) {
839  Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
840  __ Bind(&table_entry->label);
841 
842  Address entry = table_entry->address;
843  DeoptComment(table_entry->reason);
844 
845  // Second-level deopt table entries are contiguous and small, so instead
846  // of loading the full, absolute address of each one, load the base
847  // address and add an immediate offset.
848  __ Mov(entry_offset, entry - base);
849 
850  // The last entry can fall through into `call_deopt_entry`, avoiding a
851  // branch.
852  bool last_entry = (i + 1) == length;
853 
854  if (table_entry->needs_frame) {
855  DCHECK(!info()->saves_caller_doubles());
856  if (!needs_frame.is_bound()) {
857  // This variant of deopt can only be used with stubs. Since we don't
858  // have a function pointer to install in the stack frame that we're
859  // building, install a special marker there instead.
860  DCHECK(info()->IsStub());
861 
862  UseScratchRegisterScope temps(masm());
863  Register stub_marker = temps.AcquireX();
864  __ Bind(&needs_frame);
865  __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
866  __ Push(lr, fp, cp, stub_marker);
867  __ Add(fp, __ StackPointer(), 2 * kPointerSize);
868  if (!last_entry) __ B(&call_deopt_entry);
869  } else {
870  // Reuse the existing needs_frame code.
871  __ B(&needs_frame);
872  }
873  } else if (info()->saves_caller_doubles()) {
874  DCHECK(info()->IsStub());
875  if (!restore_caller_doubles.is_bound()) {
876  __ Bind(&restore_caller_doubles);
877  RestoreCallerDoubles();
878  if (!last_entry) __ B(&call_deopt_entry);
879  } else {
880  // Reuse the existing restore_caller_doubles code.
881  __ B(&restore_caller_doubles);
882  }
883  } else {
884  // There is nothing special to do, so just continue to the second-level
885  // table.
886  if (!last_entry) __ B(&call_deopt_entry);
887  }
888 
889  masm()->CheckConstPool(false, last_entry);
890  }
891 
892  // Generate common code for calling the second-level deopt table.
893  Register deopt_entry = temps.AcquireX();
894  __ Bind(&call_deopt_entry);
895  __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
896  RelocInfo::RUNTIME_ENTRY));
897  __ Add(deopt_entry, deopt_entry, entry_offset);
898  __ Call(deopt_entry);
899  }
900 
901  // Force constant pool emission at the end of the deopt jump table to make
902  // sure that no constant pools are emitted after.
903  masm()->CheckConstPool(true, false);
904 
905  // The deoptimization jump table is the last part of the instruction
906  // sequence. Mark the generated code as done unless we bailed out.
907  if (!is_aborted()) status_ = DONE;
908  return !is_aborted();
909 }
910 
911 
912 bool LCodeGen::GenerateSafepointTable() {
913  DCHECK(is_done());
914  // We do not know how much data will be emitted for the safepoint table, so
915  // force emission of the veneer pool.
916  masm()->CheckVeneerPool(true, true);
917  safepoints_.Emit(masm(), GetStackSlotCount());
918  return !is_aborted();
919 }
920 
921 
922 void LCodeGen::FinishCode(Handle<Code> code) {
923  DCHECK(is_done());
924  code->set_stack_slots(GetStackSlotCount());
925  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
926  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
927  PopulateDeoptimizationData(code);
928 }
929 
930 
931 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
932  int length = deoptimizations_.length();
933  if (length == 0) return;
934 
935  Handle<DeoptimizationInputData> data =
936  DeoptimizationInputData::New(isolate(), length, TENURED);
937 
938  Handle<ByteArray> translations =
939  translations_.CreateByteArray(isolate()->factory());
940  data->SetTranslationByteArray(*translations);
941  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
942  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
943  if (info_->IsOptimizing()) {
944  // Reference to shared function info does not change between phases.
945  AllowDeferredHandleDereference allow_handle_dereference;
946  data->SetSharedFunctionInfo(*info_->shared_info());
947  } else {
948  data->SetSharedFunctionInfo(Smi::FromInt(0));
949  }
950 
951  Handle<FixedArray> literals =
952  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
953  { AllowDeferredHandleDereference copy_handles;
954  for (int i = 0; i < deoptimization_literals_.length(); i++) {
955  literals->set(i, *deoptimization_literals_[i]);
956  }
957  data->SetLiteralArray(*literals);
958  }
959 
960  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
961  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
962 
963  // Populate the deoptimization entries.
964  for (int i = 0; i < length; i++) {
965  LEnvironment* env = deoptimizations_[i];
966  data->SetAstId(i, env->ast_id());
967  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
968  data->SetArgumentsStackHeight(i,
969  Smi::FromInt(env->arguments_stack_height()));
970  data->SetPc(i, Smi::FromInt(env->pc_offset()));
971  }
972 
973  code->set_deoptimization_data(*data);
974 }
975 
976 
977 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
978  DCHECK(deoptimization_literals_.length() == 0);
979 
980  const ZoneList<Handle<JSFunction> >* inlined_closures =
981  chunk()->inlined_closures();
982 
983  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
984  DefineDeoptimizationLiteral(inlined_closures->at(i));
985  }
986 
987  inlined_function_count_ = deoptimization_literals_.length();
988 }
989 
990 
991 void LCodeGen::DeoptimizeBranch(
992  LInstruction* instr, const char* detail, BranchType branch_type,
993  Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
994  LEnvironment* environment = instr->environment();
995  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
996  Deoptimizer::BailoutType bailout_type =
997  info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
998 
999  if (override_bailout_type != NULL) {
1000  bailout_type = *override_bailout_type;
1001  }
1002 
1003  DCHECK(environment->HasBeenRegistered());
1004  DCHECK(info()->IsOptimizing() || info()->IsStub());
1005  int id = environment->deoptimization_index();
1006  Address entry =
1007  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1008 
1009  if (entry == NULL) {
1010  Abort(kBailoutWasNotPrepared);
1011  }
1012 
1013  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
1014  Label not_zero;
1015  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1016 
1017  __ Push(x0, x1, x2);
1018  __ Mrs(x2, NZCV);
1019  __ Mov(x0, count);
1020  __ Ldr(w1, MemOperand(x0));
1021  __ Subs(x1, x1, 1);
1022  __ B(gt, &not_zero);
1023  __ Mov(w1, FLAG_deopt_every_n_times);
1024  __ Str(w1, MemOperand(x0));
1025  __ Pop(x2, x1, x0);
1027  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1028  __ Unreachable();
1029 
1030  __ Bind(&not_zero);
1031  __ Str(w1, MemOperand(x0));
1032  __ Msr(NZCV, x2);
1033  __ Pop(x2, x1, x0);
1034  }
1035 
1036  if (info()->ShouldTrapOnDeopt()) {
1037  Label dont_trap;
1038  __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
1039  __ Debug("trap_on_deopt", __LINE__, BREAK);
1040  __ Bind(&dont_trap);
1041  }
1042 
1043  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
1044  instr->Mnemonic(), detail);
1045  DCHECK(info()->IsStub() || frame_is_built_);
1046  // Go through jump table if we need to build frame, or restore caller doubles.
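  // (A direct call is also only possible for an unconditional deopt;
  // conditional deopts branch to a jump table entry instead, since a call
  // cannot be made conditional on ARM64.)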
1047  if (branch_type == always &&
1048  frame_is_built_ && !info()->saves_caller_doubles()) {
1049  DeoptComment(reason);
1050  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1051  } else {
1052  Deoptimizer::JumpTableEntry* table_entry =
1053  new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
1054  !frame_is_built_);
1055  // We often have several deopts to the same entry, reuse the last
1056  // jump entry if this is the case.
1057  if (jump_table_.is_empty() ||
1058  !table_entry->IsEquivalentTo(*jump_table_.last())) {
1059  jump_table_.Add(table_entry, zone());
1060  }
1061  __ B(&jump_table_.last()->label, branch_type, reg, bit);
1062  }
1063 }
1064 
1065 
1066 void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
1067  Deoptimizer::BailoutType* override_bailout_type) {
1068  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
1069 }
1070 
1071 
1072 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
1073  const char* detail) {
1074  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
1075 }
1076 
1077 
1078 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
1079  const char* detail) {
1080  DeoptimizeBranch(instr, detail, reg_zero, rt);
1081 }
1082 
1083 
1084 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
1085  const char* detail) {
1086  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
1087 }
1088 
1089 
1090 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
1091  const char* detail) {
1092  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
1093  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
1094 }
1095 
1096 
1097 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
1098  const char* detail) {
1099  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
1100 }
1101 
1102 
1103 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
1104  const char* detail) {
1105  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
1106 }
1107 
1108 
1109 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
1110  LInstruction* instr, const char* detail) {
1111  __ CompareRoot(rt, index);
1112  DeoptimizeIf(eq, instr, detail);
1113 }
1114 
1115 
1116 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
1117  LInstruction* instr, const char* detail) {
1118  __ CompareRoot(rt, index);
1119  DeoptimizeIf(ne, instr, detail);
1120 }
1121 
1122 
1123 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
1124  const char* detail) {
1125  __ TestForMinusZero(input);
1126  DeoptimizeIf(vs, instr, detail);
1127 }
1128 
1129 
1130 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
1131  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
1132  DeoptimizeIf(ne, instr, "not heap number");
1133 }
1134 
1135 
1136 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
1137  const char* detail) {
1138  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
1139 }
1140 
1141 
1142 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
1143  const char* detail) {
1144  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
1145 }
1146 
1147 
1148 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
1149  if (!info()->IsStub()) {
1150  // Ensure that we have enough space after the previous lazy-bailout
1151  // instruction for patching the code here.
1152  intptr_t current_pc = masm()->pc_offset();
1153 
1154  if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
1155  ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1156  DCHECK((padding_size % kInstructionSize) == 0);
1157  InstructionAccurateScope instruction_accurate(
1158  masm(), padding_size / kInstructionSize);
1159 
1160  while (padding_size > 0) {
1161  __ nop();
1162  padding_size -= kInstructionSize;
1163  }
1164  }
1165  }
1166  last_lazy_deopt_pc_ = masm()->pc_offset();
1167 }
1168 
1169 
1170 Register LCodeGen::ToRegister(LOperand* op) const {
1171  // TODO(all): support zero register results, as ToRegister32.
1172  DCHECK((op != NULL) && op->IsRegister());
1173  return Register::FromAllocationIndex(op->index());
1174 }
1175 
1176 
1177 Register LCodeGen::ToRegister32(LOperand* op) const {
1178  DCHECK(op != NULL);
1179  if (op->IsConstantOperand()) {
1180  // If this is a constant operand, the result must be the zero register.
1181  DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
1182  return wzr;
1183  } else {
1184  return ToRegister(op).W();
1185  }
1186 }
1187 
1188 
1189 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
1190  HConstant* constant = chunk_->LookupConstant(op);
1191  return Smi::FromInt(constant->Integer32Value());
1192 }
1193 
1194 
1195 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
1196  DCHECK((op != NULL) && op->IsDoubleRegister());
1197  return DoubleRegister::FromAllocationIndex(op->index());
1198 }
1199 
1200 
1201 Operand LCodeGen::ToOperand(LOperand* op) {
1202  DCHECK(op != NULL);
1203  if (op->IsConstantOperand()) {
1204  LConstantOperand* const_op = LConstantOperand::cast(op);
1205  HConstant* constant = chunk()->LookupConstant(const_op);
1206  Representation r = chunk_->LookupLiteralRepresentation(const_op);
1207  if (r.IsSmi()) {
1208  DCHECK(constant->HasSmiValue());
1209  return Operand(Smi::FromInt(constant->Integer32Value()));
1210  } else if (r.IsInteger32()) {
1211  DCHECK(constant->HasInteger32Value());
1212  return Operand(constant->Integer32Value());
1213  } else if (r.IsDouble()) {
1214  Abort(kToOperandUnsupportedDoubleImmediate);
1215  }
1216  DCHECK(r.IsTagged());
1217  return Operand(constant->handle(isolate()));
1218  } else if (op->IsRegister()) {
1219  return Operand(ToRegister(op));
1220  } else if (op->IsDoubleRegister()) {
1221  Abort(kToOperandIsDoubleRegisterUnimplemented);
1222  return Operand(0);
1223  }
1224  // Stack slots not implemented, use ToMemOperand instead.
1225  UNREACHABLE();
1226  return Operand(0);
1227 }
1228 
1229 
1230 Operand LCodeGen::ToOperand32(LOperand* op) {
1231  DCHECK(op != NULL);
1232  if (op->IsRegister()) {
1233  return Operand(ToRegister32(op));
1234  } else if (op->IsConstantOperand()) {
1235  LConstantOperand* const_op = LConstantOperand::cast(op);
1236  HConstant* constant = chunk()->LookupConstant(const_op);
1237  Representation r = chunk_->LookupLiteralRepresentation(const_op);
1238  if (r.IsInteger32()) {
1239  return Operand(constant->Integer32Value());
1240  } else {
1241  // Other constants not implemented.
1242  Abort(kToOperand32UnsupportedImmediate);
1243  }
1244  }
1245  // Other cases are not implemented.
1246  UNREACHABLE();
1247  return Operand(0);
1248 }
1249 
1250 
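// Offset from the stack pointer of an argument slot when no frame is built:
// index -1 maps to offset 0, index -2 to kPointerSize, and so on.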
1251 static int64_t ArgumentsOffsetWithoutFrame(int index) {
1252  DCHECK(index < 0);
1253  return -(index + 1) * kPointerSize;
1254 }
1255 
1256 
1257 MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
1258  DCHECK(op != NULL);
1259  DCHECK(!op->IsRegister());
1260  DCHECK(!op->IsDoubleRegister());
1261  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
1262  if (NeedsEagerFrame()) {
1263  int fp_offset = StackSlotOffset(op->index());
1264  if (op->index() >= 0) {
1265  // Loads and stores have a bigger reach with positive offsets than with negative ones.
1266  // When the load or the store can't be done in one instruction via fp
1267  // (too big negative offset), we try to access via jssp (positive offset).
1268  // We can reference a stack slot from jssp only if jssp references the end
1269  // of the stack slots. It's not the case when:
1270  // - stack_mode != kCanUseStackPointer: this is the case when a deferred
1271  // code saved the registers.
1272  // - after_push_argument_: arguments have been pushed for a call.
1273  // - inlined_arguments_: inlined arguments have been pushed once. All the
1274  // remainder of the function cannot trust jssp any longer.
1275  // - saves_caller_doubles: some double registers have been pushed, jssp
1276  // references the end of the double registers and not the end of the
1277  // stack slots.
1278  // Also, if the offset from fp is small enough to make a load/store in
1279  // one instruction, we use a fp access.
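  // (The is_int9 check below corresponds to the signed 9-bit immediate of
  // ARM64 unscaled loads and stores, which is what limits negative offsets
  // from fp.)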
1280  if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
1281  !inlined_arguments_ && !is_int9(fp_offset) &&
1282  !info()->saves_caller_doubles()) {
1283  int jssp_offset =
1284  (GetStackSlotCount() - op->index() - 1) * kPointerSize;
1285  return MemOperand(masm()->StackPointer(), jssp_offset);
1286  }
1287  }
1288  return MemOperand(fp, fp_offset);
1289  } else {
1290  // Retrieve parameter without eager stack-frame relative to the
1291  // stack-pointer.
1292  return MemOperand(masm()->StackPointer(),
1293  ArgumentsOffsetWithoutFrame(op->index()));
1294  }
1295 }
1296 
1297 
1298 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
1299  HConstant* constant = chunk_->LookupConstant(op);
1300  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
1301  return constant->handle(isolate());
1302 }
1303 
1304 
1305 template <class LI>
1306 Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
1307  if (shift_info->shift() == NO_SHIFT) {
1308  return ToOperand32(right);
1309  } else {
1310  return Operand(
1311  ToRegister32(right),
1312  shift_info->shift(),
1313  JSShiftAmountFromLConstant(shift_info->shift_amount()));
1314  }
1315 }
1316 
1317 
1318 bool LCodeGen::IsSmi(LConstantOperand* op) const {
1319  return chunk_->LookupLiteralRepresentation(op).IsSmi();
1320 }
1321 
1322 
1323 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
1324  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
1325 }
1326 
1327 
1328 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
1329  HConstant* constant = chunk_->LookupConstant(op);
1330  return constant->Integer32Value();
1331 }
1332 
1333 
1334 double LCodeGen::ToDouble(LConstantOperand* op) const {
1335  HConstant* constant = chunk_->LookupConstant(op);
1336  DCHECK(constant->HasDoubleValue());
1337  return constant->DoubleValue();
1338 }
1339 
1340 
1341 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1342  Condition cond = nv;
1343  switch (op) {
1344  case Token::EQ:
1345  case Token::EQ_STRICT:
1346  cond = eq;
1347  break;
1348  case Token::NE:
1349  case Token::NE_STRICT:
1350  cond = ne;
1351  break;
1352  case Token::LT:
1353  cond = is_unsigned ? lo : lt;
1354  break;
1355  case Token::GT:
1356  cond = is_unsigned ? hi : gt;
1357  break;
1358  case Token::LTE:
1359  cond = is_unsigned ? ls : le;
1360  break;
1361  case Token::GTE:
1362  cond = is_unsigned ? hs : ge;
1363  break;
1364  case Token::IN:
1365  case Token::INSTANCEOF:
1366  default:
1367  UNREACHABLE();
1368  }
1369  return cond;
1370 }
1371 
1372 
1373 template<class InstrType>
1374 void LCodeGen::EmitBranchGeneric(InstrType instr,
1375  const BranchGenerator& branch) {
1376  int left_block = instr->TrueDestination(chunk_);
1377  int right_block = instr->FalseDestination(chunk_);
1378 
1379  int next_block = GetNextEmittedBlock();
1380 
1381  if (right_block == left_block) {
1382  EmitGoto(left_block);
1383  } else if (left_block == next_block) {
1384  branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
1385  } else {
1386  branch.Emit(chunk_->GetAssemblyLabel(left_block));
1387  if (right_block != next_block) {
1388  __ B(chunk_->GetAssemblyLabel(right_block));
1389  }
1390  }
1391 }
1392 
1393 
1394 template<class InstrType>
1395 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1396  DCHECK((condition != al) && (condition != nv));
1397  BranchOnCondition branch(this, condition);
1398  EmitBranchGeneric(instr, branch);
1399 }
1400 
1401 
1402 template<class InstrType>
1403 void LCodeGen::EmitCompareAndBranch(InstrType instr,
1404  Condition condition,
1405  const Register& lhs,
1406  const Operand& rhs) {
1407  DCHECK((condition != al) && (condition != nv));
1408  CompareAndBranch branch(this, condition, lhs, rhs);
1409  EmitBranchGeneric(instr, branch);
1410 }
1411 
1412 
1413 template<class InstrType>
1414 void LCodeGen::EmitTestAndBranch(InstrType instr,
1415  Condition condition,
1416  const Register& value,
1417  uint64_t mask) {
1418  DCHECK((condition != al) && (condition != nv));
1419  TestAndBranch branch(this, condition, value, mask);
1420  EmitBranchGeneric(instr, branch);
1421 }
1422 
1423 
1424 template<class InstrType>
1425 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
1426  const FPRegister& value,
1427  const FPRegister& scratch) {
1428  BranchIfNonZeroNumber branch(this, value, scratch);
1429  EmitBranchGeneric(instr, branch);
1430 }
1431 
1432 
1433 template<class InstrType>
1434 void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
1435  const Register& value) {
1436  BranchIfHeapNumber branch(this, value);
1437  EmitBranchGeneric(instr, branch);
1438 }
1439 
1440 
1441 template<class InstrType>
1442 void LCodeGen::EmitBranchIfRoot(InstrType instr,
1443  const Register& value,
1444  Heap::RootListIndex index) {
1445  BranchIfRoot branch(this, value, index);
1446  EmitBranchGeneric(instr, branch);
1447 }
1448 
1449 
1450 void LCodeGen::DoGap(LGap* gap) {
1451  for (int i = LGap::FIRST_INNER_POSITION;
1452  i <= LGap::LAST_INNER_POSITION;
1453  i++) {
1454  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1455  LParallelMove* move = gap->GetParallelMove(inner_pos);
1456  if (move != NULL) {
1457  resolver_.Resolve(move);
1458  }
1459  }
1460 }
1461 
1462 
1463 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1464  Register arguments = ToRegister(instr->arguments());
1465  Register result = ToRegister(instr->result());
1466 
1467  // The pointer to the arguments array comes from DoArgumentsElements.
1468  // It does not point directly to the arguments and there is an offset of
1469  // two words that we must take into account when accessing an argument.
1470  // Subtracting the index from length accounts for one, so we add one more.
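  // For example, with length 2 and index 0 the argument is loaded from
  // [arguments + 3 * kPointerSize] in the constant-operand path below.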
1471 
1472  if (instr->length()->IsConstantOperand() &&
1473  instr->index()->IsConstantOperand()) {
1474  int index = ToInteger32(LConstantOperand::cast(instr->index()));
1475  int length = ToInteger32(LConstantOperand::cast(instr->length()));
1476  int offset = ((length - index) + 1) * kPointerSize;
1477  __ Ldr(result, MemOperand(arguments, offset));
1478  } else if (instr->index()->IsConstantOperand()) {
1479  Register length = ToRegister32(instr->length());
1480  int index = ToInteger32(LConstantOperand::cast(instr->index()));
1481  int loc = index - 1;
1482  if (loc != 0) {
1483  __ Sub(result.W(), length, loc);
1484  __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1485  } else {
1486  __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
1487  }
1488  } else {
1489  Register length = ToRegister32(instr->length());
1490  Operand index = ToOperand32(instr->index());
1491  __ Sub(result.W(), length, index);
1492  __ Add(result.W(), result.W(), 1);
1493  __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1494  }
1495 }
1496 
1497 
1498 void LCodeGen::DoAddE(LAddE* instr) {
1499  Register result = ToRegister(instr->result());
1500  Register left = ToRegister(instr->left());
1501  Operand right = (instr->right()->IsConstantOperand())
1502  ? ToInteger32(LConstantOperand::cast(instr->right()))
1503  : Operand(ToRegister32(instr->right()), SXTW);
1504 
1505  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1506  __ Add(result, left, right);
1507 }
1508 
1509 
1510 void LCodeGen::DoAddI(LAddI* instr) {
1511  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1512  Register result = ToRegister32(instr->result());
1513  Register left = ToRegister32(instr->left());
1514  Operand right = ToShiftedRightOperand32(instr->right(), instr);
1515 
1516  if (can_overflow) {
1517  __ Adds(result, left, right);
1518  DeoptimizeIf(vs, instr, "overflow");
1519  } else {
1520  __ Add(result, left, right);
1521  }
1522 }
1523 
1524 
1525 void LCodeGen::DoAddS(LAddS* instr) {
1526  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1527  Register result = ToRegister(instr->result());
1528  Register left = ToRegister(instr->left());
1529  Operand right = ToOperand(instr->right());
1530  if (can_overflow) {
1531  __ Adds(result, left, right);
1532  DeoptimizeIf(vs, instr, "overflow");
1533  } else {
1534  __ Add(result, left, right);
1535  }
1536 }
1537 
1538 
1539 void LCodeGen::DoAllocate(LAllocate* instr) {
1540  class DeferredAllocate: public LDeferredCode {
1541  public:
1542  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
1543  : LDeferredCode(codegen), instr_(instr) { }
1544  virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
1545  virtual LInstruction* instr() { return instr_; }
1546  private:
1547  LAllocate* instr_;
1548  };
1549 
1550  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
1551 
1552  Register result = ToRegister(instr->result());
1553  Register temp1 = ToRegister(instr->temp1());
1554  Register temp2 = ToRegister(instr->temp2());
1555 
1556  // Allocate memory for the object.
1557  AllocationFlags flags = TAG_OBJECT;
1558  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
1559  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
1560  }
1561 
1562  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1563  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
1564  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1565  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
1566  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1567  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1568  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
1569  }
1570 
1571  if (instr->size()->IsConstantOperand()) {
1572  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1573  if (size <= Page::kMaxRegularHeapObjectSize) {
1574  __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1575  } else {
1576  __ B(deferred->entry());
1577  }
1578  } else {
1579  Register size = ToRegister32(instr->size());
1580  __ Sxtw(size.X(), size);
1581  __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
1582  }
1583 
1584  __ Bind(deferred->exit());
1585 
1586  if (instr->hydrogen()->MustPrefillWithFiller()) {
1587  Register filler_count = temp1;
1588  Register filler = temp2;
1589  Register untagged_result = ToRegister(instr->temp3());
1590 
1591  if (instr->size()->IsConstantOperand()) {
1592  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1593  __ Mov(filler_count, size / kPointerSize);
1594  } else {
1595  __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
1596  }
1597 
1598  __ Sub(untagged_result, result, kHeapObjectTag);
1599  __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
1600  __ FillFields(untagged_result, filler_count, filler);
1601  } else {
1602  DCHECK(instr->temp3() == NULL);
1603  }
1604 }
1605 
1606 
1607 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
1608  // TODO(3095996): Get rid of this. For now, we need to make the
1609  // result register contain a valid pointer because it is already
1610  // contained in the register pointer map.
1611  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
1612 
1613  PushSafepointRegistersScope scope(this);
1614  // We're in a SafepointRegistersScope so we can use any scratch registers.
1615  Register size = x0;
1616  if (instr->size()->IsConstantOperand()) {
1617  __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
1618  } else {
1619  __ SmiTag(size, ToRegister32(instr->size()).X());
1620  }
1621  int flags = AllocateDoubleAlignFlag::encode(
1622  instr->hydrogen()->MustAllocateDoubleAligned());
1623  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1624  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
1625  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1626  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
1627  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1628  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1629  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
1630  } else {
1631  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
1632  }
1633  __ Mov(x10, Smi::FromInt(flags));
1634  __ Push(size, x10);
1635 
1636  CallRuntimeFromDeferred(
1637  Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
1638  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
1639 }
1640 
1641 
1642 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1643  Register receiver = ToRegister(instr->receiver());
1644  Register function = ToRegister(instr->function());
1645  Register length = ToRegister32(instr->length());
1646 
1647  Register elements = ToRegister(instr->elements());
1648  Register scratch = x5;
1649  DCHECK(receiver.Is(x0)); // Used for parameter count.
1650  DCHECK(function.Is(x1)); // Required by InvokeFunction.
1651  DCHECK(ToRegister(instr->result()).Is(x0));
1652  DCHECK(instr->IsMarkedAsCall());
1653 
1654  // Copy the arguments to this function possibly from the
1655  // adaptor frame below it.
1656  const uint32_t kArgumentsLimit = 1 * KB;
1657  __ Cmp(length, kArgumentsLimit);
1658  DeoptimizeIf(hi, instr, "too many arguments");
1659 
1660  // Push the receiver and use the register to keep the original
1661  // number of arguments.
1662  __ Push(receiver);
1663  Register argc = receiver;
1664  receiver = NoReg;
1665  __ Sxtw(argc, length);
1666  // The arguments are at a one pointer size offset from elements.
1667  __ Add(elements, elements, 1 * kPointerSize);
1668 
1669  // Loop through the arguments pushing them onto the execution
1670  // stack.
1671  Label invoke, loop;
1672  // length is a small non-negative integer, due to the test above.
1673  __ Cbz(length, &invoke);
1674  __ Bind(&loop);
1675  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
1676  __ Push(scratch);
1677  __ Subs(length, length, 1);
1678  __ B(ne, &loop);
1679 
1680  __ Bind(&invoke);
1681  DCHECK(instr->HasPointerMap());
1682  LPointerMap* pointers = instr->pointer_map();
1683  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
1684  // The number of arguments is stored in argc (receiver) which is x0, as
1685  // expected by InvokeFunction.
1686  ParameterCount actual(argc);
1687  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
1688 }
1689 
1690 
1691 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1692  // We push some arguments and they will be popped in another block. We can't
1693  // trust that jssp references the end of the stack slots until the end of
1694  // the function.
1695  inlined_arguments_ = true;
1696  Register result = ToRegister(instr->result());
1697 
1698  if (instr->hydrogen()->from_inlined()) {
1699  // When we are inside an inlined function, the arguments are the last things
1700  // that have been pushed on the stack. Therefore the arguments array can be
1701  // accessed directly from jssp.
1702  // However in the normal case, it is accessed via fp but there are two words
1703  // on the stack between fp and the arguments (the saved lr and fp) and the
1704  // LAccessArgumentsAt implementation takes that into account.
1705  // In the inlined case we need to subtract the size of 2 words to jssp to
1706  // get a pointer which will work well with LAccessArgumentsAt.
1707  DCHECK(masm()->StackPointer().Is(jssp));
1708  __ Sub(result, jssp, 2 * kPointerSize);
1709  } else {
1710  DCHECK(instr->temp() != NULL);
1711  Register previous_fp = ToRegister(instr->temp());
1712 
1713  __ Ldr(previous_fp,
1714  MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1715  __ Ldr(result,
1716  MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
1717  __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1718  __ Csel(result, fp, previous_fp, ne);
1719  }
1720 }
1721 
1722 
1723 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1724  Register elements = ToRegister(instr->elements());
1725  Register result = ToRegister32(instr->result());
1726  Label done;
1727 
1728  // If no arguments adaptor frame the number of arguments is fixed.
1729  __ Cmp(fp, elements);
1730  __ Mov(result, scope()->num_parameters());
1731  __ B(eq, &done);
1732 
1733  // Arguments adaptor frame present. Get argument length from there.
1734  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1735  __ Ldr(result,
1736  UntagSmiMemOperand(result.X(),
1737  ArgumentsAdaptorFrameConstants::kLengthOffset));
1738 
1739  // Argument length is in result register.
1740  __ Bind(&done);
1741 }
1742 
1743 
1744 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1745  DoubleRegister left = ToDoubleRegister(instr->left());
1746  DoubleRegister right = ToDoubleRegister(instr->right());
1747  DoubleRegister result = ToDoubleRegister(instr->result());
1748 
1749  switch (instr->op()) {
1750  case Token::ADD: __ Fadd(result, left, right); break;
1751  case Token::SUB: __ Fsub(result, left, right); break;
1752  case Token::MUL: __ Fmul(result, left, right); break;
1753  case Token::DIV: __ Fdiv(result, left, right); break;
1754  case Token::MOD: {
1755  // The ECMA-262 remainder operator is the remainder from a truncating
1756  // (round-towards-zero) division. Note that this differs from IEEE-754.
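  // (For example, the truncating remainder of -5.0 and 3.0 is -2.0, whereas
  // the IEEE-754 remainder, which rounds the quotient to nearest, is 1.0.)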
1757  //
1758  // TODO(jbramley): See if it's possible to do this inline, rather than by
1759  // calling a helper function. With frintz (to produce the intermediate
1760  // quotient) and fmsub (to calculate the remainder without loss of
1761  // precision), it should be possible. However, we would need support for
1762  // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
1763  // support that yet.
1764  DCHECK(left.Is(d0));
1765  DCHECK(right.Is(d1));
1766  __ CallCFunction(
1767  ExternalReference::mod_two_doubles_operation(isolate()),
1768  0, 2);
1769  DCHECK(result.Is(d0));
1770  break;
1771  }
1772  default:
1773  UNREACHABLE();
1774  break;
1775  }
1776 }
1777 
1778 
1779 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1780  DCHECK(ToRegister(instr->context()).is(cp));
1781  DCHECK(ToRegister(instr->left()).is(x1));
1782  DCHECK(ToRegister(instr->right()).is(x0));
1783  DCHECK(ToRegister(instr->result()).is(x0));
1784 
1785  Handle<Code> code =
1786  CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
1787  CallCode(code, RelocInfo::CODE_TARGET, instr);
1788 }
1789 
1790 
1791 void LCodeGen::DoBitI(LBitI* instr) {
1792  Register result = ToRegister32(instr->result());
1793  Register left = ToRegister32(instr->left());
1794  Operand right = ToShiftedRightOperand32(instr->right(), instr);
1795 
1796  switch (instr->op()) {
1797  case Token::BIT_AND: __ And(result, left, right); break;
1798  case Token::BIT_OR: __ Orr(result, left, right); break;
1799  case Token::BIT_XOR: __ Eor(result, left, right); break;
1800  default:
1801  UNREACHABLE();
1802  break;
1803  }
1804 }
1805 
1806 
1807 void LCodeGen::DoBitS(LBitS* instr) {
1808  Register result = ToRegister(instr->result());
1809  Register left = ToRegister(instr->left());
1810  Operand right = ToOperand(instr->right());
1811 
1812  switch (instr->op()) {
1813  case Token::BIT_AND: __ And(result, left, right); break;
1814  case Token::BIT_OR: __ Orr(result, left, right); break;
1815  case Token::BIT_XOR: __ Eor(result, left, right); break;
1816  default:
1817  UNREACHABLE();
1818  break;
1819  }
1820 }
1821 
1822 
1823 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
1824  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
1825  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
1826  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
1827  if (instr->index()->IsConstantOperand()) {
1828  Operand index = ToOperand32(instr->index());
1829  Register length = ToRegister32(instr->length());
1830  __ Cmp(length, index);
1831  cond = CommuteCondition(cond);
1832  } else {
1833  Register index = ToRegister32(instr->index());
1834  Operand length = ToOperand32(instr->length());
1835  __ Cmp(index, length);
1836  }
1837  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
1838  __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
1839  } else {
1840  DeoptimizeIf(cond, instr, "out of bounds");
1841  }
1842 }
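// --- Illustrative sketch (not part of the original source) -----------------
// DoBoundsCheck above commutes the condition when the index is a constant,
// because a constant can only be the right-hand operand of Cmp. The deopt
// decision itself is unchanged, as this plain C++ check (hypothetical helper,
// unsigned comparisons as implied by hi/hs and their commuted forms) shows:

#include <cassert>
#include <cstdint>

static bool BoundsCheckDeoptsSketch(uint32_t index, uint32_t length,
                                    bool allow_equality) {
  // Direct form, Cmp(index, length): deopt on hi (equality allowed) or hs.
  bool deopt = allow_equality ? (index > length) : (index >= length);
  // Commuted form, Cmp(length, index): deopt on lo or ls - same outcome.
  bool deopt_commuted = allow_equality ? (length < index) : (length <= index);
  assert(deopt == deopt_commuted);
  return deopt;
}
// ----------------------------------------------------------------------------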
1843 
1844 
1845 void LCodeGen::DoBranch(LBranch* instr) {
1846  Representation r = instr->hydrogen()->value()->representation();
1847  Label* true_label = instr->TrueLabel(chunk_);
1848  Label* false_label = instr->FalseLabel(chunk_);
1849 
1850  if (r.IsInteger32()) {
1851  DCHECK(!info()->IsStub());
1852  EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1853  } else if (r.IsSmi()) {
1854  DCHECK(!info()->IsStub());
1855  STATIC_ASSERT(kSmiTag == 0);
1856  EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1857  } else if (r.IsDouble()) {
1858  DoubleRegister value = ToDoubleRegister(instr->value());
1859  // Test the double value. Zero and NaN are false.
1860  EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1861  } else {
1862  DCHECK(r.IsTagged());
1863  Register value = ToRegister(instr->value());
1864  HType type = instr->hydrogen()->value()->type();
1865 
1866  if (type.IsBoolean()) {
1867  DCHECK(!info()->IsStub());
1868  __ CompareRoot(value, Heap::kTrueValueRootIndex);
1869  EmitBranch(instr, eq);
1870  } else if (type.IsSmi()) {
1871  DCHECK(!info()->IsStub());
1872  EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1873  } else if (type.IsJSArray()) {
1874  DCHECK(!info()->IsStub());
1875  EmitGoto(instr->TrueDestination(chunk()));
1876  } else if (type.IsHeapNumber()) {
1877  DCHECK(!info()->IsStub());
1878  __ Ldr(double_scratch(), FieldMemOperand(value,
1879                                           HeapNumber::kValueOffset));
1880  // Test the double value. Zero and NaN are false.
1881  EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1882  } else if (type.IsString()) {
1883  DCHECK(!info()->IsStub());
1884  Register temp = ToRegister(instr->temp1());
1885  __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1886  EmitCompareAndBranch(instr, ne, temp, 0);
1887  } else {
1888  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1889  // Avoid deopts in the case where we've never executed this path before.
1890  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1891 
1892  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1893  // undefined -> false.
1894  __ JumpIfRoot(
1895  value, Heap::kUndefinedValueRootIndex, false_label);
1896  }
1897 
1898  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1899  // Boolean -> its value.
1900  __ JumpIfRoot(
1901  value, Heap::kTrueValueRootIndex, true_label);
1902  __ JumpIfRoot(
1903  value, Heap::kFalseValueRootIndex, false_label);
1904  }
1905 
1906  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1907  // 'null' -> false.
1908  __ JumpIfRoot(
1909  value, Heap::kNullValueRootIndex, false_label);
1910  }
1911 
1912  if (expected.Contains(ToBooleanStub::SMI)) {
1913  // Smis: 0 -> false, all other -> true.
1914  DCHECK(Smi::FromInt(0) == 0);
1915  __ Cbz(value, false_label);
1916  __ JumpIfSmi(value, true_label);
1917  } else if (expected.NeedsMap()) {
1918  // If we need a map later and have a smi, deopt.
1919  DeoptimizeIfSmi(value, instr, "Smi");
1920  }
1921 
1922  Register map = NoReg;
1923  Register scratch = NoReg;
1924 
1925  if (expected.NeedsMap()) {
1926  DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1927  map = ToRegister(instr->temp1());
1928  scratch = ToRegister(instr->temp2());
1929 
1930  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1931 
1932  if (expected.CanBeUndetectable()) {
1933  // Undetectable -> false.
1934  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1935  __ TestAndBranchIfAnySet(
1936  scratch, 1 << Map::kIsUndetectable, false_label);
1937  }
1938  }
1939 
1940  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1941  // spec object -> true.
1942  __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1943  __ B(ge, true_label);
1944  }
1945 
1946  if (expected.Contains(ToBooleanStub::STRING)) {
1947  // String value -> false iff empty.
1948  Label not_string;
1949  __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1950  __ B(ge, &not_string);
1951  __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1952  __ Cbz(scratch, false_label);
1953  __ B(true_label);
1954  __ Bind(&not_string);
1955  }
1956 
1957  if (expected.Contains(ToBooleanStub::SYMBOL)) {
1958  // Symbol value -> true.
1959  __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1960  __ B(eq, true_label);
1961  }
1962 
1963  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1964  Label not_heap_number;
1965  __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1966 
1967  __ Ldr(double_scratch(),
1968         FieldMemOperand(value, HeapNumber::kValueOffset));
1969  __ Fcmp(double_scratch(), 0.0);
1970  // If we got a NaN (overflow bit is set), jump to the false branch.
1971  __ B(vs, false_label);
1972  __ B(eq, false_label);
1973  __ B(true_label);
1974  __ Bind(&not_heap_number);
1975  }
1976 
1977  if (!expected.IsGeneric()) {
1978  // We've seen something for the first time -> deopt.
1979  // This can only happen if we are not generic already.
1980  Deoptimize(instr, "unexpected object");
1981  }
1982  }
1983  }
1984 }
1985 
1986 
1987 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1988  int formal_parameter_count,
1989  int arity,
1990  LInstruction* instr,
1991  Register function_reg) {
1992  bool dont_adapt_arguments =
1993  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1994  bool can_invoke_directly =
1995  dont_adapt_arguments || formal_parameter_count == arity;
1996 
1997  // The function interface relies on the following register assignments.
1998  DCHECK(function_reg.Is(x1) || function_reg.IsNone());
1999  Register arity_reg = x0;
2000 
2001  LPointerMap* pointers = instr->pointer_map();
2002 
2003  // If necessary, load the function object.
2004  if (function_reg.IsNone()) {
2005  function_reg = x1;
2006  __ LoadObject(function_reg, function);
2007  }
2008 
2009  if (FLAG_debug_code) {
2010  Label is_not_smi;
2011  // Try to confirm that function_reg (x1) is a tagged pointer.
2012  __ JumpIfNotSmi(function_reg, &is_not_smi);
2013  __ Abort(kExpectedFunctionObject);
2014  __ Bind(&is_not_smi);
2015  }
2016 
2017  if (can_invoke_directly) {
2018  // Change context.
2019  __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
2020 
2021  // Set the arguments count if adaptation is not needed. Assumes that x0 is
2022  // available to write to at this point.
2023  if (dont_adapt_arguments) {
2024  __ Mov(arity_reg, arity);
2025  }
2026 
2027  // Invoke function.
2028  __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
2029  __ Call(x10);
2030 
2031  // Set up deoptimization.
2032  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2033  } else {
2034  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2035  ParameterCount count(arity);
2036  ParameterCount expected(formal_parameter_count);
2037  __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2038  }
2039 }
2040 
2041 
2042 void LCodeGen::DoTailCallThroughMegamorphicCache(
2043  LTailCallThroughMegamorphicCache* instr) {
2044  Register receiver = ToRegister(instr->receiver());
2045  Register name = ToRegister(instr->name());
2048  DCHECK(receiver.is(x1));
2049  DCHECK(name.is(x2));
2050 
2051  Register scratch = x3;
2052  Register extra = x4;
2053  Register extra2 = x5;
2054  Register extra3 = x6;
2055 
2056  // Important for the tail-call.
2057  bool must_teardown_frame = NeedsEagerFrame();
2058 
2059  // The probe will tail call to a handler if found.
2060  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
2061  must_teardown_frame, receiver, name,
2062  scratch, extra, extra2, extra3);
2063 
2064  // Tail call to miss if we ended up here.
2065  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
2066  LoadIC::GenerateMiss(masm());
2067 }
2068 
2069 
2070 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2071  DCHECK(instr->IsMarkedAsCall());
2072  DCHECK(ToRegister(instr->result()).Is(x0));
2073 
2074  LPointerMap* pointers = instr->pointer_map();
2075  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2076 
2077  if (instr->target()->IsConstantOperand()) {
2078  LConstantOperand* target = LConstantOperand::cast(instr->target());
2079  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2080  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2081  // TODO(all): on ARM we use a call descriptor to specify a storage mode
2082  // but on ARM64 we only have one storage mode so it isn't necessary. Check
2083  // this understanding is correct.
2085  } else {
2086  DCHECK(instr->target()->IsRegister());
2087  Register target = ToRegister(instr->target());
2088  generator.BeforeCall(__ CallSize(target));
2089  __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2090  __ Call(target);
2091  }
2092  generator.AfterCall();
2093  after_push_argument_ = false;
2094 }
2095 
2096 
2097 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2098  DCHECK(instr->IsMarkedAsCall());
2099  DCHECK(ToRegister(instr->function()).is(x1));
2100 
2101  if (instr->hydrogen()->pass_argument_count()) {
2102  __ Mov(x0, Operand(instr->arity()));
2103  }
2104 
2105  // Change context.
2107 
2108  // Load the code entry address
2110  __ Call(x10);
2111 
2113  after_push_argument_ = false;
2114 }
2115 
2116 
2117 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2118  CallRuntime(instr->function(), instr->arity(), instr);
2119  after_push_argument_ = false;
2120 }
2121 
2122 
2123 void LCodeGen::DoCallStub(LCallStub* instr) {
2124  DCHECK(ToRegister(instr->context()).is(cp));
2125  DCHECK(ToRegister(instr->result()).is(x0));
2126  switch (instr->hydrogen()->major_key()) {
2127  case CodeStub::RegExpExec: {
2128  RegExpExecStub stub(isolate());
2129  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2130  break;
2131  }
2132  case CodeStub::SubString: {
2133  SubStringStub stub(isolate());
2134  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2135  break;
2136  }
2137  case CodeStub::StringCompare: {
2138  StringCompareStub stub(isolate());
2139  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2140  break;
2141  }
2142  default:
2143  UNREACHABLE();
2144  }
2145  after_push_argument_ = false;
2146 }
2147 
2148 
2149 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2150  GenerateOsrPrologue();
2151 }
2152 
2153 
2154 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2155  Register temp = ToRegister(instr->temp());
2156  {
2157  PushSafepointRegistersScope scope(this);
2158  __ Push(object);
2159  __ Mov(cp, 0);
2160  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2161  RecordSafepointWithRegisters(
2162  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2163  __ StoreToSafepointRegisterSlot(x0, temp);
2164  }
2165  DeoptimizeIfSmi(temp, instr, "instance migration failed");
2166 }
2167 
2168 
2169 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2170  class DeferredCheckMaps: public LDeferredCode {
2171  public:
2172  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2173  : LDeferredCode(codegen), instr_(instr), object_(object) {
2174  SetExit(check_maps());
2175  }
2176  virtual void Generate() {
2177  codegen()->DoDeferredInstanceMigration(instr_, object_);
2178  }
2179  Label* check_maps() { return &check_maps_; }
2180  virtual LInstruction* instr() { return instr_; }
2181  private:
2182  LCheckMaps* instr_;
2183  Label check_maps_;
2184  Register object_;
2185  };
2186 
2187  if (instr->hydrogen()->IsStabilityCheck()) {
2188  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2189  for (int i = 0; i < maps->size(); ++i) {
2190  AddStabilityDependency(maps->at(i).handle());
2191  }
2192  return;
2193  }
2194 
2195  Register object = ToRegister(instr->value());
2196  Register map_reg = ToRegister(instr->temp());
2197 
2198  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2199 
2200  DeferredCheckMaps* deferred = NULL;
2201  if (instr->hydrogen()->HasMigrationTarget()) {
2202  deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2203  __ Bind(deferred->check_maps());
2204  }
2205 
2206  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
2207  Label success;
2208  for (int i = 0; i < maps->size() - 1; i++) {
2209  Handle<Map> map = maps->at(i).handle();
2210  __ CompareMap(map_reg, map);
2211  __ B(eq, &success);
2212  }
2213  Handle<Map> map = maps->at(maps->size() - 1).handle();
2214  __ CompareMap(map_reg, map);
2215 
2216  // We didn't match a map.
2217  if (instr->hydrogen()->HasMigrationTarget()) {
2218  __ B(ne, deferred->entry());
2219  } else {
2220  DeoptimizeIf(ne, instr, "wrong map");
2221  }
2222 
2223  __ Bind(&success);
2224 }
2225 
2226 
2227 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2228  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2229  DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
2230  }
2231 }
2232 
2233 
2234 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2235  Register value = ToRegister(instr->value());
2236  DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2237  DeoptimizeIfNotSmi(value, instr, "not a Smi");
2238 }
2239 
2240 
2241 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2242  Register input = ToRegister(instr->value());
2243  Register scratch = ToRegister(instr->temp());
2244 
2245  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2246  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2247 
2248  if (instr->hydrogen()->is_interval_check()) {
2249  InstanceType first, last;
2250  instr->hydrogen()->GetCheckInterval(&first, &last);
2251 
2252  __ Cmp(scratch, first);
2253  if (first == last) {
2254  // If there is only one type in the interval, check for equality.
2255  DeoptimizeIf(ne, instr, "wrong instance type");
2256  } else if (last == LAST_TYPE) {
2257  // We don't need to compare with the higher bound of the interval.
2258  DeoptimizeIf(lo, instr, "wrong instance type");
2259  } else {
2260  // If we are below the lower bound, set the C flag and clear the Z flag
2261  // to force a deopt.
2262  __ Ccmp(scratch, last, CFlag, hs);
2263  DeoptimizeIf(hi, instr, "wrong instance type");
2264  }
2265  } else {
2266  uint8_t mask;
2267  uint8_t tag;
2268  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2269 
2270  if (base::bits::IsPowerOfTwo32(mask)) {
2271  DCHECK((tag == 0) || (tag == mask));
2272  if (tag == 0) {
2273  DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
2274  "wrong instance type");
2275  } else {
2276  DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
2277  "wrong instance type");
2278  }
2279  } else {
2280  if (tag == 0) {
2281  __ Tst(scratch, mask);
2282  } else {
2283  __ And(scratch, scratch, mask);
2284  __ Cmp(scratch, tag);
2285  }
2286  DeoptimizeIf(ne, instr, "wrong instance type");
2287  }
2288  }
2289 }
2290 
2291 
2292 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2293  DoubleRegister input = ToDoubleRegister(instr->unclamped());
2294  Register result = ToRegister32(instr->result());
2295  __ ClampDoubleToUint8(result, input, double_scratch());
2296 }
2297 
2298 
2299 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2300  Register input = ToRegister32(instr->unclamped());
2301  Register result = ToRegister32(instr->result());
2302  __ ClampInt32ToUint8(result, input);
2303 }
2304 
2305 
2306 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2307  Register input = ToRegister(instr->unclamped());
2308  Register result = ToRegister32(instr->result());
2309  Label done;
2310 
2311  // Both smi and heap number cases are handled.
2312  Label is_not_smi;
2313  __ JumpIfNotSmi(input, &is_not_smi);
2314  __ SmiUntag(result.X(), input);
2315  __ ClampInt32ToUint8(result);
2316  __ B(&done);
2317 
2318  __ Bind(&is_not_smi);
2319 
2320  // Check for heap number.
2321  Label is_heap_number;
2322  __ JumpIfHeapNumber(input, &is_heap_number);
2323 
2324  // Check for undefined. Undefined is converted to zero for the clamping conversion.
2325  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
2326  "not a heap number/undefined");
2327  __ Mov(result, 0);
2328  __ B(&done);
2329 
2330  // Heap number case.
2331  __ Bind(&is_heap_number);
2332  DoubleRegister dbl_scratch = double_scratch();
2333  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
2334  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2335  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2336 
2337  __ Bind(&done);
2338 }
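// --- Illustrative sketch (not part of the original source) -----------------
// DoClampTToUint8 above dispatches on the tagged input: a smi is untagged and
// clamped as an integer, undefined becomes 0, and a heap number is clamped as
// a double. The numeric part of the double path is roughly the following
// (hypothetical helper; ClampDoubleToUint8 also rounds to the nearest
// integer, which std::nearbyint does under the default rounding mode):

#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;     // NaN and non-positive values clamp to 0.
  if (value >= 255.0) return 255;   // Large values clamp to 255.
  return static_cast<uint8_t>(std::nearbyint(value));  // Round to nearest.
}
// ----------------------------------------------------------------------------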
2339 
2340 
2341 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
2342  DoubleRegister value_reg = ToDoubleRegister(instr->value());
2343  Register result_reg = ToRegister(instr->result());
2344  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
2345  __ Fmov(result_reg, value_reg);
2346  __ Lsr(result_reg, result_reg, 32);
2347  } else {
2348  __ Fmov(result_reg.W(), value_reg.S());
2349  }
2350 }
2351 
2352 
2353 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
2354  Register hi_reg = ToRegister(instr->hi());
2355  Register lo_reg = ToRegister(instr->lo());
2356  DoubleRegister result_reg = ToDoubleRegister(instr->result());
2357 
2358  // Insert the least significant 32 bits of hi_reg into the most significant
2359  // 32 bits of lo_reg, and move to a floating point register.
2360  __ Bfi(lo_reg, hi_reg, 32, 32);
2361  __ Fmov(result_reg, lo_reg);
2362 }
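// --- Illustrative sketch (not part of the original source) -----------------
// DoDoubleBits and DoConstructDouble above split a double into its high and
// low 32-bit words and reassemble one from such words, using Fmov/Lsr and
// Bfi/Fmov respectively. In portable C++ the same round trip looks roughly
// like this (hypothetical helpers):

#include <cstdint>
#include <cstring>

static void DoubleToWordsSketch(double value, uint32_t* hi, uint32_t* lo) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));   // Fmov(x, d) analogue.
  *hi = static_cast<uint32_t>(bits >> 32);    // Lsr by 32 for the high word.
  *lo = static_cast<uint32_t>(bits);          // Fmov(w, s) reads the low word.
}

static double WordsToDoubleSketch(uint32_t hi, uint32_t lo) {
  // Bfi inserts the low 32 bits of hi into bits [63:32] above lo's payload.
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // Fmov(d, x) analogue.
  return result;
}
// ----------------------------------------------------------------------------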
2363 
2364 
2365 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2366  Handle<String> class_name = instr->hydrogen()->class_name();
2367  Label* true_label = instr->TrueLabel(chunk_);
2368  Label* false_label = instr->FalseLabel(chunk_);
2369  Register input = ToRegister(instr->value());
2370  Register scratch1 = ToRegister(instr->temp1());
2371  Register scratch2 = ToRegister(instr->temp2());
2372 
2373  __ JumpIfSmi(input, false_label);
2374 
2375  Register map = scratch2;
2376  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2377  // Assuming the following assertions, we can use the same compares to test
2378  // for both being a function type and being in the object type range.
2383  LAST_SPEC_OBJECT_TYPE - 1);
2385 
2386  // We expect CompareObjectType to load the object instance type in scratch1.
2387  __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2388  __ B(lt, false_label);
2389  __ B(eq, true_label);
2391  __ B(eq, true_label);
2392  } else {
2393  __ IsObjectJSObjectType(input, map, scratch1, false_label);
2394  }
2395 
2396  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2397  // Check if the constructor in the map is a function.
2399 
2400  // Objects with a non-function constructor have class 'Object'.
2401  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2402  __ JumpIfNotObjectType(
2403  scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2404  } else {
2405  __ JumpIfNotObjectType(
2406  scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2407  }
2408 
2409  // The constructor function is in scratch1. Get its instance class name.
2410  __ Ldr(scratch1,
2412  __ Ldr(scratch1,
2415 
2416  // The class name we are testing against is internalized since it's a literal.
2417  // The name in the constructor is internalized because of the way the context
2418  // is booted. This routine isn't expected to work for random API-created
2419  // classes and it doesn't have to because you can't access it with natives
2420  // syntax. Since both sides are internalized it is sufficient to use an
2421  // identity comparison.
2422  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2423 }
2424 
2425 
2426 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2427  DCHECK(instr->hydrogen()->representation().IsDouble());
2428  FPRegister object = ToDoubleRegister(instr->object());
2429  Register temp = ToRegister(instr->temp());
2430 
2431  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2432  // (relatively expensive) hole-NaN check.
2433  __ Fcmp(object, object);
2434  __ B(vc, instr->FalseLabel(chunk_));
2435 
2436  // We have a NaN, but is it the hole?
2437  __ Fmov(temp, object);
2438  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
2439 }
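// --- Illustrative sketch (not part of the original source) -----------------
// DoCmpHoleAndBranchD above first uses Fcmp(object, object) to rule out all
// non-NaN values cheaply (only a NaN compares unordered with itself), and
// only then compares the raw bit pattern against kHoleNanInt64, the NaN
// payload V8 reserves for "the hole" in double arrays. With the constant kept
// abstract, the test is equivalent to:

#include <cstdint>
#include <cstring>

static bool IsHoleNanSketch(double value, uint64_t hole_nan_bits) {
  if (!(value != value)) return false;  // Not a NaN, so it cannot be the hole.
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));   // Fmov(temp, object) analogue.
  return bits == hole_nan_bits;               // Compare against kHoleNanInt64.
}
// ----------------------------------------------------------------------------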
2440 
2441 
2442 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2443  DCHECK(instr->hydrogen()->representation().IsTagged());
2444  Register object = ToRegister(instr->object());
2445 
2446  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2447 }
2448 
2449 
2450 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2451  Register value = ToRegister(instr->value());
2452  Register map = ToRegister(instr->temp());
2453 
2454  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2455  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2456 }
2457 
2458 
2459 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2460  Representation rep = instr->hydrogen()->value()->representation();
2461  DCHECK(!rep.IsInteger32());
2462  Register scratch = ToRegister(instr->temp());
2463 
2464  if (rep.IsDouble()) {
2465  __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2466  instr->TrueLabel(chunk()));
2467  } else {
2468  Register value = ToRegister(instr->value());
2469  __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2470  __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2471  __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
2472  }
2473  EmitGoto(instr->FalseDestination(chunk()));
2474 }
2475 
2476 
2477 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2478  LOperand* left = instr->left();
2479  LOperand* right = instr->right();
2480  bool is_unsigned =
2481  instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2482  instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2483  Condition cond = TokenToCondition(instr->op(), is_unsigned);
2484 
2485  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2486  // We can statically evaluate the comparison.
2487  double left_val = ToDouble(LConstantOperand::cast(left));
2488  double right_val = ToDouble(LConstantOperand::cast(right));
2489  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2490  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2491  EmitGoto(next_block);
2492  } else {
2493  if (instr->is_double()) {
2494  __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2495 
2496  // If a NaN is involved, i.e. the result is unordered (V set),
2497  // jump to false block label.
2498  __ B(vs, instr->FalseLabel(chunk_));
2499  EmitBranch(instr, cond);
2500  } else {
2501  if (instr->hydrogen_value()->representation().IsInteger32()) {
2502  if (right->IsConstantOperand()) {
2503  EmitCompareAndBranch(instr, cond, ToRegister32(left),
2504  ToOperand32(right));
2505  } else {
2506  // Commute the operands and the condition.
2508  ToRegister32(right), ToOperand32(left));
2509  }
2510  } else {
2511  DCHECK(instr->hydrogen_value()->representation().IsSmi());
2512  if (right->IsConstantOperand()) {
2513  int32_t value = ToInteger32(LConstantOperand::cast(right));
2514  EmitCompareAndBranch(instr,
2515  cond,
2516  ToRegister(left),
2517  Operand(Smi::FromInt(value)));
2518  } else if (left->IsConstantOperand()) {
2519  // Commute the operands and the condition.
2520  int32_t value = ToInteger32(LConstantOperand::cast(left));
2521  EmitCompareAndBranch(instr,
2522  CommuteCondition(cond),
2523  ToRegister(right),
2524  Operand(Smi::FromInt(value)));
2525  } else {
2526  EmitCompareAndBranch(instr,
2527  cond,
2528  ToRegister(left),
2529  ToRegister(right));
2530  }
2531  }
2532  }
2533  }
2534 }
2535 
2536 
2537 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2538  Register left = ToRegister(instr->left());
2539  Register right = ToRegister(instr->right());
2540  EmitCompareAndBranch(instr, eq, left, right);
2541 }
2542 
2543 
2544 void LCodeGen::DoCmpT(LCmpT* instr) {
2545  DCHECK(ToRegister(instr->context()).is(cp));
2546  Token::Value op = instr->op();
2547  Condition cond = TokenToCondition(op, false);
2548 
2549  DCHECK(ToRegister(instr->left()).Is(x1));
2550  DCHECK(ToRegister(instr->right()).Is(x0));
2551  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2552  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2553  // Signal that we don't inline smi code before this stub.
2555 
2556  // Return true or false depending on CompareIC result.
2557  // This instruction is marked as call. We can clobber any register.
2558  DCHECK(instr->IsMarkedAsCall());
2559  __ LoadTrueFalseRoots(x1, x2);
2560  __ Cmp(x0, 0);
2561  __ Csel(ToRegister(instr->result()), x1, x2, cond);
2562 }
2563 
2564 
2565 void LCodeGen::DoConstantD(LConstantD* instr) {
2566  DCHECK(instr->result()->IsDoubleRegister());
2567  DoubleRegister result = ToDoubleRegister(instr->result());
2568  if (instr->value() == 0) {
2569  if (copysign(1.0, instr->value()) == 1.0) {
2570  __ Fmov(result, fp_zero);
2571  } else {
2572  __ Fneg(result, fp_zero);
2573  }
2574  } else {
2575  __ Fmov(result, instr->value());
2576  }
2577 }
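// --- Illustrative sketch (not part of the original source) -----------------
// DoConstantD above special-cases zero constants: +0.0 and -0.0 compare equal
// as doubles, so copysign(1.0, value) is used to tell them apart and -0.0 is
// materialised with Fneg instead of a plain move. The same distinction in
// portable C++:

#include <cmath>

static bool IsNegativeZeroSketch(double value) {
  // value == 0.0 is true for both zeros; the sign must be read separately.
  return (value == 0.0) && (std::copysign(1.0, value) == -1.0);
}
// ----------------------------------------------------------------------------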
2578 
2579 
2580 void LCodeGen::DoConstantE(LConstantE* instr) {
2581  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2582 }
2583 
2584 
2585 void LCodeGen::DoConstantI(LConstantI* instr) {
2586  DCHECK(is_int32(instr->value()));
2587  // Cast the value here to ensure that the value isn't sign extended by the
2588  // implicit Operand constructor.
2589  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2590 }
2591 
2592 
2593 void LCodeGen::DoConstantS(LConstantS* instr) {
2594  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2595 }
2596 
2597 
2598 void LCodeGen::DoConstantT(LConstantT* instr) {
2599  Handle<Object> object = instr->value(isolate());
2601  __ LoadObject(ToRegister(instr->result()), object);
2602 }
2603 
2604 
2605 void LCodeGen::DoContext(LContext* instr) {
2606  // If there is a non-return use, the context must be moved to a register.
2607  Register result = ToRegister(instr->result());
2608  if (info()->IsOptimizing()) {
2609  __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2610  } else {
2611  // If there is no frame, the context must be in cp.
2612  DCHECK(result.is(cp));
2613  }
2614 }
2615 
2616 
2617 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2618  Register reg = ToRegister(instr->value());
2619  Handle<HeapObject> object = instr->hydrogen()->object().handle();
2621  if (isolate()->heap()->InNewSpace(*object)) {
2622  UseScratchRegisterScope temps(masm());
2623  Register temp = temps.AcquireX();
2624  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2625  __ Mov(temp, Operand(Handle<Object>(cell)));
2626  __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2627  __ Cmp(reg, temp);
2628  } else {
2629  __ Cmp(reg, Operand(object));
2630  }
2631  DeoptimizeIf(ne, instr, "value mismatch");
2632 }
2633 
2634 
2635 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2636  last_lazy_deopt_pc_ = masm()->pc_offset();
2637  DCHECK(instr->HasEnvironment());
2638  LEnvironment* env = instr->environment();
2639  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2640  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2641 }
2642 
2643 
2644 void LCodeGen::DoDateField(LDateField* instr) {
2645  Register object = ToRegister(instr->date());
2646  Register result = ToRegister(instr->result());
2647  Register temp1 = x10;
2648  Register temp2 = x11;
2649  Smi* index = instr->index();
2650  Label runtime, done;
2651 
2652  DCHECK(object.is(result) && object.Is(x0));
2653  DCHECK(instr->IsMarkedAsCall());
2654 
2655  DeoptimizeIfSmi(object, instr, "Smi");
2656  __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2657  DeoptimizeIf(ne, instr, "not a date object");
2658 
2659  if (index->value() == 0) {
2660  __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2661  } else {
2662  if (index->value() < JSDate::kFirstUncachedField) {
2663  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2664  __ Mov(temp1, Operand(stamp));
2665  __ Ldr(temp1, MemOperand(temp1));
2666  __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2667  __ Cmp(temp1, temp2);
2668  __ B(ne, &runtime);
2669  __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2670  kPointerSize * index->value()));
2671  __ B(&done);
2672  }
2673 
2674  __ Bind(&runtime);
2675  __ Mov(x1, Operand(index));
2676  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2677  }
2678 
2679  __ Bind(&done);
2680 }
2681 
2682 
2683 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2684  Deoptimizer::BailoutType type = instr->hydrogen()->type();
2685  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2686  // needed return address), even though the implementation of LAZY and EAGER is
2687  // now identical. When LAZY is eventually completely folded into EAGER, remove
2688  // the special case below.
2689  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2690  type = Deoptimizer::LAZY;
2691  }
2692 
2693  Deoptimize(instr, instr->hydrogen()->reason(), &type);
2694 }
2695 
2696 
2697 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2698  Register dividend = ToRegister32(instr->dividend());
2699  int32_t divisor = instr->divisor();
2700  Register result = ToRegister32(instr->result());
2701  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
2702  DCHECK(!result.is(dividend));
2703 
2704  // Check for (0 / -x) that will produce negative zero.
2705  HDiv* hdiv = instr->hydrogen();
2706  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2707  DeoptimizeIfZero(dividend, instr, "division by zero");
2708  }
2709  // Check for (kMinInt / -1).
2710  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2711  // Test dividend for kMinInt by subtracting one (cmp) and checking for
2712  // overflow.
2713  __ Cmp(dividend, 1);
2714  DeoptimizeIf(vs, instr, "overflow");
2715  }
2716  // Deoptimize if remainder will not be 0.
2717  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2718  divisor != 1 && divisor != -1) {
2719  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
2720  __ Tst(dividend, mask);
2721  DeoptimizeIf(ne, instr, "lost precision");
2722  }
2723 
2724  if (divisor == -1) { // Nice shortcut, not needed for correctness.
2725  __ Neg(result, dividend);
2726  return;
2727  }
2728  int32_t shift = WhichPowerOf2Abs(divisor);
2729  if (shift == 0) {
2730  __ Mov(result, dividend);
2731  } else if (shift == 1) {
2732  __ Add(result, dividend, Operand(dividend, LSR, 31));
2733  } else {
2734  __ Mov(result, Operand(dividend, ASR, 31));
2735  __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2736  }
2737  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2738  if (divisor < 0) __ Neg(result, result);
2739 }
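// --- Illustrative sketch (not part of the original source) -----------------
// The shift sequence emitted above implements round-towards-zero signed
// division by a power of two: an arithmetic shift alone rounds towards
// negative infinity, so for negative dividends a bias of (2^shift - 1),
// derived from the sign bits, is added first. The negation for negative
// divisors is applied afterwards. In portable C++ (hypothetical helper,
// 0 < shift < 32, assuming arithmetic right shift of signed values as on
// arm64 toolchains):

#include <cstdint>

static int32_t DivByPowerOf2Sketch(int32_t dividend, int shift) {
  // 0 for non-negative dividends, all ones for negative ones; this mirrors
  // Mov(result, Operand(dividend, ASR, 31)).
  uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);
  // Keep only the low 'shift' bits of the mask, i.e. 2^shift - 1 when the
  // dividend is negative; mirrors Add(..., Operand(result, LSR, 32 - shift)).
  int32_t biased = dividend + static_cast<int32_t>(sign_mask >> (32 - shift));
  return biased >> shift;  // ASR by shift: truncated quotient.
}
// ----------------------------------------------------------------------------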
2740 
2741 
2742 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2743  Register dividend = ToRegister32(instr->dividend());
2744  int32_t divisor = instr->divisor();
2745  Register result = ToRegister32(instr->result());
2746  DCHECK(!AreAliased(dividend, result));
2747 
2748  if (divisor == 0) {
2749  Deoptimize(instr, "division by zero");
2750  return;
2751  }
2752 
2753  // Check for (0 / -x) that will produce negative zero.
2754  HDiv* hdiv = instr->hydrogen();
2755  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2756  DeoptimizeIfZero(dividend, instr, "minus zero");
2757  }
2758 
2759  __ TruncatingDiv(result, dividend, Abs(divisor));
2760  if (divisor < 0) __ Neg(result, result);
2761 
2762  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2763  Register temp = ToRegister32(instr->temp());
2764  DCHECK(!AreAliased(dividend, result, temp));
2765  __ Sxtw(dividend.X(), dividend);
2766  __ Mov(temp, divisor);
2767  __ Smsubl(temp.X(), result, temp, dividend.X());
2768  DeoptimizeIfNotZero(temp, instr, "lost precision");
2769  }
2770 }
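// --- Illustrative sketch (not part of the original source) -----------------
// When not all uses of the result truncate, DoDivByConstI above multiplies
// the quotient back by the divisor (Smsubl) and deopts if the dividend is not
// reproduced, i.e. if the division had a remainder. The check amounts to:

#include <cstdint>

static bool DivByConstNeedsDeoptSketch(int32_t dividend, int32_t divisor,
                                       int32_t quotient) {
  // Smsubl computes dividend - quotient * divisor in 64 bits; a non-zero
  // result means the truncated quotient lost precision.
  int64_t remainder =
      static_cast<int64_t>(dividend) -
      static_cast<int64_t>(quotient) * static_cast<int64_t>(divisor);
  return remainder != 0;
}
// ----------------------------------------------------------------------------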
2771 
2772 
2773 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
2774 void LCodeGen::DoDivI(LDivI* instr) {
2775  HBinaryOperation* hdiv = instr->hydrogen();
2776  Register dividend = ToRegister32(instr->dividend());
2777  Register divisor = ToRegister32(instr->divisor());
2778  Register result = ToRegister32(instr->result());
2779 
2780  // Issue the division first, and then check for any deopt cases whilst the
2781  // result is computed.
2782  __ Sdiv(result, dividend, divisor);
2783 
2784  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2785  DCHECK_EQ(NULL, instr->temp());
2786  return;
2787  }
2788 
2789  // Check for x / 0.
2790  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2791  DeoptimizeIfZero(divisor, instr, "division by zero");
2792  }
2793 
2794  // Check for (0 / -x) as that will produce negative zero.
2795  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2796  __ Cmp(divisor, 0);
2797 
2798  // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2799  // zero, i.e. a zero dividend with a negative divisor deopts.
2800  // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2801  // condition ne, so we don't deopt, i.e. a non-negative divisor doesn't deopt.
2802  __ Ccmp(dividend, 0, NoFlag, mi);
2803  DeoptimizeIf(eq, instr, "minus zero");
2804  }
2805 
2806  // Check for (kMinInt / -1).
2807  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2808  // Test dividend for kMinInt by subtracting one (cmp) and checking for
2809  // overflow.
2810  __ Cmp(dividend, 1);
2811  // If overflow is set, i.e. the dividend is kMinInt, compare the divisor
2812  // with -1. If overflow is clear, set the flags for condition ne, as the
2813  // dividend isn't kMinInt, and thus we shouldn't deopt.
2814  __ Ccmp(divisor, -1, NoFlag, vs);
2815  DeoptimizeIf(eq, instr, "overflow");
2816  }
2817 
2818  // Compute remainder and deopt if it's not zero.
2819  Register remainder = ToRegister32(instr->temp());
2820  __ Msub(remainder, result, divisor, dividend);
2821  DeoptimizeIfNotZero(remainder, instr, "lost precision");
2822 }
2823 
2824 
2825 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2826  DoubleRegister input = ToDoubleRegister(instr->value());
2827  Register result = ToRegister32(instr->result());
2828 
2829  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2830  DeoptimizeIfMinusZero(input, instr, "minus zero");
2831  }
2832 
2833  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
2834  DeoptimizeIf(ne, instr, "lost precision or NaN");
2835 
2836  if (instr->tag_result()) {
2837  __ SmiTag(result.X());
2838  }
2839 }
2840 
2841 
2842 void LCodeGen::DoDrop(LDrop* instr) {
2843  __ Drop(instr->count());
2844 }
2845 
2846 
2847 void LCodeGen::DoDummy(LDummy* instr) {
2848  // Nothing to see here, move on!
2849 }
2850 
2851 
2852 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2853  // Nothing to see here, move on!
2854 }
2855 
2856 
2857 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2858  DCHECK(ToRegister(instr->context()).is(cp));
2859  // FunctionLiteral instruction is marked as call, we can trash any register.
2860  DCHECK(instr->IsMarkedAsCall());
2861 
2862  // Use the fast case closure allocation code that allocates in new
2863  // space for nested functions that don't need literals cloning.
2864  bool pretenure = instr->hydrogen()->pretenure();
2865  if (!pretenure && instr->hydrogen()->has_no_literals()) {
2866  FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
2867  instr->hydrogen()->kind());
2868  __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2869  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2870  } else {
2871  __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2872  __ Mov(x1, Operand(pretenure ? factory()->true_value()
2873  : factory()->false_value()));
2874  __ Push(cp, x2, x1);
2875  CallRuntime(Runtime::kNewClosure, 3, instr);
2876  }
2877 }
2878 
2879 
2880 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2881  Register map = ToRegister(instr->map());
2882  Register result = ToRegister(instr->result());
2883  Label load_cache, done;
2884 
2885  __ EnumLengthUntagged(result, map);
2886  __ Cbnz(result, &load_cache);
2887 
2888  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2889  __ B(&done);
2890 
2891  __ Bind(&load_cache);
2892  __ LoadInstanceDescriptors(map, result);
2893  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2894  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2895  DeoptimizeIfZero(result, instr, "no cache");
2896 
2897  __ Bind(&done);
2898 }
2899 
2900 
2901 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2902  Register object = ToRegister(instr->object());
2903  Register null_value = x5;
2904 
2905  DCHECK(instr->IsMarkedAsCall());
2906  DCHECK(object.Is(x0));
2907 
2908  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
2909 
2910  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2911  __ Cmp(object, null_value);
2912  DeoptimizeIf(eq, instr, "null");
2913 
2914  DeoptimizeIfSmi(object, instr, "Smi");
2915 
2917  __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2918  DeoptimizeIf(le, instr, "not a JavaScript object");
2919 
2920  Label use_cache, call_runtime;
2921  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2922 
2923  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2924  __ B(&use_cache);
2925 
2926  // Get the set of properties to enumerate.
2927  __ Bind(&call_runtime);
2928  __ Push(object);
2929  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2930 
2931  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2932  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
2933 
2934  __ Bind(&use_cache);
2935 }
2936 
2937 
2938 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2939  Register input = ToRegister(instr->value());
2940  Register result = ToRegister(instr->result());
2941 
2942  __ AssertString(input);
2943 
2944  // Assert that we can use a W register load to get the hash.
2946  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2947  __ IndexFromHash(result, result);
2948 }
2949 
2950 
2951 void LCodeGen::EmitGoto(int block) {
2952  // Do not emit jump if we are emitting a goto to the next block.
2953  if (!IsNextEmittedBlock(block)) {
2954  __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2955  }
2956 }
2957 
2958 
2959 void LCodeGen::DoGoto(LGoto* instr) {
2960  EmitGoto(instr->block_id());
2961 }
2962 
2963 
2964 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2965  LHasCachedArrayIndexAndBranch* instr) {
2966  Register input = ToRegister(instr->value());
2967  Register temp = ToRegister32(instr->temp());
2968 
2969  // Assert that the cache status bits fit in a W register.
2971  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2973  EmitBranch(instr, eq);
2974 }
2975 
2976 
2977 // The HHasInstanceTypeAndBranch instruction is built with an interval of types
2978 // to test, but is only used in very restricted ways. The only possible kinds
2979 // of intervals are:
2980 // - [ FIRST_TYPE, instr->to() ]
2981 // - [ instr->from(), LAST_TYPE ]
2982 // - instr->from() == instr->to()
2983 //
2984 // These kinds of intervals can be checked with only one compare instruction,
2985 // provided the correct value and test condition are used.
2986 //
2987 // TestType() will return the value to use in the compare instruction and
2988 // BranchCondition() will return the condition to use depending on the kind
2989 // of interval actually specified in the instruction.
2990 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2991  InstanceType from = instr->from();
2992  InstanceType to = instr->to();
2993  if (from == FIRST_TYPE) return to;
2994  DCHECK((from == to) || (to == LAST_TYPE));
2995  return from;
2996 }
2997 
2998 
2999 // See comment above TestType function for what this function does.
3000 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
3001  InstanceType from = instr->from();
3002  InstanceType to = instr->to();
3003  if (from == to) return eq;
3004  if (to == LAST_TYPE) return hs;
3005  if (from == FIRST_TYPE) return ls;
3006  UNREACHABLE();
3007  return eq;
3008 }
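// --- Illustrative sketch (not part of the original source) -----------------
// TestType and BranchCondition above reduce each supported interval to a
// single unsigned compare: against instr->to() with ls for [FIRST_TYPE, to],
// against instr->from() with hs for [from, LAST_TYPE], and with eq for a
// single type. The equivalence, in plain C++ (hypothetical helper):

#include <cstdint>

static bool InstanceTypeInIntervalSketch(uint16_t type, uint16_t from,
                                         uint16_t to, uint16_t first_type,
                                         uint16_t last_type) {
  if (from == to) return type == from;         // Compare against from, eq.
  if (from == first_type) return type <= to;   // Compare against to, ls.
  if (to == last_type) return type >= from;    // Compare against from, hs.
  // Any other interval is never produced by HHasInstanceTypeAndBranch.
  return false;
}
// ----------------------------------------------------------------------------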
3009 
3010 
3011 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
3012  Register input = ToRegister(instr->value());
3013  Register scratch = ToRegister(instr->temp());
3014 
3015  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3016  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3017  }
3018  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
3019  EmitBranch(instr, BranchCondition(instr->hydrogen()));
3020 }
3021 
3022 
3023 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3024  Register result = ToRegister(instr->result());
3025  Register base = ToRegister(instr->base_object());
3026  if (instr->offset()->IsConstantOperand()) {
3027  __ Add(result, base, ToOperand32(instr->offset()));
3028  } else {
3029  __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
3030  }
3031 }
3032 
3033 
3034 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
3035  DCHECK(ToRegister(instr->context()).is(cp));
3036  // Assert that the arguments are in the registers expected by InstanceofStub.
3037  DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
3038  DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
3039 
3040  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
3041  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3042 
3043  // InstanceofStub returns a result in x0:
3044  // 0 => not an instance
3045  // smi 1 => instance.
3046  __ Cmp(x0, 0);
3047  __ LoadTrueFalseRoots(x0, x1);
3048  __ Csel(x0, x0, x1, eq);
3049 }
3050 
3051 
3052 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3053  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
3054  public:
3055  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
3056  LInstanceOfKnownGlobal* instr)
3057  : LDeferredCode(codegen), instr_(instr) { }
3058  virtual void Generate() {
3059  codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
3060  }
3061  virtual LInstruction* instr() { return instr_; }
3062  private:
3063  LInstanceOfKnownGlobal* instr_;
3064  };
3065 
3066  DeferredInstanceOfKnownGlobal* deferred =
3067  new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3068 
3069  Label map_check, return_false, cache_miss, done;
3070  Register object = ToRegister(instr->value());
3071  Register result = ToRegister(instr->result());
3072  // x4 is expected in the associated deferred code and stub.
3073  Register map_check_site = x4;
3074  Register map = x5;
3075 
3076  // This instruction is marked as call. We can clobber any register.
3077  DCHECK(instr->IsMarkedAsCall());
3078 
3079  // We must take into account that object is in x11.
3080  DCHECK(object.Is(x11));
3081  Register scratch = x10;
3082 
3083  // A Smi is not instance of anything.
3084  __ JumpIfSmi(object, &return_false);
3085 
3086  // This is the inlined call site instanceof cache. The two occurrences of the
3087  // hole value will be patched to the last map/result pair generated by the
3088  // instanceof stub.
3090  {
3091  // Below we use Factory::the_hole_value() on purpose instead of loading from
3092  // the root array to force relocation and later be able to patch with a
3093  // custom value.
3094  InstructionAccurateScope scope(masm(), 5);
3095  __ bind(&map_check);
3096  // Will be patched with the cached map.
3097  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3098  __ ldr(scratch, Immediate(Handle<Object>(cell)));
3099  __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3100  __ cmp(map, scratch);
3101  __ b(&cache_miss, ne);
3102  // The address of this instruction is computed relative to the map check
3103  // above, so check the size of the code generated.
3104  DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
3105  // Will be patched with the cached result.
3106  __ ldr(result, Immediate(factory()->the_hole_value()));
3107  }
3108  __ B(&done);
3109 
3110  // The inlined call site cache did not match.
3111  // Check null and string before calling the deferred code.
3112  __ Bind(&cache_miss);
3113  // Compute the address of the map check. It must not be clobbered until the
3114  // InstanceOfStub has used it.
3115  __ Adr(map_check_site, &map_check);
3116  // Null is not instance of anything.
3117  __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
3118 
3119  // String values are not instances of anything.
3120  // Return false if the object is a string. Otherwise, jump to the deferred
3121  // code.
3122  // Note that we can't jump directly to deferred code from
3123  // IsObjectJSStringType, because it uses tbz for the jump and the deferred
3124  // code can be out of range.
3125  __ IsObjectJSStringType(object, scratch, NULL, &return_false);
3126  __ B(deferred->entry());
3127 
3128  __ Bind(&return_false);
3129  __ LoadRoot(result, Heap::kFalseValueRootIndex);
3130 
3131  // Here result is either true or false.
3132  __ Bind(deferred->exit());
3133  __ Bind(&done);
3134 }
3135 
3136 
3137 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3138  Register result = ToRegister(instr->result());
3139  DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
3141  flags = static_cast<InstanceofStub::Flags>(
3143  flags = static_cast<InstanceofStub::Flags>(
3145  flags = static_cast<InstanceofStub::Flags>(
3147 
3148  PushSafepointRegistersScope scope(this);
3149  LoadContextFromDeferred(instr->context());
3150 
3151  // Prepare InstanceofStub arguments.
3152  DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
3153  __ LoadObject(InstanceofStub::right(), instr->function());
3154 
3155  InstanceofStub stub(isolate(), flags);
3156  CallCodeGeneric(stub.GetCode(),
3158  instr,
3160  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3161  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3162 
3163  // Put the result value into the result register slot.
3164  __ StoreToSafepointRegisterSlot(result, result);
3165 }
3166 
3167 
3168 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3169  DoGap(instr);
3170 }
3171 
3172 
3173 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3174  Register value = ToRegister32(instr->value());
3175  DoubleRegister result = ToDoubleRegister(instr->result());
3176  __ Scvtf(result, value);
3177 }
3178 
3179 
3180 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3181  DCHECK(ToRegister(instr->context()).is(cp));
3182  // The function is required to be in x1.
3183  DCHECK(ToRegister(instr->function()).is(x1));
3184  DCHECK(instr->HasPointerMap());
3185 
3186  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3187  if (known_function.is_null()) {
3188  LPointerMap* pointers = instr->pointer_map();
3189  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3190  ParameterCount count(instr->arity());
3191  __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3192  } else {
3193  CallKnownFunction(known_function,
3194  instr->hydrogen()->formal_parameter_count(),
3195  instr->arity(),
3196  instr,
3197  x1);
3198  }
3199  after_push_argument_ = false;
3200 }
3201 
3202 
3203 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3204  Register temp1 = ToRegister(instr->temp1());
3205  Register temp2 = ToRegister(instr->temp2());
3206 
3207  // Get the frame pointer for the calling frame.
3209 
3210  // Skip the arguments adaptor frame if it exists.
3211  Label check_frame_marker;
3214  __ B(ne, &check_frame_marker);
3216 
3217  // Check the marker in the calling frame.
3218  __ Bind(&check_frame_marker);
3220 
3222  instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3223 }
3224 
3225 
3226 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3227  Label* is_object = instr->TrueLabel(chunk_);
3228  Label* is_not_object = instr->FalseLabel(chunk_);
3229  Register value = ToRegister(instr->value());
3230  Register map = ToRegister(instr->temp1());
3231  Register scratch = ToRegister(instr->temp2());
3232 
3233  __ JumpIfSmi(value, is_not_object);
3234  __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3235 
3236  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3237 
3238  // Check for undetectable objects.
3239  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3240  __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3241 
3242  // Check that instance type is in object type range.
3243  __ IsInstanceJSObjectType(map, scratch, NULL);
3244  // Flags have been updated by IsInstanceJSObjectType. We can now test the
3245  // flags for "le" condition to check if the object's type is a valid
3246  // JS object type.
3247  EmitBranch(instr, le);
3248 }
3249 
3250 
3251 Condition LCodeGen::EmitIsString(Register input,
3252  Register temp1,
3253  Label* is_not_string,
3254  SmiCheck check_needed = INLINE_SMI_CHECK) {
3255  if (check_needed == INLINE_SMI_CHECK) {
3256  __ JumpIfSmi(input, is_not_string);
3257  }
3258  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3259 
3260  return lt;
3261 }
3262 
3263 
3264 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3265  Register val = ToRegister(instr->value());
3266  Register scratch = ToRegister(instr->temp());
3267 
3268  SmiCheck check_needed =
3269  instr->hydrogen()->value()->type().IsHeapObject()
3271  Condition true_cond =
3272  EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3273 
3274  EmitBranch(instr, true_cond);
3275 }
3276 
3277 
3278 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3279  Register value = ToRegister(instr->value());
3280  STATIC_ASSERT(kSmiTag == 0);
3281  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3282 }
3283 
3284 
3285 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3286  Register input = ToRegister(instr->value());
3287  Register temp = ToRegister(instr->temp());
3288 
3289  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
3290  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3291  }
3292  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3293  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3294 
3295  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3296 }
3297 
3298 
3299 static const char* LabelType(LLabel* label) {
3300  if (label->is_loop_header()) return " (loop header)";
3301  if (label->is_osr_entry()) return " (OSR entry)";
3302  return "";
3303 }
3304 
3305 
3306 void LCodeGen::DoLabel(LLabel* label) {
3307  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3308  current_instruction_,
3309  label->hydrogen_value()->id(),
3310  label->block_id(),
3311  LabelType(label));
3312 
3313  __ Bind(label->label());
3314  current_block_ = label->block_id();
3315  DoGap(label);
3316 }
3317 
3318 
3319 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3320  Register context = ToRegister(instr->context());
3321  Register result = ToRegister(instr->result());
3322  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3323  if (instr->hydrogen()->RequiresHoleCheck()) {
3324  if (instr->hydrogen()->DeoptimizesOnHole()) {
3325  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
3326  } else {
3327  Label not_the_hole;
3328  __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3329  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3330  __ Bind(&not_the_hole);
3331  }
3332  }
3333 }
3334 
3335 
3336 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3337  Register function = ToRegister(instr->function());
3338  Register result = ToRegister(instr->result());
3339  Register temp = ToRegister(instr->temp());
3340 
3341  // Get the prototype or initial map from the function.
3342  __ Ldr(result, FieldMemOperand(function,
3343                                 JSFunction::kPrototypeOrInitialMapOffset));
3344 
3345  // Check that the function has a prototype or an initial map.
3346  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
3347 
3348  // If the function does not have an initial map, we're done.
3349  Label done;
3350  __ CompareObjectType(result, temp, temp, MAP_TYPE);
3351  __ B(ne, &done);
3352 
3353  // Get the prototype from the initial map.
3354  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3355 
3356  // All done.
3357  __ Bind(&done);
3358 }
3359 
3360 
3361 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3362  Register result = ToRegister(instr->result());
3363  __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3364  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3365  if (instr->hydrogen()->RequiresHoleCheck()) {
3366  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
3367  }
3368 }
3369 
3370 
3371 template <class T>
3373  DCHECK(FLAG_vector_ics);
3374  Register vector = ToRegister(instr->temp_vector());
3376  __ Mov(vector, instr->hydrogen()->feedback_vector());
3377  // No need to allocate this register.
3380  Smi::FromInt(instr->hydrogen()->slot()));
3381 }
3382 
3383 
3384 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3385  DCHECK(ToRegister(instr->context()).is(cp));
3386  DCHECK(ToRegister(instr->global_object())
3388  DCHECK(ToRegister(instr->result()).Is(x0));
3389  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3390  if (FLAG_vector_ics) {
3391  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3392  }
3393  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3394  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3395  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3396 }
3397 
3398 
3400  Register key,
3401  Register base,
3402  Register scratch,
3403  bool key_is_smi,
3404  bool key_is_constant,
3405  int constant_key,
3406  ElementsKind elements_kind,
3407  int base_offset) {
3408  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3409 
3410  if (key_is_constant) {
3411  int key_offset = constant_key << element_size_shift;
3412  return MemOperand(base, key_offset + base_offset);
3413  }
3414 
3415  if (key_is_smi) {
3416  __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3417  return MemOperand(scratch, base_offset);
3418  }
3419 
3420  if (base_offset == 0) {
3421  return MemOperand(base, key, SXTW, element_size_shift);
3422  }
3423 
3424  DCHECK(!AreAliased(scratch, key));
3425  __ Add(scratch, base, base_offset);
3426  return MemOperand(scratch, key, SXTW, element_size_shift);
3427 }
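// --- Illustrative sketch (not part of the original source) -----------------
// PrepareKeyedExternalArrayOperand above picks one of three addressing shapes
// (constant key folded into the offset, smi key untagged and scaled, or a
// register key sign-extended and scaled), but they all describe the same
// effective address. As plain address arithmetic (hypothetical helper, key
// already untagged):

#include <cstdint>

static uint64_t KeyedExternalAddressSketch(uint64_t base, int64_t key,
                                           int element_size_shift,
                                           int64_t base_offset) {
  // element address = base + base_offset + key * (1 << element_size_shift).
  return base + static_cast<uint64_t>(base_offset) +
         (static_cast<uint64_t>(key) << element_size_shift);
}
// ----------------------------------------------------------------------------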
3428 
3429 
3430 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3431  Register ext_ptr = ToRegister(instr->elements());
3432  Register scratch;
3433  ElementsKind elements_kind = instr->elements_kind();
3434 
3435  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3436  bool key_is_constant = instr->key()->IsConstantOperand();
3437  Register key = no_reg;
3438  int constant_key = 0;
3439  if (key_is_constant) {
3440  DCHECK(instr->temp() == NULL);
3441  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3442  if (constant_key & 0xf0000000) {
3443  Abort(kArrayIndexConstantValueTooBig);
3444  }
3445  } else {
3446  scratch = ToRegister(instr->temp());
3447  key = ToRegister(instr->key());
3448  }
3449 
3450  MemOperand mem_op =
3451  PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3452  key_is_constant, constant_key,
3453  elements_kind,
3454  instr->base_offset());
3455 
3456  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3457  (elements_kind == FLOAT32_ELEMENTS)) {
3458  DoubleRegister result = ToDoubleRegister(instr->result());
3459  __ Ldr(result.S(), mem_op);
3460  __ Fcvt(result, result.S());
3461  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3462  (elements_kind == FLOAT64_ELEMENTS)) {
3463  DoubleRegister result = ToDoubleRegister(instr->result());
3464  __ Ldr(result, mem_op);
3465  } else {
3466  Register result = ToRegister(instr->result());
3467 
3468  switch (elements_kind) {
3469  case EXTERNAL_INT8_ELEMENTS:
3470  case INT8_ELEMENTS:
3471  __ Ldrsb(result, mem_op);
3472  break;
3473  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3474  case EXTERNAL_UINT8_ELEMENTS:
3475  case UINT8_ELEMENTS:
3476  case UINT8_CLAMPED_ELEMENTS:
3477  __ Ldrb(result, mem_op);
3478  break;
3479  case EXTERNAL_INT16_ELEMENTS:
3480  case INT16_ELEMENTS:
3481  __ Ldrsh(result, mem_op);
3482  break;
3483  case EXTERNAL_UINT16_ELEMENTS:
3484  case UINT16_ELEMENTS:
3485  __ Ldrh(result, mem_op);
3486  break;
3487  case EXTERNAL_INT32_ELEMENTS:
3488  case INT32_ELEMENTS:
3489  __ Ldrsw(result, mem_op);
3490  break;
3491  case EXTERNAL_UINT32_ELEMENTS:
3492  case UINT32_ELEMENTS:
3493  __ Ldr(result.W(), mem_op);
3494  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3495  // Deopt if value > 0x80000000.
3496  __ Tst(result, 0xFFFFFFFF80000000);
3497  DeoptimizeIf(ne, instr, "negative value");
3498  }
3499  break;
3500  case FLOAT32_ELEMENTS:
3501  case FLOAT64_ELEMENTS:
3502  case EXTERNAL_FLOAT32_ELEMENTS:
3503  case EXTERNAL_FLOAT64_ELEMENTS:
3504  case FAST_HOLEY_DOUBLE_ELEMENTS:
3505  case FAST_HOLEY_ELEMENTS:
3506  case FAST_HOLEY_SMI_ELEMENTS:
3507  case FAST_DOUBLE_ELEMENTS:
3508  case FAST_ELEMENTS:
3509  case FAST_SMI_ELEMENTS:
3510  case DICTIONARY_ELEMENTS:
3511  case SLOPPY_ARGUMENTS_ELEMENTS:
3512  UNREACHABLE();
3513  break;
3514  }
3515  }
3516 }
3517 
3518 
3519 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
3520  Register elements,
3521  Register key,
3522  bool key_is_tagged,
3523  ElementsKind elements_kind,
3524  Representation representation,
3525  int base_offset) {
3526  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3527  STATIC_ASSERT(kSmiTag == 0);
3528  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3529 
3530  // Even though the HLoad/StoreKeyed instructions force the input
3531  // representation for the key to be an integer, the input gets replaced during
3532  // bounds check elimination with the index argument to the bounds check, which
3533  // can be tagged, so that case must be handled here, too.
3534  if (key_is_tagged) {
3535  __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3536  if (representation.IsInteger32()) {
3537  DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3538  // Read or write only the smi payload in the case of fast smi arrays.
3539  return UntagSmiMemOperand(base, base_offset);
3540  } else {
3541  return MemOperand(base, base_offset);
3542  }
3543  } else {
3544  // Sign extend key because it could be a 32-bit negative value or contain
3545  // garbage in the top 32-bits. The address computation happens in 64-bit.
3546  DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
3547  if (representation.IsInteger32()) {
3548  DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3549  // Read or write only the smi payload in the case of fast smi arrays.
3550  __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3551  return UntagSmiMemOperand(base, base_offset);
3552  } else {
3553  __ Add(base, elements, base_offset);
3554  return MemOperand(base, key, SXTW, element_size_shift);
3555  }
3556  }
3557 }
3558 
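// Illustrative sketch, not part of the original source: what "read or write only
// the smi payload" above amounts to, assuming the arm64 smi encoding asserted by
// the STATIC_ASSERTs (a 32-bit payload stored in the upper word, tag bits clear).
// The helper name is invented for illustration.
#include <cstdint>
static int32_t SmiPayload(int64_t tagged_field) {
  return static_cast<int32_t>(tagged_field >> 32);  // UntagSmiMemOperand reads this word directly.
}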
3559 
3560 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3561  Register elements = ToRegister(instr->elements());
3562  DoubleRegister result = ToDoubleRegister(instr->result());
3563  MemOperand mem_op;
3564 
3565  if (instr->key()->IsConstantOperand()) {
3566  DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
3567  (instr->temp() == NULL));
3568 
3569  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3570  if (constant_key & 0xf0000000) {
3571  Abort(kArrayIndexConstantValueTooBig);
3572  }
3573  int offset = instr->base_offset() + constant_key * kDoubleSize;
3574  mem_op = MemOperand(elements, offset);
3575  } else {
3576  Register load_base = ToRegister(instr->temp());
3577  Register key = ToRegister(instr->key());
3578  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3579  mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3580  instr->hydrogen()->elements_kind(),
3581  instr->hydrogen()->representation(),
3582  instr->base_offset());
3583  }
3584 
3585  __ Ldr(result, mem_op);
3586 
3587  if (instr->hydrogen()->RequiresHoleCheck()) {
3588  Register scratch = ToRegister(instr->temp());
3589  // Detect the hole NaN by adding one to the integer representation of the
3590  // result, and checking for overflow.
3591  STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
3592  __ Ldr(scratch, mem_op);
3593  __ Cmn(scratch, 1);
3594  DeoptimizeIf(vs, instr, "hole");
3595  }
3596 }
3597 
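// Illustrative sketch, not part of the original source: the hole check above.
// The hole is the NaN with bit pattern 0x7fffffffffffffff (kHoleNanInt64), the
// only 64-bit value for which adding 1 overflows into the sign bit, so
// Cmn(scratch, 1) sets the V flag exactly for the hole. Helper name invented.
#include <cstdint>
static bool IsHoleNaN(uint64_t bits) {
  return bits == 0x7fffffffffffffffULL;
}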
3598 
3599 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3600  Register elements = ToRegister(instr->elements());
3601  Register result = ToRegister(instr->result());
3602  MemOperand mem_op;
3603 
3604  Representation representation = instr->hydrogen()->representation();
3605  if (instr->key()->IsConstantOperand()) {
3606  DCHECK(instr->temp() == NULL);
3607  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3608  int offset = instr->base_offset() +
3609  ToInteger32(const_operand) * kPointerSize;
3610  if (representation.IsInteger32()) {
3611  DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
3612  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3613  STATIC_ASSERT(kSmiTag == 0);
3614  mem_op = UntagSmiMemOperand(elements, offset);
3615  } else {
3616  mem_op = MemOperand(elements, offset);
3617  }
3618  } else {
3619  Register load_base = ToRegister(instr->temp());
3620  Register key = ToRegister(instr->key());
3621  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3622 
3623  mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
3624  instr->hydrogen()->elements_kind(),
3625  representation, instr->base_offset());
3626  }
3627 
3628  __ Load(result, mem_op, representation);
3629 
3630  if (instr->hydrogen()->RequiresHoleCheck()) {
3631  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3632  DeoptimizeIfNotSmi(result, instr, "not a Smi");
3633  } else {
3634  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
3635  }
3636  }
3637 }
3638 
3639 
3640 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3641  DCHECK(ToRegister(instr->context()).is(cp));
3642  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3643  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3644  if (FLAG_vector_ics) {
3645  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3646  }
3647 
3648  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3649  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3650 
3651  DCHECK(ToRegister(instr->result()).Is(x0));
3652 }
3653 
3654 
3655 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3656  HObjectAccess access = instr->hydrogen()->access();
3657  int offset = access.offset();
3658  Register object = ToRegister(instr->object());
3659 
3660  if (access.IsExternalMemory()) {
3661  Register result = ToRegister(instr->result());
3662  __ Load(result, MemOperand(object, offset), access.representation());
3663  return;
3664  }
3665 
3666  if (instr->hydrogen()->representation().IsDouble()) {
3667  FPRegister result = ToDoubleRegister(instr->result());
3668  __ Ldr(result, FieldMemOperand(object, offset));
3669  return;
3670  }
3671 
3672  Register result = ToRegister(instr->result());
3673  Register source;
3674  if (access.IsInobject()) {
3675  source = object;
3676  } else {
3677  // Load the properties array, using result as a scratch register.
3678  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3679  source = result;
3680  }
3681 
3682  if (access.representation().IsSmi() &&
3683  instr->hydrogen()->representation().IsInteger32()) {
3684  // Read int value directly from upper half of the smi.
3685  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3686  STATIC_ASSERT(kSmiTag == 0);
3687  __ Load(result, UntagSmiFieldMemOperand(source, offset),
3688  Representation::Integer32());
3689  } else {
3690  __ Load(result, FieldMemOperand(source, offset), access.representation());
3691  }
3692 }
3693 
3694 
3695 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3696  DCHECK(ToRegister(instr->context()).is(cp));
3697  // LoadIC expects name and receiver in registers.
3698  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3699  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3700  if (FLAG_vector_ics) {
3701  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3702  }
3703 
3704  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3705  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3706 
3707  DCHECK(ToRegister(instr->result()).is(x0));
3708 }
3709 
3710 
3711 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3712  Register result = ToRegister(instr->result());
3713  __ LoadRoot(result, instr->index());
3714 }
3715 
3716 
3717 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3718  Register result = ToRegister(instr->result());
3719  Register map = ToRegister(instr->value());
3720  __ EnumLengthSmi(result, map);
3721 }
3722 
3723 
3724 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3725  Representation r = instr->hydrogen()->value()->representation();
3726  if (r.IsDouble()) {
3727  DoubleRegister input = ToDoubleRegister(instr->value());
3728  DoubleRegister result = ToDoubleRegister(instr->result());
3729  __ Fabs(result, input);
3730  } else if (r.IsSmi() || r.IsInteger32()) {
3731  Register input = r.IsSmi() ? ToRegister(instr->value())
3732  : ToRegister32(instr->value());
3733  Register result = r.IsSmi() ? ToRegister(instr->result())
3734  : ToRegister32(instr->result());
3735  __ Abs(result, input);
3736  DeoptimizeIf(vs, instr, "overflow");
3737  }
3738 }
3739 
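// Illustrative sketch, not part of the original source: the overflow case that
// the "vs" deopt above guards against. Negating kMinInt does not fit in 32 bits,
// so Abs() sets the V flag for exactly one input. Helper name invented.
#include <cstdint>
#include <limits>
static bool AbsOverflows32(int32_t value) {
  return value == std::numeric_limits<int32_t>::min();  // -(-2147483648) overflows.
}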
3740 
3741 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3742  Label* exit,
3743  Label* allocation_entry) {
3744  // Handle the tricky cases of MathAbsTagged:
3745  // - HeapNumber inputs.
3746  // - Negative inputs produce a positive result, so a new HeapNumber is
3747  // allocated to hold it.
3748  // - Positive inputs are returned as-is, since there is no need to allocate
3749  // a new HeapNumber for the result.
3750  // - The (smi) input -0x80000000 produces +0x80000000, which does not fit
3751  // in a smi. In this case, the inline code sets the result and jumps directly
3752  // to the allocation_entry label.
3753  DCHECK(instr->context() != NULL);
3754  DCHECK(ToRegister(instr->context()).is(cp));
3755  Register input = ToRegister(instr->value());
3756  Register temp1 = ToRegister(instr->temp1());
3757  Register temp2 = ToRegister(instr->temp2());
3758  Register result_bits = ToRegister(instr->temp3());
3759  Register result = ToRegister(instr->result());
3760 
3761  Label runtime_allocation;
3762 
3763  // Deoptimize if the input is not a HeapNumber.
3764  DeoptimizeIfNotHeapNumber(input, instr);
3765 
3766  // If the argument is positive, we can return it as-is, without any need to
3767  // allocate a new HeapNumber for the result. We have to do this in integer
3768  // registers (rather than with fabs) because we need to be able to distinguish
3769  // the two zeroes.
3770  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3771  __ Mov(result, input);
3772  __ Tbz(result_bits, kXSignBit, exit);
3773 
3774  // Calculate abs(input) by clearing the sign bit.
3775  __ Bic(result_bits, result_bits, kXSignMask);
3776 
3777  // Allocate a new HeapNumber to hold the result.
3778  // result_bits The bit representation of the (double) result.
3779  __ Bind(allocation_entry);
3780  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3781  // The inline (non-deferred) code will store result_bits into result.
3782  __ B(exit);
3783 
3784  __ Bind(&runtime_allocation);
3785  if (FLAG_debug_code) {
3786  // Because result is in the pointer map, we need to make sure it has a valid
3787  // tagged value before we call the runtime. We speculatively set it to the
3788  // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3789  // be valid.
3790  Label result_ok;
3791  Register input = ToRegister(instr->value());
3792  __ JumpIfSmi(result, &result_ok);
3793  __ Cmp(input, result);
3794  __ Assert(eq, kUnexpectedValue);
3795  __ Bind(&result_ok);
3796  }
3797 
3798  { PushSafepointRegistersScope scope(this);
3799  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3800  instr->context());
3801  __ StoreToSafepointRegisterSlot(x0, result);
3802  }
3803  // The inline (non-deferred) code will store result_bits into result.
3804 }
3805 
3806 
3807 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3808  // Class for deferred case.
3809  class DeferredMathAbsTagged: public LDeferredCode {
3810  public:
3811  DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3812  : LDeferredCode(codegen), instr_(instr) { }
3813  virtual void Generate() {
3814  codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3815  allocation_entry());
3816  }
3817  virtual LInstruction* instr() { return instr_; }
3818  Label* allocation_entry() { return &allocation; }
3819  private:
3820  LMathAbsTagged* instr_;
3821  Label allocation;
3822  };
3823 
3824  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3825  // in GenerateDeferredCode. Tidy this up.
3826  DCHECK(!NeedsDeferredFrame());
3827 
3828  DeferredMathAbsTagged* deferred =
3829  new(zone()) DeferredMathAbsTagged(this, instr);
3830 
3831  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
3832  instr->hydrogen()->value()->representation().IsSmi());
3833  Register input = ToRegister(instr->value());
3834  Register result_bits = ToRegister(instr->temp3());
3835  Register result = ToRegister(instr->result());
3836  Label done;
3837 
3838  // Handle smis inline.
3839  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3840  // never get set by the negation. This is therefore the same as the Integer32
3841  // case in DoMathAbs, except that it operates on 64-bit values.
3842  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3843 
3844  __ JumpIfNotSmi(input, deferred->entry());
3845 
3846  __ Abs(result, input, NULL, &done);
3847 
3848  // The result is the magnitude (abs) of the smallest value a smi can
3849  // represent, encoded as a double.
3850  __ Mov(result_bits, double_to_rawbits(0x80000000));
3851  __ B(deferred->allocation_entry());
3852 
3853  __ Bind(deferred->exit());
3854  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3855 
3856  __ Bind(&done);
3857 }
3858 
3859 
3860 void LCodeGen::DoMathExp(LMathExp* instr) {
3861  DoubleRegister input = ToDoubleRegister(instr->value());
3862  DoubleRegister result = ToDoubleRegister(instr->result());
3863  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3864  DoubleRegister double_temp2 = double_scratch();
3865  Register temp1 = ToRegister(instr->temp1());
3866  Register temp2 = ToRegister(instr->temp2());
3867  Register temp3 = ToRegister(instr->temp3());
3868 
3869  MathExpGenerator::EmitMathExp(masm(), input, result,
3870  double_temp1, double_temp2,
3871  temp1, temp2, temp3);
3872 }
3873 
3874 
3875 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3876  DoubleRegister input = ToDoubleRegister(instr->value());
3877  DoubleRegister result = ToDoubleRegister(instr->result());
3878 
3879  __ Frintm(result, input);
3880 }
3881 
3882 
3883 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3884  DoubleRegister input = ToDoubleRegister(instr->value());
3885  Register result = ToRegister(instr->result());
3886 
3887  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3888  DeoptimizeIfMinusZero(input, instr, "minus zero");
3889  }
3890 
3891  __ Fcvtms(result, input);
3892 
3893  // Check that the result fits into a 32-bit integer.
3894  // - The result did not overflow.
3895  __ Cmp(result, Operand(result, SXTW));
3896  // - The input was not NaN.
3897  __ Fccmp(input, input, NoFlag, eq);
3898  DeoptimizeIf(ne, instr, "lost precision or NaN");
3899 }
3900 
3901 
3902 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
3903  Register dividend = ToRegister32(instr->dividend());
3904  Register result = ToRegister32(instr->result());
3905  int32_t divisor = instr->divisor();
3906 
3907  // If the divisor is 1, return the dividend.
3908  if (divisor == 1) {
3909  __ Mov(result, dividend, kDiscardForSameWReg);
3910  return;
3911  }
3912 
3913  // If the divisor is positive, things are easy: There can be no deopts and we
3914  // can simply do an arithmetic right shift.
3915  int32_t shift = WhichPowerOf2Abs(divisor);
3916  if (divisor > 1) {
3917  __ Mov(result, Operand(dividend, ASR, shift));
3918  return;
3919  }
3920 
3921  // If the divisor is negative, we have to negate and handle edge cases.
3922  __ Negs(result, dividend);
3923  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3924  DeoptimizeIf(eq, instr, "minus zero");
3925  }
3926 
3927  // Dividing by -1 is basically negation, unless we overflow.
3928  if (divisor == -1) {
3929  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3930  DeoptimizeIf(vs, instr, "overflow");
3931  }
3932  return;
3933  }
3934 
3935  // If the negation could not overflow, simply shifting is OK.
3936  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
3937  __ Mov(result, Operand(dividend, ASR, shift));
3938  return;
3939  }
3940 
3941  __ Asr(result, result, shift);
3942  __ Csel(result, result, kMinInt / divisor, vc);
3943 }
3944 
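// Illustrative sketch, not part of the original source: why a plain arithmetic
// shift suffices for a positive power-of-two divisor. Assumes arithmetic right
// shift on signed values, as ASR provides; helper name invented.
#include <cstdint>
static int32_t FlooringDivByPowerOf2(int32_t dividend, int shift) {
  // Rounds towards -infinity, e.g. -7 >> 1 == -4 == floor(-7 / 2).
  return dividend >> shift;
}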
3945 
3946 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3947  Register dividend = ToRegister32(instr->dividend());
3948  int32_t divisor = instr->divisor();
3949  Register result = ToRegister32(instr->result());
3950  DCHECK(!AreAliased(dividend, result));
3951 
3952  if (divisor == 0) {
3953  Deoptimize(instr, "division by zero");
3954  return;
3955  }
3956 
3957  // Check for (0 / -x) that will produce negative zero.
3958  HMathFloorOfDiv* hdiv = instr->hydrogen();
3959  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3960  DeoptimizeIfZero(dividend, instr, "minus zero");
3961  }
3962 
3963  // Easy case: We need no dynamic check for the dividend and the flooring
3964  // division is the same as the truncating division.
3965  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3966  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3967  __ TruncatingDiv(result, dividend, Abs(divisor));
3968  if (divisor < 0) __ Neg(result, result);
3969  return;
3970  }
3971 
3972  // In the general case we may need to adjust before and after the truncating
3973  // division to get a flooring division.
3974  Register temp = ToRegister32(instr->temp());
3975  DCHECK(!AreAliased(temp, dividend, result));
3976  Label needs_adjustment, done;
3977  __ Cmp(dividend, 0);
3978  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3979  __ TruncatingDiv(result, dividend, Abs(divisor));
3980  if (divisor < 0) __ Neg(result, result);
3981  __ B(&done);
3982  __ Bind(&needs_adjustment);
3983  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3984  __ TruncatingDiv(result, temp, Abs(divisor));
3985  if (divisor < 0) __ Neg(result, result);
3986  __ Sub(result, result, Operand(1));
3987  __ Bind(&done);
3988 }
3989 
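// Illustrative sketch, not part of the original source: the flooring semantics
// produced by the adjust-then-truncate sequence above, written as plain C++ for
// a non-zero divisor with no kMinInt overflow (those cases deopt). The helper
// name is invented and the exact instruction sequence differs.
#include <cstdint>
static int32_t FlooringDivByConstant(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;  // Truncating, like TruncatingDiv.
  // If the signs differ and the division was inexact, step down by one to turn
  // truncation (towards zero) into flooring (towards -infinity).
  if (((dividend ^ divisor) < 0) && (quotient * divisor != dividend)) {
    quotient -= 1;
  }
  return quotient;
}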
3990 
3991 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
3992 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
3993  Register dividend = ToRegister32(instr->dividend());
3994  Register divisor = ToRegister32(instr->divisor());
3995  Register remainder = ToRegister32(instr->temp());
3996  Register result = ToRegister32(instr->result());
3997 
3998  // This can't cause an exception on ARM, so we can execute it
3999  // speculatively now.
4000  __ Sdiv(result, dividend, divisor);
4001 
4002  // Check for x / 0.
4003  DeoptimizeIfZero(divisor, instr, "division by zero");
4004 
4005  // Check for (kMinInt / -1).
4006  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
4007  // The V flag will be set iff dividend == kMinInt.
4008  __ Cmp(dividend, 1);
4009  __ Ccmp(divisor, -1, NoFlag, vs);
4010  DeoptimizeIf(eq, instr, "overflow");
4011  }
4012 
4013  // Check for (0 / -x) that will produce negative zero.
4014  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4015  __ Cmp(divisor, 0);
4016  __ Ccmp(dividend, 0, ZFlag, mi);
4017  // "divisor" can't be zero because the code would have already been
4018  // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
4019  // In this case we need to deoptimize to produce a -0.
4020  DeoptimizeIf(eq, instr, "minus zero");
4021  }
4022 
4023  Label done;
4024  // If both operands have the same sign then we are done.
4025  __ Eor(remainder, dividend, divisor);
4026  __ Tbz(remainder, kWSignBit, &done);
4027 
4028  // Check if the result needs to be corrected.
4029  __ Msub(remainder, result, divisor, dividend);
4030  __ Cbz(remainder, &done);
4031  __ Sub(result, result, 1);
4032 
4033  __ Bind(&done);
4034 }
4035 
4036 
4037 void LCodeGen::DoMathLog(LMathLog* instr) {
4038  DCHECK(instr->IsMarkedAsCall());
4039  DCHECK(ToDoubleRegister(instr->value()).is(d0));
4040  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
4041  0, 1);
4042  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
4043 }
4044 
4045 
4046 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4047  Register input = ToRegister32(instr->value());
4048  Register result = ToRegister32(instr->result());
4049  __ Clz(result, input);
4050 }
4051 
4052 
4053 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4054  DoubleRegister input = ToDoubleRegister(instr->value());
4055  DoubleRegister result = ToDoubleRegister(instr->result());
4056  Label done;
4057 
4058  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
4059  // Math.pow(-Infinity, 0.5) == +Infinity
4060  // Math.pow(-0.0, 0.5) == +0.0
4061 
4062  // Catch -infinity inputs first.
4063  // TODO(jbramley): A constant infinity register would be helpful here.
4064  __ Fmov(double_scratch(), kFP64NegativeInfinity);
4065  __ Fcmp(double_scratch(), input);
4066  __ Fabs(result, input);
4067  __ B(&done, eq);
4068 
4069  // Add +0.0 to convert -0.0 to +0.0.
4070  __ Fadd(double_scratch(), input, fp_zero);
4071  __ Fsqrt(result, double_scratch());
4072 
4073  __ Bind(&done);
4074 }
4075 
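// Illustrative sketch, not part of the original source: the two special cases
// handled above, where Math.pow(x, 0.5) differs from a bare fsqrt. Helper name
// invented.
#include <cmath>
#include <limits>
static double PowHalf(double x) {
  // Math.pow(-Infinity, 0.5) == +Infinity, whereas fsqrt(-Infinity) is NaN.
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  // Adding +0.0 turns a -0.0 input into +0.0 before taking the square root.
  return std::sqrt(x + 0.0);
}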
4076 
4077 void LCodeGen::DoPower(LPower* instr) {
4078  Representation exponent_type = instr->hydrogen()->right()->representation();
4079  // Having marked this as a call, we can use any registers.
4080  // Just make sure that the input/output registers are the expected ones.
4081  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
4082  Register integer_exponent = MathPowIntegerDescriptor::exponent();
4083  DCHECK(!instr->right()->IsDoubleRegister() ||
4084  ToDoubleRegister(instr->right()).is(d1));
4085  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4086  ToRegister(instr->right()).is(tagged_exponent));
4087  DCHECK(!exponent_type.IsInteger32() ||
4088  ToRegister(instr->right()).is(integer_exponent));
4089  DCHECK(ToDoubleRegister(instr->left()).is(d0));
4090  DCHECK(ToDoubleRegister(instr->result()).is(d0));
4091 
4092  if (exponent_type.IsSmi()) {
4093  MathPowStub stub(isolate(), MathPowStub::TAGGED);
4094  __ CallStub(&stub);
4095  } else if (exponent_type.IsTagged()) {
4096  Label no_deopt;
4097  __ JumpIfSmi(tagged_exponent, &no_deopt);
4098  DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
4099  __ Bind(&no_deopt);
4100  MathPowStub stub(isolate(), MathPowStub::TAGGED);
4101  __ CallStub(&stub);
4102  } else if (exponent_type.IsInteger32()) {
4103  // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
4104  // supports large integer exponents.
4105  __ Sxtw(integer_exponent, integer_exponent);
4106  MathPowStub stub(isolate(), MathPowStub::INTEGER);
4107  __ CallStub(&stub);
4108  } else {
4109  DCHECK(exponent_type.IsDouble());
4110  MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4111  __ CallStub(&stub);
4112  }
4113 }
4114 
4115 
4116 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
4117  DoubleRegister input = ToDoubleRegister(instr->value());
4118  DoubleRegister result = ToDoubleRegister(instr->result());
4119  DoubleRegister scratch_d = double_scratch();
4120 
4121  DCHECK(!AreAliased(input, result, scratch_d));
4122 
4123  Label done;
4124 
4125  __ Frinta(result, input);
4126  __ Fcmp(input, 0.0);
4127  __ Fccmp(result, input, ZFlag, lt);
4128  // The result is correct if the input was in [-0, +infinity], or was a
4129  // negative integral value.
4130  __ B(eq, &done);
4131 
4132  // Here the input is negative and non-integral, with an exponent lower than 52.
4133  // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
4134  // case. So we can safely add 0.5.
4135  __ Fmov(scratch_d, 0.5);
4136  __ Fadd(result, input, scratch_d);
4137  __ Frintm(result, result);
4138  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
4139  __ Fabs(result, result);
4140  __ Fneg(result, result);
4141 
4142  __ Bind(&done);
4143 }
4144 
4145 
4146 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
4147  DoubleRegister input = ToDoubleRegister(instr->value());
4148  DoubleRegister temp = ToDoubleRegister(instr->temp1());
4149  DoubleRegister dot_five = double_scratch();
4150  Register result = ToRegister(instr->result());
4151  Label done;
4152 
4153  // Math.round() rounds to the nearest integer, with ties going towards
4154  // +infinity. This does not match any IEEE-754 rounding mode.
4155  // - Infinities and NaNs are propagated unchanged, but cause deopts because
4156  // they can't be represented as integers.
4157  // - The sign of the result is the same as the sign of the input. This means
4158  // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
4159  // result of -0.0.
4160 
4161  // Add 0.5 and round towards -infinity.
4162  __ Fmov(dot_five, 0.5);
4163  __ Fadd(temp, input, dot_five);
4164  __ Fcvtms(result, temp);
4165 
4166  // The result is correct if:
4167  // result is not 0, as the input could be NaN or [-0.5, -0.0].
4168  // result is not 1, as 0.499...94 will wrongly map to 1.
4169  // result fits in 32 bits.
4170  __ Cmp(result, Operand(result.W(), SXTW));
4171  __ Ccmp(result, 1, ZFlag, eq);
4172  __ B(hi, &done);
4173 
4174  // At this point, we have to handle possible inputs of NaN or numbers in the
4175  // range [-0.5, 1.5[, or numbers larger than 32 bits.
4176 
4177  // Deoptimize if the result > 1, as it must be larger than 32 bits.
4178  __ Cmp(result, 1);
4179  DeoptimizeIf(hi, instr, "overflow");
4180 
4181  // Deoptimize for negative inputs, which at this point are only numbers in
4182  // the range [-0.5, -0.0]
4183  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4184  __ Fmov(result, input);
4185  DeoptimizeIfNegative(result, instr, "minus zero");
4186  }
4187 
4188  // Deoptimize if the input was NaN.
4189  __ Fcmp(input, dot_five);
4190  DeoptimizeIf(vs, instr, "NaN");
4191 
4192  // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
4193  // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
4194  // else 0; we avoid dealing with 0.499...94 directly.
4195  __ Cset(result, ge);
4196  __ Bind(&done);
4197 }
4198 
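// Illustrative sketch, not part of the original source: the rounding semantics
// implemented above (ties towards +infinity), ignoring the deopt paths for NaN,
// -0.0 and values outside the 32-bit range. Helper name invented.
#include <cmath>
static double RoundTiesTowardsPlusInfinity(double input) {
  double rounded = std::floor(input + 0.5);
  // 0.49999999999999994 + 0.5 rounds up to 1.0 in double arithmetic, so a result
  // of 1 is re-checked against input >= 0.5, matching the Cset(result, ge) above.
  if (rounded == 1.0 && !(input >= 0.5)) return 0.0;
  return rounded;
}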
4199 
4200 void LCodeGen::DoMathFround(LMathFround* instr) {
4201  DoubleRegister input = ToDoubleRegister(instr->value());
4202  DoubleRegister result = ToDoubleRegister(instr->result());
4203  __ Fcvt(result.S(), input);
4204  __ Fcvt(result, result.S());
4205 }
4206 
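// Illustrative sketch, not part of the original source: Math.fround() is a round
// trip through IEEE single precision, which is what the two Fcvt instructions
// above implement. Helper name invented.
static double Fround(double value) {
  return static_cast<double>(static_cast<float>(value));
}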
4207 
4208 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4209  DoubleRegister input = ToDoubleRegister(instr->value());
4210  DoubleRegister result = ToDoubleRegister(instr->result());
4211  __ Fsqrt(result, input);
4212 }
4213 
4214 
4215 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
4216  HMathMinMax::Operation op = instr->hydrogen()->operation();
4217  if (instr->hydrogen()->representation().IsInteger32()) {
4218  Register result = ToRegister32(instr->result());
4219  Register left = ToRegister32(instr->left());
4220  Operand right = ToOperand32(instr->right());
4221 
4222  __ Cmp(left, right);
4223  __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4224  } else if (instr->hydrogen()->representation().IsSmi()) {
4225  Register result = ToRegister(instr->result());
4226  Register left = ToRegister(instr->left());
4227  Operand right = ToOperand(instr->right());
4228 
4229  __ Cmp(left, right);
4230  __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4231  } else {
4232  DCHECK(instr->hydrogen()->representation().IsDouble());
4233  DoubleRegister result = ToDoubleRegister(instr->result());
4234  DoubleRegister left = ToDoubleRegister(instr->left());
4235  DoubleRegister right = ToDoubleRegister(instr->right());
4236 
4237  if (op == HMathMinMax::kMathMax) {
4238  __ Fmax(result, left, right);
4239  } else {
4240  DCHECK(op == HMathMinMax::kMathMin);
4241  __ Fmin(result, left, right);
4242  }
4243  }
4244 }
4245 
4246 
4247 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4248  Register dividend = ToRegister32(instr->dividend());
4249  int32_t divisor = instr->divisor();
4250  DCHECK(dividend.is(ToRegister32(instr->result())));
4251 
4252  // Theoretically, a variation of the branch-free code for integer division by
4253  // a power of 2 (calculating the remainder via an additional multiplication
4254  // (which gets simplified to an 'and') and subtraction) should be faster, and
4255  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
4256  // indicate that positive dividends are heavily favored, so the branching
4257  // version performs better.
4258  HMod* hmod = instr->hydrogen();
4259  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4260  Label dividend_is_not_negative, done;
4261  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
4262  __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
4263  // Note that this is correct even for kMinInt operands.
4264  __ Neg(dividend, dividend);
4265  __ And(dividend, dividend, mask);
4266  __ Negs(dividend, dividend);
4267  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4268  DeoptimizeIf(eq, instr, "minus zero");
4269  }
4270  __ B(&done);
4271  }
4272 
4273  __ bind(&dividend_is_not_negative);
4274  __ And(dividend, dividend, mask);
4275  __ bind(&done);
4276 }
4277 
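// Illustrative sketch, not part of the original source: the branching modulo
// computed above for a +/- power-of-two divisor, with the result keeping the
// dividend's sign (JavaScript % semantics). Helper name invented; unsigned
// arithmetic is used so the kMinInt dividend stays well defined.
#include <cstdint>
static int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  uint32_t mask = (divisor < 0) ? ~static_cast<uint32_t>(divisor)
                                : static_cast<uint32_t>(divisor) - 1;
  if (dividend < 0) {
    uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);  // Neg
    return -static_cast<int32_t>(magnitude & mask);             // And, Negs
  }
  return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
}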
4278 
4279 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4280  Register dividend = ToRegister32(instr->dividend());
4281  int32_t divisor = instr->divisor();
4282  Register result = ToRegister32(instr->result());
4283  Register temp = ToRegister32(instr->temp());
4284  DCHECK(!AreAliased(dividend, result, temp));
4285 
4286  if (divisor == 0) {
4287  Deoptimize(instr, "division by zero");
4288  return;
4289  }
4290 
4291  __ TruncatingDiv(result, dividend, Abs(divisor));
4292  __ Sxtw(dividend.X(), dividend);
4293  __ Mov(temp, Abs(divisor));
4294  __ Smsubl(result.X(), result, temp, dividend.X());
4295 
4296  // Check for negative zero.
4297  HMod* hmod = instr->hydrogen();
4298  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
4299  Label remainder_not_zero;
4300  __ Cbnz(result, &remainder_not_zero);
4301  DeoptimizeIfNegative(dividend, instr, "minus zero");
4302  __ bind(&remainder_not_zero);
4303  }
4304 }
4305 
4306 
4307 void LCodeGen::DoModI(LModI* instr) {
4308  Register dividend = ToRegister32(instr->left());
4309  Register divisor = ToRegister32(instr->right());
4310  Register result = ToRegister32(instr->result());
4311 
4312  Label done;
4313  // modulo = dividend - quotient * divisor
4314  __ Sdiv(result, dividend, divisor);
4315  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
4316  DeoptimizeIfZero(divisor, instr, "division by zero");
4317  }
4318  __ Msub(result, result, divisor, dividend);
4319  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4320  __ Cbnz(result, &done);
4321  DeoptimizeIfNegative(dividend, instr, "minus zero");
4322  }
4323  __ Bind(&done);
4324 }
4325 
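// Illustrative sketch, not part of the original source: the Sdiv/Msub pair above
// computes the remainder as dividend - (dividend / divisor) * divisor, so the
// result takes the sign of the dividend. Assumes the divide-by-zero and kMinInt
// cases that the deopts catch do not occur. Helper name invented.
#include <cstdint>
static int32_t Modulo(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;  // Truncating division (Sdiv).
  return dividend - quotient * divisor;   // Msub.
}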
4326 
4327 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4328  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
4329  bool is_smi = instr->hydrogen()->representation().IsSmi();
4330  Register result =
4331  is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4332  Register left =
4333  is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
4334  int32_t right = ToInteger32(instr->right());
4335  DCHECK((right > -kMaxInt) && (right < kMaxInt));
4336 
4337  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4338  bool bailout_on_minus_zero =
4339  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4340 
4341  if (bailout_on_minus_zero) {
4342  if (right < 0) {
4343  // The result is -0 if right is negative and left is zero.
4344  DeoptimizeIfZero(left, instr, "minus zero");
4345  } else if (right == 0) {
4346  // The result is -0 if the right is zero and the left is negative.
4347  DeoptimizeIfNegative(left, instr, "minus zero");
4348  }
4349  }
4350 
4351  switch (right) {
4352  // Cases which can detect overflow.
4353  case -1:
4354  if (can_overflow) {
4355  // Only 0x80000000 can overflow here.
4356  __ Negs(result, left);
4357  DeoptimizeIf(vs, instr, "overflow");
4358  } else {
4359  __ Neg(result, left);
4360  }
4361  break;
4362  case 0:
4363  // This case can never overflow.
4364  __ Mov(result, 0);
4365  break;
4366  case 1:
4367  // This case can never overflow.
4368  __ Mov(result, left, kDiscardForSameWReg);
4369  break;
4370  case 2:
4371  if (can_overflow) {
4372  __ Adds(result, left, left);
4373  DeoptimizeIf(vs, instr, "overflow");
4374  } else {
4375  __ Add(result, left, left);
4376  }
4377  break;
4378 
4379  default:
4380  // Multiplication by constant powers of two (and some related values)
4381  // can be done efficiently with shifted operands.
4382  int32_t right_abs = Abs(right);
4383 
4384  if (base::bits::IsPowerOfTwo32(right_abs)) {
4385  int right_log2 = WhichPowerOf2(right_abs);
4386 
4387  if (can_overflow) {
4388  Register scratch = result;
4389  DCHECK(!AreAliased(scratch, left));
4390  __ Cls(scratch, left);
4391  __ Cmp(scratch, right_log2);
4392  DeoptimizeIf(lt, instr, "overflow");
4393  }
4394 
4395  if (right >= 0) {
4396  // result = left << log2(right)
4397  __ Lsl(result, left, right_log2);
4398  } else {
4399  // result = -left << log2(-right)
4400  if (can_overflow) {
4401  __ Negs(result, Operand(left, LSL, right_log2));
4402  DeoptimizeIf(vs, instr, "overflow");
4403  } else {
4404  __ Neg(result, Operand(left, LSL, right_log2));
4405  }
4406  }
4407  return;
4408  }
4409 
4410 
4411  // For the following cases, we could perform a conservative overflow check
4412  // with CLS as above. However the few cycles saved are likely not worth
4413  // the risk of deoptimizing more often than required.
4414  DCHECK(!can_overflow);
4415 
4416  if (right >= 0) {
4417  if (base::bits::IsPowerOfTwo32(right - 1)) {
4418  // result = left + left << log2(right - 1)
4419  __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4420  } else if (base::bits::IsPowerOfTwo32(right + 1)) {
4421  // result = -left + left << log2(right + 1)
4422  __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4423  __ Neg(result, result);
4424  } else {
4425  UNREACHABLE();
4426  }
4427  } else {
4428  if (base::bits::IsPowerOfTwo32(-right + 1)) {
4429  // result = left - left << log2(-right + 1)
4430  __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
4431  } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
4432  // result = -left - left << log2(-right - 1)
4433  __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
4434  __ Neg(result, result);
4435  } else {
4436  UNREACHABLE();
4437  }
4438  }
4439  }
4440 }
4441 
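// Illustrative sketch, not part of the original source: the shift-based rewrites
// selected above for constant multipliers, e.g. x*8 == x<<3, x*9 == x + (x<<3),
// x*7 == (x<<3) - x, x*-8 == -(x<<3). One concrete instance, using unsigned
// arithmetic to keep the wrap-around well defined; helper name invented.
#include <cstdint>
static int32_t MulBy9(int32_t x) {
  uint32_t ux = static_cast<uint32_t>(x);
  return static_cast<int32_t>(ux + (ux << 3));  // x + (x << 3) == 9 * x
}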
4442 
4443 void LCodeGen::DoMulI(LMulI* instr) {
4444  Register result = ToRegister32(instr->result());
4445  Register left = ToRegister32(instr->left());
4446  Register right = ToRegister32(instr->right());
4447 
4448  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4449  bool bailout_on_minus_zero =
4450  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4451 
4452  if (bailout_on_minus_zero && !left.Is(right)) {
4453  // If one operand is zero and the other is negative, the result is -0.
4454  // - Set Z (eq) if either left or right, or both, are 0.
4455  __ Cmp(left, 0);
4456  __ Ccmp(right, 0, ZFlag, ne);
4457  // - If so (eq), set N (mi) if left + right is negative.
4458  // - Otherwise, clear N.
4459  __ Ccmn(left, right, NoFlag, eq);
4460  DeoptimizeIf(mi, instr, "minus zero");
4461  }
4462 
4463  if (can_overflow) {
4464  __ Smull(result.X(), left, right);
4465  __ Cmp(result.X(), Operand(result, SXTW));
4466  DeoptimizeIf(ne, instr, "overflow");
4467  } else {
4468  __ Mul(result, left, right);
4469  }
4470 }
4471 
4472 
4473 void LCodeGen::DoMulS(LMulS* instr) {
4474  Register result = ToRegister(instr->result());
4475  Register left = ToRegister(instr->left());
4476  Register right = ToRegister(instr->right());
4477 
4478  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4479  bool bailout_on_minus_zero =
4480  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4481 
4482  if (bailout_on_minus_zero && !left.Is(right)) {
4483  // If one operand is zero and the other is negative, the result is -0.
4484  // - Set Z (eq) if either left or right, or both, are 0.
4485  __ Cmp(left, 0);
4486  __ Ccmp(right, 0, ZFlag, ne);
4487  // - If so (eq), set N (mi) if left + right is negative.
4488  // - Otherwise, clear N.
4489  __ Ccmn(left, right, NoFlag, eq);
4490  DeoptimizeIf(mi, instr, "minus zero");
4491  }
4492 
4493  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4494  if (can_overflow) {
4495  __ Smulh(result, left, right);
4496  __ Cmp(result, Operand(result.W(), SXTW));
4497  __ SmiTag(result);
4498  DeoptimizeIf(ne, instr, "overflow");
4499  } else {
4500  if (AreAliased(result, left, right)) {
4501  // All three registers are the same: half untag the input and then
4502  // multiply, giving a tagged result.
4503  STATIC_ASSERT((kSmiShift % 2) == 0);
4504  __ Asr(result, left, kSmiShift / 2);
4505  __ Mul(result, result, result);
4506  } else if (result.Is(left) && !left.Is(right)) {
4507  // Registers result and left alias, right is distinct: untag left into
4508  // result, and then multiply by right, giving a tagged result.
4509  __ SmiUntag(result, left);
4510  __ Mul(result, result, right);
4511  } else {
4512  DCHECK(!left.Is(result));
4513  // Registers result and right alias, left is distinct, or all registers
4514  // are distinct: untag right into result, and then multiply by left,
4515  // giving a tagged result.
4516  __ SmiUntag(result, right);
4517  __ Mul(result, left, result);
4518  }
4519  }
4520 }
4521 
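// Illustrative sketch, not part of the original source: why half-untagging works
// when all three registers alias. With the 32-bit smi shift, (v << 16) * (v << 16)
// equals (v * v) << 32, which is already the tagged product. Assumes no overflow,
// matching the !can_overflow path above; helper name invented.
#include <cstdint>
static int64_t SquareSmiInPlace(int64_t smi) {
  int64_t half = smi >> 16;  // Asr by kSmiShift / 2.
  return half * half;        // Tagged (v * v).
}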
4522 
4523 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4524  // TODO(3095996): Get rid of this. For now, we need to make the
4525  // result register contain a valid pointer because it is already
4526  // contained in the register pointer map.
4527  Register result = ToRegister(instr->result());
4528  __ Mov(result, 0);
4529 
4530  PushSafepointRegistersScope scope(this);
4531  // NumberTagU and NumberTagD use the context from the frame, rather than
4532  // the environment's HContext or HInlinedContext value.
4533  // They only call Runtime::kAllocateHeapNumber.
4534  // The corresponding HChange instructions are added in a phase that does
4535  // not have easy access to the local context.
4536  __ Mov(cp, 0);
4537  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4538  RecordSafepointWithRegisters(
4539  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4540  __ StoreToSafepointRegisterSlot(x0, result);
4541 }
4542 
4543 
4544 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4545  class DeferredNumberTagD: public LDeferredCode {
4546  public:
4547  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4548  : LDeferredCode(codegen), instr_(instr) { }
4549  virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4550  virtual LInstruction* instr() { return instr_; }
4551  private:
4552  LNumberTagD* instr_;
4553  };
4554 
4555  DoubleRegister input = ToDoubleRegister(instr->value());
4556  Register result = ToRegister(instr->result());
4557  Register temp1 = ToRegister(instr->temp1());
4558  Register temp2 = ToRegister(instr->temp2());
4559 
4560  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4561  if (FLAG_inline_new) {
4562  __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4563  } else {
4564  __ B(deferred->entry());
4565  }
4566 
4567  __ Bind(deferred->exit());
4568  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4569 }
4570 
4571 
4572 void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr,
4573  LOperand* value,
4574  LOperand* temp1,
4575  LOperand* temp2) {
4576  Label slow, convert_and_store;
4577  Register src = ToRegister32(value);
4578  Register dst = ToRegister(instr->result());
4579  Register scratch1 = ToRegister(temp1);
4580 
4581  if (FLAG_inline_new) {
4582  Register scratch2 = ToRegister(temp2);
4583  __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4584  __ B(&convert_and_store);
4585  }
4586 
4587  // Slow case: call the runtime system to do the number allocation.
4588  __ Bind(&slow);
4589  // TODO(3095996): Put a valid pointer value in the stack slot where the result
4590  // register is stored, as this register is in the pointer map, but contains an
4591  // integer value.
4592  __ Mov(dst, 0);
4593  {
4594  // Preserve the value of all registers.
4595  PushSafepointRegistersScope scope(this);
4596 
4597  // NumberTagU and NumberTagD use the context from the frame, rather than
4598  // the environment's HContext or HInlinedContext value.
4599  // They only call Runtime::kAllocateHeapNumber.
4600  // The corresponding HChange instructions are added in a phase that does
4601  // not have easy access to the local context.
4602  __ Mov(cp, 0);
4603  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4604  RecordSafepointWithRegisters(
4605  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4606  __ StoreToSafepointRegisterSlot(x0, dst);
4607  }
4608 
4609  // Convert number to floating point and store in the newly allocated heap
4610  // number.
4611  __ Bind(&convert_and_store);
4612  DoubleRegister dbl_scratch = double_scratch();
4613  __ Ucvtf(dbl_scratch, src);
4614  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4615 }
4616 
4617 
4618 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4619  class DeferredNumberTagU: public LDeferredCode {
4620  public:
4621  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4622  : LDeferredCode(codegen), instr_(instr) { }
4623  virtual void Generate() {
4624  codegen()->DoDeferredNumberTagU(instr_,
4625  instr_->value(),
4626  instr_->temp1(),
4627  instr_->temp2());
4628  }
4629  virtual LInstruction* instr() { return instr_; }
4630  private:
4631  LNumberTagU* instr_;
4632  };
4633 
4634  Register value = ToRegister32(instr->value());
4635  Register result = ToRegister(instr->result());
4636 
4637  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4638  __ Cmp(value, Smi::kMaxValue);
4639  __ B(hi, deferred->entry());
4640  __ SmiTag(result, value.X());
4641  __ Bind(deferred->exit());
4642 }
4643 
4644 
4645 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4646  Register input = ToRegister(instr->value());
4647  Register scratch = ToRegister(instr->temp());
4648  DoubleRegister result = ToDoubleRegister(instr->result());
4649  bool can_convert_undefined_to_nan =
4650  instr->hydrogen()->can_convert_undefined_to_nan();
4651 
4652  Label done, load_smi;
4653 
4654  // Work out what untag mode we're working with.
4655  HValue* value = instr->hydrogen()->value();
4656  NumberUntagDMode mode = value->representation().IsSmi()
4657  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4658 
4659  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4660  __ JumpIfSmi(input, &load_smi);
4661 
4662  Label convert_undefined;
4663 
4664  // Heap number map check.
4665  if (can_convert_undefined_to_nan) {
4666  __ JumpIfNotHeapNumber(input, &convert_undefined);
4667  } else {
4668  DeoptimizeIfNotHeapNumber(input, instr);
4669  }
4670 
4671  // Load heap number.
4672  __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4673  if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4674  DeoptimizeIfMinusZero(result, instr, "minus zero");
4675  }
4676  __ B(&done);
4677 
4678  if (can_convert_undefined_to_nan) {
4679  __ Bind(&convert_undefined);
4680  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
4681  "not a heap number/undefined");
4682 
4683  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4684  __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4685  __ B(&done);
4686  }
4687 
4688  } else {
4689  DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4690  // Fall through to load_smi.
4691  }
4692 
4693  // Smi to double register conversion.
4694  __ Bind(&load_smi);
4695  __ SmiUntagToDouble(result, input);
4696 
4697  __ Bind(&done);
4698 }
4699 
4700 
4701 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4702  // This is a pseudo-instruction that ensures that the environment here is
4703  // properly registered for deoptimization and records the assembler's PC
4704  // offset.
4705  LEnvironment* environment = instr->environment();
4706 
4707  // If the environment were already registered, we would have no way of
4708  // backpatching it with the spill slot operands.
4709  DCHECK(!environment->HasBeenRegistered());
4710  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4711 
4712  GenerateOsrPrologue();
4713 }
4714 
4715 
4716 void LCodeGen::DoParameter(LParameter* instr) {
4717  // Nothing to do.
4718 }
4719 
4720 
4721 void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
4722  __ PushPreamble(instr->argc(), kPointerSize);
4723 }
4724 
4725 
4726 void LCodeGen::DoPushArguments(LPushArguments* instr) {
4727  MacroAssembler::PushPopQueue args(masm());
4728 
4729  for (int i = 0; i < instr->ArgumentCount(); ++i) {
4730  LOperand* arg = instr->argument(i);
4731  if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
4732  Abort(kDoPushArgumentNotImplementedForDoubleType);
4733  return;
4734  }
4735  args.Queue(ToRegister(arg));
4736  }
4737 
4738  // The preamble was done by LPreparePushArguments.
4739  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
4740 
4741  after_push_argument_ = true;
4742 }
4743 
4744 
4745 void LCodeGen::DoReturn(LReturn* instr) {
4746  if (FLAG_trace && info()->IsOptimizing()) {
4747  // Push the return value on the stack as the parameter.
4748  // Runtime::TraceExit returns its parameter in x0. We're leaving the code
4749  // managed by the register allocator and tearing down the frame, so it's
4750  // safe to write to the context register.
4751  __ Push(x0);
4752  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4753  __ CallRuntime(Runtime::kTraceExit, 1);
4754  }
4755 
4756  if (info()->saves_caller_doubles()) {
4757  RestoreCallerDoubles();
4758  }
4759 
4760  int no_frame_start = -1;
4761  if (NeedsEagerFrame()) {
4762  Register stack_pointer = masm()->StackPointer();
4763  __ Mov(stack_pointer, fp);
4764  no_frame_start = masm_->pc_offset();
4765  __ Pop(fp, lr);
4766  }
4767 
4768  if (instr->has_constant_parameter_count()) {
4769  int parameter_count = ToInteger32(instr->constant_parameter_count());
4770  __ Drop(parameter_count + 1);
4771  } else {
4772  Register parameter_count = ToRegister(instr->parameter_count());
4773  __ DropBySMI(parameter_count);
4774  }
4775  __ Ret();
4776 
4777  if (no_frame_start != -1) {
4778  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4779  }
4780 }
4781 
4782 
4783 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4784  Register temp,
4785  LOperand* index,
4786  String::Encoding encoding) {
4787  if (index->IsConstantOperand()) {
4788  int offset = ToInteger32(LConstantOperand::cast(index));
4789  if (encoding == String::TWO_BYTE_ENCODING) {
4790  offset *= kUC16Size;
4791  }
4792  STATIC_ASSERT(kCharSize == 1);
4793  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
4794  }
4795 
4796  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
4797  if (encoding == String::ONE_BYTE_ENCODING) {
4798  return MemOperand(temp, ToRegister32(index), SXTW);
4799  } else {
4800  STATIC_ASSERT(kUC16Size == 2);
4801  return MemOperand(temp, ToRegister32(index), SXTW, 1);
4802  }
4803 }
4804 
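// Illustrative sketch, not part of the original source: the byte offset into a
// sequential string computed above, ignoring the heap-object tag adjustment.
// Helper name invented.
static int SeqStringByteOffset(int header_size, int index, bool two_byte_encoding) {
  int char_size = two_byte_encoding ? 2 : 1;  // kUC16Size vs. one byte.
  return header_size + index * char_size;
}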
4805 
4806 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4807  String::Encoding encoding = instr->hydrogen()->encoding();
4808  Register string = ToRegister(instr->string());
4809  Register result = ToRegister(instr->result());
4810  Register temp = ToRegister(instr->temp());
4811 
4812  if (FLAG_debug_code) {
4813  // Even though this lithium instruction comes with a temp register, we
4814  // can't use it here because we want to use "AtStart" constraints on the
4815  // inputs and the debug code here needs a scratch register.
4816  UseScratchRegisterScope temps(masm());
4817  Register dbg_temp = temps.AcquireX();
4818 
4819  __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
4820  __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
4821 
4822  __ And(dbg_temp, dbg_temp,
4823  Operand(kStringRepresentationMask | kStringEncodingMask));
4824  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4825  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4826  __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
4827  ? one_byte_seq_type : two_byte_seq_type));
4828  __ Check(eq, kUnexpectedStringType);
4829  }
4830 
4831  MemOperand operand =
4832  BuildSeqStringOperand(string, temp, instr->index(), encoding);
4833  if (encoding == String::ONE_BYTE_ENCODING) {
4834  __ Ldrb(result, operand);
4835  } else {
4836  __ Ldrh(result, operand);
4837  }
4838 }
4839 
4840 
4841 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4842  String::Encoding encoding = instr->hydrogen()->encoding();
4843  Register string = ToRegister(instr->string());
4844  Register value = ToRegister(instr->value());
4845  Register temp = ToRegister(instr->temp());
4846 
4847  if (FLAG_debug_code) {
4848  DCHECK(ToRegister(instr->context()).is(cp));
4849  Register index = ToRegister(instr->index());
4850  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4851  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4852  int encoding_mask =
4853  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4854  ? one_byte_seq_type : two_byte_seq_type;
4855  __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4856  encoding_mask);
4857  }
4858  MemOperand operand =
4859  BuildSeqStringOperand(string, temp, instr->index(), encoding);
4860  if (encoding == String::ONE_BYTE_ENCODING) {
4861  __ Strb(value, operand);
4862  } else {
4863  __ Strh(value, operand);
4864  }
4865 }
4866 
4867 
4868 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4869  HChange* hchange = instr->hydrogen();
4870  Register input = ToRegister(instr->value());
4871  Register output = ToRegister(instr->result());
4872  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4873  hchange->value()->CheckFlag(HValue::kUint32)) {
4874  DeoptimizeIfNegative(input.W(), instr, "overflow");
4875  }
4876  __ SmiTag(output, input);
4877 }
4878 
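// Illustrative sketch, not part of the original source: why a kUint32 value with
// its sign bit set must deopt before tagging. The 32-bit smi payload is signed,
// so such a value would change meaning after a tag/untag round trip. Helper name
// invented.
#include <cstdint>
static int32_t SmiPayloadAfterRoundTrip(uint32_t value) {
  return static_cast<int32_t>(value);  // 0x80000000u reads back as -2147483648.
}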
4879 
4880 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4881  Register input = ToRegister(instr->value());
4882  Register result = ToRegister(instr->result());
4883  Label done, untag;
4884 
4885  if (instr->needs_check()) {
4886  DeoptimizeIfNotSmi(input, instr, "not a Smi");
4887  }
4888 
4889  __ Bind(&untag);
4890  __ SmiUntag(result, input);
4891  __ Bind(&done);
4892 }
4893 
4894 
4895 void LCodeGen::DoShiftI(LShiftI* instr) {
4896  LOperand* right_op = instr->right();
4897  Register left = ToRegister32(instr->left());
4898  Register result = ToRegister32(instr->result());
4899 
4900  if (right_op->IsRegister()) {
4901  Register right = ToRegister32(instr->right());
4902  switch (instr->op()) {
4903  case Token::ROR: __ Ror(result, left, right); break;
4904  case Token::SAR: __ Asr(result, left, right); break;
4905  case Token::SHL: __ Lsl(result, left, right); break;
4906  case Token::SHR:
4907  __ Lsr(result, left, right);
4908  if (instr->can_deopt()) {
4909  // If `left >>> right` >= 0x80000000, the result is not representable
4910  // in a signed 32-bit smi.
4911  DeoptimizeIfNegative(result, instr, "negative value");
4912  }
4913  break;
4914  default: UNREACHABLE();
4915  }
4916  } else {
4917  DCHECK(right_op->IsConstantOperand());
4918  int shift_count = JSShiftAmountFromLConstant(right_op);
4919  if (shift_count == 0) {
4920  if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4921  DeoptimizeIfNegative(left, instr, "negative value");
4922  }
4923  __ Mov(result, left, kDiscardForSameWReg);
4924  } else {
4925  switch (instr->op()) {
4926  case Token::ROR: __ Ror(result, left, shift_count); break;
4927  case Token::SAR: __ Asr(result, left, shift_count); break;
4928  case Token::SHL: __ Lsl(result, left, shift_count); break;
4929  case Token::SHR: __ Lsr(result, left, shift_count); break;
4930  default: UNREACHABLE();
4931  }
4932  }
4933  }
4934 }
4935 
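// Illustrative sketch, not part of the original source: the SHR deopt condition
// above. JavaScript's >>> yields an unsigned 32-bit result; if its top bit is set
// (e.g. -1 >>> 0 == 4294967295) it cannot be held as a signed 32-bit value, so
// the code deoptimizes. Helper name invented.
#include <cstdint>
static bool ShrResultNeedsDeopt(uint32_t left, uint32_t shift_count) {
  return (left >> (shift_count & 31)) >= 0x80000000u;
}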
4936 
4937 void LCodeGen::DoShiftS(LShiftS* instr) {
4938  LOperand* right_op = instr->right();
4939  Register left = ToRegister(instr->left());
4940  Register result = ToRegister(instr->result());
4941 
4942  if (right_op->IsRegister()) {
4943  Register right = ToRegister(instr->right());
4944 
4945  // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
4946  // Since we're handling smis in X registers, we have to extract these bits
4947  // explicitly.
4948  __ Ubfx(result, right, kSmiShift, 5);
4949 
4950  switch (instr->op()) {
4951  case Token::ROR: {
4952  // This is the only case that needs a scratch register. To keep things
4953  // simple for the other cases, borrow a MacroAssembler scratch register.
4954  UseScratchRegisterScope temps(masm());
4955  Register temp = temps.AcquireW();
4956  __ SmiUntag(temp, left);
4957  __ Ror(result.W(), temp.W(), result.W());
4958  __ SmiTag(result);
4959  break;
4960  }
4961  case Token::SAR:
4962  __ Asr(result, left, result);
4963  __ Bic(result, result, kSmiShiftMask);
4964  break;
4965  case Token::SHL:
4966  __ Lsl(result, left, result);
4967  break;
4968  case Token::SHR:
4969  __ Lsr(result, left, result);
4970  __ Bic(result, result, kSmiShiftMask);
4971  if (instr->can_deopt()) {
4972  // If `left >>> right` >= 0x80000000, the result is not representable
4973  // in a signed 32-bit smi.
4974  DeoptimizeIfNegative(result, instr, "negative value");
4975  }
4976  break;
4977  default: UNREACHABLE();
4978  }
4979  } else {
4980  DCHECK(right_op->IsConstantOperand());
4981  int shift_count = JSShiftAmountFromLConstant(right_op);
4982  if (shift_count == 0) {
4983  if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4984  DeoptimizeIfNegative(left, instr, "negative value");
4985  }
4986  __ Mov(result, left);
4987  } else {
4988  switch (instr->op()) {
4989  case Token::ROR:
4990  __ SmiUntag(result, left);
4991  __ Ror(result.W(), result.W(), shift_count);
4992  __ SmiTag(result);
4993  break;
4994  case Token::SAR:
4995  __ Asr(result, left, shift_count);
4996  __ Bic(result, result, kSmiShiftMask);
4997  break;
4998  case Token::SHL:
4999  __ Lsl(result, left, shift_count);
5000  break;
5001  case Token::SHR:
5002  __ Lsr(result, left, shift_count);
5003  __ Bic(result, result, kSmiShiftMask);
5004  break;
5005  default: UNREACHABLE();
5006  }
5007  }
5008  }
5009 }
5010 
5011 
5012 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
5013  __ Debug("LDebugBreak", 0, BREAK);
5014 }
5015 
5016 
5017 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
5018  DCHECK(ToRegister(instr->context()).is(cp));
5019  Register scratch1 = x5;
5020  Register scratch2 = x6;
5021  DCHECK(instr->IsMarkedAsCall());
5022 
5023  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
5024  // TODO(all): if Mov could handle object in new space then it could be used
5025  // here.
5026  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
5027  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
5028  __ Push(cp, scratch1, scratch2); // The context is the first argument.
5029  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
5030 }
5031 
5032 
5033 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5034  PushSafepointRegistersScope scope(this);
5035  LoadContextFromDeferred(instr->context());
5036  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5037  RecordSafepointWithLazyDeopt(
5038  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5039  DCHECK(instr->HasEnvironment());
5040  LEnvironment* env = instr->environment();
5041  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5042 }
5043 
5044 
5045 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5046  class DeferredStackCheck: public LDeferredCode {
5047  public:
5048  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5049  : LDeferredCode(codegen), instr_(instr) { }
5050  virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5051  virtual LInstruction* instr() { return instr_; }
5052  private:
5053  LStackCheck* instr_;
5054  };
5055 
5056  DCHECK(instr->HasEnvironment());
5057  LEnvironment* env = instr->environment();
5058  // There is no LLazyBailout instruction for stack-checks. We have to
5059  // prepare for lazy deoptimization explicitly here.
5060  if (instr->hydrogen()->is_function_entry()) {
5061  // Perform stack overflow check.
5062  Label done;
5063  __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5064  __ B(hs, &done);
5065 
5066  PredictableCodeSizeScope predictable(masm_,
5067  Assembler::kCallSizeWithRelocation);
5068  DCHECK(instr->context()->IsRegister());
5069  DCHECK(ToRegister(instr->context()).is(cp));
5070  CallCode(isolate()->builtins()->StackCheck(),
5071  RelocInfo::CODE_TARGET,
5072  instr);
5073  __ Bind(&done);
5074  } else {
5075  DCHECK(instr->hydrogen()->is_backwards_branch());
5076  // Perform stack overflow check if this goto needs it before jumping.
5077  DeferredStackCheck* deferred_stack_check =
5078  new(zone()) DeferredStackCheck(this, instr);
5079  __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5080  __ B(lo, deferred_stack_check->entry());
5081 
5082  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5083  __ Bind(instr->done_label());
5084  deferred_stack_check->SetExit(instr->done_label());
5085  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5086  // Don't record a deoptimization index for the safepoint here.
5087  // This will be done explicitly when emitting call and the safepoint in
5088  // the deferred code.
5089  }
5090 }
5091 
5092 
5093 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
5094  Register function = ToRegister(instr->function());
5095  Register code_object = ToRegister(instr->code_object());
5096  Register temp = ToRegister(instr->temp());
5097  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
5098  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
5099 }
5100 
5101 
5102 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
5103  Register context = ToRegister(instr->context());
5104  Register value = ToRegister(instr->value());
5105  Register scratch = ToRegister(instr->temp());
5106  MemOperand target = ContextMemOperand(context, instr->slot_index());
5107 
5108  Label skip_assignment;
5109 
5110  if (instr->hydrogen()->RequiresHoleCheck()) {
5111  __ Ldr(scratch, target);
5112  if (instr->hydrogen()->DeoptimizesOnHole()) {
5113  DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
5114  } else {
5115  __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
5116  }
5117  }
5118 
5119  __ Str(value, target);
5120  if (instr->hydrogen()->NeedsWriteBarrier()) {
5121  SmiCheck check_needed =
5122  instr->hydrogen()->value()->type().IsHeapObject()
5123  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5124  __ RecordWriteContextSlot(context,
5125  target.offset(),
5126  value,
5127  scratch,
5128  GetLinkRegisterState(),
5129  kSaveFPRegs,
5130  EMIT_REMEMBERED_SET,
5131  check_needed);
5132  }
5133  __ Bind(&skip_assignment);
5134 }
5135 
5136 
5137 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
5138  Register value = ToRegister(instr->value());
5139  Register cell = ToRegister(instr->temp1());
5140 
5141  // Load the cell.
5142  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
5143 
5144  // If the cell we are storing to contains the hole it could have
5145  // been deleted from the property dictionary. In that case, we need
5146  // to update the property details in the property dictionary to mark
5147  // it as no longer deleted. We deoptimize in that case.
5148  if (instr->hydrogen()->RequiresHoleCheck()) {
5149  Register payload = ToRegister(instr->temp2());
5150  __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
5151  DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
5152  }
5153 
5154  // Store the value.
5155  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
5156  // Cells are always rescanned, so no write barrier here.
5157 }
5158 
5159 
5160 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5161  Register ext_ptr = ToRegister(instr->elements());
5162  Register key = no_reg;
5163  Register scratch;
5164  ElementsKind elements_kind = instr->elements_kind();
5165 
5166  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5167  bool key_is_constant = instr->key()->IsConstantOperand();
5168  int constant_key = 0;
5169  if (key_is_constant) {
5170  DCHECK(instr->temp() == NULL);
5171  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5172  if (constant_key & 0xf0000000) {
5173  Abort(kArrayIndexConstantValueTooBig);
5174  }
5175  } else {
5176  key = ToRegister(instr->key());
5177  scratch = ToRegister(instr->temp());
5178  }
5179 
5180  MemOperand dst =
5181  PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
5182  key_is_constant, constant_key,
5183  elements_kind,
5184  instr->base_offset());
5185 
5186  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
5187  (elements_kind == FLOAT32_ELEMENTS)) {
5188  DoubleRegister value = ToDoubleRegister(instr->value());
5189  DoubleRegister dbl_scratch = double_scratch();
5190  __ Fcvt(dbl_scratch.S(), value);
5191  __ Str(dbl_scratch.S(), dst);
5192  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
5193  (elements_kind == FLOAT64_ELEMENTS)) {
5194  DoubleRegister value = ToDoubleRegister(instr->value());
5195  __ Str(value, dst);
5196  } else {
5197  Register value = ToRegister(instr->value());
5198 
5199  switch (elements_kind) {
5200  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
5201  case EXTERNAL_INT8_ELEMENTS:
5202  case EXTERNAL_UINT8_ELEMENTS:
5203  case UINT8_ELEMENTS:
5204  case UINT8_CLAMPED_ELEMENTS:
5205  case INT8_ELEMENTS:
5206  __ Strb(value, dst);
5207  break;
5208  case EXTERNAL_INT16_ELEMENTS:
5209  case EXTERNAL_UINT16_ELEMENTS:
5210  case INT16_ELEMENTS:
5211  case UINT16_ELEMENTS:
5212  __ Strh(value, dst);
5213  break;
5214  case EXTERNAL_INT32_ELEMENTS:
5215  case EXTERNAL_UINT32_ELEMENTS:
5216  case INT32_ELEMENTS:
5217  case UINT32_ELEMENTS:
5218  __ Str(value.W(), dst);
5219  break;
5220  case FLOAT32_ELEMENTS:
5221  case FLOAT64_ELEMENTS:
5222  case EXTERNAL_FLOAT32_ELEMENTS:
5223  case EXTERNAL_FLOAT64_ELEMENTS:
5224  case FAST_DOUBLE_ELEMENTS:
5225  case FAST_ELEMENTS:
5226  case FAST_SMI_ELEMENTS:
5227  case FAST_HOLEY_DOUBLE_ELEMENTS:
5228  case FAST_HOLEY_ELEMENTS:
5229  case FAST_HOLEY_SMI_ELEMENTS:
5230  case DICTIONARY_ELEMENTS:
5231  case SLOPPY_ARGUMENTS_ELEMENTS:
5232  UNREACHABLE();
5233  break;
5234  }
5235  }
5236 }
5237 
5238 
5239 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
5240  Register elements = ToRegister(instr->elements());
5241  DoubleRegister value = ToDoubleRegister(instr->value());
5242  MemOperand mem_op;
5243 
5244  if (instr->key()->IsConstantOperand()) {
5245  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5246  if (constant_key & 0xf0000000) {
5247  Abort(kArrayIndexConstantValueTooBig);
5248  }
5249  int offset = instr->base_offset() + constant_key * kDoubleSize;
5250  mem_op = MemOperand(elements, offset);
5251  } else {
5252  Register store_base = ToRegister(instr->temp());
5253  Register key = ToRegister(instr->key());
5254  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5255  mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5256  instr->hydrogen()->elements_kind(),
5257  instr->hydrogen()->representation(),
5258  instr->base_offset());
5259  }
5260 
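 // NaNs are canonicalized before being stored so that arbitrary NaN bit
 // patterns (in particular the hole marker used by FixedDoubleArray) never
 // reach the backing store.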
5261  if (instr->NeedsCanonicalization()) {
5262  __ CanonicalizeNaN(double_scratch(), value);
5263  __ Str(double_scratch(), mem_op);
5264  } else {
5265  __ Str(value, mem_op);
5266  }
5267 }
5268 
5269 
5270 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
5271  Register value = ToRegister(instr->value());
5272  Register elements = ToRegister(instr->elements());
5273  Register scratch = no_reg;
5274  Register store_base = no_reg;
5275  Register key = no_reg;
5276  MemOperand mem_op;
5277 
5278  if (!instr->key()->IsConstantOperand() ||
5279  instr->hydrogen()->NeedsWriteBarrier()) {
5280  scratch = ToRegister(instr->temp());
5281  }
5282 
5283  Representation representation = instr->hydrogen()->value()->representation();
5284  if (instr->key()->IsConstantOperand()) {
5285  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5286  int offset = instr->base_offset() +
5287  ToInteger32(const_operand) * kPointerSize;
5288  store_base = elements;
5289  if (representation.IsInteger32()) {
5290  DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5291  DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
5292  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5293  STATIC_ASSERT(kSmiTag == 0);
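 // With the 32-bit smi value held in the upper half of the word, an int32
 // store into an already-initialized smi entry only needs to write the value
 // half; UntagSmiMemOperand addresses exactly that half.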
5294  mem_op = UntagSmiMemOperand(store_base, offset);
5295  } else {
5296  mem_op = MemOperand(store_base, offset);
5297  }
5298  } else {
5299  store_base = scratch;
5300  key = ToRegister(instr->key());
5301  bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5302 
5303  mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5304  instr->hydrogen()->elements_kind(),
5305  representation, instr->base_offset());
5306  }
5307 
5308  __ Store(value, mem_op, representation);
5309 
5310  if (instr->hydrogen()->NeedsWriteBarrier()) {
5311  DCHECK(representation.IsTagged());
5312  // This assignment may cause element_addr to alias store_base.
5313  Register element_addr = scratch;
5314  SmiCheck check_needed =
5315  instr->hydrogen()->value()->type().IsHeapObject()
5316  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5317  // Compute the address of the modified element and store it in the scratch register.
5318  __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
5319  __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
5320  kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
5321  instr->hydrogen()->PointersToHereCheckForValue());
5322  }
5323 }
5324 
5325 
5326 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5327  DCHECK(ToRegister(instr->context()).is(cp));
5328  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
5329  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
5330  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
5331 
5332  Handle<Code> ic =
5333  CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
5334  CallCode(ic, RelocInfo::CODE_TARGET, instr);
5335 }
5336 
5337 
5338 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5339  Representation representation = instr->representation();
5340 
5341  Register object = ToRegister(instr->object());
5342  HObjectAccess access = instr->hydrogen()->access();
5343  int offset = access.offset();
5344 
5345  if (access.IsExternalMemory()) {
5346  DCHECK(!instr->hydrogen()->has_transition());
5347  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5348  Register value = ToRegister(instr->value());
5349  __ Store(value, MemOperand(object, offset), representation);
5350  return;
5351  }
5352 
5353  __ AssertNotSmi(object);
5354 
5355  if (representation.IsDouble()) {
5356  DCHECK(access.IsInobject());
5357  DCHECK(!instr->hydrogen()->has_transition());
5358  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5359  FPRegister value = ToDoubleRegister(instr->value());
5360  __ Str(value, FieldMemOperand(object, offset));
5361  return;
5362  }
5363 
5364  Register value = ToRegister(instr->value());
5365 
5366  DCHECK(!representation.IsSmi() ||
5367  !instr->value()->IsConstantOperand() ||
5368  IsInteger32Constant(LConstantOperand::cast(instr->value())));
5369 
5370  if (instr->hydrogen()->has_transition()) {
5371  Handle<Map> transition = instr->hydrogen()->transition_map();
5372  AddDeprecationDependency(transition);
5373  // Store the new map value.
5374  Register new_map_value = ToRegister(instr->temp0());
5375  __ Mov(new_map_value, Operand(transition));
5376  __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
5377  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5378  // Update the write barrier for the map field.
5379  __ RecordWriteForMap(object,
5380  new_map_value,
5381  ToRegister(instr->temp1()),
5382  GetLinkRegisterState(),
5383  kSaveFPRegs);
5384  }
5385  }
5386 
5387  // Do the store.
5388  Register destination;
5389  if (access.IsInobject()) {
5390  destination = object;
5391  } else {
5392  Register temp0 = ToRegister(instr->temp0());
5393  __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5394  destination = temp0;
5395  }
5396 
5397  if (representation.IsSmi() &&
5398  instr->hydrogen()->value()->representation().IsInteger32()) {
5399  DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5400 #ifdef DEBUG
5401  Register temp0 = ToRegister(instr->temp0());
5402  __ Ldr(temp0, FieldMemOperand(destination, offset));
5403  __ AssertSmi(temp0);
5404  // If destination aliased temp0, restore it to the address calculated
5405  // earlier.
5406  if (destination.Is(temp0)) {
5407  DCHECK(!access.IsInobject());
5408  __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
5409  }
5410 #endif
5411  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5412  STATIC_ASSERT(kSmiTag == 0);
5413  __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5414  Representation::Integer32());
5415  } else {
5416  __ Store(value, FieldMemOperand(destination, offset), representation);
5417  }
5418  if (instr->hydrogen()->NeedsWriteBarrier()) {
5419  __ RecordWriteField(destination,
5420  offset,
5421  value, // Clobbered.
5422  ToRegister(instr->temp1()), // Clobbered.
5423  GetLinkRegisterState(),
5424  kSaveFPRegs,
5425  EMIT_REMEMBERED_SET,
5426  instr->hydrogen()->SmiCheckForWriteBarrier(),
5427  instr->hydrogen()->PointersToHereCheckForValue());
5428  }
5429 }
5430 
5431 
5432 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5433  DCHECK(ToRegister(instr->context()).is(cp));
5434  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
5435  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
5436 
5437  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
5438  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
5439  CallCode(ic, RelocInfo::CODE_TARGET, instr);
5440 }
5441 
5442 
5443 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5444  DCHECK(ToRegister(instr->context()).is(cp));
5445  DCHECK(ToRegister(instr->left()).Is(x1));
5446  DCHECK(ToRegister(instr->right()).Is(x0));
5447  StringAddStub stub(isolate(),
5448  instr->hydrogen()->flags(),
5449  instr->hydrogen()->pretenure_flag());
5450  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5451 }
5452 
5453 
5454 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5455  class DeferredStringCharCodeAt: public LDeferredCode {
5456  public:
5457  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5458  : LDeferredCode(codegen), instr_(instr) { }
5459  virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
5460  virtual LInstruction* instr() { return instr_; }
5461  private:
5462  LStringCharCodeAt* instr_;
5463  };
5464 
5465  DeferredStringCharCodeAt* deferred =
5466  new(zone()) DeferredStringCharCodeAt(this, instr);
5467 
5468  StringCharLoadGenerator::Generate(masm(),
5469  ToRegister(instr->string()),
5470  ToRegister32(instr->index()),
5471  ToRegister(instr->result()),
5472  deferred->entry());
5473  __ Bind(deferred->exit());
5474 }
5475 
5476 
5477 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5478  Register string = ToRegister(instr->string());
5479  Register result = ToRegister(instr->result());
5480 
5481  // TODO(3095996): Get rid of this. For now, we need to make the
5482  // result register contain a valid pointer because it is already
5483  // contained in the register pointer map.
5484  __ Mov(result, 0);
5485 
5486  PushSafepointRegistersScope scope(this);
5487  __ Push(string);
5488  // Push the index as a smi. This is safe because of the checks in
5489  // DoStringCharCodeAt above.
5490  Register index = ToRegister(instr->index());
5491  __ SmiTagAndPush(index);
5492 
5493  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
5494  instr->context());
5495  __ AssertSmi(x0);
5496  __ SmiUntag(x0);
5497  __ StoreToSafepointRegisterSlot(x0, result);
5498 }
5499 
5500 
5501 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5502  class DeferredStringCharFromCode: public LDeferredCode {
5503  public:
5504  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5505  : LDeferredCode(codegen), instr_(instr) { }
5506  virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5507  virtual LInstruction* instr() { return instr_; }
5508  private:
5509  LStringCharFromCode* instr_;
5510  };
5511 
5512  DeferredStringCharFromCode* deferred =
5513  new(zone()) DeferredStringCharFromCode(this, instr);
5514 
5515  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
5516  Register char_code = ToRegister32(instr->char_code());
5517  Register result = ToRegister(instr->result());
5518 
5519  __ Cmp(char_code, String::kMaxOneByteCharCode);
5520  __ B(hi, deferred->entry());
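 // One-byte char codes are served from the single character string cache; an
 // undefined entry means the string is not cached, so fall back to the
 // deferred runtime call.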
5521  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5522  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
5523  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
5524  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5525  __ B(eq, deferred->entry());
5526  __ Bind(deferred->exit());
5527 }
5528 
5529 
5530 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5531  Register char_code = ToRegister(instr->char_code());
5532  Register result = ToRegister(instr->result());
5533 
5534  // TODO(3095996): Get rid of this. For now, we need to make the
5535  // result register contain a valid pointer because it is already
5536  // contained in the register pointer map.
5537  __ Mov(result, 0);
5538 
5539  PushSafepointRegistersScope scope(this);
5540  __ SmiTagAndPush(char_code);
5541  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5542  __ StoreToSafepointRegisterSlot(x0, result);
5543 }
5544 
5545 
5546 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
5547  DCHECK(ToRegister(instr->context()).is(cp));
5548  Token::Value op = instr->op();
5549 
5550  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
5551  CallCode(ic, RelocInfo::CODE_TARGET, instr);
5552  InlineSmiCheckInfo::EmitNotInlined(masm());
5553 
5554  Condition condition = TokenToCondition(op, false);
5555 
5556  EmitCompareAndBranch(instr, condition, x0, 0);
5557 }
5558 
5559 
5560 void LCodeGen::DoSubI(LSubI* instr) {
5561  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5562  Register result = ToRegister32(instr->result());
5563  Register left = ToRegister32(instr->left());
5564  Operand right = ToShiftedRightOperand32(instr->right(), instr);
5565 
5566  if (can_overflow) {
5567  __ Subs(result, left, right);
5568  DeoptimizeIf(vs, instr, "overflow");
5569  } else {
5570  __ Sub(result, left, right);
5571  }
5572 }
5573 
5574 
5575 void LCodeGen::DoSubS(LSubS* instr) {
5576  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5577  Register result = ToRegister(instr->result());
5578  Register left = ToRegister(instr->left());
5579  Operand right = ToOperand(instr->right());
5580  if (can_overflow) {
5581  __ Subs(result, left, right);
5582  DeoptimizeIf(vs, instr, "overflow");
5583  } else {
5584  __ Sub(result, left, right);
5585  }
5586 }
5587 
5588 
5589 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
5590  LOperand* value,
5591  LOperand* temp1,
5592  LOperand* temp2) {
5593  Register input = ToRegister(value);
5594  Register scratch1 = ToRegister(temp1);
5595  DoubleRegister dbl_scratch1 = double_scratch();
5596 
5597  Label done;
5598 
5599  if (instr->truncating()) {
5600  Register output = ToRegister(instr->result());
5601  Label check_bools;
5602 
5603  // If it's not a heap number, jump to the boolean and undefined checks.
5604  __ JumpIfNotHeapNumber(input, &check_bools);
5605 
5606  // A heap number: load value and convert to int32 using truncating function.
5607  __ TruncateHeapNumberToI(output, input);
5608  __ B(&done);
5609 
5610  __ Bind(&check_bools);
5611 
5612  Register true_root = output;
5613  Register false_root = scratch1;
5614  __ LoadTrueFalseRoots(true_root, false_root);
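 // Cset leaves 1 in the output iff the input is 'true'. The Ccmp compares
 // against 'false' only when the input was not 'true' (and forces the Z flag
 // otherwise), so 'eq' below means the input was one of the two booleans.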
5615  __ Cmp(input, true_root);
5616  __ Cset(output, eq);
5617  __ Ccmp(input, false_root, ZFlag, ne);
5618  __ B(eq, &done);
5619 
5620  // Output already contains zero; undefined is converted to zero for
5621  // truncating conversions.
5622  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
5623  "not a heap number/undefined/true/false");
5624  } else {
5625  Register output = ToRegister32(instr->result());
5626  DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
5627 
5628  DeoptimizeIfNotHeapNumber(input, instr);
5629 
5630  // A heap number: load value and convert to int32 using non-truncating
5631  // function. If the result is out of range, branch to deoptimize.
5632  __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
5633  __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
5634  DeoptimizeIf(ne, instr, "lost precision or NaN");
5635 
5636  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5637  __ Cmp(output, 0);
5638  __ B(ne, &done);
5639  __ Fmov(scratch1, dbl_scratch1);
5640  DeoptimizeIfNegative(scratch1, instr, "minus zero");
5641  }
5642  }
5643  __ Bind(&done);
5644 }
5645 
5646 
5647 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5648  class DeferredTaggedToI: public LDeferredCode {
5649  public:
5650  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5651  : LDeferredCode(codegen), instr_(instr) { }
5652  virtual void Generate() {
5653  codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
5654  instr_->temp2());
5655  }
5656 
5657  virtual LInstruction* instr() { return instr_; }
5658  private:
5659  LTaggedToI* instr_;
5660  };
5661 
5662  Register input = ToRegister(instr->value());
5663  Register output = ToRegister(instr->result());
5664 
5665  if (instr->hydrogen()->value()->representation().IsSmi()) {
5666  __ SmiUntag(output, input);
5667  } else {
5668  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5669 
5670  __ JumpIfNotSmi(input, deferred->entry());
5671  __ SmiUntag(output, input);
5672  __ Bind(deferred->exit());
5673  }
5674 }
5675 
5676 
5677 void LCodeGen::DoThisFunction(LThisFunction* instr) {
5678  Register result = ToRegister(instr->result());
5679  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5680 }
5681 
5682 
5683 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5684  DCHECK(ToRegister(instr->value()).Is(x0));
5685  DCHECK(ToRegister(instr->result()).Is(x0));
5686  __ Push(x0);
5687  CallRuntime(Runtime::kToFastProperties, 1, instr);
5688 }
5689 
5690 
5691 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5692  DCHECK(ToRegister(instr->context()).is(cp));
5693  Label materialized;
5694  // Registers will be used as follows:
5695  // x7 = literals array.
5696  // x1 = regexp literal.
5697  // x0 = regexp literal clone.
5698  // x10-x12 are used as temporaries.
5699  int literal_offset =
5700  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5701  __ LoadObject(x7, instr->hydrogen()->literals());
5702  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
5703  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
5704 
5705  // Create regexp literal using runtime function
5706  // Result will be in x0.
5707  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5708  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
5709  __ Mov(x10, Operand(instr->hydrogen()->flags()));
5710  __ Push(x7, x12, x11, x10);
5711  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5712  __ Mov(x1, x0);
5713 
5714  __ Bind(&materialized);
5715  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5716  Label allocated, runtime_allocate;
5717 
5718  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
5719  __ B(&allocated);
5720 
5721  __ Bind(&runtime_allocate);
5722  __ Mov(x0, Smi::FromInt(size));
5723  __ Push(x1, x0);
5724  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5725  __ Pop(x1);
5726 
5727  __ Bind(&allocated);
5728  // Copy the content into the newly allocated memory.
5729  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
5730 }
5731 
5732 
5733 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5734  Register object = ToRegister(instr->object());
5735 
5736  Handle<Map> from_map = instr->original_map();
5737  Handle<Map> to_map = instr->transitioned_map();
5738  ElementsKind from_kind = instr->from_kind();
5739  ElementsKind to_kind = instr->to_kind();
5740 
5741  Label not_applicable;
5742 
5743  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
5744  Register temp1 = ToRegister(instr->temp1());
5745  Register new_map = ToRegister(instr->temp2());
5746  __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
5747  __ Mov(new_map, Operand(to_map));
5748  __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
5749  // Write barrier.
5750  __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
5751  kDontSaveFPRegs);
5752  } else {
5753  {
5754  UseScratchRegisterScope temps(masm());
5755  // Use the temp register only in a restricted scope - the codegen checks
5756  // that we do not use any register across a call.
5757  __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
5758  DONT_DO_SMI_CHECK);
5759  }
5760  DCHECK(object.is(x0));
5761  DCHECK(ToRegister(instr->context()).is(cp));
5762  PushSafepointRegistersScope scope(this);
5763  __ Mov(x1, Operand(to_map));
5764  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5765  TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
5766  __ CallStub(&stub);
5767  RecordSafepointWithRegisters(
5768  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
5769  }
5770  __ Bind(&not_applicable);
5771 }
5772 
5773 
5774 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5775  Register object = ToRegister(instr->object());
5776  Register temp1 = ToRegister(instr->temp1());
5777  Register temp2 = ToRegister(instr->temp2());
5778 
5779  Label no_memento_found;
5780  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
5781  DeoptimizeIf(eq, instr, "memento found");
5782  __ Bind(&no_memento_found);
5783 }
5784 
5785 
5786 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5787  DoubleRegister input = ToDoubleRegister(instr->value());
5788  Register result = ToRegister(instr->result());
5789  __ TruncateDoubleToI(result, input);
5790  if (instr->tag_result()) {
5791  __ SmiTag(result, result);
5792  }
5793 }
5794 
5795 
5796 void LCodeGen::DoTypeof(LTypeof* instr) {
5797  Register input = ToRegister(instr->value());
5798  __ Push(input);
5799  CallRuntime(Runtime::kTypeof, 1, instr);
5800 }
5801 
5802 
5803 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5804  Handle<String> type_name = instr->type_literal();
5805  Label* true_label = instr->TrueLabel(chunk_);
5806  Label* false_label = instr->FalseLabel(chunk_);
5807  Register value = ToRegister(instr->value());
5808 
5809  Factory* factory = isolate()->factory();
5810  if (String::Equals(type_name, factory->number_string())) {
5811  __ JumpIfSmi(value, true_label);
5812 
5813  int true_block = instr->TrueDestination(chunk_);
5814  int false_block = instr->FalseDestination(chunk_);
5815  int next_block = GetNextEmittedBlock();
5816 
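 // Pick the cheapest branch shape: a single goto when both destinations are
 // the same block, an inverted branch when the true block is emitted next,
 // and an unconditional jump to the false block only when it does not fall
 // through.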
5817  if (true_block == false_block) {
5818  EmitGoto(true_block);
5819  } else if (true_block == next_block) {
5820  __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
5821  } else {
5822  __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
5823  if (false_block != next_block) {
5824  __ B(chunk_->GetAssemblyLabel(false_block));
5825  }
5826  }
5827 
5828  } else if (String::Equals(type_name, factory->string_string())) {
5829  DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
5830  Register map = ToRegister(instr->temp1());
5831  Register scratch = ToRegister(instr->temp2());
5832 
5833  __ JumpIfSmi(value, false_label);
5834  __ JumpIfObjectType(
5835  value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
5836  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5837  EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5838 
5839  } else if (String::Equals(type_name, factory->symbol_string())) {
5840  DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
5841  Register map = ToRegister(instr->temp1());
5842  Register scratch = ToRegister(instr->temp2());
5843 
5844  __ JumpIfSmi(value, false_label);
5845  __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
5846  EmitBranch(instr, eq);
5847 
5848  } else if (String::Equals(type_name, factory->boolean_string())) {
5849  __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
5850  __ CompareRoot(value, Heap::kFalseValueRootIndex);
5851  EmitBranch(instr, eq);
5852 
5853  } else if (String::Equals(type_name, factory->undefined_string())) {
5854  DCHECK(instr->temp1() != NULL);
5855  Register scratch = ToRegister(instr->temp1());
5856 
5857  __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
5858  __ JumpIfSmi(value, false_label);
5859  // Check for undetectable objects and jump to the true branch in this case.
5860  __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5861  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5862  EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
5863 
5864  } else if (String::Equals(type_name, factory->function_string())) {
5865  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5866  DCHECK(instr->temp1() != NULL);
5867  Register type = ToRegister(instr->temp1());
5868 
5869  __ JumpIfSmi(value, false_label);
5870  __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
5871  // HeapObject's type has been loaded into type register by JumpIfObjectType.
5872  EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
5873 
5874  } else if (String::Equals(type_name, factory->object_string())) {
5875  DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
5876  Register map = ToRegister(instr->temp1());
5877  Register scratch = ToRegister(instr->temp2());
5878 
5879  __ JumpIfSmi(value, false_label);
5880  __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
5881  __ JumpIfObjectType(value, map, scratch,
5882  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
5883  __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5884  __ B(gt, false_label);
5885  // Check for undetectable objects => false.
5886  __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5887  EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5888 
5889  } else {
5890  __ B(false_label);
5891  }
5892 }
5893 
5894 
5895 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5896  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5897 }
5898 
5899 
5900 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5901  Register object = ToRegister(instr->value());
5902  Register map = ToRegister(instr->map());
5903  Register temp = ToRegister(instr->temp());
5904  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5905  __ Cmp(map, temp);
5906  DeoptimizeIf(ne, instr, "wrong map");
5907 }
5908 
5909 
5910 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5911  Register receiver = ToRegister(instr->receiver());
5912  Register function = ToRegister(instr->function());
5913  Register result = ToRegister(instr->result());
5914 
5915  // If the receiver is null or undefined, we have to pass the global object as
5916  // a receiver to normal functions. Values have to be passed unchanged to
5917  // builtins and strict-mode functions.
5918  Label global_object, done, copy_receiver;
5919 
5920  if (!instr->hydrogen()->known_function()) {
5921  __ Ldr(result, FieldMemOperand(function,
5922  JSFunction::kSharedFunctionInfoOffset));
5923 
5924  // CompilerHints is an int32 field. See objects.h.
5925  __ Ldr(result.W(),
5926  FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
5927 
5928  // Do not transform the receiver to object for strict mode functions.
5929  __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);
5930 
5931  // Do not transform the receiver to object for builtins.
5932  __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
5933  }
5934 
5935  // Normal function. Replace undefined or null with global receiver.
5936  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5937  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5938 
5939  // Deoptimize if the receiver is not a JS object.
5940  DeoptimizeIfSmi(receiver, instr, "Smi");
5941  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
5942  __ B(ge, &copy_receiver);
5943  Deoptimize(instr, "not a JavaScript object");
5944 
5945  __ Bind(&global_object);
5946  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
5947  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
5948  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
5949  __ B(&done);
5950 
5951  __ Bind(&copy_receiver);
5952  __ Mov(result, receiver);
5953  __ Bind(&done);
5954 }
5955 
5956 
5957 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5958  Register result,
5959  Register object,
5960  Register index) {
5961  PushSafepointRegistersScope scope(this);
5962  __ Push(object);
5963  __ Push(index);
5964  __ Mov(cp, 0);
5965  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5966  RecordSafepointWithRegisters(
5967  instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5968  __ StoreToSafepointRegisterSlot(x0, result);
5969 }
5970 
5971 
5972 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5973  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5974  public:
5975  DeferredLoadMutableDouble(LCodeGen* codegen,
5976  LLoadFieldByIndex* instr,
5977  Register result,
5978  Register object,
5979  Register index)
5980  : LDeferredCode(codegen),
5981  instr_(instr),
5982  result_(result),
5983  object_(object),
5984  index_(index) {
5985  }
5986  virtual void Generate() OVERRIDE {
5987  codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5988  }
5989  virtual LInstruction* instr() OVERRIDE { return instr_; }
5990  private:
5991  LLoadFieldByIndex* instr_;
5992  Register result_;
5993  Register object_;
5994  Register index_;
5995  };
5996  Register object = ToRegister(instr->object());
5997  Register index = ToRegister(instr->index());
5998  Register result = ToRegister(instr->result());
5999 
6000  __ AssertSmi(index);
6001 
6002  DeferredLoadMutableDouble* deferred;
6003  deferred = new(zone()) DeferredLoadMutableDouble(
6004  this, instr, result, object, index);
6005 
6006  Label out_of_object, done;
6007 
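 // Bit 0 of the untagged index flags a field that holds a mutable HeapNumber
 // and must be loaded through the runtime in the deferred code; the remaining
 // bits are the (possibly negative) field index.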
6008  __ TestAndBranchIfAnySet(
6009  index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
6010  __ Mov(index, Operand(index, ASR, 1));
6011 
6012  __ Cmp(index, Smi::FromInt(0));
6013  __ B(lt, &out_of_object);
6014 
6015  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
6016  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
6017  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
6018 
6019  __ B(&done);
6020 
6021  __ Bind(&out_of_object);
6022  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
6023  // The index is equal to the negated out-of-object property index plus 1.
6024  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
6025  __ Ldr(result, FieldMemOperand(result,
6026  FixedArray::kHeaderSize - kPointerSize));
6027  __ Bind(deferred->exit());
6028  __ Bind(&done);
6029 }
6030 
6031 
6032 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
6033  Register context = ToRegister(instr->context());
6034  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
6035 }
6036 
6037 
6038 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
6039  Handle<ScopeInfo> scope_info = instr->scope_info();
6040  __ Push(scope_info);
6041  __ Push(ToRegister(instr->function()));
6042  CallRuntime(Runtime::kPushBlockContext, 2, instr);
6043  RecordSafepoint(Safepoint::kNoLazyDeopt);
6044 }
6045 
6046 
6047 
6048 } } // namespace v8::internal
An object reference managed by the v8 garbage collector.
Definition: v8.h:198
static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind)
Definition: objects-inl.h:1591
static const int kCallSizeWithRelocation
static U update(U previous, T value)
Definition: utils.h:223
static U encode(T value)
Definition: utils.h:217
virtual void Emit(Label *label) const
virtual void EmitInverted(Label *label) const
BranchIfHeapNumber(LCodeGen *codegen, const Register &value)
virtual void EmitInverted(Label *label) const
BranchIfNonZeroNumber(LCodeGen *codegen, const FPRegister &value, const FPRegister &scratch)
virtual void Emit(Label *label) const
const Heap::RootListIndex index_
virtual void Emit(Label *label) const
virtual void EmitInverted(Label *label) const
BranchIfRoot(LCodeGen *codegen, const Register &value, Heap::RootListIndex index)
virtual void EmitInverted(Label *label) const
BranchOnCondition(LCodeGen *codegen, Condition cond)
virtual void Emit(Label *label) const
static const int kValueOffset
Definition: objects.h:9446
static const int kHeaderSize
Definition: objects.h:5373
virtual void Emit(Label *label) const
virtual void EmitInverted(Label *label) const
CompareAndBranch(LCodeGen *codegen, Condition cond, const Register &lhs, const Operand &rhs)
static Handle< DeoptimizationInputData > New(Isolate *isolate, int deopt_entry_count, PretenureFlag pretenure)
Definition: objects.cc:7918
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
Definition: deoptimizer.cc:672
static const int kEnumCacheOffset
Definition: objects.h:3028
virtual void AfterCall() const
virtual void BeforeCall(int call_size) const
SafepointGenerator(LCodeGen *codegen, LPointerMap *pointers, Safepoint::DeoptMode mode)
static const int kHeaderSize
Definition: objects.h:2393
static int OffsetOfElementAt(int index)
Definition: objects.h:2455
static int SizeFor(int length)
Definition: objects.h:2452
static const int kGlobalProxyOffset
Definition: objects.h:7461
virtual HSourcePosition position() const
static Handle< T > cast(Handle< S > that)
Definition: handles.h:116
static const int kValueOffset
Definition: objects.h:1506
static const int kMapOffset
Definition: objects.h:1427
static void EmitNotInlined(MacroAssembler *masm)
static Register right()
Definition: code-stubs.h:686
static Register left()
Definition: code-stubs.h:685
static const int kValueOffset
Definition: objects.h:7623
static const int kCacheStampOffset
Definition: objects.h:7631
static const int kSharedFunctionInfoOffset
Definition: objects.h:7379
static const int kContextOffset
Definition: objects.h:7381
static const int kCodeEntryOffset
Definition: objects.h:7376
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:7377
static const int kHeaderSize
Definition: objects.h:2195
static const int kPropertiesOffset
Definition: objects.h:2193
static const int kSize
Definition: objects.h:7772
static const int kInObjectFieldCount
Definition: objects.h:7826
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction *instr, const char *detail)
bool IsNextEmittedBlock(int block_id) const
void DeoptimizeBranch(LInstruction *instr, const char *detail, BranchType branch_type, Register reg=NoReg, int bit=-1, Deoptimizer::BailoutType *override_bailout_type=NULL)
DwVfpRegister ToDoubleRegister(LOperand *op) const
void RecordSafepointWithRegisters(LPointerMap *pointers, int arguments, Safepoint::DeoptMode mode)
bool IsSmi(LConstantOperand *op) const
LinkRegisterStatus GetLinkRegisterState() const
TranslationBuffer translations_
MemOperand BuildSeqStringOperand(Register string, LOperand *index, String::Encoding encoding)
void DeoptimizeIfZero(Register rt, LInstruction *instr, const char *detail)
Condition EmitIsString(Register input, Register temp1, Label *is_not_string, SmiCheck check_needed)
MemOperand PrepareKeyedArrayOperand(Register base, Register elements, Register key, bool key_is_tagged, ElementsKind elements_kind, Representation representation, int base_offset)
void EmitCompareAndBranch(InstrType instr, Condition condition, const Register &lhs, const Operand &rhs)
void DoDeferredStackCheck(LStackCheck *instr)
void DeoptimizeIfSmi(Register rt, LInstruction *instr, const char *detail)
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction *instr, const char *detail)
SafepointTableBuilder safepoints_
void EmitVectorLoadICRegisters(T *instr)
static Condition TokenToCondition(Token::Value op, bool is_unsigned)
void DeoptimizeIfNotZero(Register rt, LInstruction *instr, const char *detail)
ZoneList< Handle< Object > > deoptimization_literals_
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal *instr, Label *map_check, Label *bool_load)
void DeoptimizeIfNotHeapNumber(Register object, LInstruction *instr)
void PopulateDeoptimizationLiteralsWithInlinedFunctions()
void AddToTranslation(LEnvironment *environment, Translation *translation, LOperand *op, bool is_tagged, bool is_uint32, int *object_index_pointer, int *dematerialized_index_pointer)
Operand ToShiftedRightOperand32(LOperand *right, LI *shift_info)
Operand ToOperand32(LOperand *op)
ZoneList< LEnvironment * > deoptimizations_
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, LInstruction *instr, const char *detail)
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, LInstruction *instr, LOperand *context)
int32_t ToInteger32(LConstantOperand *op) const
LPlatformChunk * chunk() const
void FinishCode(Handle< Code > code)
void DeoptimizeIfNotSmi(Register rt, LInstruction *instr, const char *detail)
int LookupDestination(int block_id) const
void DoDeferredAllocate(LAllocate *instr)
void RecordSafepoint(LPointerMap *pointers, Safepoint::Kind kind, int arguments, Safepoint::DeoptMode mode)
int JSShiftAmountFromLConstant(LOperand *constant)
void DoDeferredTaggedToI(LTaggedToI *instr)
void CallCodeGeneric(Handle< Code > code, RelocInfo::Mode mode, LInstruction *instr, SafepointMode safepoint_mode, TargetAddressStorageMode storage_mode=CAN_INLINE_TARGET_ADDRESS)
void DoDeferredStringCharCodeAt(LStringCharCodeAt *instr)
void CallCode(Handle< Code > code, RelocInfo::Mode mode, LInstruction *instr, TargetAddressStorageMode storage_mode=CAN_INLINE_TARGET_ADDRESS)
Safepoint::Kind expected_safepoint_kind_
ZoneList< LDeferredCode * > deferred_
void EmitBranchGeneric(InstrType instr, const BranchGenerator &branch)
void EmitBranchIfNonZeroNumber(InstrType instr, const FPRegister &value, const FPRegister &scratch)
Handle< Object > ToHandle(LConstantOperand *op) const
void RegisterEnvironmentForDeoptimization(LEnvironment *environment, Safepoint::DeoptMode mode)
void DeoptimizeIfNegative(Register rt, LInstruction *instr, const char *detail)
void LoadContextFromDeferred(LOperand *context)
void DoDeferredNumberTagU(LInstruction *instr, LOperand *value, LOperand *temp1, LOperand *temp2)
void DoDeferredInstanceMigration(LCheckMaps *instr, Register object)
void DoDeferredLoadMutableDouble(LLoadFieldByIndex *instr, Register result, Register object, Register index)
int DefineDeoptimizationLiteral(Handle< Object > literal)
Register ToRegister32(LOperand *op) const
void DeoptimizeIf(Condition condition, LInstruction *instr, const char *detail, Deoptimizer::BailoutType bailout_type)
void CallKnownFunction(Handle< JSFunction > function, int formal_parameter_count, int arity, LInstruction *instr, R1State r1_state)
void WriteTranslation(LEnvironment *environment, Translation *translation)
void DoDeferredMathAbsTagged(LMathAbsTagged *instr, Label *exit, Label *allocation_entry)
void EmitTestAndBranch(InstrType instr, Condition condition, const Register &value, uint64_t mask)
Operand ToOperand(LOperand *op)
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction *instr, const char *detail)
double ToDouble(LConstantOperand *op) const
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, LInstruction *instr, const char *detail)
Register ToRegister(LOperand *op) const
void RecordAndWritePosition(int position) OVERRIDE
void PopulateDeoptimizationData(Handle< Code > code)
Smi * ToSmi(LConstantOperand *op) const
void CallRuntime(const Runtime::Function *function, int num_arguments, LInstruction *instr, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void EmitBranchIfRoot(InstrType instr, const Register &value, Heap::RootListIndex index)
bool IsInteger32Constant(LConstantOperand *op) const
void Deoptimize(LInstruction *instr, const char *detail, Deoptimizer::BailoutType *override_bailout_type=NULL)
void EmitBranchIfHeapNumber(InstrType instr, const Register &value)
void DoDeferredStringCharFromCode(LStringCharFromCode *instr)
ZoneList< Deoptimizer::JumpTableEntry > jump_table_
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE
MemOperand PrepareKeyedExternalArrayOperand(Register key, Register base, Register scratch, bool key_is_smi, bool key_is_constant, int constant_key, ElementsKind elements_kind, int base_offset)
MemOperand ToMemOperand(LOperand *op) const
void GenerateBodyInstructionPre(LInstruction *instr) OVERRIDE
void RecordSafepointWithLazyDeopt(LInstruction *instr, SafepointMode safepoint_mode)
LCodeGen(LChunk *chunk, MacroAssembler *assembler, CompilationInfo *info)
void EmitBranch(InstrType instr, Condition condition)
void DoDeferredNumberTagD(LNumberTagD *instr)
LParallelMove * GetParallelMove(InnerPosition pos)
Definition: lithium-arm.h:362
virtual const char * Mnemonic() const =0
LEnvironment * environment() const
Definition: lithium-arm.h:231
virtual LOperand * result() const =0
HValue * hydrogen_value() const
Definition: lithium-arm.h:239
LPointerMap * pointer_map() const
Definition: lithium-arm.h:235
ElementsKind elements_kind() const
uint32_t base_offset() const
int index() const
Definition: lithium.h:41
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
Definition: list-inl.h:17
static const Register ReceiverRegister()
static const Register NameRegister()
static void GenerateMiss(MacroAssembler *masm)
static const int kIsUndetectable
Definition: objects.h:6244
static const int kBitFieldOffset
Definition: objects.h:6228
static const int kInstanceTypeOffset
Definition: objects.h:6229
static const int kConstructorOffset
Definition: objects.h:6191
static const int kPrototypeOffset
Definition: objects.h:6190
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
static const Register exponent()
static const Register exponent()
static const int kHashShift
Definition: objects.h:8499
static const int kHashFieldOffset
Definition: objects.h:8486
static Operand UntagSmiAndScale(Register smi, int scale)
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:754
static void MaybeCallEntryHook(MacroAssembler *masm)
static const int kNoPosition
Definition: assembler.h:317
static Representation Integer32()
int num_parameters() const
Definition: scopes.h:321
Variable * parameter(int index) const
Definition: scopes.h:316
static const int kHeaderSize
Definition: objects.h:8941
static const int kDontAdaptArgumentsSentinel
Definition: objects.h:6888
static const int kInstanceClassNameOffset
Definition: objects.h:6897
static const int kCompilerHintsOffset
Definition: objects.h:6961
static const int kMaxValue
Definition: objects.h:1272
static Smi * FromInt(int value)
Definition: objects-inl.h:1321
static const int kFixedFrameSizeFromFp
Definition: frames.h:157
static const int kContextOffset
Definition: frames.h:162
static const int kCallerSPOffset
Definition: frames.h:167
static const int kMarkerOffset
Definition: frames.h:161
static const int kCallerFPOffset
Definition: frames.h:165
static const Register ReceiverRegister()
static const Register NameRegister()
static const Register ValueRegister()
static Handle< Code > initialize_stub(Isolate *isolate, StrictMode strict_mode)
Definition: ic.cc:1346
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const unsigned int kContainsCachedArrayIndexMask
Definition: objects.h:8618
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8811
static const int kArrayIndexValueBits
Definition: objects.h:8601
static const int kLengthOffset
Definition: objects.h:8802
bool Equals(String *other)
Definition: objects-inl.h:3336
TestAndBranch(LCodeGen *codegen, Condition cond, const Register &value, uint64_t mask)
virtual void EmitInverted(Label *label) const
virtual void Emit(Label *label) const
static TypeFeedbackId None()
Definition: utils.h:945
static const Register VectorRegister()
#define OVERRIDE
#define FINAL
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf map
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print trace line after scavenger collection print cumulative GC statistics in only print modified registers Trace simulator debug messages Implied by trace sim abort randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot A filename with extra code to be included in the A file to write the raw snapshot bytes to(mksnapshot only)") DEFINE_STRING(raw_context_file
enable harmony numeric enable harmony object literal extensions Optimize object size
DEFINE_BOOL(harmony_numeric_literals, "enable harmony numeric literals (0o77, 0b11)")
DEFINE_BOOL(harmony_object_literals, "enable harmony object literal extensions")
DEFINE_BOOL(enable_always_align_csp, ...)
#define __
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
#define ASM_UNIMPLEMENTED_BREAK(message)
@ CALL_FUNCTION
AllocationFlags
@ DOUBLE_ALIGNMENT
@ PRETENURE_OLD_POINTER_SPACE
@ TAG_OBJECT
@ PRETENURE_OLD_DATA_SPACE
int int32_t
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
int WhichPowerOf2(uint32_t x)
Definition: utils.h:37
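The two helpers above, IsPowerOfTwo32 and WhichPowerOf2, are the usual bit tricks for recognizing and decomposing power-of-two constants (for example when a multiplication or division is strength-reduced to a shift). A minimal sketch of the idea, with illustrative names that are not taken from the V8 sources:
#include <cstdint>
// Sketch only: a 32-bit value is a power of two iff it is non-zero and
// clearing its lowest set bit leaves zero.
inline bool IsPowerOfTwo32Sketch(uint32_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}
// Sketch only: for a power of two, the result is the index of its single set
// bit, i.e. the number of trailing zeros.
inline int WhichPowerOf2Sketch(uint32_t x) {
  int bit = 0;
  while (x > 1) {
    x >>= 1;
    ++bit;
  }
  return bit;
}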
const int kPointerSize
Definition: globals.h:129
const uint32_t kStringEncodingMask
Definition: objects.h:555
@ DONT_DO_SMI_CHECK
Definition: globals.h:640
@ DO_SMI_CHECK
Definition: globals.h:641
bool Is(Object *obj)
const int KB
Definition: globals.h:106
Condition CommuteCondition(Condition cond)
Definition: constants-arm.h:93
bool EvalComparison(Token::Value op, double op1, double op2)
Definition: assembler.cc:1488
@ TRACK_ALLOCATION_SITE
Definition: objects.h:8085
@ kSeqStringTag
Definition: objects.h:563
const int kSmiShift
@ ARGUMENTS_ADAPTOR
Definition: hydrogen.h:546
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register cp
const LowDwVfpRegister d1
const uint32_t kTwoByteStringTag
Definition: objects.h:556
const int kSmiTagSize
Definition: v8.h:5743
int MaskToBit(uint64_t mask)
const LowDwVfpRegister d0
const int kDoubleSize
Definition: globals.h:127
MemOperand GlobalObjectMemOperand()
const Register fp
DwVfpRegister DoubleRegister
const int64_t kWSignBit
const unsigned kWRegSizeInBits
const int kMaxInt
Definition: globals.h:109
const int kPointerSizeLog2
Definition: globals.h:147
MemOperand ContextMemOperand(Register context, int index)
@ LAST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:785
@ NUM_OF_CALLABLE_SPEC_OBJECT_TYPES
Definition: objects.h:788
@ JS_DATE_TYPE
Definition: objects.h:730
@ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:784
@ FIRST_JS_PROXY_TYPE
Definition: objects.h:778
@ JS_ARRAY_TYPE
Definition: objects.h:738
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
@ FIRST_SPEC_OBJECT_TYPE
Definition: objects.h:781
@ LAST_SPEC_OBJECT_TYPE
Definition: objects.h:782
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ JS_FUNCTION_PROXY_TYPE
Definition: objects.h:726
@ LAST_JS_PROXY_TYPE
Definition: objects.h:779
@ EXTERNAL_UINT16_ELEMENTS
Definition: elements-kind.h:36
@ UINT8_CLAMPED_ELEMENTS
Definition: elements-kind.h:52
@ EXTERNAL_INT16_ELEMENTS
Definition: elements-kind.h:35
@ EXTERNAL_UINT8_ELEMENTS
Definition: elements-kind.h:34
@ EXTERNAL_INT32_ELEMENTS
Definition: elements-kind.h:37
@ FAST_HOLEY_DOUBLE_ELEMENTS
Definition: elements-kind.h:27
@ SLOPPY_ARGUMENTS_ELEMENTS
Definition: elements-kind.h:31
@ EXTERNAL_INT8_ELEMENTS
Definition: elements-kind.h:33
@ EXTERNAL_FLOAT32_ELEMENTS
Definition: elements-kind.h:39
@ EXTERNAL_FLOAT64_ELEMENTS
Definition: elements-kind.h:40
@ FAST_HOLEY_SMI_ELEMENTS
Definition: elements-kind.h:17
@ EXTERNAL_UINT32_ELEMENTS
Definition: elements-kind.h:38
@ EXTERNAL_UINT8_CLAMPED_ELEMENTS
Definition: elements-kind.h:41
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
const uint32_t kOneByteStringTag
Definition: objects.h:557
@ NO_OVERWRITE
Definition: ic-state.h:58
int ElementsKindToShiftSize(ElementsKind elements_kind)
MemOperand FieldMemOperand(Register object, int offset)
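FieldMemOperand works together with kHeapObjectTag (listed further down): heap object pointers are tagged, so a field access has to compensate for the tag in the offset. A hedged sketch of the idea, using the V8 types named in this index and an illustrative name:
// Sketch only: a tagged heap pointer is the object address plus kHeapObjectTag,
// so a field at byte 'offset' is addressed relative to object - kHeapObjectTag.
inline MemOperand FieldMemOperandSketch(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}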
int32_t WhichPowerOf2Abs(int32_t x)
Definition: utils.h:168
int StackSlotOffset(int index)
Definition: lithium.cc:254
const int kUC16Size
Definition: globals.h:187
bool IsFastPackedElementsKind(ElementsKind kind)
@ NUMBER_CANDIDATE_IS_SMI
Definition: lithium.h:756
@ NUMBER_CANDIDATE_IS_ANY_TAGGED
Definition: lithium.h:757
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
const int64_t kXSignMask
const uint64_t kSmiShiftMask
static uint64_t double_to_rawbits(double value)
Definition: utils-arm64.h:34
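double_to_rawbits reinterprets a double as its IEEE-754 bit pattern, which is how a floating-point constant can be compared or materialized as a plain 64-bit value (the hole NaN constant kHoleNanInt64 below is one such raw-bit value). A minimal sketch, assuming nothing beyond the standard library:
#include <cstdint>
#include <cstring>
// Sketch only: copy the 8 bytes of the double into a uint64_t to obtain its
// bit pattern without violating strict aliasing.
static inline uint64_t double_to_rawbits_sketch(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}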
AllocationSiteOverrideMode
Definition: code-stubs.h:716
@ DISABLE_ALLOCATION_SITES
Definition: code-stubs.h:718
Condition NegateCondition(Condition cond)
Definition: constants-arm.h:86
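NegateCondition relies on ARM condition codes coming in adjacent true/false pairs (eq/ne, hs/lo, mi/pl, vs/vc, hi/ls, ge/lt, gt/le), so the inverse of a condition differs only in its lowest bit; 'al' (always) has no inverse and is handled separately by callers. A hedged sketch with an illustrative enum rather than the real Condition type:
// Sketch only: assumes the standard ARM encoding in which paired conditions
// differ in bit 0 (eq = 0, ne = 1, hs = 2, lo = 3, and so on).
enum ConditionSketch { kEqSketch = 0, kNeSketch = 1, kHsSketch = 2, kLoSketch = 3 };
inline ConditionSketch NegateConditionSketch(ConditionSketch cond) {
  // Not meaningful for 'al' (always); callers handle that case separately.
  return static_cast<ConditionSketch>(cond ^ 1);
}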
static InstanceType TestType(HHasInstanceTypeAndBranch *instr)
const int kMinInt
Definition: globals.h:110
T Abs(T a)
Definition: utils.h:153
const uint32_t kStringRepresentationMask
Definition: objects.h:561
const Register lr
byte * Address
Definition: globals.h:101
static Condition BranchCondition(HHasInstanceTypeAndBranch *instr)
@ NOT_CONTEXTUAL
Definition: objects.h:174
const unsigned kXRegSize
BranchType InvertBranchType(BranchType type)
@ OLD_DATA_SPACE
Definition: globals.h:361
@ OLD_POINTER_SPACE
Definition: globals.h:360
const int kHeapObjectTag
Definition: v8.h:5737
const Register no_reg
const int kSmiValueSize
Definition: v8.h:5806
static int ArgumentsOffsetWithoutFrame(int index)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
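UntagSmiFieldMemOperand and UntagSmiMemOperand (below) depend on the ARM64 smi layout: the 32-bit payload sits in the upper half of the 64-bit tagged word (kSmiShift is 32), so an untagged value can be obtained with a 32-bit load of that upper half. A sketch of the idea, assuming a little-endian target and the V8 constants named in this index (kBitsPerByte is assumed to be the usual 8):
// Sketch only: with kSmiShift == 32 on a little-endian target, the smi payload
// occupies the upper 4 bytes, so biasing the offset by kSmiShift / kBitsPerByte
// lets a 32-bit load return the untagged value directly.
inline MemOperand UntagSmiMemOperandSketch(Register object, int offset) {
  return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
}
inline MemOperand UntagSmiFieldMemOperandSketch(Register object, int offset) {
  return UntagSmiMemOperandSketch(object, offset - kHeapObjectTag);
}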
static const char * LabelType(LLabel *label)
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const uint64_t kHoleNanInt64
Definition: globals.h:660
const intptr_t kSmiTagMask
Definition: v8.h:5744
@ NO_CALL_CONSTRUCTOR_FLAGS
Definition: globals.h:478
const int kSmiTag
Definition: v8.h:5742
bool IsFastSmiElementsKind(ElementsKind kind)
const int kCharSize
Definition: globals.h:122
const unsigned kInstructionSize
MemOperand UntagSmiMemOperand(Register object, int offset)
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:130
const int64_t kXSignBit
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
#define IN
@ NONE
bool Is(const CPURegister &other) const
bool IsEquivalentTo(const JumpTableEntry &other) const
Definition: deoptimizer.h:130
bool is(DwVfpRegister reg) const
static DwVfpRegister FromAllocationIndex(int index)
static FPRegister FromAllocationIndex(unsigned int index)
static Register FromAllocationIndex(int index)
bool is(Register reg) const
#define T(name, string, precedence)
Definition: token.cc:25