lithium-codegen-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/arm/lithium-codegen-arm.h"
8 #include "src/arm/lithium-gap-resolver-arm.h"
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/code-stubs.h"
12 #include "src/hydrogen-osr.h"
13 #include "src/ic/ic.h"
14 #include "src/ic/stub-cache.h"
15 
16 namespace v8 {
17 namespace internal {
18 
19 
20 class SafepointGenerator FINAL : public CallWrapper {
21  public:
22  SafepointGenerator(LCodeGen* codegen,
23  LPointerMap* pointers,
24  Safepoint::DeoptMode mode)
25  : codegen_(codegen),
26  pointers_(pointers),
27  deopt_mode_(mode) { }
28  virtual ~SafepointGenerator() {}
29 
30  virtual void BeforeCall(int call_size) const OVERRIDE {}
31 
32  virtual void AfterCall() const OVERRIDE {
33  codegen_->RecordSafepoint(pointers_, deopt_mode_);
34  }
35 
36  private:
37  LCodeGen* codegen_;
38  LPointerMap* pointers_;
39  Safepoint::DeoptMode deopt_mode_;
40 };
41 
42 
43 #define __ masm()->
44 
45 bool LCodeGen::GenerateCode() {
46  LPhase phase("Z_Code generation", chunk());
47  DCHECK(is_unused());
48  status_ = GENERATING;
49 
50  // Open a frame scope to indicate that there is a frame on the stack. The
51  // NONE indicates that the scope shouldn't actually generate code to set up
52  // the frame (that is done in GeneratePrologue).
53  FrameScope frame_scope(masm_, StackFrame::NONE);
54 
55  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
56  GenerateJumpTable() && GenerateSafepointTable();
57 }
58 
59 
60 void LCodeGen::FinishCode(Handle<Code> code) {
61  DCHECK(is_done());
62  code->set_stack_slots(GetStackSlotCount());
63  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
64  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
65  PopulateDeoptimizationData(code);
66 }
67 
68 
69 void LCodeGen::SaveCallerDoubles() {
70  DCHECK(info()->saves_caller_doubles());
71  DCHECK(NeedsEagerFrame());
72  Comment(";;; Save clobbered callee double registers");
73  int count = 0;
74  BitVector* doubles = chunk()->allocated_double_registers();
75  BitVector::Iterator save_iterator(doubles);
76  while (!save_iterator.Done()) {
77  __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
78  MemOperand(sp, count * kDoubleSize));
79  save_iterator.Advance();
80  count++;
81  }
82 }
83 
84 
85 void LCodeGen::RestoreCallerDoubles() {
86  DCHECK(info()->saves_caller_doubles());
87  DCHECK(NeedsEagerFrame());
88  Comment(";;; Restore clobbered callee double registers");
89  BitVector* doubles = chunk()->allocated_double_registers();
90  BitVector::Iterator save_iterator(doubles);
91  int count = 0;
92  while (!save_iterator.Done()) {
93  __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
94  MemOperand(sp, count * kDoubleSize));
95  save_iterator.Advance();
96  count++;
97  }
98 }
99 
100 
101 bool LCodeGen::GeneratePrologue() {
102  DCHECK(is_generating());
103 
104  if (info()->IsOptimizing()) {
105  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
106 
107 #ifdef DEBUG
108  if (strlen(FLAG_stop_at) > 0 &&
109  info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
110  __ stop("stop_at");
111  }
112 #endif
113 
114  // r1: Callee's JS function.
115  // cp: Callee's context.
116  // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
117  // fp: Caller's frame pointer.
118  // lr: Caller's pc.
119 
120  // Sloppy mode functions and builtins need to replace the receiver with the
121  // global proxy when called as functions (without an explicit receiver
122  // object).
123  if (info_->this_has_uses() &&
124  info_->strict_mode() == SLOPPY &&
125  !info_->is_native()) {
126  Label ok;
127  int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
128  __ ldr(r2, MemOperand(sp, receiver_offset));
129  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
130  __ b(ne, &ok);
131 
132  __ ldr(r2, GlobalObjectOperand());
133  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
134 
135  __ str(r2, MemOperand(sp, receiver_offset));
136 
137  __ bind(&ok);
138  }
139  }
140 
141  info()->set_prologue_offset(masm_->pc_offset());
142  if (NeedsEagerFrame()) {
143  if (info()->IsStub()) {
144  __ StubPrologue();
145  } else {
146  __ Prologue(info()->IsCodePreAgingActive());
147  }
148  frame_is_built_ = true;
149  info_->AddNoFrameRange(0, masm_->pc_offset());
150  }
151 
152  // Reserve space for the stack slots needed by the code.
153  int slots = GetStackSlotCount();
154  if (slots > 0) {
155  if (FLAG_debug_code) {
156  __ sub(sp, sp, Operand(slots * kPointerSize));
157  __ push(r0);
158  __ push(r1);
159  __ add(r0, sp, Operand(slots * kPointerSize));
160  __ mov(r1, Operand(kSlotsZapValue));
161  Label loop;
162  __ bind(&loop);
163  __ sub(r0, r0, Operand(kPointerSize));
164  __ str(r1, MemOperand(r0, 2 * kPointerSize));
165  __ cmp(r0, sp);
166  __ b(ne, &loop);
167  __ pop(r1);
168  __ pop(r0);
169  } else {
170  __ sub(sp, sp, Operand(slots * kPointerSize));
171  }
172  }
173 
174  if (info()->saves_caller_doubles()) {
175  SaveCallerDoubles();
176  }
177 
178  // Possibly allocate a local context.
179  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
180  if (heap_slots > 0) {
181  Comment(";;; Allocate local context");
182  bool need_write_barrier = true;
183  // Argument to NewContext is the function, which is in r1.
184  if (heap_slots <= FastNewContextStub::kMaximumSlots) {
185  FastNewContextStub stub(isolate(), heap_slots);
186  __ CallStub(&stub);
187  // Result of FastNewContextStub is always in new space.
188  need_write_barrier = false;
189  } else {
190  __ push(r1);
191  __ CallRuntime(Runtime::kNewFunctionContext, 1);
192  }
193  RecordSafepoint(Safepoint::kNoLazyDeopt);
194  // Context is returned in both r0 and cp. It replaces the context
195  // passed to us. It's saved in the stack and kept live in cp.
196  __ mov(cp, r0);
197  __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
198  // Copy any necessary parameters into the context.
199  int num_parameters = scope()->num_parameters();
200  for (int i = 0; i < num_parameters; i++) {
201  Variable* var = scope()->parameter(i);
202  if (var->IsContextSlot()) {
203  int parameter_offset = StandardFrameConstants::kCallerSPOffset +
204  (num_parameters - 1 - i) * kPointerSize;
205  // Load parameter from stack.
206  __ ldr(r0, MemOperand(fp, parameter_offset));
207  // Store it in the context.
208  MemOperand target = ContextOperand(cp, var->index());
209  __ str(r0, target);
210  // Update the write barrier. This clobbers r3 and r0.
211  if (need_write_barrier) {
212  __ RecordWriteContextSlot(
213  cp,
214  target.offset(),
215  r0,
216  r3,
217  GetLinkRegisterState(),
218  kSaveFPRegs);
219  } else if (FLAG_debug_code) {
220  Label done;
221  __ JumpIfInNewSpace(cp, r0, &done);
222  __ Abort(kExpectedNewSpaceObject);
223  __ bind(&done);
224  }
225  }
226  }
227  Comment(";;; End allocate local context");
228  }
229 
230  // Trace the call.
231  if (FLAG_trace && info()->IsOptimizing()) {
232  // We have not executed any compiled code yet, so cp still holds the
233  // incoming context.
234  __ CallRuntime(Runtime::kTraceEnter, 0);
235  }
236  return !is_aborted();
237 }
238 
239 
240 void LCodeGen::GenerateOsrPrologue() {
241  // Generate the OSR entry prologue at the first unknown OSR value, or if there
242  // are none, at the OSR entrypoint instruction.
243  if (osr_pc_offset_ >= 0) return;
244 
245  osr_pc_offset_ = masm()->pc_offset();
246 
247  // Adjust the frame size, subsuming the unoptimized frame into the
248  // optimized frame.
249  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
250  DCHECK(slots >= 0);
251  __ sub(sp, sp, Operand(slots * kPointerSize));
252 }
253 
254 
255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
256  if (instr->IsCall()) {
257  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
258  }
259  if (!instr->IsLazyBailout() && !instr->IsGap()) {
260  safepoints_.BumpLastLazySafepointIndex();
261  }
262 }
263 
264 
265 bool LCodeGen::GenerateDeferredCode() {
266  DCHECK(is_generating());
267  if (deferred_.length() > 0) {
268  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
269  LDeferredCode* code = deferred_[i];
270 
271  HValue* value =
272  instructions_->at(code->instruction_index())->hydrogen_value();
273  RecordAndWritePosition(
274  chunk()->graph()->SourcePositionToScriptPosition(value->position()));
275 
276  Comment(";;; <@%d,#%d> "
277  "-------------------- Deferred %s --------------------",
278  code->instruction_index(),
279  code->instr()->hydrogen_value()->id(),
280  code->instr()->Mnemonic());
281  __ bind(code->entry());
282  if (NeedsDeferredFrame()) {
283  Comment(";;; Build frame");
284  DCHECK(!frame_is_built_);
285  DCHECK(info()->IsStub());
286  frame_is_built_ = true;
287  __ PushFixedFrame();
288  __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
289  __ push(scratch0());
290  __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
291  Comment(";;; Deferred code");
292  }
293  code->Generate();
294  if (NeedsDeferredFrame()) {
295  Comment(";;; Destroy frame");
296  DCHECK(frame_is_built_);
297  __ pop(ip);
298  __ PopFixedFrame();
299  frame_is_built_ = false;
300  }
301  __ jmp(code->exit());
302  }
303  }
304 
305  // Force constant pool emission at the end of the deferred code to make
306  // sure that no constant pools are emitted after.
307  masm()->CheckConstPool(true, false);
308 
309  return !is_aborted();
310 }
311 
312 
313 bool LCodeGen::GenerateJumpTable() {
314  // Check that the jump table is accessible from everywhere in the function
315  // code, i.e. that offsets to the table can be encoded in the 24bit signed
316  // immediate of a branch instruction.
317  // To simplify we consider the code size from the first instruction to the
318  // end of the jump table. We also don't consider the pc load delta.
319  // Each entry in the jump table generates one instruction and inlines one
320  // 32bit data after it.
321  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
322  jump_table_.length() * 7)) {
323  Abort(kGeneratedCodeIsTooLarge);
324  }
325 
326  if (jump_table_.length() > 0) {
327  Label needs_frame, call_deopt_entry;
328 
329  Comment(";;; -------------------- Jump table --------------------");
330  Address base = jump_table_[0].address;
331 
332  Register entry_offset = scratch0();
333 
334  int length = jump_table_.length();
335  for (int i = 0; i < length; i++) {
336  Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
337  __ bind(&table_entry->label);
338 
339  DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
340  Address entry = table_entry->address;
341  DeoptComment(table_entry->reason);
342 
343  // Second-level deopt table entries are contiguous and small, so instead
344  // of loading the full, absolute address of each one, load an immediate
345  // offset which will be added to the base address later.
346  __ mov(entry_offset, Operand(entry - base));
347 
348  if (table_entry->needs_frame) {
349  DCHECK(!info()->saves_caller_doubles());
350  if (needs_frame.is_bound()) {
351  __ b(&needs_frame);
352  } else {
353  __ bind(&needs_frame);
354  Comment(";;; call deopt with frame");
355  __ PushFixedFrame();
356  // This variant of deopt can only be used with stubs. Since we don't
357  // have a function pointer to install in the stack frame that we're
358  // building, install a special marker there instead.
359  DCHECK(info()->IsStub());
360  __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
361  __ push(ip);
362  __ add(fp, sp,
363  Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
364  __ bind(&call_deopt_entry);
365  // Add the base address to the offset previously loaded in
366  // entry_offset.
367  __ add(entry_offset, entry_offset,
368  Operand(ExternalReference::ForDeoptEntry(base)));
369  __ blx(entry_offset);
370  }
371 
372  masm()->CheckConstPool(false, false);
373  } else {
374  // The last entry can fall through into `call_deopt_entry`, avoiding a
375  // branch.
376  bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
377 
378  if (need_branch) __ b(&call_deopt_entry);
379 
380  masm()->CheckConstPool(false, !need_branch);
381  }
382  }
383 
384  if (!call_deopt_entry.is_bound()) {
385  Comment(";;; call deopt");
386  __ bind(&call_deopt_entry);
387 
388  if (info()->saves_caller_doubles()) {
389  DCHECK(info()->IsStub());
390  RestoreCallerDoubles();
391  }
392 
393  // Add the base address to the offset previously loaded in entry_offset.
394  __ add(entry_offset, entry_offset,
395  Operand(ExternalReference::ForDeoptEntry(base)));
396  __ blx(entry_offset);
397  }
398  }
399 
400  // Force constant pool emission at the end of the deopt jump table to make
401  // sure that no constant pools are emitted after.
402  masm()->CheckConstPool(true, false);
403 
404  // The deoptimization jump table is the last part of the instruction
405  // sequence. Mark the generated code as done unless we bailed out.
406  if (!is_aborted()) status_ = DONE;
407  return !is_aborted();
408 }
409 
410 
411 bool LCodeGen::GenerateSafepointTable() {
412  DCHECK(is_done());
413  safepoints_.Emit(masm(), GetStackSlotCount());
414  return !is_aborted();
415 }
416 
417 
418 Register LCodeGen::ToRegister(int index) const {
419  return Register::FromAllocationIndex(index);
420 }
421 
422 
423 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
424  return DwVfpRegister::FromAllocationIndex(index);
425 }
426 
427 
428 Register LCodeGen::ToRegister(LOperand* op) const {
429  DCHECK(op->IsRegister());
430  return ToRegister(op->index());
431 }
432 
433 
434 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
435  if (op->IsRegister()) {
436  return ToRegister(op->index());
437  } else if (op->IsConstantOperand()) {
438  LConstantOperand* const_op = LConstantOperand::cast(op);
439  HConstant* constant = chunk_->LookupConstant(const_op);
440  Handle<Object> literal = constant->handle(isolate());
441  Representation r = chunk_->LookupLiteralRepresentation(const_op);
442  if (r.IsInteger32()) {
443  DCHECK(literal->IsNumber());
444  __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
445  } else if (r.IsDouble()) {
446  Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
447  } else {
448  DCHECK(r.IsSmiOrTagged());
449  __ Move(scratch, literal);
450  }
451  return scratch;
452  } else if (op->IsStackSlot()) {
453  __ ldr(scratch, ToMemOperand(op));
454  return scratch;
455  }
456  UNREACHABLE();
457  return scratch;
458 }
459 
460 
461 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
462  DCHECK(op->IsDoubleRegister());
463  return ToDoubleRegister(op->index());
464 }
465 
466 
467 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
468  SwVfpRegister flt_scratch,
469  DwVfpRegister dbl_scratch) {
470  if (op->IsDoubleRegister()) {
471  return ToDoubleRegister(op->index());
472  } else if (op->IsConstantOperand()) {
473  LConstantOperand* const_op = LConstantOperand::cast(op);
474  HConstant* constant = chunk_->LookupConstant(const_op);
475  Handle<Object> literal = constant->handle(isolate());
476  Representation r = chunk_->LookupLiteralRepresentation(const_op);
477  if (r.IsInteger32()) {
478  DCHECK(literal->IsNumber());
479  __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
480  __ vmov(flt_scratch, ip);
481  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
482  return dbl_scratch;
483  } else if (r.IsDouble()) {
484  Abort(kUnsupportedDoubleImmediate);
485  } else if (r.IsTagged()) {
486  Abort(kUnsupportedTaggedImmediate);
487  }
488  } else if (op->IsStackSlot()) {
489  // TODO(regis): Why is vldr not taking a MemOperand?
490  // __ vldr(dbl_scratch, ToMemOperand(op));
491  MemOperand mem_op = ToMemOperand(op);
492  __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
493  return dbl_scratch;
494  }
495  UNREACHABLE();
496  return dbl_scratch;
497 }
498 
499 
500 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
501  HConstant* constant = chunk_->LookupConstant(op);
502  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
503  return constant->handle(isolate());
504 }
505 
506 
507 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
508  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
509 }
510 
511 
512 bool LCodeGen::IsSmi(LConstantOperand* op) const {
513  return chunk_->LookupLiteralRepresentation(op).IsSmi();
514 }
515 
516 
517 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
518  return ToRepresentation(op, Representation::Integer32());
519 }
520 
521 
522 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
523  const Representation& r) const {
524  HConstant* constant = chunk_->LookupConstant(op);
525  int32_t value = constant->Integer32Value();
526  if (r.IsInteger32()) return value;
527  DCHECK(r.IsSmiOrTagged());
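  // On 32-bit ARM a Smi stores its value in the upper 31 bits with a zero tag
  // bit, so the returned bit pattern is simply value << 1.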
528  return reinterpret_cast<int32_t>(Smi::FromInt(value));
529 }
530 
531 
532 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
533  HConstant* constant = chunk_->LookupConstant(op);
534  return Smi::FromInt(constant->Integer32Value());
535 }
536 
537 
538 double LCodeGen::ToDouble(LConstantOperand* op) const {
539  HConstant* constant = chunk_->LookupConstant(op);
540  DCHECK(constant->HasDoubleValue());
541  return constant->DoubleValue();
542 }
543 
544 
545 Operand LCodeGen::ToOperand(LOperand* op) {
546  if (op->IsConstantOperand()) {
547  LConstantOperand* const_op = LConstantOperand::cast(op);
548  HConstant* constant = chunk()->LookupConstant(const_op);
549  Representation r = chunk_->LookupLiteralRepresentation(const_op);
550  if (r.IsSmi()) {
551  DCHECK(constant->HasSmiValue());
552  return Operand(Smi::FromInt(constant->Integer32Value()));
553  } else if (r.IsInteger32()) {
554  DCHECK(constant->HasInteger32Value());
555  return Operand(constant->Integer32Value());
556  } else if (r.IsDouble()) {
557  Abort(kToOperandUnsupportedDoubleImmediate);
558  }
559  DCHECK(r.IsTagged());
560  return Operand(constant->handle(isolate()));
561  } else if (op->IsRegister()) {
562  return Operand(ToRegister(op));
563  } else if (op->IsDoubleRegister()) {
564  Abort(kToOperandIsDoubleRegisterUnimplemented);
565  return Operand::Zero();
566  }
567  // Stack slots not implemented, use ToMemOperand instead.
568  UNREACHABLE();
569  return Operand::Zero();
570 }
571 
572 
573 static int ArgumentsOffsetWithoutFrame(int index) {
574  DCHECK(index < 0);
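  // e.g. index == -1 maps to offset 0 (the slot at sp), index == -2 to
  // kPointerSize, and so on.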
575  return -(index + 1) * kPointerSize;
576 }
577 
578 
579 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
580  DCHECK(!op->IsRegister());
581  DCHECK(!op->IsDoubleRegister());
582  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
583  if (NeedsEagerFrame()) {
584  return MemOperand(fp, StackSlotOffset(op->index()));
585  } else {
586  // Retrieve parameter without eager stack-frame relative to the
587  // stack-pointer.
588  return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
589  }
590 }
591 
592 
593 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
594  DCHECK(op->IsDoubleStackSlot());
595  if (NeedsEagerFrame()) {
596  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
597  } else {
598  // Retrieve parameter without eager stack-frame relative to the
599  // stack-pointer.
600  return MemOperand(
601  sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
602  }
603 }
604 
605 
606 void LCodeGen::WriteTranslation(LEnvironment* environment,
607  Translation* translation) {
608  if (environment == NULL) return;
609 
610  // The translation includes one command per value in the environment.
611  int translation_size = environment->translation_size();
612  // The output frame height does not include the parameters.
613  int height = translation_size - environment->parameter_count();
614 
615  WriteTranslation(environment->outer(), translation);
616  bool has_closure_id = !info()->closure().is_null() &&
617  !info()->closure().is_identical_to(environment->closure());
618  int closure_id = has_closure_id
619  ? DefineDeoptimizationLiteral(environment->closure())
620  : Translation::kSelfLiteralId;
621 
622  switch (environment->frame_type()) {
623  case JS_FUNCTION:
624  translation->BeginJSFrame(environment->ast_id(), closure_id, height);
625  break;
626  case JS_CONSTRUCT:
627  translation->BeginConstructStubFrame(closure_id, translation_size);
628  break;
629  case JS_GETTER:
630  DCHECK(translation_size == 1);
631  DCHECK(height == 0);
632  translation->BeginGetterStubFrame(closure_id);
633  break;
634  case JS_SETTER:
635  DCHECK(translation_size == 2);
636  DCHECK(height == 0);
637  translation->BeginSetterStubFrame(closure_id);
638  break;
639  case STUB:
640  translation->BeginCompiledStubFrame();
641  break;
642  case ARGUMENTS_ADAPTOR:
643  translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
644  break;
645  }
646 
647  int object_index = 0;
648  int dematerialized_index = 0;
649  for (int i = 0; i < translation_size; ++i) {
650  LOperand* value = environment->values()->at(i);
651  AddToTranslation(environment,
652  translation,
653  value,
654  environment->HasTaggedValueAt(i),
655  environment->HasUint32ValueAt(i),
656  &object_index,
657  &dematerialized_index);
658  }
659 }
660 
661 
662 void LCodeGen::AddToTranslation(LEnvironment* environment,
663  Translation* translation,
664  LOperand* op,
665  bool is_tagged,
666  bool is_uint32,
667  int* object_index_pointer,
668  int* dematerialized_index_pointer) {
669  if (op == LEnvironment::materialization_marker()) {
670  int object_index = (*object_index_pointer)++;
671  if (environment->ObjectIsDuplicateAt(object_index)) {
672  int dupe_of = environment->ObjectDuplicateOfAt(object_index);
673  translation->DuplicateObject(dupe_of);
674  return;
675  }
676  int object_length = environment->ObjectLengthAt(object_index);
677  if (environment->ObjectIsArgumentsAt(object_index)) {
678  translation->BeginArgumentsObject(object_length);
679  } else {
680  translation->BeginCapturedObject(object_length);
681  }
682  int dematerialized_index = *dematerialized_index_pointer;
683  int env_offset = environment->translation_size() + dematerialized_index;
684  *dematerialized_index_pointer += object_length;
685  for (int i = 0; i < object_length; ++i) {
686  LOperand* value = environment->values()->at(env_offset + i);
687  AddToTranslation(environment,
688  translation,
689  value,
690  environment->HasTaggedValueAt(env_offset + i),
691  environment->HasUint32ValueAt(env_offset + i),
692  object_index_pointer,
693  dematerialized_index_pointer);
694  }
695  return;
696  }
697 
698  if (op->IsStackSlot()) {
699  if (is_tagged) {
700  translation->StoreStackSlot(op->index());
701  } else if (is_uint32) {
702  translation->StoreUint32StackSlot(op->index());
703  } else {
704  translation->StoreInt32StackSlot(op->index());
705  }
706  } else if (op->IsDoubleStackSlot()) {
707  translation->StoreDoubleStackSlot(op->index());
708  } else if (op->IsRegister()) {
709  Register reg = ToRegister(op);
710  if (is_tagged) {
711  translation->StoreRegister(reg);
712  } else if (is_uint32) {
713  translation->StoreUint32Register(reg);
714  } else {
715  translation->StoreInt32Register(reg);
716  }
717  } else if (op->IsDoubleRegister()) {
718  DoubleRegister reg = ToDoubleRegister(op);
719  translation->StoreDoubleRegister(reg);
720  } else if (op->IsConstantOperand()) {
721  HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
722  int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
723  translation->StoreLiteral(src_index);
724  } else {
725  UNREACHABLE();
726  }
727 }
728 
729 
730 int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
731  int size = masm()->CallSize(code, mode);
732  if (code->kind() == Code::BINARY_OP_IC ||
733  code->kind() == Code::COMPARE_IC) {
734  size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric.
735  }
736  return size;
737 }
738 
739 
740 void LCodeGen::CallCode(Handle<Code> code,
741  RelocInfo::Mode mode,
742  LInstruction* instr,
743  TargetAddressStorageMode storage_mode) {
744  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
745 }
746 
747 
748 void LCodeGen::CallCodeGeneric(Handle<Code> code,
749  RelocInfo::Mode mode,
750  LInstruction* instr,
751  SafepointMode safepoint_mode,
752  TargetAddressStorageMode storage_mode) {
753  DCHECK(instr != NULL);
754  // Block literal pool emission to ensure nop indicating no inlined smi code
755  // is in the correct position.
756  Assembler::BlockConstPoolScope block_const_pool(masm());
757  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
758  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
759 
760  // Signal that we don't inline smi code before these stubs in the
761  // optimizing code generator.
762  if (code->kind() == Code::BINARY_OP_IC ||
763  code->kind() == Code::COMPARE_IC) {
764  __ nop();
765  }
766 }
767 
768 
769 void LCodeGen::CallRuntime(const Runtime::Function* function,
770  int num_arguments,
771  LInstruction* instr,
772  SaveFPRegsMode save_doubles) {
773  DCHECK(instr != NULL);
774 
775  __ CallRuntime(function, num_arguments, save_doubles);
776 
777  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
778 }
779 
780 
781 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
782  if (context->IsRegister()) {
783  __ Move(cp, ToRegister(context));
784  } else if (context->IsStackSlot()) {
785  __ ldr(cp, ToMemOperand(context));
786  } else if (context->IsConstantOperand()) {
787  HConstant* constant =
788  chunk_->LookupConstant(LConstantOperand::cast(context));
789  __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
790  } else {
791  UNREACHABLE();
792  }
793 }
794 
795 
796 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
797  int argc,
798  LInstruction* instr,
799  LOperand* context) {
800  LoadContextFromDeferred(context);
801  __ CallRuntimeSaveDoubles(id);
802  RecordSafepointWithRegisters(
803  instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
804 }
805 
806 
807 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
808  Safepoint::DeoptMode mode) {
809  environment->set_has_been_used();
810  if (!environment->HasBeenRegistered()) {
811  // Physical stack frame layout:
812  // -x ............. -4 0 ..................................... y
813  // [incoming arguments] [spill slots] [pushed outgoing arguments]
814 
815  // Layout of the environment:
816  // 0 ..................................................... size-1
817  // [parameters] [locals] [expression stack including arguments]
818 
819  // Layout of the translation:
820  // 0 ........................................................ size - 1 + 4
821  // [expression stack including arguments] [locals] [4 words] [parameters]
822  // |>------------ translation_size ------------<|
823 
824  int frame_count = 0;
825  int jsframe_count = 0;
826  for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
827  ++frame_count;
828  if (e->frame_type() == JS_FUNCTION) {
829  ++jsframe_count;
830  }
831  }
832  Translation translation(&translations_, frame_count, jsframe_count, zone());
833  WriteTranslation(environment, &translation);
834  int deoptimization_index = deoptimizations_.length();
835  int pc_offset = masm()->pc_offset();
836  environment->Register(deoptimization_index,
837  translation.index(),
838  (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
839  deoptimizations_.Add(environment, zone());
840  }
841 }
842 
843 
844 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
845  const char* detail,
846  Deoptimizer::BailoutType bailout_type) {
847  LEnvironment* environment = instr->environment();
848  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
849  DCHECK(environment->HasBeenRegistered());
850  int id = environment->deoptimization_index();
851  DCHECK(info()->IsOptimizing() || info()->IsStub());
852  Address entry =
853  Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
854  if (entry == NULL) {
855  Abort(kBailoutWasNotPrepared);
856  return;
857  }
858 
859  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
860  Register scratch = scratch0();
861  ExternalReference count = ExternalReference::stress_deopt_count(isolate());
862 
863  // Store the condition on the stack if necessary
864  if (condition != al) {
865  __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
866  __ mov(scratch, Operand(1), LeaveCC, condition);
867  __ push(scratch);
868  }
869 
870  __ push(r1);
871  __ mov(scratch, Operand(count));
872  __ ldr(r1, MemOperand(scratch));
873  __ sub(r1, r1, Operand(1), SetCC);
874  __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
875  __ str(r1, MemOperand(scratch));
876  __ pop(r1);
877 
878  if (condition != al) {
879  // Clean up the stack before the deoptimizer call
880  __ pop(scratch);
881  }
882 
883  __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
884 
885  // 'Restore' the condition in a slightly hacky way. (It would be better
886  // to use 'msr' and 'mrs' instructions here, but they are not supported by
887  // our ARM simulator).
888  if (condition != al) {
889  condition = ne;
890  __ cmp(scratch, Operand::Zero());
891  }
892  }
893 
894  if (info()->ShouldTrapOnDeopt()) {
895  __ stop("trap_on_deopt", condition);
896  }
897 
898  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
899  instr->Mnemonic(), detail);
900  DCHECK(info()->IsStub() || frame_is_built_);
901  // Go through jump table if we need to handle condition, build frame, or
902  // restore caller doubles.
903  if (condition == al && frame_is_built_ &&
904  !info()->saves_caller_doubles()) {
905  DeoptComment(reason);
906  __ Call(entry, RelocInfo::RUNTIME_ENTRY);
907  } else {
908  Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
909  !frame_is_built_);
910  // We often have several deopts to the same entry, reuse the last
911  // jump entry if this is the case.
912  if (jump_table_.is_empty() ||
913  !table_entry.IsEquivalentTo(jump_table_.last())) {
914  jump_table_.Add(table_entry, zone());
915  }
916  __ b(condition, &jump_table_.last().label);
917  }
918 }
919 
920 
921 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
922  const char* detail) {
923  Deoptimizer::BailoutType bailout_type = info()->IsStub()
924  ? Deoptimizer::LAZY
925  : Deoptimizer::EAGER;
926  DeoptimizeIf(condition, instr, detail, bailout_type);
927 }
928 
929 
930 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
931  int length = deoptimizations_.length();
932  if (length == 0) return;
933  Handle<DeoptimizationInputData> data =
934  DeoptimizationInputData::New(isolate(), length, TENURED);
935 
936  Handle<ByteArray> translations =
937  translations_.CreateByteArray(isolate()->factory());
938  data->SetTranslationByteArray(*translations);
939  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
940  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
941  if (info_->IsOptimizing()) {
942  // Reference to shared function info does not change between phases.
943  AllowDeferredHandleDereference allow_handle_dereference;
944  data->SetSharedFunctionInfo(*info_->shared_info());
945  } else {
946  data->SetSharedFunctionInfo(Smi::FromInt(0));
947  }
948 
949  Handle<FixedArray> literals =
950  factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
951  { AllowDeferredHandleDereference copy_handles;
952  for (int i = 0; i < deoptimization_literals_.length(); i++) {
953  literals->set(i, *deoptimization_literals_[i]);
954  }
955  data->SetLiteralArray(*literals);
956  }
957 
958  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
959  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
960 
961  // Populate the deoptimization entries.
962  for (int i = 0; i < length; i++) {
963  LEnvironment* env = deoptimizations_[i];
964  data->SetAstId(i, env->ast_id());
965  data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
966  data->SetArgumentsStackHeight(i,
967  Smi::FromInt(env->arguments_stack_height()));
968  data->SetPc(i, Smi::FromInt(env->pc_offset()));
969  }
970  code->set_deoptimization_data(*data);
971 }
972 
973 
974 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
975  int result = deoptimization_literals_.length();
976  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
977  if (deoptimization_literals_[i].is_identical_to(literal)) return i;
978  }
979  deoptimization_literals_.Add(literal, zone());
980  return result;
981 }
982 
983 
984 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
985  DCHECK(deoptimization_literals_.length() == 0);
986 
987  const ZoneList<Handle<JSFunction> >* inlined_closures =
988  chunk()->inlined_closures();
989 
990  for (int i = 0, length = inlined_closures->length();
991  i < length;
992  i++) {
993  DefineDeoptimizationLiteral(inlined_closures->at(i));
994  }
995 
996  inlined_function_count_ = deoptimization_literals_.length();
997 }
998 
999 
1000 void LCodeGen::RecordSafepointWithLazyDeopt(
1001  LInstruction* instr, SafepointMode safepoint_mode) {
1002  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1003  RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1004  } else {
1005  DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1006  RecordSafepointWithRegisters(
1007  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1008  }
1009 }
1010 
1011 
1012 void LCodeGen::RecordSafepoint(
1013  LPointerMap* pointers,
1014  Safepoint::Kind kind,
1015  int arguments,
1016  Safepoint::DeoptMode deopt_mode) {
1017  DCHECK(expected_safepoint_kind_ == kind);
1018 
1019  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1020  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
1021  kind, arguments, deopt_mode);
1022  for (int i = 0; i < operands->length(); i++) {
1023  LOperand* pointer = operands->at(i);
1024  if (pointer->IsStackSlot()) {
1025  safepoint.DefinePointerSlot(pointer->index(), zone());
1026  } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1027  safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1028  }
1029  }
1030  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
1031  // Register pp always contains a pointer to the constant pool.
1032  safepoint.DefinePointerRegister(pp, zone());
1033  }
1034 }
1035 
1036 
1037 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1038  Safepoint::DeoptMode deopt_mode) {
1039  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
1040 }
1041 
1042 
1043 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
1044  LPointerMap empty_pointers(zone());
1045  RecordSafepoint(&empty_pointers, deopt_mode);
1046 }
1047 
1048 
1049 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1050  int arguments,
1051  Safepoint::DeoptMode deopt_mode) {
1052  RecordSafepoint(
1053  pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
1054 }
1055 
1056 
1057 void LCodeGen::RecordAndWritePosition(int position) {
1058  if (position == RelocInfo::kNoPosition) return;
1059  masm()->positions_recorder()->RecordPosition(position);
1060  masm()->positions_recorder()->WriteRecordedPositions();
1061 }
1062 
1063 
1064 static const char* LabelType(LLabel* label) {
1065  if (label->is_loop_header()) return " (loop header)";
1066  if (label->is_osr_entry()) return " (OSR entry)";
1067  return "";
1068 }
1069 
1070 
1071 void LCodeGen::DoLabel(LLabel* label) {
1072  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1073  current_instruction_,
1074  label->hydrogen_value()->id(),
1075  label->block_id(),
1076  LabelType(label));
1077  __ bind(label->label());
1078  current_block_ = label->block_id();
1079  DoGap(label);
1080 }
1081 
1082 
1083 void LCodeGen::DoParallelMove(LParallelMove* move) {
1084  resolver_.Resolve(move);
1085 }
1086 
1087 
1088 void LCodeGen::DoGap(LGap* gap) {
1089  for (int i = LGap::FIRST_INNER_POSITION;
1090  i <= LGap::LAST_INNER_POSITION;
1091  i++) {
1092  LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1093  LParallelMove* move = gap->GetParallelMove(inner_pos);
1094  if (move != NULL) DoParallelMove(move);
1095  }
1096 }
1097 
1098 
1099 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1100  DoGap(instr);
1101 }
1102 
1103 
1104 void LCodeGen::DoParameter(LParameter* instr) {
1105  // Nothing to do.
1106 }
1107 
1108 
1109 void LCodeGen::DoCallStub(LCallStub* instr) {
1110  DCHECK(ToRegister(instr->context()).is(cp));
1111  DCHECK(ToRegister(instr->result()).is(r0));
1112  switch (instr->hydrogen()->major_key()) {
1113  case CodeStub::RegExpExec: {
1114  RegExpExecStub stub(isolate());
1115  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1116  break;
1117  }
1118  case CodeStub::SubString: {
1119  SubStringStub stub(isolate());
1120  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1121  break;
1122  }
1123  case CodeStub::StringCompare: {
1124  StringCompareStub stub(isolate());
1125  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1126  break;
1127  }
1128  default:
1129  UNREACHABLE();
1130  }
1131 }
1132 
1133 
1134 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1135  GenerateOsrPrologue();
1136 }
1137 
1138 
1139 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1140  Register dividend = ToRegister(instr->dividend());
1141  int32_t divisor = instr->divisor();
1142  DCHECK(dividend.is(ToRegister(instr->result())));
1143 
1144  // Theoretically, a variation of the branch-free code for integer division by
1145  // a power of 2 (calculating the remainder via an additional multiplication
1146  // (which gets simplified to an 'and') and subtraction) should be faster, and
1147  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1148  // indicate that positive dividends are heavily favored, so the branching
1149  // version performs better.
1150  HMod* hmod = instr->hydrogen();
1151  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
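  // |divisor| is a power of two, so this is |divisor| - 1 for either sign,
  // e.g. divisor == 8 or -8 gives mask == 7. The negative-dividend path below
  // negates, masks, and negates again so that -13 % 8 == -5, and the SetCC on
  // the final rsb flags the -0 case (e.g. -8 % 8).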
1152  Label dividend_is_not_negative, done;
1153  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1154  __ cmp(dividend, Operand::Zero());
1155  __ b(pl, &dividend_is_not_negative);
1156  // Note that this is correct even for kMinInt operands.
1157  __ rsb(dividend, dividend, Operand::Zero());
1158  __ and_(dividend, dividend, Operand(mask));
1159  __ rsb(dividend, dividend, Operand::Zero(), SetCC);
1160  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1161  DeoptimizeIf(eq, instr, "minus zero");
1162  }
1163  __ b(&done);
1164  }
1165 
1166  __ bind(&dividend_is_not_negative);
1167  __ and_(dividend, dividend, Operand(mask));
1168  __ bind(&done);
1169 }
1170 
1171 
1172 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1173  Register dividend = ToRegister(instr->dividend());
1174  int32_t divisor = instr->divisor();
1175  Register result = ToRegister(instr->result());
1176  DCHECK(!dividend.is(result));
1177 
1178  if (divisor == 0) {
1179  DeoptimizeIf(al, instr, "division by zero");
1180  return;
1181  }
1182 
1183  __ TruncatingDiv(result, dividend, Abs(divisor));
1184  __ mov(ip, Operand(Abs(divisor)));
1185  __ smull(result, ip, result, ip);
1186  __ sub(result, dividend, result, SetCC);
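  // result now holds dividend - |divisor| * (dividend / |divisor|), i.e. the
  // remainder; as with JS %, its sign follows the dividend, which is why only
  // negative dividends can produce the -0 case checked below.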
1187 
1188  // Check for negative zero.
1189  HMod* hmod = instr->hydrogen();
1190  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1191  Label remainder_not_zero;
1192  __ b(ne, &remainder_not_zero);
1193  __ cmp(dividend, Operand::Zero());
1194  DeoptimizeIf(lt, instr, "minus zero");
1195  __ bind(&remainder_not_zero);
1196  }
1197 }
1198 
1199 
1200 void LCodeGen::DoModI(LModI* instr) {
1201  HMod* hmod = instr->hydrogen();
1202  if (CpuFeatures::IsSupported(SUDIV)) {
1203  CpuFeatureScope scope(masm(), SUDIV);
1204 
1205  Register left_reg = ToRegister(instr->left());
1206  Register right_reg = ToRegister(instr->right());
1207  Register result_reg = ToRegister(instr->result());
1208 
1209  Label done;
1210  // Check for x % 0, sdiv might signal an exception. We have to deopt in this
1211  // case because we can't return a NaN.
1212  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1213  __ cmp(right_reg, Operand::Zero());
1214  DeoptimizeIf(eq, instr, "division by zero");
1215  }
1216 
1217  // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
1218  // want. We have to deopt if we care about -0, because we can't return that.
1219  if (hmod->CheckFlag(HValue::kCanOverflow)) {
1220  Label no_overflow_possible;
1221  __ cmp(left_reg, Operand(kMinInt));
1222  __ b(ne, &no_overflow_possible);
1223  __ cmp(right_reg, Operand(-1));
1224  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1225  DeoptimizeIf(eq, instr, "minus zero");
1226  } else {
1227  __ b(ne, &no_overflow_possible);
1228  __ mov(result_reg, Operand::Zero());
1229  __ jmp(&done);
1230  }
1231  __ bind(&no_overflow_possible);
1232  }
1233 
1234  // For 'r3 = r1 % r2' we can have the following ARM code:
1235  // sdiv r3, r1, r2
1236  // mls r3, r3, r2, r1
1237 
1238  __ sdiv(result_reg, left_reg, right_reg);
1239  __ Mls(result_reg, result_reg, right_reg, left_reg);
1240 
1241  // If we care about -0, test if the dividend is <0 and the result is 0.
1242  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1243  __ cmp(result_reg, Operand::Zero());
1244  __ b(ne, &done);
1245  __ cmp(left_reg, Operand::Zero());
1246  DeoptimizeIf(lt, instr, "minus zero");
1247  }
1248  __ bind(&done);
1249 
1250  } else {
1251  // General case, without any SDIV support.
1252  Register left_reg = ToRegister(instr->left());
1253  Register right_reg = ToRegister(instr->right());
1254  Register result_reg = ToRegister(instr->result());
1255  Register scratch = scratch0();
1256  DCHECK(!scratch.is(left_reg));
1257  DCHECK(!scratch.is(right_reg));
1258  DCHECK(!scratch.is(result_reg));
1259  DwVfpRegister dividend = ToDoubleRegister(instr->temp());
1260  DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
1261  DCHECK(!divisor.is(dividend));
1262  LowDwVfpRegister quotient = double_scratch0();
1263  DCHECK(!quotient.is(dividend));
1264  DCHECK(!quotient.is(divisor));
1265 
1266  Label done;
1267  // Check for x % 0, we have to deopt in this case because we can't return a
1268  // NaN.
1269  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1270  __ cmp(right_reg, Operand::Zero());
1271  DeoptimizeIf(eq, instr, "division by zero");
1272  }
1273 
1274  __ Move(result_reg, left_reg);
1275  // Load the arguments in VFP registers. The divisor value is preloaded
1276  // before. Be careful that 'right_reg' is only live on entry.
1277  // TODO(svenpanne) The last comment seems to be wrong nowadays.
1278  __ vmov(double_scratch0().low(), left_reg);
1279  __ vcvt_f64_s32(dividend, double_scratch0().low());
1280  __ vmov(double_scratch0().low(), right_reg);
1281  __ vcvt_f64_s32(divisor, double_scratch0().low());
1282 
1283  // We do not care about the sign of the divisor. Note that we still handle
1284  // the kMinInt % -1 case correctly, though.
1285  __ vabs(divisor, divisor);
1286  // Compute the quotient and round it to a 32bit integer.
1287  __ vdiv(quotient, dividend, divisor);
1288  __ vcvt_s32_f64(quotient.low(), quotient);
1289  __ vcvt_f64_s32(quotient, quotient.low());
1290 
1291  // Compute the remainder in result.
1292  __ vmul(double_scratch0(), divisor, quotient);
1293  __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
1294  __ vmov(scratch, double_scratch0().low());
1295  __ sub(result_reg, left_reg, scratch, SetCC);
1296 
1297  // If we care about -0, test if the dividend is <0 and the result is 0.
1298  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1299  __ b(ne, &done);
1300  __ cmp(left_reg, Operand::Zero());
1301  DeoptimizeIf(mi, instr, "minus zero");
1302  }
1303  __ bind(&done);
1304  }
1305 }
1306 
1307 
1308 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1309  Register dividend = ToRegister(instr->dividend());
1310  int32_t divisor = instr->divisor();
1311  Register result = ToRegister(instr->result());
1312  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1313  DCHECK(!result.is(dividend));
1314 
1315  // Check for (0 / -x) that will produce negative zero.
1316  HDiv* hdiv = instr->hydrogen();
1317  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1318  __ cmp(dividend, Operand::Zero());
1319  DeoptimizeIf(eq, instr, "minus zero");
1320  }
1321  // Check for (kMinInt / -1).
1322  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1323  __ cmp(dividend, Operand(kMinInt));
1324  DeoptimizeIf(eq, instr, "overflow");
1325  }
1326  // Deoptimize if remainder will not be 0.
1327  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1328  divisor != 1 && divisor != -1) {
1329  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1330  __ tst(dividend, Operand(mask));
1331  DeoptimizeIf(ne, instr, "lost precision");
1332  }
1333 
1334  if (divisor == -1) { // Nice shortcut, not needed for correctness.
1335  __ rsb(result, dividend, Operand(0));
1336  return;
1337  }
1338  int32_t shift = WhichPowerOf2Abs(divisor);
1339  if (shift == 0) {
1340  __ mov(result, dividend);
1341  } else if (shift == 1) {
1342  __ add(result, dividend, Operand(dividend, LSR, 31));
1343  } else {
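  // For a negative dividend the next two instructions add 2^shift - 1 before
  // the arithmetic shift at the end, so the division truncates toward zero
  // (e.g. -7 / 4 -> -1 rather than -2).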
1344  __ mov(result, Operand(dividend, ASR, 31));
1345  __ add(result, dividend, Operand(result, LSR, 32 - shift));
1346  }
1347  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1348  if (divisor < 0) __ rsb(result, result, Operand(0));
1349 }
1350 
1351 
1352 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1353  Register dividend = ToRegister(instr->dividend());
1354  int32_t divisor = instr->divisor();
1355  Register result = ToRegister(instr->result());
1356  DCHECK(!dividend.is(result));
1357 
1358  if (divisor == 0) {
1359  DeoptimizeIf(al, instr, "division by zero");
1360  return;
1361  }
1362 
1363  // Check for (0 / -x) that will produce negative zero.
1364  HDiv* hdiv = instr->hydrogen();
1365  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1366  __ cmp(dividend, Operand::Zero());
1367  DeoptimizeIf(eq, instr, "minus zero");
1368  }
1369 
1370  __ TruncatingDiv(result, dividend, Abs(divisor));
1371  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1372 
1373  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1374  __ mov(ip, Operand(divisor));
1375  __ smull(scratch0(), ip, result, ip);
1376  __ sub(scratch0(), scratch0(), dividend, SetCC);
1377  DeoptimizeIf(ne, instr, "lost precision");
1378  }
1379 }
1380 
1381 
1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1383 void LCodeGen::DoDivI(LDivI* instr) {
1384  HBinaryOperation* hdiv = instr->hydrogen();
1385  Register dividend = ToRegister(instr->dividend());
1386  Register divisor = ToRegister(instr->divisor());
1387  Register result = ToRegister(instr->result());
1388 
1389  // Check for x / 0.
1390  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1391  __ cmp(divisor, Operand::Zero());
1392  DeoptimizeIf(eq, instr, "division by zero");
1393  }
1394 
1395  // Check for (0 / -x) that will produce negative zero.
1396  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1397  Label positive;
1398  if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1399  // Do the test only if it hasn't been done above.
1400  __ cmp(divisor, Operand::Zero());
1401  }
1402  __ b(pl, &positive);
1403  __ cmp(dividend, Operand::Zero());
1404  DeoptimizeIf(eq, instr, "minus zero");
1405  __ bind(&positive);
1406  }
1407 
1408  // Check for (kMinInt / -1).
1409  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1410  (!CpuFeatures::IsSupported(SUDIV) ||
1411  !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1412  // We don't need to check for overflow when truncating with sdiv
1413  // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1414  __ cmp(dividend, Operand(kMinInt));
1415  __ cmp(divisor, Operand(-1), eq);
1416  DeoptimizeIf(eq, instr, "overflow");
1417  }
1418 
1419  if (CpuFeatures::IsSupported(SUDIV)) {
1420  CpuFeatureScope scope(masm(), SUDIV);
1421  __ sdiv(result, dividend, divisor);
1422  } else {
1423  DoubleRegister vleft = ToDoubleRegister(instr->temp());
1424  DoubleRegister vright = double_scratch0();
1425  __ vmov(double_scratch0().low(), dividend);
1426  __ vcvt_f64_s32(vleft, double_scratch0().low());
1427  __ vmov(double_scratch0().low(), divisor);
1428  __ vcvt_f64_s32(vright, double_scratch0().low());
1429  __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1430  __ vcvt_s32_f64(double_scratch0().low(), vleft);
1431  __ vmov(result, double_scratch0().low());
1432  }
1433 
1434  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1435  // Compute remainder and deopt if it's not zero.
1436  Register remainder = scratch0();
1437  __ Mls(remainder, result, divisor, dividend);
1438  __ cmp(remainder, Operand::Zero());
1439  DeoptimizeIf(ne, instr, "lost precision");
1440  }
1441 }
1442 
1443 
1444 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1445  DwVfpRegister addend = ToDoubleRegister(instr->addend());
1446  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1447  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1448 
1449  // This is computed in-place.
1450  DCHECK(addend.is(ToDoubleRegister(instr->result())));
1451 
1452  __ vmla(addend, multiplier, multiplicand);
1453 }
1454 
1455 
1456 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1457  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1458  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1459  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1460 
1461  // This is computed in-place.
1462  DCHECK(minuend.is(ToDoubleRegister(instr->result())));
1463 
1464  __ vmls(minuend, multiplier, multiplicand);
1465 }
1466 
1467 
1468 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1469  Register dividend = ToRegister(instr->dividend());
1470  Register result = ToRegister(instr->result());
1471  int32_t divisor = instr->divisor();
1472 
1473  // If the divisor is 1, return the dividend.
1474  if (divisor == 1) {
1475  __ Move(result, dividend);
1476  return;
1477  }
1478 
1479  // If the divisor is positive, things are easy: There can be no deopts and we
1480  // can simply do an arithmetic right shift.
1481  int32_t shift = WhichPowerOf2Abs(divisor);
1482  if (divisor > 1) {
1483  __ mov(result, Operand(dividend, ASR, shift));
1484  return;
1485  }
1486 
1487  // If the divisor is negative, we have to negate and handle edge cases.
1488  __ rsb(result, dividend, Operand::Zero(), SetCC);
1489  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1490  DeoptimizeIf(eq, instr, "minus zero");
1491  }
1492 
1493  // Dividing by -1 is basically negation, unless we overflow.
1494  if (divisor == -1) {
1495  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1496  DeoptimizeIf(vs, instr, "overflow");
1497  }
1498  return;
1499  }
1500 
1501  // If the negation could not overflow, simply shifting is OK.
1502  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1503  __ mov(result, Operand(result, ASR, shift));
1504  return;
1505  }
1506 
1507  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1508  __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
1509 }
1510 
1511 
1512 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1513  Register dividend = ToRegister(instr->dividend());
1514  int32_t divisor = instr->divisor();
1515  Register result = ToRegister(instr->result());
1516  DCHECK(!dividend.is(result));
1517 
1518  if (divisor == 0) {
1519  DeoptimizeIf(al, instr, "division by zero");
1520  return;
1521  }
1522 
1523  // Check for (0 / -x) that will produce negative zero.
1524  HMathFloorOfDiv* hdiv = instr->hydrogen();
1525  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1526  __ cmp(dividend, Operand::Zero());
1527  DeoptimizeIf(eq, instr, "minus zero");
1528  }
1529 
1530  // Easy case: We need no dynamic check for the dividend and the flooring
1531  // division is the same as the truncating division.
1532  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1533  (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1534  __ TruncatingDiv(result, dividend, Abs(divisor));
1535  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1536  return;
1537  }
1538 
1539  // In the general case we may need to adjust before and after the truncating
1540  // division to get a flooring division.
1541  Register temp = ToRegister(instr->temp());
1542  DCHECK(!temp.is(dividend) && !temp.is(result));
1543  Label needs_adjustment, done;
1544  __ cmp(dividend, Operand::Zero());
1545  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
1546  __ TruncatingDiv(result, dividend, Abs(divisor));
1547  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1548  __ jmp(&done);
1549  __ bind(&needs_adjustment);
1550  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1551  __ TruncatingDiv(result, temp, Abs(divisor));
1552  if (divisor < 0) __ rsb(result, result, Operand::Zero());
1553  __ sub(result, result, Operand(1));
1554  __ bind(&done);
1555 }
1556 
1557 
1558 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1559 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1560  HBinaryOperation* hdiv = instr->hydrogen();
1561  Register left = ToRegister(instr->dividend());
1562  Register right = ToRegister(instr->divisor());
1563  Register result = ToRegister(instr->result());
1564 
1565  // Check for x / 0.
1566  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1567  __ cmp(right, Operand::Zero());
1568  DeoptimizeIf(eq, instr, "division by zero");
1569  }
1570 
1571  // Check for (0 / -x) that will produce negative zero.
1572  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1573  Label positive;
1574  if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1575  // Do the test only if it hasn't been done above.
1576  __ cmp(right, Operand::Zero());
1577  }
1578  __ b(pl, &positive);
1579  __ cmp(left, Operand::Zero());
1580  DeoptimizeIf(eq, instr, "minus zero");
1581  __ bind(&positive);
1582  }
1583 
1584  // Check for (kMinInt / -1).
1585  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1586  (!CpuFeatures::IsSupported(SUDIV) ||
1587  !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1588  // We don't need to check for overflow when truncating with sdiv
1589  // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1590  __ cmp(left, Operand(kMinInt));
1591  __ cmp(right, Operand(-1), eq);
1592  DeoptimizeIf(eq, instr, "overflow");
1593  }
1594 
1595  if (CpuFeatures::IsSupported(SUDIV)) {
1596  CpuFeatureScope scope(masm(), SUDIV);
1597  __ sdiv(result, left, right);
1598  } else {
1599  DoubleRegister vleft = ToDoubleRegister(instr->temp());
1600  DoubleRegister vright = double_scratch0();
1601  __ vmov(double_scratch0().low(), left);
1602  __ vcvt_f64_s32(vleft, double_scratch0().low());
1603  __ vmov(double_scratch0().low(), right);
1604  __ vcvt_f64_s32(vright, double_scratch0().low());
1605  __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1606  __ vcvt_s32_f64(double_scratch0().low(), vleft);
1607  __ vmov(result, double_scratch0().low());
1608  }
1609 
1610  Label done;
1611  Register remainder = scratch0();
1612  __ Mls(remainder, result, right, left);
1613  __ cmp(remainder, Operand::Zero());
1614  __ b(eq, &done);
1615  __ eor(remainder, remainder, Operand(right));
1616  __ add(result, result, Operand(remainder, ASR, 31));
1617  __ bind(&done);
1618 }
1619 
1620 
1621 void LCodeGen::DoMulI(LMulI* instr) {
1622  Register result = ToRegister(instr->result());
1623  // Note that result may alias left.
1624  Register left = ToRegister(instr->left());
1625  LOperand* right_op = instr->right();
1626 
1627  bool bailout_on_minus_zero =
1628  instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1629  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1630 
1631  if (right_op->IsConstantOperand()) {
1632  int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1633 
1634  if (bailout_on_minus_zero && (constant < 0)) {
1635  // The case of a zero constant is handled separately below.
1636  // If the constant is negative and left is zero, the result should be -0.
1637  __ cmp(left, Operand::Zero());
1638  DeoptimizeIf(eq, instr, "minus zero");
1639  }
1640 
1641  switch (constant) {
1642  case -1:
1643  if (overflow) {
1644  __ rsb(result, left, Operand::Zero(), SetCC);
1645  DeoptimizeIf(vs, instr, "overflow");
1646  } else {
1647  __ rsb(result, left, Operand::Zero());
1648  }
1649  break;
1650  case 0:
1651  if (bailout_on_minus_zero) {
1652  // If left is strictly negative and the constant is null, the
1653  // result is -0. Deoptimize if required, otherwise return 0.
1654  __ cmp(left, Operand::Zero());
1655  DeoptimizeIf(mi, instr, "minus zero");
1656  }
1657  __ mov(result, Operand::Zero());
1658  break;
1659  case 1:
1660  __ Move(result, left);
1661  break;
1662  default:
1663  // Multiplying by powers of two and powers of two plus or minus
1664  // one can be done faster with shifted operands.
1665  // For other constants we emit standard code.
1666  int32_t mask = constant >> 31;
1667  uint32_t constant_abs = (constant + mask) ^ mask;
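  // Branch-free absolute value: mask is 0 for non-negative constants and -1
  // for negative ones, so (constant + mask) ^ mask == |constant|
  // (e.g. constant == -5: (-5 - 1) ^ -1 == 5).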
1668 
1669  if (base::bits::IsPowerOfTwo32(constant_abs)) {
1670  int32_t shift = WhichPowerOf2(constant_abs);
1671  __ mov(result, Operand(left, LSL, shift));
1672  // Correct the sign of the result if the constant is negative.
1673  if (constant < 0) __ rsb(result, result, Operand::Zero());
1674  } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1675  int32_t shift = WhichPowerOf2(constant_abs - 1);
1676  __ add(result, left, Operand(left, LSL, shift));
1677  // Correct the sign of the result if the constant is negative.
1678  if (constant < 0) __ rsb(result, result, Operand::Zero());
1679  } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1680  int32_t shift = WhichPowerOf2(constant_abs + 1);
1681  __ rsb(result, left, Operand(left, LSL, shift));
1682  // Correct the sign of the result if the constant is negative.
1683  if (constant < 0) __ rsb(result, result, Operand::Zero());
1684  } else {
1685  // Generate standard code.
1686  __ mov(ip, Operand(constant));
1687  __ mul(result, left, ip);
1688  }
1689  }
1690 
1691  } else {
1692  DCHECK(right_op->IsRegister());
1693  Register right = ToRegister(right_op);
1694 
1695  if (overflow) {
1696  Register scratch = scratch0();
1697  // scratch:result = left * right.
1698  if (instr->hydrogen()->representation().IsSmi()) {
1699  __ SmiUntag(result, left);
1700  __ smull(result, scratch, result, right);
1701  } else {
1702  __ smull(result, scratch, left, right);
1703  }
1704  __ cmp(scratch, Operand(result, ASR, 31));
1705  DeoptimizeIf(ne, instr, "overflow");
1706  } else {
1707  if (instr->hydrogen()->representation().IsSmi()) {
1708  __ SmiUntag(result, left);
1709  __ mul(result, result, right);
1710  } else {
1711  __ mul(result, left, right);
1712  }
1713  }
1714 
1715  if (bailout_on_minus_zero) {
1716  Label done;
1717  __ teq(left, Operand(right));
1718  __ b(pl, &done);
1719  // Bail out if the result is minus zero.
1720  __ cmp(result, Operand::Zero());
1721  DeoptimizeIf(eq, instr, "minus zero");
1722  __ bind(&done);
1723  }
1724  }
1725 }
1726 
1727 
1728 void LCodeGen::DoBitI(LBitI* instr) {
1729  LOperand* left_op = instr->left();
1730  LOperand* right_op = instr->right();
1731  DCHECK(left_op->IsRegister());
1732  Register left = ToRegister(left_op);
1733  Register result = ToRegister(instr->result());
1734  Operand right(no_reg);
1735 
1736  if (right_op->IsStackSlot()) {
1737  right = Operand(EmitLoadRegister(right_op, ip));
1738  } else {
1739  DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1740  right = ToOperand(right_op);
1741  }
1742 
1743  switch (instr->op()) {
1744  case Token::BIT_AND:
1745  __ and_(result, left, right);
1746  break;
1747  case Token::BIT_OR:
1748  __ orr(result, left, right);
1749  break;
1750  case Token::BIT_XOR:
1751  if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1752  __ mvn(result, Operand(left));
1753  } else {
1754  __ eor(result, left, right);
1755  }
1756  break;
1757  default:
1758  UNREACHABLE();
1759  break;
1760  }
1761 }
1762 
1763 
1764 void LCodeGen::DoShiftI(LShiftI* instr) {
1765  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1766  // result may alias either of them.
1767  LOperand* right_op = instr->right();
1768  Register left = ToRegister(instr->left());
1769  Register result = ToRegister(instr->result());
1770  Register scratch = scratch0();
1771  if (right_op->IsRegister()) {
1772  // Mask the right_op operand.
1773  __ and_(scratch, ToRegister(right_op), Operand(0x1F));
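  // ARM register-specified shifts use the low byte of the count register,
  // so counts of 32 and above would not wrap; masking with 0x1F enforces
  // the JavaScript modulo-32 shift semantics.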
1774  switch (instr->op()) {
1775  case Token::ROR:
1776  __ mov(result, Operand(left, ROR, scratch));
1777  break;
1778  case Token::SAR:
1779  __ mov(result, Operand(left, ASR, scratch));
1780  break;
1781  case Token::SHR:
1782  if (instr->can_deopt()) {
1783  __ mov(result, Operand(left, LSR, scratch), SetCC);
1784  DeoptimizeIf(mi, instr, "negative value");
1785  } else {
1786  __ mov(result, Operand(left, LSR, scratch));
1787  }
1788  break;
1789  case Token::SHL:
1790  __ mov(result, Operand(left, LSL, scratch));
1791  break;
1792  default:
1793  UNREACHABLE();
1794  break;
1795  }
1796  } else {
1797  // Mask the right_op operand.
1798  int value = ToInteger32(LConstantOperand::cast(right_op));
1799  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1800  switch (instr->op()) {
1801  case Token::ROR:
1802  if (shift_count != 0) {
1803  __ mov(result, Operand(left, ROR, shift_count));
1804  } else {
1805  __ Move(result, left);
1806  }
1807  break;
1808  case Token::SAR:
1809  if (shift_count != 0) {
1810  __ mov(result, Operand(left, ASR, shift_count));
1811  } else {
1812  __ Move(result, left);
1813  }
1814  break;
1815  case Token::SHR:
1816  if (shift_count != 0) {
1817  __ mov(result, Operand(left, LSR, shift_count));
1818  } else {
1819  if (instr->can_deopt()) {
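  // A zero shift count leaves the value unchanged, so a negative int32
  // input would have to become a uint32 above kMaxInt; deoptimize instead
  // of producing an unrepresentable result.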
1820  __ tst(left, Operand(0x80000000));
1821  DeoptimizeIf(ne, instr, "negative value");
1822  }
1823  __ Move(result, left);
1824  }
1825  break;
1826  case Token::SHL:
1827  if (shift_count != 0) {
1828  if (instr->hydrogen_value()->representation().IsSmi() &&
1829  instr->can_deopt()) {
1830  if (shift_count != 1) {
1831  __ mov(result, Operand(left, LSL, shift_count - 1));
1832  __ SmiTag(result, result, SetCC);
1833  } else {
1834  __ SmiTag(result, left, SetCC);
1835  }
1836  DeoptimizeIf(vs, instr, "overflow");
1837  } else {
1838  __ mov(result, Operand(left, LSL, shift_count));
1839  }
1840  } else {
1841  __ Move(result, left);
1842  }
1843  break;
1844  default:
1845  UNREACHABLE();
1846  break;
1847  }
1848  }
1849 }
1850 
1851 
1852 void LCodeGen::DoSubI(LSubI* instr) {
1853  LOperand* left = instr->left();
1854  LOperand* right = instr->right();
1855  LOperand* result = instr->result();
1856  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1857  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1858 
1859  if (right->IsStackSlot()) {
1860  Register right_reg = EmitLoadRegister(right, ip);
1861  __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1862  } else {
1863  DCHECK(right->IsRegister() || right->IsConstantOperand());
1864  __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1865  }
1866 
1867  if (can_overflow) {
1868  DeoptimizeIf(vs, instr, "overflow");
1869  }
1870 }
1871 
1872 
1873 void LCodeGen::DoRSubI(LRSubI* instr) {
1874  LOperand* left = instr->left();
1875  LOperand* right = instr->right();
1876  LOperand* result = instr->result();
1877  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1878  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1879 
1880  if (right->IsStackSlot()) {
1881  Register right_reg = EmitLoadRegister(right, ip);
1882  __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1883  } else {
1884  DCHECK(right->IsRegister() || right->IsConstantOperand());
1885  __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1886  }
1887 
1888  if (can_overflow) {
1889  DeoptimizeIf(vs, instr, "overflow");
1890  }
1891 }
1892 
1893 
1894 void LCodeGen::DoConstantI(LConstantI* instr) {
1895  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1896 }
1897 
1898 
1899 void LCodeGen::DoConstantS(LConstantS* instr) {
1900  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1901 }
1902 
1903 
1904 void LCodeGen::DoConstantD(LConstantD* instr) {
1905  DCHECK(instr->result()->IsDoubleRegister());
1906  DwVfpRegister result = ToDoubleRegister(instr->result());
1907  double v = instr->value();
1908  __ Vmov(result, v, scratch0());
1909 }
1910 
1911 
1912 void LCodeGen::DoConstantE(LConstantE* instr) {
1913  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1914 }
1915 
1916 
1917 void LCodeGen::DoConstantT(LConstantT* instr) {
1918  Handle<Object> object = instr->value(isolate());
1919  AllowDeferredHandleDereference smi_check;
1920  __ Move(ToRegister(instr->result()), object);
1921 }
1922 
1923 
1924 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1925  Register result = ToRegister(instr->result());
1926  Register map = ToRegister(instr->value());
1927  __ EnumLength(result, map);
1928 }
1929 
1930 
1931 void LCodeGen::DoDateField(LDateField* instr) {
1932  Register object = ToRegister(instr->date());
1933  Register result = ToRegister(instr->result());
1934  Register scratch = ToRegister(instr->temp());
1935  Smi* index = instr->index();
1936  Label runtime, done;
1937  DCHECK(object.is(result));
1938  DCHECK(object.is(r0));
1939  DCHECK(!scratch.is(scratch0()));
1940  DCHECK(!scratch.is(object));
1941 
1942  __ SmiTst(object);
1943  DeoptimizeIf(eq, instr, "Smi");
1944  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1945  DeoptimizeIf(ne, instr, "not a date object");
1946 
1947  if (index->value() == 0) {
1948  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1949  } else {
1950  if (index->value() < JSDate::kFirstUncachedField) {
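  // Cached date fields are only valid while the stamp stored in the JSDate
  // object matches the isolate's date cache stamp; otherwise fall through
  // to the runtime call below.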
1951  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1952  __ mov(scratch, Operand(stamp));
1953  __ ldr(scratch, MemOperand(scratch));
1954  __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1955  __ cmp(scratch, scratch0());
1956  __ b(ne, &runtime);
1957  __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1958  kPointerSize * index->value()));
1959  __ jmp(&done);
1960  }
1961  __ bind(&runtime);
1962  __ PrepareCallCFunction(2, scratch);
1963  __ mov(r1, Operand(index));
1964  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1965  __ bind(&done);
1966  }
1967 }
1968 
1969 
1970 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1971  LOperand* index,
1972  String::Encoding encoding) {
1973  if (index->IsConstantOperand()) {
1974  int offset = ToInteger32(LConstantOperand::cast(index));
1975  if (encoding == String::TWO_BYTE_ENCODING) {
1976  offset *= kUC16Size;
1977  }
1978  STATIC_ASSERT(kCharSize == 1);
1979  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1980  }
1981  Register scratch = scratch0();
1982  DCHECK(!scratch.is(string));
1983  DCHECK(!scratch.is(ToRegister(index)));
1984  if (encoding == String::ONE_BYTE_ENCODING) {
1985  __ add(scratch, string, Operand(ToRegister(index)));
1986  } else {
1987  STATIC_ASSERT(kUC16Size == 2);
1988  __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1989  }
1990  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1991 }
1992 
1993 
1994 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1995  String::Encoding encoding = instr->hydrogen()->encoding();
1996  Register string = ToRegister(instr->string());
1997  Register result = ToRegister(instr->result());
1998 
1999  if (FLAG_debug_code) {
2000  Register scratch = scratch0();
2001  __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
2002  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2003 
2004  __ and_(scratch, scratch,
2005  Operand(kStringRepresentationMask | kStringEncodingMask));
2006  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2007  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2008  __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
2009  ? one_byte_seq_type : two_byte_seq_type));
2010  __ Check(eq, kUnexpectedStringType);
2011  }
2012 
2013  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2014  if (encoding == String::ONE_BYTE_ENCODING) {
2015  __ ldrb(result, operand);
2016  } else {
2017  __ ldrh(result, operand);
2018  }
2019 }
2020 
2021 
2022 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2023  String::Encoding encoding = instr->hydrogen()->encoding();
2024  Register string = ToRegister(instr->string());
2025  Register value = ToRegister(instr->value());
2026 
2027  if (FLAG_debug_code) {
2028  Register index = ToRegister(instr->index());
2029  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2030  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2031  int encoding_mask =
2032  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2033  ? one_byte_seq_type : two_byte_seq_type;
2034  __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2035  }
2036 
2037  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2038  if (encoding == String::ONE_BYTE_ENCODING) {
2039  __ strb(value, operand);
2040  } else {
2041  __ strh(value, operand);
2042  }
2043 }
2044 
2045 
2046 void LCodeGen::DoAddI(LAddI* instr) {
2047  LOperand* left = instr->left();
2048  LOperand* right = instr->right();
2049  LOperand* result = instr->result();
2050  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2051  SBit set_cond = can_overflow ? SetCC : LeaveCC;
2052 
2053  if (right->IsStackSlot()) {
2054  Register right_reg = EmitLoadRegister(right, ip);
2055  __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
2056  } else {
2057  DCHECK(right->IsRegister() || right->IsConstantOperand());
2058  __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
2059  }
2060 
2061  if (can_overflow) {
2062  DeoptimizeIf(vs, instr, "overflow");
2063  }
2064 }
2065 
2066 
2067 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2068  LOperand* left = instr->left();
2069  LOperand* right = instr->right();
2070  HMathMinMax::Operation operation = instr->hydrogen()->operation();
2071  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2072  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2073  Register left_reg = ToRegister(left);
2074  Operand right_op = (right->IsRegister() || right->IsConstantOperand())
2075  ? ToOperand(right)
2076  : Operand(EmitLoadRegister(right, ip));
2077  Register result_reg = ToRegister(instr->result());
2078  __ cmp(left_reg, right_op);
2079  __ Move(result_reg, left_reg, condition);
2080  __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
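  // The compare plus two conditional moves select min or max without a
  // branch: the first move keeps left when the condition holds, the second
  // overwrites the result with right otherwise.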
2081  } else {
2082  DCHECK(instr->hydrogen()->representation().IsDouble());
2083  DwVfpRegister left_reg = ToDoubleRegister(left);
2084  DwVfpRegister right_reg = ToDoubleRegister(right);
2085  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2086  Label result_is_nan, return_left, return_right, check_zero, done;
2087  __ VFPCompareAndSetFlags(left_reg, right_reg);
2088  if (operation == HMathMinMax::kMathMin) {
2089  __ b(mi, &return_left);
2090  __ b(gt, &return_right);
2091  } else {
2092  __ b(mi, &return_right);
2093  __ b(gt, &return_left);
2094  }
2095  __ b(vs, &result_is_nan);
2096  // Left equals right => check for -0.
2097  __ VFPCompareAndSetFlags(left_reg, 0.0);
2098  if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2099  __ b(ne, &done); // left == right != 0.
2100  } else {
2101  __ b(ne, &return_left); // left == right != 0.
2102  }
2103  // At this point, both left and right are either 0 or -0.
2104  if (operation == HMathMinMax::kMathMin) {
2105  // We could use a single 'vorr' instruction here if we had NEON support.
2106  __ vneg(left_reg, left_reg);
2107  __ vsub(result_reg, left_reg, right_reg);
2108  __ vneg(result_reg, result_reg);
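  // This computes -((-left) - right); restricted to +0 and -0 inputs it
  // yields -0 whenever either operand is -0, i.e. it ORs the sign bits,
  // which is what a single vorr would achieve.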
2109  } else {
2110  // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2111  // the decision for vadd is easy because vand is a NEON instruction.
2112  __ vadd(result_reg, left_reg, right_reg);
2113  }
2114  __ b(&done);
2115 
2116  __ bind(&result_is_nan);
2117  __ vadd(result_reg, left_reg, right_reg);
2118  __ b(&done);
2119 
2120  __ bind(&return_right);
2121  __ Move(result_reg, right_reg);
2122  if (!left_reg.is(result_reg)) {
2123  __ b(&done);
2124  }
2125 
2126  __ bind(&return_left);
2127  __ Move(result_reg, left_reg);
2128 
2129  __ bind(&done);
2130  }
2131 }
2132 
2133 
2134 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2135  DwVfpRegister left = ToDoubleRegister(instr->left());
2136  DwVfpRegister right = ToDoubleRegister(instr->right());
2137  DwVfpRegister result = ToDoubleRegister(instr->result());
2138  switch (instr->op()) {
2139  case Token::ADD:
2140  __ vadd(result, left, right);
2141  break;
2142  case Token::SUB:
2143  __ vsub(result, left, right);
2144  break;
2145  case Token::MUL:
2146  __ vmul(result, left, right);
2147  break;
2148  case Token::DIV:
2149  __ vdiv(result, left, right);
2150  break;
2151  case Token::MOD: {
2152  __ PrepareCallCFunction(0, 2, scratch0());
2153  __ MovToFloatParameters(left, right);
2154  __ CallCFunction(
2155  ExternalReference::mod_two_doubles_operation(isolate()),
2156  0, 2);
2157  // Move the result into the double result register.
2158  __ MovFromFloatResult(result);
2159  break;
2160  }
2161  default:
2162  UNREACHABLE();
2163  break;
2164  }
2165 }
2166 
2167 
2168 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2169  DCHECK(ToRegister(instr->context()).is(cp));
2170  DCHECK(ToRegister(instr->left()).is(r1));
2171  DCHECK(ToRegister(instr->right()).is(r0));
2172  DCHECK(ToRegister(instr->result()).is(r0));
2173 
2174  Handle<Code> code =
2175  CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2176  // Block literal pool emission to ensure nop indicating no inlined smi code
2177  // is in the correct position.
2178  Assembler::BlockConstPoolScope block_const_pool(masm());
2179  CallCode(code, RelocInfo::CODE_TARGET, instr);
2180 }
2181 
2182 
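// When either destination is the next emitted block, EmitBranch emits only
// one branch and lets the other case fall through to the adjacent block.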
2183 template<class InstrType>
2184 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2185  int left_block = instr->TrueDestination(chunk_);
2186  int right_block = instr->FalseDestination(chunk_);
2187 
2188  int next_block = GetNextEmittedBlock();
2189 
2190  if (right_block == left_block || condition == al) {
2191  EmitGoto(left_block);
2192  } else if (left_block == next_block) {
2193  __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2194  } else if (right_block == next_block) {
2195  __ b(condition, chunk_->GetAssemblyLabel(left_block));
2196  } else {
2197  __ b(condition, chunk_->GetAssemblyLabel(left_block));
2198  __ b(chunk_->GetAssemblyLabel(right_block));
2199  }
2200 }
2201 
2202 
2203 template<class InstrType>
2204 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2205  int false_block = instr->FalseDestination(chunk_);
2206  __ b(condition, chunk_->GetAssemblyLabel(false_block));
2207 }
2208 
2209 
2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2211  __ stop("LBreak");
2212 }
2213 
2214 
2215 void LCodeGen::DoBranch(LBranch* instr) {
2216  Representation r = instr->hydrogen()->value()->representation();
2217  if (r.IsInteger32() || r.IsSmi()) {
2218  DCHECK(!info()->IsStub());
2219  Register reg = ToRegister(instr->value());
2220  __ cmp(reg, Operand::Zero());
2221  EmitBranch(instr, ne);
2222  } else if (r.IsDouble()) {
2223  DCHECK(!info()->IsStub());
2224  DwVfpRegister reg = ToDoubleRegister(instr->value());
2225  // Test the double value. Zero and NaN are false.
2226  __ VFPCompareAndSetFlags(reg, 0.0);
2227  __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2228  EmitBranch(instr, ne);
2229  } else {
2230  DCHECK(r.IsTagged());
2231  Register reg = ToRegister(instr->value());
2232  HType type = instr->hydrogen()->value()->type();
2233  if (type.IsBoolean()) {
2234  DCHECK(!info()->IsStub());
2235  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2236  EmitBranch(instr, eq);
2237  } else if (type.IsSmi()) {
2238  DCHECK(!info()->IsStub());
2239  __ cmp(reg, Operand::Zero());
2240  EmitBranch(instr, ne);
2241  } else if (type.IsJSArray()) {
2242  DCHECK(!info()->IsStub());
2243  EmitBranch(instr, al);
2244  } else if (type.IsHeapNumber()) {
2245  DCHECK(!info()->IsStub());
2246  DwVfpRegister dbl_scratch = double_scratch0();
2247  __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2248  // Test the double value. Zero and NaN are false.
2249  __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2250  __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN)
2251  EmitBranch(instr, ne);
2252  } else if (type.IsString()) {
2253  DCHECK(!info()->IsStub());
2254  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2255  __ cmp(ip, Operand::Zero());
2256  EmitBranch(instr, ne);
2257  } else {
2258  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2259  // Avoid deopts in the case where we've never executed this path before.
2260  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2261 
2262  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2263  // undefined -> false.
2264  __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2265  __ b(eq, instr->FalseLabel(chunk_));
2266  }
2267  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2268  // Boolean -> its value.
2269  __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2270  __ b(eq, instr->TrueLabel(chunk_));
2271  __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2272  __ b(eq, instr->FalseLabel(chunk_));
2273  }
2274  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2275  // 'null' -> false.
2276  __ CompareRoot(reg, Heap::kNullValueRootIndex);
2277  __ b(eq, instr->FalseLabel(chunk_));
2278  }
2279 
2280  if (expected.Contains(ToBooleanStub::SMI)) {
2281  // Smis: 0 -> false, all other -> true.
2282  __ cmp(reg, Operand::Zero());
2283  __ b(eq, instr->FalseLabel(chunk_));
2284  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2285  } else if (expected.NeedsMap()) {
2286  // If we need a map later and have a Smi -> deopt.
2287  __ SmiTst(reg);
2288  DeoptimizeIf(eq, instr, "Smi");
2289  }
2290 
2291  const Register map = scratch0();
2292  if (expected.NeedsMap()) {
2293  __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2294 
2295  if (expected.CanBeUndetectable()) {
2296  // Undetectable -> false.
2297  __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2298  __ tst(ip, Operand(1 << Map::kIsUndetectable));
2299  __ b(ne, instr->FalseLabel(chunk_));
2300  }
2301  }
2302 
2303  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2304  // spec object -> true.
2305  __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2306  __ b(ge, instr->TrueLabel(chunk_));
2307  }
2308 
2309  if (expected.Contains(ToBooleanStub::STRING)) {
2310  // String value -> false iff empty.
2311  Label not_string;
2312  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2313  __ b(ge, &not_string);
2314  __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2315  __ cmp(ip, Operand::Zero());
2316  __ b(ne, instr->TrueLabel(chunk_));
2317  __ b(instr->FalseLabel(chunk_));
2318  __ bind(&not_string);
2319  }
2320 
2321  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2322  // Symbol value -> true.
2323  __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2324  __ b(eq, instr->TrueLabel(chunk_));
2325  }
2326 
2327  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2328  // heap number -> false iff +0, -0, or NaN.
2329  DwVfpRegister dbl_scratch = double_scratch0();
2330  Label not_heap_number;
2331  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2332  __ b(ne, &not_heap_number);
2333  __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2334  __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2335  __ cmp(r0, r0, vs); // NaN -> false.
2336  __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2337  __ b(instr->TrueLabel(chunk_));
2338  __ bind(&not_heap_number);
2339  }
2340 
2341  if (!expected.IsGeneric()) {
2342  // We've seen something for the first time -> deopt.
2343  // This can only happen if we are not generic already.
2344  DeoptimizeIf(al, instr, "unexpected object");
2345  }
2346  }
2347  }
2348 }
2349 
2350 
2351 void LCodeGen::EmitGoto(int block) {
2352  if (!IsNextEmittedBlock(block)) {
2353  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2354  }
2355 }
2356 
2357 
2358 void LCodeGen::DoGoto(LGoto* instr) {
2359  EmitGoto(instr->block_id());
2360 }
2361 
2362 
2363 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2364  Condition cond = kNoCondition;
2365  switch (op) {
2366  case Token::EQ:
2367  case Token::EQ_STRICT:
2368  cond = eq;
2369  break;
2370  case Token::NE:
2371  case Token::NE_STRICT:
2372  cond = ne;
2373  break;
2374  case Token::LT:
2375  cond = is_unsigned ? lo : lt;
2376  break;
2377  case Token::GT:
2378  cond = is_unsigned ? hi : gt;
2379  break;
2380  case Token::LTE:
2381  cond = is_unsigned ? ls : le;
2382  break;
2383  case Token::GTE:
2384  cond = is_unsigned ? hs : ge;
2385  break;
2386  case Token::IN:
2387  case Token::INSTANCEOF:
2388  default:
2389  UNREACHABLE();
2390  }
2391  return cond;
2392 }
2393 
2394 
2395 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2396  LOperand* left = instr->left();
2397  LOperand* right = instr->right();
2398  bool is_unsigned =
2399  instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2400  instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2401  Condition cond = TokenToCondition(instr->op(), is_unsigned);
2402 
2403  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2404  // We can statically evaluate the comparison.
2405  double left_val = ToDouble(LConstantOperand::cast(left));
2406  double right_val = ToDouble(LConstantOperand::cast(right));
2407  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2408  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2409  EmitGoto(next_block);
2410  } else {
2411  if (instr->is_double()) {
2412  // Compare left and right operands as doubles and load the
2413  // resulting flags into the normal status register.
2414  __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2415  // If a NaN is involved, i.e. the result is unordered (V set),
2416  // jump to false block label.
2417  __ b(vs, instr->FalseLabel(chunk_));
2418  } else {
2419  if (right->IsConstantOperand()) {
2420  int32_t value = ToInteger32(LConstantOperand::cast(right));
2421  if (instr->hydrogen_value()->representation().IsSmi()) {
2422  __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2423  } else {
2424  __ cmp(ToRegister(left), Operand(value));
2425  }
2426  } else if (left->IsConstantOperand()) {
2427  int32_t value = ToInteger32(LConstantOperand::cast(left));
2428  if (instr->hydrogen_value()->representation().IsSmi()) {
2429  __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2430  } else {
2431  __ cmp(ToRegister(right), Operand(value));
2432  }
2433  // We commuted the operands, so commute the condition.
2434  cond = CommuteCondition(cond);
2435  } else {
2436  __ cmp(ToRegister(left), ToRegister(right));
2437  }
2438  }
2439  EmitBranch(instr, cond);
2440  }
2441 }
2442 
2443 
2444 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2445  Register left = ToRegister(instr->left());
2446  Register right = ToRegister(instr->right());
2447 
2448  __ cmp(left, Operand(right));
2449  EmitBranch(instr, eq);
2450 }
2451 
2452 
2453 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2454  if (instr->hydrogen()->representation().IsTagged()) {
2455  Register input_reg = ToRegister(instr->object());
2456  __ mov(ip, Operand(factory()->the_hole_value()));
2457  __ cmp(input_reg, ip);
2458  EmitBranch(instr, eq);
2459  return;
2460  }
2461 
2462  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2463  __ VFPCompareAndSetFlags(input_reg, input_reg);
2464  EmitFalseBranch(instr, vc);
2465 
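  // Only NaNs reach this point: the self-compare above is unordered exactly
  // for NaN inputs. The hole is told apart from other NaNs by its upper 32
  // bits, kHoleNanUpper32.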
2466  Register scratch = scratch0();
2467  __ VmovHigh(scratch, input_reg);
2468  __ cmp(scratch, Operand(kHoleNanUpper32));
2469  EmitBranch(instr, eq);
2470 }
2471 
2472 
2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2474  Representation rep = instr->hydrogen()->value()->representation();
2475  DCHECK(!rep.IsInteger32());
2476  Register scratch = ToRegister(instr->temp());
2477 
2478  if (rep.IsDouble()) {
2479  DwVfpRegister value = ToDoubleRegister(instr->value());
2480  __ VFPCompareAndSetFlags(value, 0.0);
2481  EmitFalseBranch(instr, ne);
2482  __ VmovHigh(scratch, value);
2483  __ cmp(scratch, Operand(0x80000000));
2484  } else {
2485  Register value = ToRegister(instr->value());
2486  __ CheckMap(value,
2487  scratch,
2488  Heap::kHeapNumberMapRootIndex,
2489  instr->FalseLabel(chunk()),
2490  DO_SMI_CHECK);
2491  __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2492  __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2493  __ cmp(scratch, Operand(0x80000000));
2494  __ cmp(ip, Operand(0x00000000), eq);
2495  }
2496  EmitBranch(instr, eq);
2497 }
2498 
2499 
2500 Condition LCodeGen::EmitIsObject(Register input,
2501  Register temp1,
2502  Label* is_not_object,
2503  Label* is_object) {
2504  Register temp2 = scratch0();
2505  __ JumpIfSmi(input, is_not_object);
2506 
2507  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2508  __ cmp(input, temp2);
2509  __ b(eq, is_object);
2510 
2511  // Load map.
2512  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2513  // Undetectable objects behave like undefined.
2514  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2515  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2516  __ b(ne, is_not_object);
2517 
2518  // Load instance type and check that it is in object type range.
2519  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2520  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2521  __ b(lt, is_not_object);
2522  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2523  return le;
2524 }
2525 
2526 
2527 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2528  Register reg = ToRegister(instr->value());
2529  Register temp1 = ToRegister(instr->temp());
2530 
2531  Condition true_cond =
2532  EmitIsObject(reg, temp1,
2533  instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2534 
2535  EmitBranch(instr, true_cond);
2536 }
2537 
2538 
2539 static Condition EmitIsString(Register input,
2540  Register temp1,
2541  Label* is_not_string,
2542  SmiCheck check_needed = INLINE_SMI_CHECK) {
2543  if (check_needed == INLINE_SMI_CHECK) {
2544  __ JumpIfSmi(input, is_not_string);
2545  }
2546  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2547 
2548  return lt;
2549 }
2550 
2551 
2552 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2553  Register reg = ToRegister(instr->value());
2554  Register temp1 = ToRegister(instr->temp());
2555 
2556  SmiCheck check_needed =
2557  instr->hydrogen()->value()->type().IsHeapObject()
2558  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2559  Condition true_cond =
2560  EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2561 
2562  EmitBranch(instr, true_cond);
2563 }
2564 
2565 
2566 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2567  Register input_reg = EmitLoadRegister(instr->value(), ip);
2568  __ SmiTst(input_reg);
2569  EmitBranch(instr, eq);
2570 }
2571 
2572 
2573 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2574  Register input = ToRegister(instr->value());
2575  Register temp = ToRegister(instr->temp());
2576 
2577  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2578  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2579  }
2580  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2581  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2582  __ tst(temp, Operand(1 << Map::kIsUndetectable));
2583  EmitBranch(instr, ne);
2584 }
2585 
2586 
2587 static Condition ComputeCompareCondition(Token::Value op) {
2588  switch (op) {
2589  case Token::EQ_STRICT:
2590  case Token::EQ:
2591  return eq;
2592  case Token::LT:
2593  return lt;
2594  case Token::GT:
2595  return gt;
2596  case Token::LTE:
2597  return le;
2598  case Token::GTE:
2599  return ge;
2600  default:
2601  UNREACHABLE();
2602  return kNoCondition;
2603  }
2604 }
2605 
2606 
2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2608  DCHECK(ToRegister(instr->context()).is(cp));
2609  Token::Value op = instr->op();
2610 
2611  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2612  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2613  // This instruction also signals no smi code inlined.
2614  __ cmp(r0, Operand::Zero());
2615 
2616  Condition condition = ComputeCompareCondition(op);
2617 
2618  EmitBranch(instr, condition);
2619 }
2620 
2621 
2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2623  InstanceType from = instr->from();
2624  InstanceType to = instr->to();
2625  if (from == FIRST_TYPE) return to;
2626  DCHECK(from == to || to == LAST_TYPE);
2627  return from;
2628 }
2629 
2630 
2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2632  InstanceType from = instr->from();
2633  InstanceType to = instr->to();
2634  if (from == to) return eq;
2635  if (to == LAST_TYPE) return hs;
2636  if (from == FIRST_TYPE) return ls;
2637  UNREACHABLE();
2638  return eq;
2639 }
2640 
2641 
2642 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2643  Register scratch = scratch0();
2644  Register input = ToRegister(instr->value());
2645 
2646  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2647  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2648  }
2649 
2650  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2651  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2652 }
2653 
2654 
2655 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2656  Register input = ToRegister(instr->value());
2657  Register result = ToRegister(instr->result());
2658 
2659  __ AssertString(input);
2660 
2661  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2662  __ IndexFromHash(result, result);
2663 }
2664 
2665 
2666 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2667  LHasCachedArrayIndexAndBranch* instr) {
2668  Register input = ToRegister(instr->value());
2669  Register scratch = scratch0();
2670 
2671  __ ldr(scratch,
2672  FieldMemOperand(input, String::kHashFieldOffset));
2673  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2674  EmitBranch(instr, eq);
2675 }
2676 
2677 
2678 // Branches to a label or falls through with the answer in flags. Trashes
2679 // the temp registers, but not the input.
2680 void LCodeGen::EmitClassOfTest(Label* is_true,
2681  Label* is_false,
2682  Handle<String>class_name,
2683  Register input,
2684  Register temp,
2685  Register temp2) {
2686  DCHECK(!input.is(temp));
2687  DCHECK(!input.is(temp2));
2688  DCHECK(!temp.is(temp2));
2689 
2690  __ JumpIfSmi(input, is_false);
2691 
2692  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2693  // Assuming the following assertions, we can use the same compares to test
2694  // for both being a function type and being in the object type range.
2695  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2696  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2697  FIRST_SPEC_OBJECT_TYPE + 1);
2698  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2699  LAST_SPEC_OBJECT_TYPE - 1);
2700  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2701  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2702  __ b(lt, is_false);
2703  __ b(eq, is_true);
2704  __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2705  __ b(eq, is_true);
2706  } else {
2707  // Faster code path to avoid two compares: subtract lower bound from the
2708  // actual type and do a signed compare with the width of the type range.
2709  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2710  __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2711  __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2712  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2713  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2714  __ b(gt, is_false);
2715  }
2716 
2717  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2718  // Check if the constructor in the map is a function.
2719  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2720 
2721  // Objects with a non-function constructor have class 'Object'.
2722  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2723  if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2724  __ b(ne, is_true);
2725  } else {
2726  __ b(ne, is_false);
2727  }
2728 
2729  // temp now contains the constructor function. Grab the
2730  // instance class name from there.
2731  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2732  __ ldr(temp, FieldMemOperand(temp,
2733  SharedFunctionInfo::kInstanceClassNameOffset));
2734  // The class name we are testing against is internalized since it's a literal.
2735  // The name in the constructor is internalized because of the way the context
2736  // is booted. This routine isn't expected to work for random API-created
2737  // classes and it doesn't have to because you can't access it with natives
2738  // syntax. Since both sides are internalized it is sufficient to use an
2739  // identity comparison.
2740  __ cmp(temp, Operand(class_name));
2741  // End with the answer in flags.
2742 }
2743 
2744 
2745 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2746  Register input = ToRegister(instr->value());
2747  Register temp = scratch0();
2748  Register temp2 = ToRegister(instr->temp());
2749  Handle<String> class_name = instr->hydrogen()->class_name();
2750 
2751  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2752  class_name, input, temp, temp2);
2753 
2754  EmitBranch(instr, eq);
2755 }
2756 
2757 
2758 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2759  Register reg = ToRegister(instr->value());
2760  Register temp = ToRegister(instr->temp());
2761 
2762  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2763  __ cmp(temp, Operand(instr->map()));
2764  EmitBranch(instr, eq);
2765 }
2766 
2767 
2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2769  DCHECK(ToRegister(instr->context()).is(cp));
2770  DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
2771  DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
2772 
2773  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2774  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2775 
2776  __ cmp(r0, Operand::Zero());
2777  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2778  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2779 }
2780 
2781 
2782 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2783  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2784  public:
2785  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2786  LInstanceOfKnownGlobal* instr)
2787  : LDeferredCode(codegen), instr_(instr) { }
2788  virtual void Generate() OVERRIDE {
2789  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
2790  &load_bool_);
2791  }
2792  virtual LInstruction* instr() OVERRIDE { return instr_; }
2793  Label* map_check() { return &map_check_; }
2794  Label* load_bool() { return &load_bool_; }
2795 
2796  private:
2797  LInstanceOfKnownGlobal* instr_;
2798  Label map_check_;
2799  Label load_bool_;
2800  };
2801 
2802  DeferredInstanceOfKnownGlobal* deferred;
2803  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2804 
2805  Label done, false_result;
2806  Register object = ToRegister(instr->value());
2807  Register temp = ToRegister(instr->temp());
2808  Register result = ToRegister(instr->result());
2809 
2810  // A Smi is not instance of anything.
2811  __ JumpIfSmi(object, &false_result);
2812 
2813  // This is the inlined call site instanceof cache. The two occurrences of the
2814  // hole value will be patched to the last map/result pair generated by the
2815  // instanceof stub.
2816  Label cache_miss;
2817  Register map = temp;
2818  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2819  {
2820  // Block constant pool emission to ensure the positions of instructions are
2821  // as expected by the patcher. See InstanceofStub::Generate().
2822  Assembler::BlockConstPoolScope block_const_pool(masm());
2823  __ bind(deferred->map_check()); // Label for calculating code patching.
2824  // We use Factory::the_hole_value() on purpose instead of loading from the
2825  // root array to force relocation to be able to later patch with
2826  // the cached map.
2827  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2828  __ mov(ip, Operand(Handle<Object>(cell)));
2829  __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
2830  __ cmp(map, Operand(ip));
2831  __ b(ne, &cache_miss);
2832  __ bind(deferred->load_bool()); // Label for calculating code patching.
2833  // We use Factory::the_hole_value() on purpose instead of loading from the
2834  // root array to force relocation to be able to later patch
2835  // with true or false.
2836  __ mov(result, Operand(factory()->the_hole_value()));
2837  }
2838  __ b(&done);
2839 
2840  // The inlined call site cache did not match. Check null and string before
2841  // calling the deferred code.
2842  __ bind(&cache_miss);
2843  // Null is not instance of anything.
2844  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2845  __ cmp(object, Operand(ip));
2846  __ b(eq, &false_result);
2847 
2848  // String values are not instances of anything.
2849  Condition is_string = masm_->IsObjectStringType(object, temp);
2850  __ b(is_string, &false_result);
2851 
2852  // Go to the deferred code.
2853  __ b(deferred->entry());
2854 
2855  __ bind(&false_result);
2856  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2857 
2858  // Here result has either true or false. Deferred code also produces true or
2859  // false object.
2860  __ bind(deferred->exit());
2861  __ bind(&done);
2862 }
2863 
2864 
2865 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2866  Label* map_check,
2867  Label* bool_load) {
2868  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2869  flags = static_cast<InstanceofStub::Flags>(
2870  flags | InstanceofStub::kArgsInRegisters);
2871  flags = static_cast<InstanceofStub::Flags>(
2872  flags | InstanceofStub::kCallSiteInlineCheck);
2873  flags = static_cast<InstanceofStub::Flags>(
2874  flags | InstanceofStub::kReturnTrueFalseObject);
2875  InstanceofStub stub(isolate(), flags);
2876 
2877  PushSafepointRegistersScope scope(this);
2878  LoadContextFromDeferred(instr->context());
2879 
2880  __ Move(InstanceofStub::right(), instr->function());
2881 
2882  int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
2883  int additional_delta = (call_size / Assembler::kInstrSize) + 4;
2884  // Make sure that the code size is predictable, since we use specific
2885  // constant offsets in the code to find embedded values.
2886  PredictableCodeSizeScope predictable(
2887  masm_, (additional_delta + 1) * Assembler::kInstrSize);
2888  // Make sure we don't emit any additional entries in the constant pool before
2889  // the call to ensure that the CallCodeSize() calculated the correct number of
2890  // instructions for the constant pool load.
2891  {
2892  ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
2893  int map_check_delta =
2894  masm_->InstructionsGeneratedSince(map_check) + additional_delta;
2895  int bool_load_delta =
2896  masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2897  Label before_push_delta;
2898  __ bind(&before_push_delta);
2899  __ BlockConstPoolFor(additional_delta);
2900  // r5 is used to communicate the offset to the location of the map check.
2901  __ mov(r5, Operand(map_check_delta * kPointerSize));
2902  // r6 is used to communicate the offset to the location of the bool load.
2903  __ mov(r6, Operand(bool_load_delta * kPointerSize));
2904  // The mov above can generate one or two instructions. The delta was
2905  // computed for two instructions, so we need to pad here in case of one
2906  // instruction.
2907  while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2908  __ nop();
2909  }
2910  }
2911  CallCodeGeneric(stub.GetCode(),
2912  RelocInfo::CODE_TARGET,
2913  instr,
2914  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2915  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2916  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2917  // Put the result value (r0) into the result register slot and
2918  // restore all registers.
2919  __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
2920 }
2921 
2922 
2923 void LCodeGen::DoCmpT(LCmpT* instr) {
2924  DCHECK(ToRegister(instr->context()).is(cp));
2925  Token::Value op = instr->op();
2926 
2927  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2928  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2929  // This instruction also signals no smi code inlined.
2930  __ cmp(r0, Operand::Zero());
2931 
2932  Condition condition = ComputeCompareCondition(op);
2933  __ LoadRoot(ToRegister(instr->result()),
2934  Heap::kTrueValueRootIndex,
2935  condition);
2936  __ LoadRoot(ToRegister(instr->result()),
2937  Heap::kFalseValueRootIndex,
2938  NegateCondition(condition));
2939 }
2940 
2941 
2942 void LCodeGen::DoReturn(LReturn* instr) {
2943  if (FLAG_trace && info()->IsOptimizing()) {
2944  // Push the return value on the stack as the parameter.
2945  // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2946  // managed by the register allocator and tearing down the frame, so it's
2947  // safe to write to the context register.
2948  __ push(r0);
2949  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2950  __ CallRuntime(Runtime::kTraceExit, 1);
2951  }
2952  if (info()->saves_caller_doubles()) {
2953  RestoreCallerDoubles();
2954  }
2955  int no_frame_start = -1;
2956  if (NeedsEagerFrame()) {
2957  no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2958  }
2959  { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2960  if (instr->has_constant_parameter_count()) {
2961  int parameter_count = ToInteger32(instr->constant_parameter_count());
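  // The +1 drops the receiver in addition to the parameters.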
2962  int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2963  if (sp_delta != 0) {
2964  __ add(sp, sp, Operand(sp_delta));
2965  }
2966  } else {
2967  Register reg = ToRegister(instr->parameter_count());
2968  // The argument count parameter is a smi
2969  __ SmiUntag(reg);
2970  __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2971  }
2972 
2973  __ Jump(lr);
2974 
2975  if (no_frame_start != -1) {
2976  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2977  }
2978  }
2979 }
2980 
2981 
2982 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2983  Register result = ToRegister(instr->result());
2984  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2985  __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
2986  if (instr->hydrogen()->RequiresHoleCheck()) {
2987  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2988  __ cmp(result, ip);
2989  DeoptimizeIf(eq, instr, "hole");
2990  }
2991 }
2992 
2993 
2994 template <class T>
2995 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2996  DCHECK(FLAG_vector_ics);
2997  Register vector = ToRegister(instr->temp_vector());
2999  __ Move(vector, instr->hydrogen()->feedback_vector());
3000  // No need to allocate this register.
3002  __ mov(VectorLoadICDescriptor::SlotRegister(),
3003  Operand(Smi::FromInt(instr->hydrogen()->slot())));
3004 }
3005 
3006 
3007 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3008  DCHECK(ToRegister(instr->context()).is(cp));
3009  DCHECK(ToRegister(instr->global_object())
3010  .is(LoadDescriptor::ReceiverRegister()));
3011  DCHECK(ToRegister(instr->result()).is(r0));
3012 
3013  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3014  if (FLAG_vector_ics) {
3015  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3016  }
3017  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3018  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3019  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3020 }
3021 
3022 
3023 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3024  Register value = ToRegister(instr->value());
3025  Register cell = scratch0();
3026 
3027  // Load the cell.
3028  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
3029 
3030  // If the cell we are storing to contains the hole it could have
3031  // been deleted from the property dictionary. In that case, we need
3032  // to update the property details in the property dictionary to mark
3033  // it as no longer deleted.
3034  if (instr->hydrogen()->RequiresHoleCheck()) {
3035  // We use a temp to check the payload (CompareRoot might clobber ip).
3036  Register payload = ToRegister(instr->temp());
3037  __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
3038  __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
3039  DeoptimizeIf(eq, instr, "hole");
3040  }
3041 
3042  // Store the value.
3043  __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
3044  // Cells are always rescanned, so no write barrier here.
3045 }
3046 
3047 
3048 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3049  Register context = ToRegister(instr->context());
3050  Register result = ToRegister(instr->result());
3051  __ ldr(result, ContextOperand(context, instr->slot_index()));
3052  if (instr->hydrogen()->RequiresHoleCheck()) {
3053  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3054  __ cmp(result, ip);
3055  if (instr->hydrogen()->DeoptimizesOnHole()) {
3056  DeoptimizeIf(eq, instr, "hole");
3057  } else {
3058  __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
3059  }
3060  }
3061 }
3062 
3063 
3064 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3065  Register context = ToRegister(instr->context());
3066  Register value = ToRegister(instr->value());
3067  Register scratch = scratch0();
3068  MemOperand target = ContextOperand(context, instr->slot_index());
3069 
3070  Label skip_assignment;
3071 
3072  if (instr->hydrogen()->RequiresHoleCheck()) {
3073  __ ldr(scratch, target);
3074  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3075  __ cmp(scratch, ip);
3076  if (instr->hydrogen()->DeoptimizesOnHole()) {
3077  DeoptimizeIf(eq, instr, "hole");
3078  } else {
3079  __ b(ne, &skip_assignment);
3080  }
3081  }
3082 
3083  __ str(value, target);
3084  if (instr->hydrogen()->NeedsWriteBarrier()) {
3085  SmiCheck check_needed =
3086  instr->hydrogen()->value()->type().IsHeapObject()
3087  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3088  __ RecordWriteContextSlot(context,
3089  target.offset(),
3090  value,
3091  scratch,
3092  GetLinkRegisterState(),
3093  kSaveFPRegs,
3094  EMIT_REMEMBERED_SET,
3095  check_needed);
3096  }
3097 
3098  __ bind(&skip_assignment);
3099 }
3100 
3101 
3102 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3103  HObjectAccess access = instr->hydrogen()->access();
3104  int offset = access.offset();
3105  Register object = ToRegister(instr->object());
3106 
3107  if (access.IsExternalMemory()) {
3108  Register result = ToRegister(instr->result());
3109  MemOperand operand = MemOperand(object, offset);
3110  __ Load(result, operand, access.representation());
3111  return;
3112  }
3113 
3114  if (instr->hydrogen()->representation().IsDouble()) {
3115  DwVfpRegister result = ToDoubleRegister(instr->result());
3116  __ vldr(result, FieldMemOperand(object, offset));
3117  return;
3118  }
3119 
3120  Register result = ToRegister(instr->result());
3121  if (!access.IsInobject()) {
3122  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3123  object = result;
3124  }
3125  MemOperand operand = FieldMemOperand(object, offset);
3126  __ Load(result, operand, access.representation());
3127 }
3128 
3129 
3130 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3131  DCHECK(ToRegister(instr->context()).is(cp));
3133  DCHECK(ToRegister(instr->result()).is(r0));
3134 
3135  // Name is always in r2.
3136  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3137  if (FLAG_vector_ics) {
3138  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3139  }
3140  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3141  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3142 }
3143 
3144 
3145 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3146  Register scratch = scratch0();
3147  Register function = ToRegister(instr->function());
3148  Register result = ToRegister(instr->result());
3149 
3150  // Get the prototype or initial map from the function.
3151  __ ldr(result,
3152  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3153 
3154  // Check that the function has a prototype or an initial map.
3155  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3156  __ cmp(result, ip);
3157  DeoptimizeIf(eq, instr, "hole");
3158 
3159  // If the function does not have an initial map, we're done.
3160  Label done;
3161  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3162  __ b(ne, &done);
3163 
3164  // Get the prototype from the initial map.
3165  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3166 
3167  // All done.
3168  __ bind(&done);
3169 }
3170 
3171 
3172 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3173  Register result = ToRegister(instr->result());
3174  __ LoadRoot(result, instr->index());
3175 }
3176 
3177 
3178 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3179  Register arguments = ToRegister(instr->arguments());
3180  Register result = ToRegister(instr->result());
3181  // There are two words between the frame pointer and the last argument.
3182  // Subtracting from length accounts for one of them; add one more.
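  // For example, with a constant length of 3 and a constant index of 0 the
  // element is loaded from MemOperand(arguments, (3 - 0 + 1) * kPointerSize).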
3183  if (instr->length()->IsConstantOperand()) {
3184  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3185  if (instr->index()->IsConstantOperand()) {
3186  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3187  int index = (const_length - const_index) + 1;
3188  __ ldr(result, MemOperand(arguments, index * kPointerSize));
3189  } else {
3190  Register index = ToRegister(instr->index());
3191  __ rsb(result, index, Operand(const_length + 1));
3192  __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3193  }
3194  } else if (instr->index()->IsConstantOperand()) {
3195  Register length = ToRegister(instr->length());
3196  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3197  int loc = const_index - 1;
3198  if (loc != 0) {
3199  __ sub(result, length, Operand(loc));
3200  __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3201  } else {
3202  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3203  }
3204  } else {
3205  Register length = ToRegister(instr->length());
3206  Register index = ToRegister(instr->index());
3207  __ sub(result, length, index);
3208  __ add(result, result, Operand(1));
3209  __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3210  }
3211 }
3212 
3213 
3214 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3215  Register external_pointer = ToRegister(instr->elements());
3216  Register key = no_reg;
3217  ElementsKind elements_kind = instr->elements_kind();
3218  bool key_is_constant = instr->key()->IsConstantOperand();
3219  int constant_key = 0;
3220  if (key_is_constant) {
3221  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3222  if (constant_key & 0xF0000000) {
3223  Abort(kArrayIndexConstantValueTooBig);
3224  }
3225  } else {
3226  key = ToRegister(instr->key());
3227  }
3228  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3229  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3230  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3231  int base_offset = instr->base_offset();
3232 
3233  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3234  elements_kind == FLOAT32_ELEMENTS ||
3235  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3236  elements_kind == FLOAT64_ELEMENTS) {
3237  int base_offset = instr->base_offset();
3238  DwVfpRegister result = ToDoubleRegister(instr->result());
3239  Operand operand = key_is_constant
3240  ? Operand(constant_key << element_size_shift)
3241  : Operand(key, LSL, shift_size);
3242  __ add(scratch0(), external_pointer, operand);
3243  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3244  elements_kind == FLOAT32_ELEMENTS) {
3245  __ vldr(double_scratch0().low(), scratch0(), base_offset);
3246  __ vcvt_f64_f32(result, double_scratch0().low());
3247  } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3248  __ vldr(result, scratch0(), base_offset);
3249  }
3250  } else {
3251  Register result = ToRegister(instr->result());
3252  MemOperand mem_operand = PrepareKeyedOperand(
3253  key, external_pointer, key_is_constant, constant_key,
3254  element_size_shift, shift_size, base_offset);
3255  switch (elements_kind) {
3257  case INT8_ELEMENTS:
3258  __ ldrsb(result, mem_operand);
3259  break;
3262  case UINT8_ELEMENTS:
3264  __ ldrb(result, mem_operand);
3265  break;
3267  case INT16_ELEMENTS:
3268  __ ldrsh(result, mem_operand);
3269  break;
3271  case UINT16_ELEMENTS:
3272  __ ldrh(result, mem_operand);
3273  break;
3275  case INT32_ELEMENTS:
3276  __ ldr(result, mem_operand);
3277  break;
3279  case UINT32_ELEMENTS:
3280  __ ldr(result, mem_operand);
3281  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
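  // An unsigned value of 0x80000000 or above cannot be represented as an
  // int32 result, so unless the instruction is marked uint32 the cs
  // (unsigned >=) case deoptimizes.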
3282  __ cmp(result, Operand(0x80000000));
3283  DeoptimizeIf(cs, instr, "negative value");
3284  }
3285  break;
3286  case FLOAT32_ELEMENTS:
3287  case FLOAT64_ELEMENTS:
3291  case FAST_HOLEY_ELEMENTS:
3293  case FAST_DOUBLE_ELEMENTS:
3294  case FAST_ELEMENTS:
3295  case FAST_SMI_ELEMENTS:
3296  case DICTIONARY_ELEMENTS:
3298  UNREACHABLE();
3299  break;
3300  }
3301  }
3302 }
3303 
3304 
3305 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3306  Register elements = ToRegister(instr->elements());
3307  bool key_is_constant = instr->key()->IsConstantOperand();
3308  Register key = no_reg;
3309  DwVfpRegister result = ToDoubleRegister(instr->result());
3310  Register scratch = scratch0();
3311 
3312  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3313 
3314  int base_offset = instr->base_offset();
3315  if (key_is_constant) {
3316  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3317  if (constant_key & 0xF0000000) {
3318  Abort(kArrayIndexConstantValueTooBig);
3319  }
3320  base_offset += constant_key * kDoubleSize;
3321  }
3322  __ add(scratch, elements, Operand(base_offset));
3323 
3324  if (!key_is_constant) {
3325  key = ToRegister(instr->key());
3326  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3327  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3328  __ add(scratch, scratch, Operand(key, LSL, shift_size));
3329  }
3330 
3331  __ vldr(result, scratch, 0);
3332 
3333  if (instr->hydrogen()->RequiresHoleCheck()) {
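  // The hole is a NaN with a distinguished upper word. scratch still points
  // at the double that was just loaded, so fetch its upper 32 bits and
  // compare them against kHoleNanUpper32.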
3334  __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3335  __ cmp(scratch, Operand(kHoleNanUpper32));
3336  DeoptimizeIf(eq, instr, "hole");
3337  }
3338 }
3339 
3340 
3341 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3342  Register elements = ToRegister(instr->elements());
3343  Register result = ToRegister(instr->result());
3344  Register scratch = scratch0();
3345  Register store_base = scratch;
3346  int offset = instr->base_offset();
3347 
3348  if (instr->key()->IsConstantOperand()) {
3349  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3350  offset += ToInteger32(const_operand) * kPointerSize;
3351  store_base = elements;
3352  } else {
3353  Register key = ToRegister(instr->key());
3354  // Even though the HLoadKeyed instruction forces the input
3355  // representation for the key to be an integer, the input gets replaced
3356  // during bound check elimination with the index argument to the bounds
3357  // check, which can be tagged, so that case must be handled here, too.
3358  if (instr->hydrogen()->key()->representation().IsSmi()) {
3359  __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3360  } else {
3361  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3362  }
3363  }
3364  __ ldr(result, MemOperand(store_base, offset));
3365 
3366  // Check for the hole value.
3367  if (instr->hydrogen()->RequiresHoleCheck()) {
3368  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3369  __ SmiTst(result);
3370  DeoptimizeIf(ne, instr, "not a Smi");
3371  } else {
3372  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3373  __ cmp(result, scratch);
3374  DeoptimizeIf(eq, instr, "hole");
3375  }
3376  }
3377 }
3378 
3379 
3380 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3381  if (instr->is_typed_elements()) {
3382  DoLoadKeyedExternalArray(instr);
3383  } else if (instr->hydrogen()->representation().IsDouble()) {
3384  DoLoadKeyedFixedDoubleArray(instr);
3385  } else {
3386  DoLoadKeyedFixedArray(instr);
3387  }
3388 }
3389 
3390 
3391 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3392  Register base,
3393  bool key_is_constant,
3394  int constant_key,
3395  int element_size,
3396  int shift_size,
3397  int base_offset) {
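  // A shift_size of -1 means the key is a tagged smi addressing byte-sized
  // elements: the smi tag already multiplies the index by two, so the key
  // is shifted right by one instead of left.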
3398  if (key_is_constant) {
3399  return MemOperand(base, (constant_key << element_size) + base_offset);
3400  }
3401 
3402  if (base_offset == 0) {
3403  if (shift_size >= 0) {
3404  return MemOperand(base, key, LSL, shift_size);
3405  } else {
3406  DCHECK_EQ(-1, shift_size);
3407  return MemOperand(base, key, LSR, 1);
3408  }
3409  }
3410 
3411  if (shift_size >= 0) {
3412  __ add(scratch0(), base, Operand(key, LSL, shift_size));
3413  return MemOperand(scratch0(), base_offset);
3414  } else {
3415  DCHECK_EQ(-1, shift_size);
3416  __ add(scratch0(), base, Operand(key, ASR, 1));
3417  return MemOperand(scratch0(), base_offset);
3418  }
3419 }
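// The shift_size == -1 case above arises when the key is still a tagged smi
// (shift_size = element_size_shift - kSmiTagSize) and the elements are byte
// sized (element_size_shift == 0), e.g. a uint8 external array: the key must
// then be shifted right by one to strip the smi tag, hence the LSR/ASR #1.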
3420 
3421 
3422 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3423  DCHECK(ToRegister(instr->context()).is(cp));
3426 
3427  if (FLAG_vector_ics) {
3428  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3429  }
3430 
3431  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3433 }
3434 
3435 
3436 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3437  Register scratch = scratch0();
3438  Register result = ToRegister(instr->result());
3439 
3440  if (instr->hydrogen()->from_inlined()) {
3441  __ sub(result, sp, Operand(2 * kPointerSize));
3442  } else {
3443  // Check if the calling frame is an arguments adaptor frame.
3444  Label done, adapted;
3445  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3446  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3447  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3448 
3449  // Result is the frame pointer of this frame if not adapted, or of the real
3450  // frame below the adaptor frame if adapted.
3451  __ mov(result, fp, LeaveCC, ne);
3452  __ mov(result, scratch, LeaveCC, eq);
3453  }
3454 }
3455 
3456 
3457 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3458  Register elem = ToRegister(instr->elements());
3459  Register result = ToRegister(instr->result());
3460 
3461  Label done;
3462 
3463  // If there is no arguments adaptor frame, the number of arguments is fixed.
3464  __ cmp(fp, elem);
3465  __ mov(result, Operand(scope()->num_parameters()));
3466  __ b(eq, &done);
3467 
3468  // Arguments adaptor frame present. Get argument length from there.
3469  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3470  __ ldr(result,
3471  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3472  __ SmiUntag(result);
3473 
3474  // Argument length is in result register.
3475  __ bind(&done);
3476 }
3477 
3478 
3479 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3480  Register receiver = ToRegister(instr->receiver());
3481  Register function = ToRegister(instr->function());
3482  Register result = ToRegister(instr->result());
3483  Register scratch = scratch0();
3484 
3485  // If the receiver is null or undefined, we have to pass the global
3486  // object as a receiver to normal functions. Values have to be
3487  // passed unchanged to builtins and strict-mode functions.
3488  Label global_object, result_in_receiver;
3489 
3490  if (!instr->hydrogen()->known_function()) {
3491  // Do not transform the receiver to object for strict mode
3492  // functions.
3493  __ ldr(scratch,
3495  __ ldr(scratch,
3498  __ tst(scratch, Operand(mask));
3499  __ b(ne, &result_in_receiver);
3500 
3501  // Do not transform the receiver to object for builtins.
3502  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3503  __ b(ne, &result_in_receiver);
3504  }
3505 
3506  // Normal function. Replace undefined or null with global receiver.
3507  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3508  __ cmp(receiver, scratch);
3509  __ b(eq, &global_object);
3510  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3511  __ cmp(receiver, scratch);
3512  __ b(eq, &global_object);
3513 
3514  // Deoptimize if the receiver is not a JS object.
3515  __ SmiTst(receiver);
3516  DeoptimizeIf(eq, instr, "Smi");
3517  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3518  DeoptimizeIf(lt, instr, "not a JavaScript object");
3519 
3520  __ b(&result_in_receiver);
3521  __ bind(&global_object);
3522  __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3523  __ ldr(result,
3525  __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3526 
3527  if (result.is(receiver)) {
3528  __ bind(&result_in_receiver);
3529  } else {
3530  Label result_ok;
3531  __ b(&result_ok);
3532  __ bind(&result_in_receiver);
3533  __ mov(result, receiver);
3534  __ bind(&result_ok);
3535  }
3536 }
3537 
3538 
3539 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3540  Register receiver = ToRegister(instr->receiver());
3541  Register function = ToRegister(instr->function());
3542  Register length = ToRegister(instr->length());
3543  Register elements = ToRegister(instr->elements());
3544  Register scratch = scratch0();
3545  DCHECK(receiver.is(r0)); // Used for parameter count.
3546  DCHECK(function.is(r1)); // Required by InvokeFunction.
3547  DCHECK(ToRegister(instr->result()).is(r0));
3548 
3549  // Copy the arguments to this function possibly from the
3550  // adaptor frame below it.
3551  const uint32_t kArgumentsLimit = 1 * KB;
3552  __ cmp(length, Operand(kArgumentsLimit));
3553  DeoptimizeIf(hi, instr, "too many arguments");
3554 
3555  // Push the receiver and use the register to keep the original
3556  // number of arguments.
3557  __ push(receiver);
3558  __ mov(receiver, length);
3559  // The arguments are at a one pointer size offset from elements.
3560  __ add(elements, elements, Operand(1 * kPointerSize));
3561 
3562  // Loop through the arguments pushing them onto the execution
3563  // stack.
3564  Label invoke, loop;
3565  // length is a small non-negative integer, due to the test above.
3566  __ cmp(length, Operand::Zero());
3567  __ b(eq, &invoke);
3568  __ bind(&loop);
3569  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3570  __ push(scratch);
3571  __ sub(length, length, Operand(1), SetCC);
3572  __ b(ne, &loop);
3573 
3574  __ bind(&invoke);
3575  DCHECK(instr->HasPointerMap());
3576  LPointerMap* pointers = instr->pointer_map();
3577  SafepointGenerator safepoint_generator(
3578  this, pointers, Safepoint::kLazyDeopt);
3579  // The number of arguments is stored in receiver which is r0, as expected
3580  // by InvokeFunction.
3581  ParameterCount actual(receiver);
3582  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3583 }
3584 
3585 
3586 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3587  LOperand* argument = instr->value();
3588  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3589  Abort(kDoPushArgumentNotImplementedForDoubleType);
3590  } else {
3591  Register argument_reg = EmitLoadRegister(argument, ip);
3592  __ push(argument_reg);
3593  }
3594 }
3595 
3596 
3597 void LCodeGen::DoDrop(LDrop* instr) {
3598  __ Drop(instr->count());
3599 }
3600 
3601 
3602 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3603  Register result = ToRegister(instr->result());
3604  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3605 }
3606 
3607 
3608 void LCodeGen::DoContext(LContext* instr) {
3609  // If there is a non-return use, the context must be moved to a register.
3610  Register result = ToRegister(instr->result());
3611  if (info()->IsOptimizing()) {
3612  __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3613  } else {
3614  // If there is no frame, the context must be in cp.
3615  DCHECK(result.is(cp));
3616  }
3617 }
3618 
3619 
3620 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3621  DCHECK(ToRegister(instr->context()).is(cp));
3622  __ push(cp); // The context is the first argument.
3623  __ Move(scratch0(), instr->hydrogen()->pairs());
3624  __ push(scratch0());
3625  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3626  __ push(scratch0());
3627  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3628 }
3629 
3630 
3631 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3632  int formal_parameter_count,
3633  int arity,
3634  LInstruction* instr,
3635  R1State r1_state) {
3636  bool dont_adapt_arguments =
3637  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3638  bool can_invoke_directly =
3639  dont_adapt_arguments || formal_parameter_count == arity;
3640 
3641  LPointerMap* pointers = instr->pointer_map();
3642 
3643  if (can_invoke_directly) {
3644  if (r1_state == R1_UNINITIALIZED) {
3645  __ Move(r1, function);
3646  }
3647 
3648  // Change context.
3650 
3651  // Set r0 to arguments count if adaptation is not needed. Assumes that r0
3652  // is available to write to at this point.
3653  if (dont_adapt_arguments) {
3654  __ mov(r0, Operand(arity));
3655  }
3656 
3657  // Invoke function.
3659  __ Call(ip);
3660 
3661  // Set up deoptimization.
3663  } else {
3664  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3665  ParameterCount count(arity);
3666  ParameterCount expected(formal_parameter_count);
3667  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3668  }
3669 }
3670 
3671 
3672 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3673  DCHECK(instr->context() != NULL);
3674  DCHECK(ToRegister(instr->context()).is(cp));
3675  Register input = ToRegister(instr->value());
3676  Register result = ToRegister(instr->result());
3677  Register scratch = scratch0();
3678 
3679  // Deoptimize if not a heap number.
3680  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3681  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3682  __ cmp(scratch, Operand(ip));
3683  DeoptimizeIf(ne, instr, "not a heap number");
3684 
3685  Label done;
3686  Register exponent = scratch0();
3687  scratch = no_reg;
3688  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3689  // Check the sign of the argument. If the argument is positive, just
3690  // return it.
3691  __ tst(exponent, Operand(HeapNumber::kSignMask));
3692  // Move the input to the result if necessary.
3693  __ Move(result, input);
3694  __ b(eq, &done);
3695 
3696  // Input is negative. Reverse its sign.
3697  // Preserve the value of all registers.
3698  {
3699  PushSafepointRegistersScope scope(this);
3700 
3701  // Registers were saved at the safepoint, so we can use
3702  // many scratch registers.
3703  Register tmp1 = input.is(r1) ? r0 : r1;
3704  Register tmp2 = input.is(r2) ? r0 : r2;
3705  Register tmp3 = input.is(r3) ? r0 : r3;
3706  Register tmp4 = input.is(r4) ? r0 : r4;
3707 
3708  // exponent: floating point exponent value.
3709 
3710  Label allocated, slow;
3711  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3712  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3713  __ b(&allocated);
3714 
3715  // Slow case: Call the runtime system to do the number allocation.
3716  __ bind(&slow);
3717 
3718  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3719  instr->context());
3720  // Set the pointer to the new heap number in tmp1.
3721  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3722  // Restore input_reg after call to runtime.
3723  __ LoadFromSafepointRegisterSlot(input, input);
3724  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3725 
3726  __ bind(&allocated);
3727  // exponent: floating point exponent value.
3728  // tmp1: allocated heap number.
3729  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3730  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3731  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3732  __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3733 
3734  __ StoreToSafepointRegisterSlot(tmp1, result);
3735  }
3736 
3737  __ bind(&done);
3738 }
3739 
3740 
3741 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3742  Register input = ToRegister(instr->value());
3743  Register result = ToRegister(instr->result());
3744  __ cmp(input, Operand::Zero());
3745  __ Move(result, input, pl);
3746  // The rsb can be made conditional because the preceding cmp clears the
3747  // V (overflow) flag (subtracting zero never overflows), so if the rsb is
3748  // skipped for a non-negative input the deopt check below still sees V clear.
3749  __ rsb(result, input, Operand::Zero(), SetCC, mi);
3750  // Deoptimize on overflow.
3751  DeoptimizeIf(vs, instr, "overflow");
3752 }
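// In effect the sequence above computes result = (input >= 0) ? input
// : 0 - input. The only value whose negation overflows is kMinInt
// (0x80000000), which sets V in the conditional rsb and triggers the deopt.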
3753 
3754 
3755 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3756  // Class for deferred case.
3757  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3758  public:
3759  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3760  : LDeferredCode(codegen), instr_(instr) { }
3761  virtual void Generate() OVERRIDE {
3762  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3763  }
3764  virtual LInstruction* instr() OVERRIDE { return instr_; }
3765  private:
3766  LMathAbs* instr_;
3767  };
3768 
3769  Representation r = instr->hydrogen()->value()->representation();
3770  if (r.IsDouble()) {
3771  DwVfpRegister input = ToDoubleRegister(instr->value());
3772  DwVfpRegister result = ToDoubleRegister(instr->result());
3773  __ vabs(result, input);
3774  } else if (r.IsSmiOrInteger32()) {
3775  EmitIntegerMathAbs(instr);
3776  } else {
3777  // Representation is tagged.
3778  DeferredMathAbsTaggedHeapNumber* deferred =
3779  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3780  Register input = ToRegister(instr->value());
3781  // Smi check.
3782  __ JumpIfNotSmi(input, deferred->entry());
3783  // If smi, handle it directly.
3784  EmitIntegerMathAbs(instr);
3785  __ bind(deferred->exit());
3786  }
3787 }
3788 
3789 
3790 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3791  DwVfpRegister input = ToDoubleRegister(instr->value());
3792  Register result = ToRegister(instr->result());
3793  Register input_high = scratch0();
3794  Label done, exact;
3795 
3796  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
3797  DeoptimizeIf(al, instr, "lost precision or NaN");
3798 
3799  __ bind(&exact);
3800  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3801  // Test for -0.
3802  __ cmp(result, Operand::Zero());
3803  __ b(ne, &done);
3804  __ cmp(input_high, Operand::Zero());
3805  DeoptimizeIf(mi, instr, "minus zero");
3806  }
3807  __ bind(&done);
3808 }
3809 
3810 
3811 void LCodeGen::DoMathRound(LMathRound* instr) {
3812  DwVfpRegister input = ToDoubleRegister(instr->value());
3813  Register result = ToRegister(instr->result());
3814  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3815  DwVfpRegister input_plus_dot_five = double_scratch1;
3816  Register input_high = scratch0();
3817  DwVfpRegister dot_five = double_scratch0();
3818  Label convert, done;
3819 
3820  __ Vmov(dot_five, 0.5, scratch0());
3821  __ vabs(double_scratch1, input);
3822  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3823  // If input is in [-0.5, -0], the result is -0.
3824  // If input is in [+0, +0.5[, the result is +0.
3825  // If the input is +0.5, the result is 1.
3826  __ b(hi, &convert); // Out of [-0.5, +0.5].
3827  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3828  __ VmovHigh(input_high, input);
3829  __ cmp(input_high, Operand::Zero());
3830  // [-0.5, -0].
3831  DeoptimizeIf(mi, instr, "minus zero");
3832  }
3833  __ VFPCompareAndSetFlags(input, dot_five);
3834  __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3835  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3836  // flag kBailoutOnMinusZero.
3837  __ mov(result, Operand::Zero(), LeaveCC, ne);
3838  __ b(&done);
3839 
3840  __ bind(&convert);
3841  __ vadd(input_plus_dot_five, input, dot_five);
3842  // Reuse dot_five (double_scratch0) as we no longer need this value.
3843  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3844  &done, &done);
3845  DeoptimizeIf(al, instr, "lost precision or NaN");
3846  __ bind(&done);
3847 }
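// Rounding strategy above: inputs with |x| <= 0.5 are resolved without a
// conversion so that -0, +0 and +0.5 yield the required -0, +0 and 1;
// every other input is computed as floor(x + 0.5) via TryInt32Floor.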
3848 
3849 
3850 void LCodeGen::DoMathFround(LMathFround* instr) {
3851  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
3852  DwVfpRegister output_reg = ToDoubleRegister(instr->result());
3853  LowDwVfpRegister scratch = double_scratch0();
3854  __ vcvt_f32_f64(scratch.low(), input_reg);
3855  __ vcvt_f64_f32(output_reg, scratch.low());
3856 }
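// Math.fround is implemented as a round trip through single precision:
// narrowing to float32 and widening back applies float32 rounding (and
// overflow to infinity) while keeping the result in a double register.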
3857 
3858 
3859 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3860  DwVfpRegister input = ToDoubleRegister(instr->value());
3861  DwVfpRegister result = ToDoubleRegister(instr->result());
3862  __ vsqrt(result, input);
3863 }
3864 
3865 
3866 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3867  DwVfpRegister input = ToDoubleRegister(instr->value());
3868  DwVfpRegister result = ToDoubleRegister(instr->result());
3869  DwVfpRegister temp = double_scratch0();
3870 
3871  // Note that according to ECMA-262 15.8.2.13:
3872  // Math.pow(-Infinity, 0.5) == Infinity
3873  // Math.sqrt(-Infinity) == NaN
3874  Label done;
3875  __ vmov(temp, -V8_INFINITY, scratch0());
3876  __ VFPCompareAndSetFlags(input, temp);
3877  __ vneg(result, temp, eq);
3878  __ b(&done, eq);
3879 
3880  // Add +0 to convert -0 to +0.
3881  __ vadd(result, input, kDoubleRegZero);
3882  __ vsqrt(result, result);
3883  __ bind(&done);
3884 }
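// Math.pow(x, 0.5) differs from a plain vsqrt in exactly two cases:
// x == -Infinity must give +Infinity (handled by the compare/vneg above),
// and x == -0 must give +0, which the "+0" addition ensures because
// (-0) + (+0) == +0 before the square root is taken.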
3885 
3886 
3887 void LCodeGen::DoPower(LPower* instr) {
3888  Representation exponent_type = instr->hydrogen()->right()->representation();
3889  // Having marked this as a call, we can use any registers.
3890  // Just make sure that the input/output registers are the expected ones.
3891  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3892  DCHECK(!instr->right()->IsDoubleRegister() ||
3893  ToDoubleRegister(instr->right()).is(d1));
3894  DCHECK(!instr->right()->IsRegister() ||
3895  ToRegister(instr->right()).is(tagged_exponent));
3896  DCHECK(ToDoubleRegister(instr->left()).is(d0));
3897  DCHECK(ToDoubleRegister(instr->result()).is(d2));
3898 
3899  if (exponent_type.IsSmi()) {
3900  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3901  __ CallStub(&stub);
3902  } else if (exponent_type.IsTagged()) {
3903  Label no_deopt;
3904  __ JumpIfSmi(tagged_exponent, &no_deopt);
3905  DCHECK(!r6.is(tagged_exponent));
3906  __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3907  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3908  __ cmp(r6, Operand(ip));
3909  DeoptimizeIf(ne, instr, "not a heap number");
3910  __ bind(&no_deopt);
3911  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3912  __ CallStub(&stub);
3913  } else if (exponent_type.IsInteger32()) {
3914  MathPowStub stub(isolate(), MathPowStub::INTEGER);
3915  __ CallStub(&stub);
3916  } else {
3917  DCHECK(exponent_type.IsDouble());
3918  MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3919  __ CallStub(&stub);
3920  }
3921 }
3922 
3923 
3924 void LCodeGen::DoMathExp(LMathExp* instr) {
3925  DwVfpRegister input = ToDoubleRegister(instr->value());
3926  DwVfpRegister result = ToDoubleRegister(instr->result());
3927  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3928  DwVfpRegister double_scratch2 = double_scratch0();
3929  Register temp1 = ToRegister(instr->temp1());
3930  Register temp2 = ToRegister(instr->temp2());
3931 
3932  MathExpGenerator::EmitMathExp(
3933  masm(), input, result, double_scratch1, double_scratch2,
3934  temp1, temp2, scratch0());
3935 }
3936 
3937 
3938 void LCodeGen::DoMathLog(LMathLog* instr) {
3939  __ PrepareCallCFunction(0, 1, scratch0());
3940  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3941  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3942  0, 1);
3943  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3944 }
3945 
3946 
3947 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3948  Register input = ToRegister(instr->value());
3949  Register result = ToRegister(instr->result());
3950  __ clz(result, input);
3951 }
3952 
3953 
3954 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3955  DCHECK(ToRegister(instr->context()).is(cp));
3956  DCHECK(ToRegister(instr->function()).is(r1));
3957  DCHECK(instr->HasPointerMap());
3958 
3959  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3960  if (known_function.is_null()) {
3961  LPointerMap* pointers = instr->pointer_map();
3962  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3963  ParameterCount count(instr->arity());
3964  __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
3965  } else {
3966  CallKnownFunction(known_function,
3967  instr->hydrogen()->formal_parameter_count(),
3968  instr->arity(),
3969  instr,
3971  }
3972 }
3973 
3974 
3975 void LCodeGen::DoTailCallThroughMegamorphicCache(
3976  LTailCallThroughMegamorphicCache* instr) {
3977  Register receiver = ToRegister(instr->receiver());
3978  Register name = ToRegister(instr->name());
3979  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3981  DCHECK(receiver.is(r1));
3982  DCHECK(name.is(r2));
3983 
3984  Register scratch = r3;
3985  Register extra = r4;
3986  Register extra2 = r5;
3987  Register extra3 = r6;
3988 
3989  // Important for the tail-call.
3990  bool must_teardown_frame = NeedsEagerFrame();
3991 
3992  // The probe will tail call to a handler if found.
3993  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3994  must_teardown_frame, receiver, name,
3995  scratch, extra, extra2, extra3);
3996 
3997  // Tail call to miss if we ended up here.
3998  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
3999  LoadIC::GenerateMiss(masm());
4000 }
4001 
4002 
4003 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
4004  DCHECK(ToRegister(instr->result()).is(r0));
4005 
4006  LPointerMap* pointers = instr->pointer_map();
4007  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4008 
4009  if (instr->target()->IsConstantOperand()) {
4010  LConstantOperand* target = LConstantOperand::cast(instr->target());
4011  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4012  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4013  PlatformInterfaceDescriptor* call_descriptor =
4014  instr->descriptor().platform_specific_descriptor();
4016  call_descriptor->storage_mode());
4017  } else {
4018  DCHECK(instr->target()->IsRegister());
4019  Register target = ToRegister(instr->target());
4020  generator.BeforeCall(__ CallSize(target));
4021  // Make sure we don't emit any additional entries in the constant pool
4022  // before the call to ensure that the CallCodeSize() calculated the correct
4023  // number of instructions for the constant pool load.
4024  {
4025  ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
4026  __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4027  }
4028  __ Call(target);
4029  }
4030  generator.AfterCall();
4031 }
4032 
4033 
4034 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4035  DCHECK(ToRegister(instr->function()).is(r1));
4036  DCHECK(ToRegister(instr->result()).is(r0));
4037 
4038  if (instr->hydrogen()->pass_argument_count()) {
4039  __ mov(r0, Operand(instr->arity()));
4040  }
4041 
4042  // Change context.
4044 
4045  // Load the code entry address
4047  __ Call(ip);
4048 
4050 }
4051 
4052 
4053 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4054  DCHECK(ToRegister(instr->context()).is(cp));
4055  DCHECK(ToRegister(instr->function()).is(r1));
4056  DCHECK(ToRegister(instr->result()).is(r0));
4057 
4058  int arity = instr->arity();
4059  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4060  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4061 }
4062 
4063 
4064 void LCodeGen::DoCallNew(LCallNew* instr) {
4065  DCHECK(ToRegister(instr->context()).is(cp));
4066  DCHECK(ToRegister(instr->constructor()).is(r1));
4067  DCHECK(ToRegister(instr->result()).is(r0));
4068 
4069  __ mov(r0, Operand(instr->arity()));
4070  // No cell in r2 for construct type feedback in optimized code
4071  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4072  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4073  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4074 }
4075 
4076 
4077 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4078  DCHECK(ToRegister(instr->context()).is(cp));
4079  DCHECK(ToRegister(instr->constructor()).is(r1));
4080  DCHECK(ToRegister(instr->result()).is(r0));
4081 
4082  __ mov(r0, Operand(instr->arity()));
4083  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4084  ElementsKind kind = instr->hydrogen()->elements_kind();
4085  AllocationSiteOverrideMode override_mode =
4088  : DONT_OVERRIDE;
4089 
4090  if (instr->arity() == 0) {
4091  ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4092  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4093  } else if (instr->arity() == 1) {
4094  Label done;
4095  if (IsFastPackedElementsKind(kind)) {
4096  Label packed_case;
4097  // We might need a change here (use the holey stub instead):
4098  // look at the first argument.
4099  __ ldr(r5, MemOperand(sp, 0));
4100  __ cmp(r5, Operand::Zero());
4101  __ b(eq, &packed_case);
4102 
4103  ElementsKind holey_kind = GetHoleyElementsKind(kind);
4104  ArraySingleArgumentConstructorStub stub(isolate(),
4105  holey_kind,
4106  override_mode);
4107  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4108  __ jmp(&done);
4109  __ bind(&packed_case);
4110  }
4111 
4112  ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4113  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4114  __ bind(&done);
4115  } else {
4116  ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4117  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4118  }
4119 }
4120 
4121 
4122 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4123  CallRuntime(instr->function(), instr->arity(), instr);
4124 }
4125 
4126 
4127 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4128  Register function = ToRegister(instr->function());
4129  Register code_object = ToRegister(instr->code_object());
4130  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
4131  __ str(code_object,
4133 }
4134 
4135 
4136 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4137  Register result = ToRegister(instr->result());
4138  Register base = ToRegister(instr->base_object());
4139  if (instr->offset()->IsConstantOperand()) {
4140  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4141  __ add(result, base, Operand(ToInteger32(offset)));
4142  } else {
4143  Register offset = ToRegister(instr->offset());
4144  __ add(result, base, offset);
4145  }
4146 }
4147 
4148 
4149 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4150  Representation representation = instr->representation();
4151 
4152  Register object = ToRegister(instr->object());
4153  Register scratch = scratch0();
4154  HObjectAccess access = instr->hydrogen()->access();
4155  int offset = access.offset();
4156 
4157  if (access.IsExternalMemory()) {
4158  Register value = ToRegister(instr->value());
4159  MemOperand operand = MemOperand(object, offset);
4160  __ Store(value, operand, representation);
4161  return;
4162  }
4163 
4164  __ AssertNotSmi(object);
4165 
4166  DCHECK(!representation.IsSmi() ||
4167  !instr->value()->IsConstantOperand() ||
4168  IsSmi(LConstantOperand::cast(instr->value())));
4169  if (representation.IsDouble()) {
4170  DCHECK(access.IsInobject());
4171  DCHECK(!instr->hydrogen()->has_transition());
4172  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4173  DwVfpRegister value = ToDoubleRegister(instr->value());
4174  __ vstr(value, FieldMemOperand(object, offset));
4175  return;
4176  }
4177 
4178  if (instr->hydrogen()->has_transition()) {
4179  Handle<Map> transition = instr->hydrogen()->transition_map();
4180  AddDeprecationDependency(transition);
4181  __ mov(scratch, Operand(transition));
4182  __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4183  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4184  Register temp = ToRegister(instr->temp());
4185  // Update the write barrier for the map field.
4186  __ RecordWriteForMap(object,
4187  scratch,
4188  temp,
4190  kSaveFPRegs);
4191  }
4192  }
4193 
4194  // Do the store.
4195  Register value = ToRegister(instr->value());
4196  if (access.IsInobject()) {
4197  MemOperand operand = FieldMemOperand(object, offset);
4198  __ Store(value, operand, representation);
4199  if (instr->hydrogen()->NeedsWriteBarrier()) {
4200  // Update the write barrier for the object for in-object properties.
4201  __ RecordWriteField(object,
4202  offset,
4203  value,
4204  scratch,
4206  kSaveFPRegs,
4208  instr->hydrogen()->SmiCheckForWriteBarrier(),
4209  instr->hydrogen()->PointersToHereCheckForValue());
4210  }
4211  } else {
4212  __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4213  MemOperand operand = FieldMemOperand(scratch, offset);
4214  __ Store(value, operand, representation);
4215  if (instr->hydrogen()->NeedsWriteBarrier()) {
4216  // Update the write barrier for the properties array.
4217  // object is used as a scratch register.
4218  __ RecordWriteField(scratch,
4219  offset,
4220  value,
4221  object,
4223  kSaveFPRegs,
4225  instr->hydrogen()->SmiCheckForWriteBarrier(),
4226  instr->hydrogen()->PointersToHereCheckForValue());
4227  }
4228  }
4229 }
4230 
4231 
4232 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4233  DCHECK(ToRegister(instr->context()).is(cp));
4235  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4236 
4237  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4238  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4240 }
4241 
4242 
4243 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4244  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4245  if (instr->index()->IsConstantOperand()) {
4246  Operand index = ToOperand(instr->index());
4247  Register length = ToRegister(instr->length());
4248  __ cmp(length, index);
4249  cc = CommuteCondition(cc);
4250  } else {
4251  Register index = ToRegister(instr->index());
4252  Operand length = ToOperand(instr->length());
4253  __ cmp(index, length);
4254  }
4255  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4256  Label done;
4257  __ b(NegateCondition(cc), &done);
4258  __ stop("eliminated bounds check failed");
4259  __ bind(&done);
4260  } else {
4261  DeoptimizeIf(cc, instr, "out of bounds");
4262  }
4263 }
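// With a constant index the cmp operands above are swapped (length vs.
// index), so the condition is commuted. Either way the deopt fires when
// index > length if equality is allowed, or when index >= length otherwise.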
4264 
4265 
4266 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4267  Register external_pointer = ToRegister(instr->elements());
4268  Register key = no_reg;
4269  ElementsKind elements_kind = instr->elements_kind();
4270  bool key_is_constant = instr->key()->IsConstantOperand();
4271  int constant_key = 0;
4272  if (key_is_constant) {
4273  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4274  if (constant_key & 0xF0000000) {
4275  Abort(kArrayIndexConstantValueTooBig);
4276  }
4277  } else {
4278  key = ToRegister(instr->key());
4279  }
4280  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4281  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4282  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4283  int base_offset = instr->base_offset();
4284 
4285  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4286  elements_kind == FLOAT32_ELEMENTS ||
4287  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4288  elements_kind == FLOAT64_ELEMENTS) {
4289  Register address = scratch0();
4290  DwVfpRegister value(ToDoubleRegister(instr->value()));
4291  if (key_is_constant) {
4292  if (constant_key != 0) {
4293  __ add(address, external_pointer,
4294  Operand(constant_key << element_size_shift));
4295  } else {
4296  address = external_pointer;
4297  }
4298  } else {
4299  __ add(address, external_pointer, Operand(key, LSL, shift_size));
4300  }
4301  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4302  elements_kind == FLOAT32_ELEMENTS) {
4303  __ vcvt_f32_f64(double_scratch0().low(), value);
4304  __ vstr(double_scratch0().low(), address, base_offset);
4305  } else { // Storing doubles, not floats.
4306  __ vstr(value, address, base_offset);
4307  }
4308  } else {
4309  Register value(ToRegister(instr->value()));
4310  MemOperand mem_operand = PrepareKeyedOperand(
4311  key, external_pointer, key_is_constant, constant_key,
4312  element_size_shift, shift_size,
4313  base_offset);
4314  switch (elements_kind) {
4318  case UINT8_ELEMENTS:
4320  case INT8_ELEMENTS:
4321  __ strb(value, mem_operand);
4322  break;
4325  case INT16_ELEMENTS:
4326  case UINT16_ELEMENTS:
4327  __ strh(value, mem_operand);
4328  break;
4331  case INT32_ELEMENTS:
4332  case UINT32_ELEMENTS:
4333  __ str(value, mem_operand);
4334  break;
4335  case FLOAT32_ELEMENTS:
4336  case FLOAT64_ELEMENTS:
4339  case FAST_DOUBLE_ELEMENTS:
4340  case FAST_ELEMENTS:
4341  case FAST_SMI_ELEMENTS:
4343  case FAST_HOLEY_ELEMENTS:
4345  case DICTIONARY_ELEMENTS:
4347  UNREACHABLE();
4348  break;
4349  }
4350  }
4351 }
4352 
4353 
4354 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4355  DwVfpRegister value = ToDoubleRegister(instr->value());
4356  Register elements = ToRegister(instr->elements());
4357  Register scratch = scratch0();
4358  DwVfpRegister double_scratch = double_scratch0();
4359  bool key_is_constant = instr->key()->IsConstantOperand();
4360  int base_offset = instr->base_offset();
4361 
4362  // Calculate the effective address of the slot in the array to store the
4363  // double value.
4364  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4365  if (key_is_constant) {
4366  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4367  if (constant_key & 0xF0000000) {
4368  Abort(kArrayIndexConstantValueTooBig);
4369  }
4370  __ add(scratch, elements,
4371  Operand((constant_key << element_size_shift) + base_offset));
4372  } else {
4373  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4374  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4375  __ add(scratch, elements, Operand(base_offset));
4376  __ add(scratch, scratch,
4377  Operand(ToRegister(instr->key()), LSL, shift_size));
4378  }
4379 
4380  if (instr->NeedsCanonicalization()) {
4381  // Force a canonical NaN.
4382  if (masm()->emit_debug_code()) {
4383  __ vmrs(ip);
4385  __ Assert(ne, kDefaultNaNModeNotSet);
4386  }
4387  __ VFPCanonicalizeNaN(double_scratch, value);
4388  __ vstr(double_scratch, scratch, 0);
4389  } else {
4390  __ vstr(value, scratch, 0);
4391  }
4392 }
4393 
4394 
4395 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4396  Register value = ToRegister(instr->value());
4397  Register elements = ToRegister(instr->elements());
4398  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4399  : no_reg;
4400  Register scratch = scratch0();
4401  Register store_base = scratch;
4402  int offset = instr->base_offset();
4403 
4404  // Do the store.
4405  if (instr->key()->IsConstantOperand()) {
4406  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4407  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4408  offset += ToInteger32(const_operand) * kPointerSize;
4409  store_base = elements;
4410  } else {
4411  // Even though the HStoreKeyed instruction forces the input
4412  // representation for the key to be an integer, the input gets replaced
4413  // during bounds check elimination with the index argument to the bounds
4414  // check, which can be tagged, so that case must be handled here, too.
4415  if (instr->hydrogen()->key()->representation().IsSmi()) {
4416  __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4417  } else {
4418  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4419  }
4420  }
4421  __ str(value, MemOperand(store_base, offset));
4422 
4423  if (instr->hydrogen()->NeedsWriteBarrier()) {
4424  SmiCheck check_needed =
4425  instr->hydrogen()->value()->type().IsHeapObject()
4427  // Compute address of modified element and store it into key register.
4428  __ add(key, store_base, Operand(offset));
4429  __ RecordWrite(elements,
4430  key,
4431  value,
4433  kSaveFPRegs,
4435  check_needed,
4436  instr->hydrogen()->PointersToHereCheckForValue());
4437  }
4438 }
4439 
4440 
4441 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4442  // By cases: external, fast double
4443  if (instr->is_typed_elements()) {
4444  DoStoreKeyedExternalArray(instr);
4445  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4446  DoStoreKeyedFixedDoubleArray(instr);
4447  } else {
4448  DoStoreKeyedFixedArray(instr);
4449  }
4450 }
4451 
4452 
4453 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4454  DCHECK(ToRegister(instr->context()).is(cp));
4457  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4458 
4459  Handle<Code> ic =
4460  CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4462 }
4463 
4464 
4465 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4466  Register object_reg = ToRegister(instr->object());
4467  Register scratch = scratch0();
4468 
4469  Handle<Map> from_map = instr->original_map();
4470  Handle<Map> to_map = instr->transitioned_map();
4471  ElementsKind from_kind = instr->from_kind();
4472  ElementsKind to_kind = instr->to_kind();
4473 
4474  Label not_applicable;
4475  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4476  __ cmp(scratch, Operand(from_map));
4477  __ b(ne, &not_applicable);
4478 
4479  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4480  Register new_map_reg = ToRegister(instr->new_map_temp());
4481  __ mov(new_map_reg, Operand(to_map));
4482  __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4483  // Write barrier.
4484  __ RecordWriteForMap(object_reg,
4485  new_map_reg,
4486  scratch,
4488  kDontSaveFPRegs);
4489  } else {
4490  DCHECK(ToRegister(instr->context()).is(cp));
4491  DCHECK(object_reg.is(r0));
4492  PushSafepointRegistersScope scope(this);
4493  __ Move(r1, to_map);
4494  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4495  TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4496  __ CallStub(&stub);
4498  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4499  }
4500  __ bind(&not_applicable);
4501 }
4502 
4503 
4504 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4505  Register object = ToRegister(instr->object());
4506  Register temp = ToRegister(instr->temp());
4507  Label no_memento_found;
4508  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4509  DeoptimizeIf(eq, instr, "memento found");
4510  __ bind(&no_memento_found);
4511 }
4512 
4513 
4514 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4515  DCHECK(ToRegister(instr->context()).is(cp));
4516  DCHECK(ToRegister(instr->left()).is(r1));
4517  DCHECK(ToRegister(instr->right()).is(r0));
4518  StringAddStub stub(isolate(),
4519  instr->hydrogen()->flags(),
4520  instr->hydrogen()->pretenure_flag());
4521  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4522 }
4523 
4524 
4525 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4526  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4527  public:
4528  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4529  : LDeferredCode(codegen), instr_(instr) { }
4530  virtual void Generate() OVERRIDE {
4531  codegen()->DoDeferredStringCharCodeAt(instr_);
4532  }
4533  virtual LInstruction* instr() OVERRIDE { return instr_; }
4534  private:
4535  LStringCharCodeAt* instr_;
4536  };
4537 
4538  DeferredStringCharCodeAt* deferred =
4539  new(zone()) DeferredStringCharCodeAt(this, instr);
4540 
4542  ToRegister(instr->string()),
4543  ToRegister(instr->index()),
4544  ToRegister(instr->result()),
4545  deferred->entry());
4546  __ bind(deferred->exit());
4547 }
4548 
4549 
4550 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4551  Register string = ToRegister(instr->string());
4552  Register result = ToRegister(instr->result());
4553  Register scratch = scratch0();
4554 
4555  // TODO(3095996): Get rid of this. For now, we need to make the
4556  // result register contain a valid pointer because it is already
4557  // contained in the register pointer map.
4558  __ mov(result, Operand::Zero());
4559 
4560  PushSafepointRegistersScope scope(this);
4561  __ push(string);
4562  // Push the index as a smi. This is safe because of the checks in
4563  // DoStringCharCodeAt above.
4564  if (instr->index()->IsConstantOperand()) {
4565  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4566  __ mov(scratch, Operand(Smi::FromInt(const_index)));
4567  __ push(scratch);
4568  } else {
4569  Register index = ToRegister(instr->index());
4570  __ SmiTag(index);
4571  __ push(index);
4572  }
4573  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4574  instr->context());
4575  __ AssertSmi(r0);
4576  __ SmiUntag(r0);
4577  __ StoreToSafepointRegisterSlot(r0, result);
4578 }
4579 
4580 
4581 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4582  class DeferredStringCharFromCode FINAL : public LDeferredCode {
4583  public:
4584  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4585  : LDeferredCode(codegen), instr_(instr) { }
4586  virtual void Generate() OVERRIDE {
4587  codegen()->DoDeferredStringCharFromCode(instr_);
4588  }
4589  virtual LInstruction* instr() OVERRIDE { return instr_; }
4590  private:
4591  LStringCharFromCode* instr_;
4592  };
4593 
4594  DeferredStringCharFromCode* deferred =
4595  new(zone()) DeferredStringCharFromCode(this, instr);
4596 
4597  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4598  Register char_code = ToRegister(instr->char_code());
4599  Register result = ToRegister(instr->result());
4600  DCHECK(!char_code.is(result));
4601 
4602  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4603  __ b(hi, deferred->entry());
4604  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4605  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4606  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4607  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4608  __ cmp(result, ip);
4609  __ b(eq, deferred->entry());
4610  __ bind(deferred->exit());
4611 }
4612 
4613 
4614 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4615  Register char_code = ToRegister(instr->char_code());
4616  Register result = ToRegister(instr->result());
4617 
4618  // TODO(3095996): Get rid of this. For now, we need to make the
4619  // result register contain a valid pointer because it is already
4620  // contained in the register pointer map.
4621  __ mov(result, Operand::Zero());
4622 
4623  PushSafepointRegistersScope scope(this);
4624  __ SmiTag(char_code);
4625  __ push(char_code);
4626  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4627  __ StoreToSafepointRegisterSlot(r0, result);
4628 }
4629 
4630 
4631 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4632  LOperand* input = instr->value();
4633  DCHECK(input->IsRegister() || input->IsStackSlot());
4634  LOperand* output = instr->result();
4635  DCHECK(output->IsDoubleRegister());
4636  SwVfpRegister single_scratch = double_scratch0().low();
4637  if (input->IsStackSlot()) {
4638  Register scratch = scratch0();
4639  __ ldr(scratch, ToMemOperand(input));
4640  __ vmov(single_scratch, scratch);
4641  } else {
4642  __ vmov(single_scratch, ToRegister(input));
4643  }
4644  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4645 }
4646 
4647 
4648 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4649  LOperand* input = instr->value();
4650  LOperand* output = instr->result();
4651 
4652  SwVfpRegister flt_scratch = double_scratch0().low();
4653  __ vmov(flt_scratch, ToRegister(input));
4654  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4655 }
4656 
4657 
4658 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4659  class DeferredNumberTagI FINAL : public LDeferredCode {
4660  public:
4661  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4662  : LDeferredCode(codegen), instr_(instr) { }
4663  virtual void Generate() OVERRIDE {
4664  codegen()->DoDeferredNumberTagIU(instr_,
4665  instr_->value(),
4666  instr_->temp1(),
4667  instr_->temp2(),
4668  SIGNED_INT32);
4669  }
4670  virtual LInstruction* instr() OVERRIDE { return instr_; }
4671  private:
4672  LNumberTagI* instr_;
4673  };
4674 
4675  Register src = ToRegister(instr->value());
4676  Register dst = ToRegister(instr->result());
4677 
4678  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4679  __ SmiTag(dst, src, SetCC);
4680  __ b(vs, deferred->entry());
4681  __ bind(deferred->exit());
4682 }
4683 
4684 
4685 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4686  class DeferredNumberTagU FINAL : public LDeferredCode {
4687  public:
4688  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4689  : LDeferredCode(codegen), instr_(instr) { }
4690  virtual void Generate() OVERRIDE {
4691  codegen()->DoDeferredNumberTagIU(instr_,
4692  instr_->value(),
4693  instr_->temp1(),
4694  instr_->temp2(),
4695  UNSIGNED_INT32);
4696  }
4697  virtual LInstruction* instr() OVERRIDE { return instr_; }
4698  private:
4699  LNumberTagU* instr_;
4700  };
4701 
4702  Register input = ToRegister(instr->value());
4703  Register result = ToRegister(instr->result());
4704 
4705  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4706  __ cmp(input, Operand(Smi::kMaxValue));
4707  __ b(hi, deferred->entry());
4708  __ SmiTag(result, input);
4709  __ bind(deferred->exit());
4710 }
4711 
4712 
4714  LOperand* value,
4715  LOperand* temp1,
4716  LOperand* temp2,
4717  IntegerSignedness signedness) {
4718  Label done, slow;
4719  Register src = ToRegister(value);
4720  Register dst = ToRegister(instr->result());
4721  Register tmp1 = scratch0();
4722  Register tmp2 = ToRegister(temp1);
4723  Register tmp3 = ToRegister(temp2);
4724  LowDwVfpRegister dbl_scratch = double_scratch0();
4725 
4726  if (signedness == SIGNED_INT32) {
4727  // There was overflow, so bits 30 and 31 of the original integer
4728  // disagree. Try to allocate a heap number in new space and store
4729  // the value in there. If that fails, call the runtime system.
4730  if (dst.is(src)) {
4731  __ SmiUntag(src, dst);
4732  __ eor(src, src, Operand(0x80000000));
4733  }
4734  __ vmov(dbl_scratch.low(), src);
4735  __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4736  } else {
4737  __ vmov(dbl_scratch.low(), src);
4738  __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4739  }
4740 
4741  if (FLAG_inline_new) {
4742  __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4743  __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4744  __ b(&done);
4745  }
4746 
4747  // Slow case: Call the runtime system to do the number allocation.
4748  __ bind(&slow);
4749  {
4750  // TODO(3095996): Put a valid pointer value in the stack slot where the
4751  // result register is stored, as this register is in the pointer map, but
4752  // contains an integer value.
4753  __ mov(dst, Operand::Zero());
4754 
4755  // Preserve the value of all registers.
4756  PushSafepointRegistersScope scope(this);
4757 
4758  // NumberTagI and NumberTagD use the context from the frame, rather than
4759  // the environment's HContext or HInlinedContext value.
4760  // They only call Runtime::kAllocateHeapNumber.
4761  // The corresponding HChange instructions are added in a phase that does
4762  // not have easy access to the local context.
4764  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4766  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4767  __ sub(r0, r0, Operand(kHeapObjectTag));
4768  __ StoreToSafepointRegisterSlot(r0, dst);
4769  }
4770 
4771  // Done. Store the value in dbl_scratch into the newly allocated heap
4772  // number.
4773  __ bind(&done);
4774  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4775  __ add(dst, dst, Operand(kHeapObjectTag));
4776 }
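// Recovery of the original value in the SIGNED_INT32 path above: SmiTag
// shifted the value left by one and overflowed, meaning bits 30 and 31 of
// the original disagreed. Shifting back arithmetically duplicates bit 30
// into bit 31, so the eor with 0x80000000 flips bit 31 and restores the
// original int32.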
4777 
4778 
4779 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4780  class DeferredNumberTagD FINAL : public LDeferredCode {
4781  public:
4782  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4783  : LDeferredCode(codegen), instr_(instr) { }
4784  virtual void Generate() OVERRIDE {
4785  codegen()->DoDeferredNumberTagD(instr_);
4786  }
4787  virtual LInstruction* instr() OVERRIDE { return instr_; }
4788  private:
4789  LNumberTagD* instr_;
4790  };
4791 
4792  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4793  Register scratch = scratch0();
4794  Register reg = ToRegister(instr->result());
4795  Register temp1 = ToRegister(instr->temp());
4796  Register temp2 = ToRegister(instr->temp2());
4797 
4798  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4799  if (FLAG_inline_new) {
4800  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4801  // We want the untagged address first for performance
4802  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4803  DONT_TAG_RESULT);
4804  } else {
4805  __ jmp(deferred->entry());
4806  }
4807  __ bind(deferred->exit());
4808  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4809  // Now that we have finished with the object's real address, tag it.
4810  __ add(reg, reg, Operand(kHeapObjectTag));
4811 }
4812 
4813 
4814 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4815  // TODO(3095996): Get rid of this. For now, we need to make the
4816  // result register contain a valid pointer because it is already
4817  // contained in the register pointer map.
4818  Register reg = ToRegister(instr->result());
4819  __ mov(reg, Operand::Zero());
4820 
4821  PushSafepointRegistersScope scope(this);
4822  // NumberTagI and NumberTagD use the context from the frame, rather than
4823  // the environment's HContext or HInlinedContext value.
4824  // They only call Runtime::kAllocateHeapNumber.
4825  // The corresponding HChange instructions are added in a phase that does
4826  // not have easy access to the local context.
4828  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4830  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4831  __ sub(r0, r0, Operand(kHeapObjectTag));
4832  __ StoreToSafepointRegisterSlot(r0, reg);
4833 }
4834 
4835 
4836 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4837  HChange* hchange = instr->hydrogen();
4838  Register input = ToRegister(instr->value());
4839  Register output = ToRegister(instr->result());
4840  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4841  hchange->value()->CheckFlag(HValue::kUint32)) {
4842  __ tst(input, Operand(0xc0000000));
4843  DeoptimizeIf(ne, instr, "overflow");
4844  }
4845  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4846  !hchange->value()->CheckFlag(HValue::kUint32)) {
4847  __ SmiTag(output, input, SetCC);
4848  DeoptimizeIf(vs, instr, "overflow");
4849  } else {
4850  __ SmiTag(output, input);
4851  }
4852 }
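// The 0xc0000000 test above is the uint32-to-smi range check: a smi holds a
// signed 31-bit value, so an unsigned value only fits if it is below 1 << 30,
// i.e. if both of its top two bits are clear.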
4853 
4854 
4855 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4856  Register input = ToRegister(instr->value());
4857  Register result = ToRegister(instr->result());
4858  if (instr->needs_check()) {
4860  // If the input is a HeapObject, SmiUntag will set the carry flag.
4861  __ SmiUntag(result, input, SetCC);
4862  DeoptimizeIf(cs, instr, "not a Smi");
4863  } else {
4864  __ SmiUntag(result, input);
4865  }
4866 }
4867 
4868 
4869 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4870  DwVfpRegister result_reg,
4872  bool can_convert_undefined_to_nan =
4873  instr->hydrogen()->can_convert_undefined_to_nan();
4874  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4875 
4876  Register scratch = scratch0();
4877  SwVfpRegister flt_scratch = double_scratch0().low();
4878  DCHECK(!result_reg.is(double_scratch0()));
4879  Label convert, load_smi, done;
4881  // Smi check.
4882  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4883  // Heap number map check.
4884  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4885  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4886  __ cmp(scratch, Operand(ip));
4887  if (can_convert_undefined_to_nan) {
4888  __ b(ne, &convert);
4889  } else {
4890  DeoptimizeIf(ne, instr, "not a heap number");
4891  }
4892  // load heap number
4893  __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
4894  if (deoptimize_on_minus_zero) {
4895  __ VmovLow(scratch, result_reg);
4896  __ cmp(scratch, Operand::Zero());
4897  __ b(ne, &done);
4898  __ VmovHigh(scratch, result_reg);
4899  __ cmp(scratch, Operand(HeapNumber::kSignMask));
4900  DeoptimizeIf(eq, instr, "minus zero");
4901  }
4902  __ jmp(&done);
4903  if (can_convert_undefined_to_nan) {
4904  __ bind(&convert);
4905  // Convert undefined (and hole) to NaN.
4906  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4907  __ cmp(input_reg, Operand(ip));
4908  DeoptimizeIf(ne, instr, "not a heap number/undefined");
4909  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4910  __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
4911  __ jmp(&done);
4912  }
4913  } else {
4914  __ SmiUntag(scratch, input_reg);
4916  }
4917  // Smi to double register conversion
4918  __ bind(&load_smi);
4919  // scratch: untagged value of input_reg
4920  __ vmov(flt_scratch, scratch);
4921  __ vcvt_f64_s32(result_reg, flt_scratch);
4922  __ bind(&done);
4923 }
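// The minus-zero check above relies on the bit pattern of -0.0: the low word
// must be zero and the high word must equal HeapNumber::kSignMask (only the
// sign bit set), which is exactly what the VmovLow/VmovHigh comparisons test.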
4924 
4925 
4926 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4927  Register input_reg = ToRegister(instr->value());
4929  Register scratch2 = ToRegister(instr->temp());
4931  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4932 
4933  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4934  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4935 
4936  Label done;
4937 
4938  // The input was optimistically untagged; revert it.
4939  // The carry flag is set when we reach this deferred code as we just executed
4940  // SmiUntag(heap_object, SetCC)
4942  __ adc(scratch2, input_reg, Operand(input_reg));
4943 
4944  // Heap number map check.
4946  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4947  __ cmp(scratch1, Operand(ip));
4948 
4949  if (instr->truncating()) {
4950  // Performs a truncating conversion of a floating point number as used by
4951  // the JS bitwise operations.
4952  Label no_heap_number, check_bools, check_false;
4953  __ b(ne, &no_heap_number);
4954  __ TruncateHeapNumberToI(input_reg, scratch2);
4955  __ b(&done);
4956 
4957  // Check for Oddballs. Undefined/False is converted to zero and True to one
4958  // for truncating conversions.
4959  __ bind(&no_heap_number);
4960  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4961  __ cmp(scratch2, Operand(ip));
4962  __ b(ne, &check_bools);
4963  __ mov(input_reg, Operand::Zero());
4964  __ b(&done);
4965 
4966  __ bind(&check_bools);
4967  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
4968  __ cmp(scratch2, Operand(ip));
4969  __ b(ne, &check_false);
4970  __ mov(input_reg, Operand(1));
4971  __ b(&done);
4972 
4973  __ bind(&check_false);
4974  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
4975  __ cmp(scratch2, Operand(ip));
4976  DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false");
4977  __ mov(input_reg, Operand::Zero());
4978  } else {
4979  DeoptimizeIf(ne, instr, "not a heap number");
4980 
4981  __ sub(ip, scratch2, Operand(kHeapObjectTag));
4982  __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
4983  __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
4984  DeoptimizeIf(ne, instr, "lost precision or NaN");
4985 
4986  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4987  __ cmp(input_reg, Operand::Zero());
4988  __ b(ne, &done);
4989  __ VmovHigh(scratch1, double_scratch2);
4991  DeoptimizeIf(ne, instr, "minus zero");
4992  }
4993  }
4994  __ bind(&done);
4995 }
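// Reverting the optimistic untag above: SmiUntag(..., SetCC) shifts the low
// tag bit into the carry flag, which is 1 for a heap object, so
// adc(scratch2, input_reg, input_reg) computes 2 * untagged + 1 and thereby
// reconstructs the original tagged pointer in scratch2, leaving input_reg
// free to receive the converted integer result.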
4996 
4997 
4998 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4999  class DeferredTaggedToI FINAL : public LDeferredCode {
5000  public:
5001  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5002  : LDeferredCode(codegen), instr_(instr) { }
5003  virtual void Generate() OVERRIDE {
5004  codegen()->DoDeferredTaggedToI(instr_);
5005  }
5006  virtual LInstruction* instr() OVERRIDE { return instr_; }
5007  private:
5008  LTaggedToI* instr_;
5009  };
5010 
5011  LOperand* input = instr->value();
5012  DCHECK(input->IsRegister());
5013  DCHECK(input->Equals(instr->result()));
5014 
5015  Register input_reg = ToRegister(input);
5016 
5017  if (instr->hydrogen()->value()->representation().IsSmi()) {
5018  __ SmiUntag(input_reg);
5019  } else {
5020  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5021 
5022  // Optimistically untag the input.
5023  // If the input is a HeapObject, SmiUntag will set the carry flag.
5024  __ SmiUntag(input_reg, SetCC);
5025  // Branch to deferred code if the input was tagged.
5026  // The deferred code will take care of restoring the tag.
5027  __ b(cs, deferred->entry());
5028  __ bind(deferred->exit());
5029  }
5030 }
5031 
5032 
5033 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5034  LOperand* input = instr->value();
5035  DCHECK(input->IsRegister());
5036  LOperand* result = instr->result();
5037  DCHECK(result->IsDoubleRegister());
5038 
5039  Register input_reg = ToRegister(input);
5040  DwVfpRegister result_reg = ToDoubleRegister(result);
5041 
5042  HValue* value = instr->hydrogen()->value();
5043  NumberUntagDMode mode = value->representation().IsSmi()
5045 
5046  EmitNumberUntagD(instr, input_reg, result_reg, mode);
5047 }
5048 
5049 
5050 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5051  Register result_reg = ToRegister(instr->result());
5052  Register scratch1 = scratch0();
5053  DwVfpRegister double_input = ToDoubleRegister(instr->value());
5054  LowDwVfpRegister double_scratch = double_scratch0();
5055 
5056  if (instr->truncating()) {
5057  __ TruncateDoubleToI(result_reg, double_input);
5058  } else {
5059  __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5060  // Deoptimize if the input wasn't an int32 (inside a double).
5061  DeoptimizeIf(ne, instr, "lost precision or NaN");
5062  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5063  Label done;
5064  __ cmp(result_reg, Operand::Zero());
5065  __ b(ne, &done);
5066  __ VmovHigh(scratch1, double_input);
5067  __ tst(scratch1, Operand(HeapNumber::kSignMask));
5068  DeoptimizeIf(ne, instr, "minus zero");
5069  __ bind(&done);
5070  }
5071  }
5072 }
5073 
5074 
5075 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5076  Register result_reg = ToRegister(instr->result());
5077  Register scratch1 = scratch0();
5078  DwVfpRegister double_input = ToDoubleRegister(instr->value());
5079  LowDwVfpRegister double_scratch = double_scratch0();
5080 
5081  if (instr->truncating()) {
5082  __ TruncateDoubleToI(result_reg, double_input);
5083  } else {
5084  __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5085  // Deoptimize if the input wasn't an int32 (inside a double).
5086  DeoptimizeIf(ne, instr, "lost precision or NaN");
5087  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5088  Label done;
5089  __ cmp(result_reg, Operand::Zero());
5090  __ b(ne, &done);
5091  __ VmovHigh(scratch1, double_input);
5092  __ tst(scratch1, Operand(HeapNumber::kSignMask));
5093  DeoptimizeIf(ne, instr, "minus zero");
5094  __ bind(&done);
5095  }
5096  }
5097  __ SmiTag(result_reg, SetCC);
5098  DeoptimizeIf(vs, instr, "overflow");
5099 }
5100 
5101 
5102 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5103  LOperand* input = instr->value();
5104  __ SmiTst(ToRegister(input));
5105  DeoptimizeIf(ne, instr, "not a Smi");
5106 }
5107 
5108 
5109 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5110  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5111  LOperand* input = instr->value();
5112  __ SmiTst(ToRegister(input));
5113  DeoptimizeIf(eq, instr, "Smi");
5114  }
5115 }
5116 
5117 
5118 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5119  Register input = ToRegister(instr->value());
5120  Register scratch = scratch0();
5121 
5122  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5123  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5124 
5125  if (instr->hydrogen()->is_interval_check()) {
5126  InstanceType first;
5127  InstanceType last;
5128  instr->hydrogen()->GetCheckInterval(&first, &last);
5129 
5130  __ cmp(scratch, Operand(first));
5131 
5132  // If there is only one type in the interval, check for equality.
5133  if (first == last) {
5134  DeoptimizeIf(ne, instr, "wrong instance type");
5135  } else {
5136  DeoptimizeIf(lo, instr, "wrong instance type");
5137  // Omit check for the last type.
5138  if (last != LAST_TYPE) {
5139  __ cmp(scratch, Operand(last));
5140  DeoptimizeIf(hi, instr, "wrong instance type");
5141  }
5142  }
5143  } else {
5144  uint8_t mask;
5145  uint8_t tag;
5146  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5147 
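  // If the mask is a single bit, one tst against that bit is enough: the tag
  // must then be either zero (bit clear) or the mask itself (bit set), so the
  // branch condition alone distinguishes the two cases. Otherwise fall back to
  // masking and comparing against the full tag value.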
5148  if (base::bits::IsPowerOfTwo32(mask)) {
5149  DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5150  __ tst(scratch, Operand(mask));
5151  DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type");
5152  } else {
5153  __ and_(scratch, scratch, Operand(mask));
5154  __ cmp(scratch, Operand(tag));
5155  DeoptimizeIf(ne, instr, "wrong instance type");
5156  }
5157  }
5158 }
5159 
5160 
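// Compares a value against a known heap object and deoptimizes on mismatch.
// Objects in new space may move under GC, so they are not embedded directly;
// a Cell holding the object is allocated and the comparison goes through the
// cell's current value. Old-space objects are compared against directly.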
5161 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5162  Register reg = ToRegister(instr->value());
5163  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5164  AllowDeferredHandleDereference smi_check;
5165  if (isolate()->heap()->InNewSpace(*object)) {
5166  Register reg = ToRegister(instr->value());
5167  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5168  __ mov(ip, Operand(Handle<Object>(cell)));
5169  __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
5170  __ cmp(reg, ip);
5171  } else {
5172  __ cmp(reg, Operand(object));
5173  }
5174  DeoptimizeIf(ne, instr, "value mismatch");
5175 }
5176 
5177 
5178 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5179  {
5180  PushSafepointRegistersScope scope(this);
5181  __ push(object);
5182  __ mov(cp, Operand::Zero());
5183  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5184  RecordSafepointWithRegisters(
5185  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5186  __ StoreToSafepointRegisterSlot(r0, scratch0());
5187  }
5188  __ tst(scratch0(), Operand(kSmiTagMask));
5189  DeoptimizeIf(eq, instr, "instance migration failed");
5190 }
5191 
5192 
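// Map check with optional migration support. The object's map is loaded and
// compared against each map in the set; only the final comparison can fail.
// When one of the maps is a migration target, a mismatch branches to deferred
// code that calls Runtime::kTryMigrateInstance (deoptimizing if that fails)
// before re-entering the check at check_maps(); without a migration target a
// mismatch deoptimizes immediately.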
5193 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5194  class DeferredCheckMaps FINAL : public LDeferredCode {
5195  public:
5196  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5197  : LDeferredCode(codegen), instr_(instr), object_(object) {
5198  SetExit(check_maps());
5199  }
5200  virtual void Generate() OVERRIDE {
5201  codegen()->DoDeferredInstanceMigration(instr_, object_);
5202  }
5203  Label* check_maps() { return &check_maps_; }
5204  virtual LInstruction* instr() OVERRIDE { return instr_; }
5205  private:
5206  LCheckMaps* instr_;
5207  Label check_maps_;
5208  Register object_;
5209  };
5210 
5211  if (instr->hydrogen()->IsStabilityCheck()) {
5212  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5213  for (int i = 0; i < maps->size(); ++i) {
5214  AddStabilityDependency(maps->at(i).handle());
5215  }
5216  return;
5217  }
5218 
5219  Register map_reg = scratch0();
5220 
5221  LOperand* input = instr->value();
5222  DCHECK(input->IsRegister());
5223  Register reg = ToRegister(input);
5224 
5225  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5226 
5227  DeferredCheckMaps* deferred = NULL;
5228  if (instr->hydrogen()->HasMigrationTarget()) {
5229  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5230  __ bind(deferred->check_maps());
5231  }
5232 
5233  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5234  Label success;
5235  for (int i = 0; i < maps->size() - 1; i++) {
5236  Handle<Map> map = maps->at(i).handle();
5237  __ CompareMap(map_reg, map, &success);
5238  __ b(eq, &success);
5239  }
5240 
5241  Handle<Map> map = maps->at(maps->size() - 1).handle();
5242  __ CompareMap(map_reg, map, &success);
5243  if (instr->hydrogen()->HasMigrationTarget()) {
5244  __ b(ne, deferred->entry());
5245  } else {
5246  DeoptimizeIf(ne, instr, "wrong map");
5247  }
5248 
5249  __ bind(&success);
5250 }
5251 
5252 
5253 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5254  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
5255  Register result_reg = ToRegister(instr->result());
5256  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5257 }
5258 
5259 
5260 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5261  Register unclamped_reg = ToRegister(instr->unclamped());
5262  Register result_reg = ToRegister(instr->result());
5263  __ ClampUint8(result_reg, unclamped_reg);
5264 }
5265 
5266 
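// Clamps an arbitrary tagged value to the uint8 range [0, 255]: smis are
// untagged and clamped directly, heap numbers go through the double clamp,
// undefined becomes zero, and anything else deoptimizes.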
5267 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5268  Register scratch = scratch0();
5269  Register input_reg = ToRegister(instr->unclamped());
5270  Register result_reg = ToRegister(instr->result());
5271  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5272  Label is_smi, done, heap_number;
5273 
5274  // Both smi and heap number cases are handled.
5275  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5276 
5277  // Check for heap number
5278  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5279  __ cmp(scratch, Operand(factory()->heap_number_map()));
5280  __ b(eq, &heap_number);
5281 
5282  // Check for undefined. Undefined is converted to zero for clamping
5283  // conversions.
5284  __ cmp(input_reg, Operand(factory()->undefined_value()));
5285  DeoptimizeIf(ne, instr, "not a heap number/undefined");
5286  __ mov(result_reg, Operand::Zero());
5287  __ jmp(&done);
5288 
5289  // Heap number
5290  __ bind(&heap_number);
5291  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5292  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5293  __ jmp(&done);
5294 
5295  // smi
5296  __ bind(&is_smi);
5297  __ ClampUint8(result_reg, result_reg);
5298 
5299  __ bind(&done);
5300 }
5301 
5302 
5303 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5304  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5305  Register result_reg = ToRegister(instr->result());
5306  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5307  __ VmovHigh(result_reg, value_reg);
5308  } else {
5309  __ VmovLow(result_reg, value_reg);
5310  }
5311 }
5312 
5313 
5314 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5315  Register hi_reg = ToRegister(instr->hi());
5316  Register lo_reg = ToRegister(instr->lo());
5317  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5318  __ VmovHigh(result_reg, hi_reg);
5319  __ VmovLow(result_reg, lo_reg);
5320 }
5321 
5322 
5323 void LCodeGen::DoAllocate(LAllocate* instr) {
5324  class DeferredAllocate FINAL : public LDeferredCode {
5325  public:
5326  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5327  : LDeferredCode(codegen), instr_(instr) { }
5328  virtual void Generate() OVERRIDE {
5329  codegen()->DoDeferredAllocate(instr_);
5330  }
5331  virtual LInstruction* instr() OVERRIDE { return instr_; }
5332  private:
5333  LAllocate* instr_;
5334  };
5335 
5336  DeferredAllocate* deferred =
5337  new(zone()) DeferredAllocate(this, instr);
5338 
5339  Register result = ToRegister(instr->result());
5340  Register scratch = ToRegister(instr->temp1());
5341  Register scratch2 = ToRegister(instr->temp2());
5342 
5343  // Allocate memory for the object.
5344  AllocationFlags flags = TAG_OBJECT;
5345  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5346  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5347  }
5348  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5349  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5350  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5351  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5352  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5353  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5354  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5355  }
5356 
5357  if (instr->size()->IsConstantOperand()) {
5358  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5359  if (size <= Page::kMaxRegularHeapObjectSize) {
5360  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5361  } else {
5362  __ jmp(deferred->entry());
5363  }
5364  } else {
5365  Register size = ToRegister(instr->size());
5366  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5367  }
5368 
5369  __ bind(deferred->exit());
5370 
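  // When prefilling is required, walk backwards from the end of the new
  // object and store the one-pointer filler map into every word, keeping the
  // area iterable by the GC until the real field values are written.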
5371  if (instr->hydrogen()->MustPrefillWithFiller()) {
5373  if (instr->size()->IsConstantOperand()) {
5374  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5375  __ mov(scratch, Operand(size - kHeapObjectTag));
5376  } else {
5377  __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5378  }
5379  __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5380  Label loop;
5381  __ bind(&loop);
5382  __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
5383  __ str(scratch2, MemOperand(result, scratch));
5384  __ b(ge, &loop);
5385  }
5386 }
5387 
5388 
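// Slow path for DoAllocate: the smi-tagged size is pushed together with the
// allocation flags (the runtime call takes two arguments), and
// Runtime::kAllocateInTargetSpace performs the allocation; the resulting
// pointer is written back into the safepoint register slot for the result.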
5389 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5390  Register result = ToRegister(instr->result());
5391 
5392  // TODO(3095996): Get rid of this. For now, we need to make the
5393  // result register contain a valid pointer because it is already
5394  // contained in the register pointer map.
5395  __ mov(result, Operand(Smi::FromInt(0)));
5396 
5397  PushSafepointRegistersScope scope(this);
5398  if (instr->size()->IsRegister()) {
5399  Register size = ToRegister(instr->size());
5400  DCHECK(!size.is(result));
5401  __ SmiTag(size);
5402  __ push(size);
5403  } else {
5404  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5405  if (size >= 0 && size <= Smi::kMaxValue) {
5406  __ Push(Smi::FromInt(size));
5407  } else {
5408  // We should never get here at runtime => abort
5409  __ stop("invalid allocation size");
5410  return;
5411  }
5412  }
5413 
5415  instr->hydrogen()->MustAllocateDoubleAligned());
5416  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5417  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5418  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5420  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5421  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5423  } else {
5425  }
5427 
5428  CallRuntimeFromDeferred(
5429  Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5430  __ StoreToSafepointRegisterSlot(r0, result);
5431 }
5432 
5433 
5434 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5435  DCHECK(ToRegister(instr->value()).is(r0));
5436  __ push(r0);
5437  CallRuntime(Runtime::kToFastProperties, 1, instr);
5438 }
5439 
5440 
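// Materializes a regexp literal. The slot in the literals array is checked
// against undefined; if the literal has not been materialized yet, the
// runtime creates it. The result is then shallow-cloned: a JSRegExp-sized
// block is allocated (falling back to Runtime::kAllocateInNewSpace) and the
// fields are copied into the fresh object.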
5441 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5442  DCHECK(ToRegister(instr->context()).is(cp));
5443  Label materialized;
5444  // Registers will be used as follows:
5445  // r6 = literals array.
5446  // r1 = regexp literal.
5447  // r0 = regexp literal clone.
5448  // r2-5 are used as temporaries.
5449  int literal_offset =
5450  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5451  __ Move(r6, instr->hydrogen()->literals());
5452  __ ldr(r1, FieldMemOperand(r6, literal_offset));
5453  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5454  __ cmp(r1, ip);
5455  __ b(ne, &materialized);
5456 
5457  // Create regexp literal using runtime function
5458  // Result will be in r0.
5459  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5460  __ mov(r4, Operand(instr->hydrogen()->pattern()));
5461  __ mov(r3, Operand(instr->hydrogen()->flags()));
5462  __ Push(r6, r5, r4, r3);
5463  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5464  __ mov(r1, r0);
5465 
5466  __ bind(&materialized);
5467  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5468  Label allocated, runtime_allocate;
5469 
5470  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
5471  __ jmp(&allocated);
5472 
5473  __ bind(&runtime_allocate);
5474  __ mov(r0, Operand(Smi::FromInt(size)));
5475  __ Push(r1, r0);
5476  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5477  __ pop(r1);
5478 
5479  __ bind(&allocated);
5480  // Copy the content into the newly allocated memory.
5481  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
5482 }
5483 
5484 
5485 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5486  DCHECK(ToRegister(instr->context()).is(cp));
5487  // Use the fast case closure allocation code that allocates in new
5488  // space for nested functions that don't need literals cloning.
5489  bool pretenure = instr->hydrogen()->pretenure();
5490  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5491  FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5492  instr->hydrogen()->kind());
5493  __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5494  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5495  } else {
5496  __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5497  __ mov(r1, Operand(pretenure ? factory()->true_value()
5498  : factory()->false_value()));
5499  __ Push(cp, r2, r1);
5500  CallRuntime(Runtime::kNewClosure, 3, instr);
5501  }
5502 }
5503 
5504 
5505 void LCodeGen::DoTypeof(LTypeof* instr) {
5506  Register input = ToRegister(instr->value());
5507  __ push(input);
5508  CallRuntime(Runtime::kTypeof, 1, instr);
5509 }
5510 
5511 
5512 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5513  Register input = ToRegister(instr->value());
5514 
5515  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5516  instr->FalseLabel(chunk_),
5517  input,
5518  instr->type_literal());
5519  if (final_branch_condition != kNoCondition) {
5520  EmitBranch(instr, final_branch_condition);
5521  }
5522 }
5523 
5524 
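// Emits the comparison for a typeof test against a string literal and returns
// the condition under which the test is true. Cases that can be decided early
// jump straight to the true/false labels; if the literal names no known type,
// the code simply jumps to false_label and kNoCondition is returned.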
5525 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5526  Label* false_label,
5527  Register input,
5528  Handle<String> type_name) {
5529  Condition final_branch_condition = kNoCondition;
5530  Register scratch = scratch0();
5531  Factory* factory = isolate()->factory();
5532  if (String::Equals(type_name, factory->number_string())) {
5533  __ JumpIfSmi(input, true_label);
5534  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5535  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
5536  final_branch_condition = eq;
5537 
5538  } else if (String::Equals(type_name, factory->string_string())) {
5539  __ JumpIfSmi(input, false_label);
5540  __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
5541  __ b(ge, false_label);
5542  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5543  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5544  final_branch_condition = eq;
5545 
5546  } else if (String::Equals(type_name, factory->symbol_string())) {
5547  __ JumpIfSmi(input, false_label);
5548  __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
5549  final_branch_condition = eq;
5550 
5551  } else if (String::Equals(type_name, factory->boolean_string())) {
5552  __ CompareRoot(input, Heap::kTrueValueRootIndex);
5553  __ b(eq, true_label);
5554  __ CompareRoot(input, Heap::kFalseValueRootIndex);
5555  final_branch_condition = eq;
5556 
5557  } else if (String::Equals(type_name, factory->undefined_string())) {
5558  __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5559  __ b(eq, true_label);
5560  __ JumpIfSmi(input, false_label);
5561  // Check for undetectable objects => true.
5562  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5563  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5564  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5565  final_branch_condition = ne;
5566 
5567  } else if (String::Equals(type_name, factory->function_string())) {
5569  Register type_reg = scratch;
5570  __ JumpIfSmi(input, false_label);
5571  __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
5572  __ b(eq, true_label);
5573  __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
5574  final_branch_condition = eq;
5575 
5576  } else if (String::Equals(type_name, factory->object_string())) {
5577  Register map = scratch;
5578  __ JumpIfSmi(input, false_label);
5579  __ CompareRoot(input, Heap::kNullValueRootIndex);
5580  __ b(eq, true_label);
5581  __ CheckObjectTypeRange(input,
5582  map,
5583  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
5584  LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
5585  false_label);
5586  // Check for undetectable objects => false.
5587  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5588  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
5589  final_branch_condition = eq;
5590 
5591  } else {
5592  __ b(false_label);
5593  }
5594 
5595  return final_branch_condition;
5596 }
5597 
5598 
5599 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5600  Register temp1 = ToRegister(instr->temp());
5601 
5602  EmitIsConstructCall(temp1, scratch0());
5603  EmitBranch(instr, eq);
5604 }
5605 
5606 
5607 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5608  DCHECK(!temp1.is(temp2));
5609  // Get the frame pointer for the calling frame.
5611 
5612  // Skip the arguments adaptor frame if it exists.
5616 
5617  // Check the marker in the calling frame.
5619  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5620 }
5621 
5622 
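// Lazy deoptimization patches a call over the code emitted after the previous
// lazy-bailout point, so this pads with nops until there is enough room for
// that patch. Constant pool emission is blocked while padding so the gap stays
// a contiguous run of instructions.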
5623 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5624  if (!info()->IsStub()) {
5625  // Ensure that we have enough space after the previous lazy-bailout
5626  // instruction for patching the code here.
5627  int current_pc = masm()->pc_offset();
5628  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5629  // Block literal pool emission for duration of padding.
5630  Assembler::BlockConstPoolScope block_const_pool(masm());
5631  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5632  DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5633  while (padding_size > 0) {
5634  __ nop();
5635  padding_size -= Assembler::kInstrSize;
5636  }
5637  }
5638  }
5639  last_lazy_deopt_pc_ = masm()->pc_offset();
5640 }
5641 
5642 
5643 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5644  last_lazy_deopt_pc_ = masm()->pc_offset();
5645  DCHECK(instr->HasEnvironment());
5646  LEnvironment* env = instr->environment();
5647  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5648  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5649 }
5650 
5651 
5652 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5653  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5654  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5655  // needed return address), even though the implementation of LAZY and EAGER is
5656  // now identical. When LAZY is eventually completely folded into EAGER, remove
5657  // the special case below.
5658  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5659  type = Deoptimizer::LAZY;
5660  }
5661 
5662  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5663 }
5664 
5665 
5666 void LCodeGen::DoDummy(LDummy* instr) {
5667  // Nothing to see here, move on!
5668 }
5669 
5670 
5671 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5672  // Nothing to see here, move on!
5673 }
5674 
5675 
5676 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5677  PushSafepointRegistersScope scope(this);
5678  LoadContextFromDeferred(instr->context());
5679  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5682  DCHECK(instr->HasEnvironment());
5683  LEnvironment* env = instr->environment();
5684  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5685 }
5686 
5687 
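// Stack checks compare sp against the stack-limit root. At function entry the
// StackCheck builtin is called inline when the limit is exceeded; at backwards
// branches the check branches to deferred code that calls the stack-guard
// runtime instead, so the common path stays short.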
5688 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5689  class DeferredStackCheck FINAL : public LDeferredCode {
5690  public:
5691  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5692  : LDeferredCode(codegen), instr_(instr) { }
5693  virtual void Generate() OVERRIDE {
5694  codegen()->DoDeferredStackCheck(instr_);
5695  }
5696  virtual LInstruction* instr() OVERRIDE { return instr_; }
5697  private:
5698  LStackCheck* instr_;
5699  };
5700 
5701  DCHECK(instr->HasEnvironment());
5702  LEnvironment* env = instr->environment();
5703  // There is no LLazyBailout instruction for stack-checks. We have to
5704  // prepare for lazy deoptimization explicitly here.
5705  if (instr->hydrogen()->is_function_entry()) {
5706  // Perform stack overflow check.
5707  Label done;
5708  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5709  __ cmp(sp, Operand(ip));
5710  __ b(hs, &done);
5711  Handle<Code> stack_check = isolate()->builtins()->StackCheck();
5712  PredictableCodeSizeScope predictable(masm(),
5713  CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
5714  DCHECK(instr->context()->IsRegister());
5715  DCHECK(ToRegister(instr->context()).is(cp));
5716  CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
5717  __ bind(&done);
5718  } else {
5719  DCHECK(instr->hydrogen()->is_backwards_branch());
5720  // Perform stack overflow check if this goto needs it before jumping.
5721  DeferredStackCheck* deferred_stack_check =
5722  new(zone()) DeferredStackCheck(this, instr);
5723  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5724  __ cmp(sp, Operand(ip));
5725  __ b(lo, deferred_stack_check->entry());
5727  __ bind(instr->done_label());
5728  deferred_stack_check->SetExit(instr->done_label());
5729  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5730  // Don't record a deoptimization index for the safepoint here.
5731  // This will be done explicitly when emitting the call and the safepoint in
5732  // the deferred code.
5733  }
5734 }
5735 
5736 
5737 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5738  // This is a pseudo-instruction that ensures that the environment here is
5739  // properly registered for deoptimization and records the assembler's PC
5740  // offset.
5741  LEnvironment* environment = instr->environment();
5742 
5743  // If the environment were already registered, we would have no way of
5744  // backpatching it with the spill slot operands.
5745  DCHECK(!environment->HasBeenRegistered());
5746  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5747 
5749 }
5750 
5751 
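// Prepares for-in enumeration of the object in r0: undefined, null, smis, and
// anything up to and including JS proxies deoptimize. If the enum cache is
// usable the object's map is kept; otherwise Runtime::kGetPropertyNamesFast is
// called and the result must be a map (checked against the meta map),
// deoptimizing if it is not.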
5752 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5753  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5754  __ cmp(r0, ip);
5755  DeoptimizeIf(eq, instr, "undefined");
5756 
5757  Register null_value = r5;
5758  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5759  __ cmp(r0, null_value);
5760  DeoptimizeIf(eq, instr, "null");
5761 
5762  __ SmiTst(r0);
5763  DeoptimizeIf(eq, instr, "Smi");
5764 
5766  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5767  DeoptimizeIf(le, instr, "wrong instance type");
5768 
5769  Label use_cache, call_runtime;
5770  __ CheckEnumCache(null_value, &call_runtime);
5771 
5772  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5773  __ b(&use_cache);
5774 
5775  // Get the set of properties to enumerate.
5776  __ bind(&call_runtime);
5777  __ push(r0);
5778  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5779 
5780  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5781  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5782  __ cmp(r1, ip);
5783  DeoptimizeIf(ne, instr, "wrong map");
5784  __ bind(&use_cache);
5785 }
5786 
5787 
5788 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5789  Register map = ToRegister(instr->map());
5790  Register result = ToRegister(instr->result());
5791  Label load_cache, done;
5792  __ EnumLength(result, map);
5793  __ cmp(result, Operand(Smi::FromInt(0)));
5794  __ b(ne, &load_cache);
5795  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
5796  __ jmp(&done);
5797 
5798  __ bind(&load_cache);
5799  __ LoadInstanceDescriptors(map, result);
5800  __ ldr(result,
5801  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5802  __ ldr(result,
5803  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5804  __ cmp(result, Operand::Zero());
5805  DeoptimizeIf(eq, instr, "no cache");
5806 
5807  __ bind(&done);
5808 }
5809 
5810 
5811 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5812  Register object = ToRegister(instr->value());
5813  Register map = ToRegister(instr->map());
5814  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5815  __ cmp(map, scratch0());
5816  DeoptimizeIf(ne, instr, "wrong map");
5817 }
5818 
5819 
5820 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5821  Register result,
5822  Register object,
5823  Register index) {
5824  PushSafepointRegistersScope scope(this);
5825  __ Push(object);
5826  __ Push(index);
5827  __ mov(cp, Operand::Zero());
5828  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5829  RecordSafepointWithRegisters(
5830  instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5831  __ StoreToSafepointRegisterSlot(r0, result);
5832 }
5833 
5834 
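// Loads a field by a smi-encoded descriptor index. The low bit of the smi
// index (tested via Smi::FromInt(1)) marks a mutable heap-number (double)
// field, which is handled by the deferred runtime call. Otherwise the index is
// shifted down: non-negative values address in-object fields, negative values
// address the out-of-object properties backing store.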
5835 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5836  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5837  public:
5838  DeferredLoadMutableDouble(LCodeGen* codegen,
5839  LLoadFieldByIndex* instr,
5840  Register result,
5841  Register object,
5842  Register index)
5843  : LDeferredCode(codegen),
5844  instr_(instr),
5845  result_(result),
5846  object_(object),
5847  index_(index) {
5848  }
5849  virtual void Generate() OVERRIDE {
5850  codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5851  }
5852  virtual LInstruction* instr() OVERRIDE { return instr_; }
5853  private:
5854  LLoadFieldByIndex* instr_;
5855  Register result_;
5856  Register object_;
5857  Register index_;
5858  };
5859 
5860  Register object = ToRegister(instr->object());
5861  Register index = ToRegister(instr->index());
5862  Register result = ToRegister(instr->result());
5863  Register scratch = scratch0();
5864 
5865  DeferredLoadMutableDouble* deferred;
5866  deferred = new(zone()) DeferredLoadMutableDouble(
5867  this, instr, result, object, index);
5868 
5869  Label out_of_object, done;
5870 
5871  __ tst(index, Operand(Smi::FromInt(1)));
5872  __ b(ne, deferred->entry());
5873  __ mov(index, Operand(index, ASR, 1));
5874 
5875  __ cmp(index, Operand::Zero());
5876  __ b(lt, &out_of_object);
5877 
5878  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
5879  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5880 
5881  __ b(&done);
5882 
5883  __ bind(&out_of_object);
5884  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5885  // Index is equal to the negated out-of-object property index plus 1.
5887  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5888  __ ldr(result, FieldMemOperand(scratch,
5889  FixedArray::kHeaderSize - kPointerSize));
5890  __ bind(deferred->exit());
5891  __ bind(&done);
5892 }
5893 
5894 
5895 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5896  Register context = ToRegister(instr->context());
5897  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5898 }
5899 
5900 
5901 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5902  Handle<ScopeInfo> scope_info = instr->scope_info();
5903  __ Push(scope_info);
5904  __ push(ToRegister(instr->function()));
5905  CallRuntime(Runtime::kPushBlockContext, 2, instr);
5906  RecordSafepoint(Safepoint::kNoLazyDeopt);
5907 }
5908 
5909 
5910 #undef __
5911 
5912 } } // namespace v8::internal