lithium-codegen-mips64.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"

namespace v8 {
namespace internal {

class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const OVERRIDE {}

  virtual void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

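// Entry point for the Lithium code generator: emits the prologue, the
// instruction body, deferred code, the deoptimization jump table and the
// safepoint table, in that order, bailing out as soon as any stage aborts.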
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


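// The next two helpers spill and reload the allocated double registers in a
// contiguous block of kDoubleSize slots just below sp. Both walk the chunk's
// allocated_double_registers() bit vector in the same order, so the save and
// restore offsets match.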
void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc (return address).

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ ld(a2, MemOperand(sp, receiver_offset));
      __ Branch(&ok, ne, a2, Operand(at));

      __ ld(a2, GlobalObjectOperand());
      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));

      __ sd(a2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in v0. It replaces the context passed to us.
    // It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


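// Emitted at the on-stack-replacement entry: the unoptimized frame is reused,
// so only the difference between its size and this code's spill area needs to
// be allocated here.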
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    RecordAndWritePosition(instr->position());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Daddu(fp, sp,
                 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


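// Each jump-table entry materializes the address of its deoptimization entry
// in t9 and calls it. Entries that run without a frame share one
// stub-marker frame-building sequence through the needs_frame label.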
bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->reason);
    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        DCHECK(info()->IsStub());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Daddu(fp, sp,
                 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        __ Call(t9);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ Call(t9);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


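// Operand conversion helpers: map Lithium operands (registers, stack slots
// and constants) onto MIPS registers, memory operands and immediates,
// loading into a scratch register where necessary.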
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  // return ToRepresentation(op, Representation::Integer32());
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


int64_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int64_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand((int64_t)0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand((int64_t)0);
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // No eager frame: retrieve the parameter relative to the stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
    return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
  } else {
    // No eager frame: retrieve the parameter relative to the stack pointer.
    // return MemOperand(
    //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
  }
}


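// Recursively records a deoptimization translation for this environment and
// its outer frames: one Begin*Frame command per frame, followed by one store
// command per environment value.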
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ld(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


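// Registers the environment with the deoptimizer exactly once, writing its
// translation and remembering the pc offset for lazy bailouts.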
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


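// Emits a conditional deoptimization: the fast path is a direct call to the
// deopt entry; otherwise a shared jump-table entry is used (and reused for
// consecutive identical deopts) to keep the code compact.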
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::BailoutType bailout_type,
                            const char* detail, Register src1,
                            const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(reason);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            const char* detail, Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


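// Modulus by a power of two reduces to a bit mask: for divisor +/-2^k the
// mask is 2^k - 1, applied to |dividend| with the sign restored afterwards
// (truncated semantics), e.g. -13 % 8 -> -(13 & 7) = -5.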
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, "division by zero");
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


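// General modulus: Dmod computes the remainder while the generated checks
// cover the cases it cannot express, x % 0 (deopt), kMinInt % -1 and a -0
// result.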
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
  }
  __ bind(&done);
}


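// Division by a power of two uses an arithmetic shift with a bias added to
// negative dividends so that truncation rounds toward zero, e.g. for
// divisor 4: (-7 + 3) >> 2 = -1, matching -7 / 4 under truncated division.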
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      __ mfhi(remainder);
    } else {
      __ dmod(remainder, dividend, divisor);
    }
    DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // The dividend can be the same register as the result, so save its value
  // for the overflow check.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
  }

  __ Xor(scratch, scratch, result);
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, "overflow", result, Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    __ mfhi(remainder);
  } else {
    __ dmod(remainder, dividend, divisor);
  }
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


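// Constant multiplications are strength-reduced: powers of two become a
// shift, 2^k +/- 1 a shift plus add/subtract (e.g. x * 9 = (x << 3) + x),
// with the sign fixed up afterwards for negative constants.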
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and the left operand is zero, the
      // result should be -0.
      DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(gt, instr, "overflow", scratch, Operand(kMaxInt));
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ dsll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ dsll(scratch, left, shift);
          __ Daddu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ dsll(scratch, left, shift);
          __ Dsubu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Dsubu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Dmul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ Dmulh(result, left, right);
      } else {
        __ Dmul(result, left, right);
      }
      __ dsra32(scratch, result, 0);
      __ sra(at, result, 31);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
      DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Dmul(result, result, right);
      } else {
        __ Dmul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


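// SHR is the only shift that can deopt: a logical shift by zero leaves a
// negative input unchanged, producing a uint32 value above kMaxInt that has
// no int32 representation.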
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          // TODO(yy): (-1) >>> 0. anything else?
          DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
          DeoptimizeIf(gt, instr, "negative value", result, Operand(kMaxInt));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ dsll(result, left, shift_count);
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() || right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      DCHECK(right->IsRegister());
      // The overflow-check macros do not support constant operands, so the
      // IsConstantOperand case is handled by the clause above.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
    if (!instr->hydrogen()->representation().IsSmi()) {
      DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
      DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
    }
  }
}
1695 
1696 
1697 void LCodeGen::DoConstantI(LConstantI* instr) {
1698  __ li(ToRegister(instr->result()), Operand(instr->value()));
1699 }
1700 
1701 
1702 void LCodeGen::DoConstantS(LConstantS* instr) {
1703  __ li(ToRegister(instr->result()), Operand(instr->value()));
1704 }
1705 
1706 
1707 void LCodeGen::DoConstantD(LConstantD* instr) {
1708  DCHECK(instr->result()->IsDoubleRegister());
1709  DoubleRegister result = ToDoubleRegister(instr->result());
1710  double v = instr->value();
1711  __ Move(result, v);
1712 }
1713 
1714 
1715 void LCodeGen::DoConstantE(LConstantE* instr) {
1716  __ li(ToRegister(instr->result()), Operand(instr->value()));
1717 }
1718 
1719 
1720 void LCodeGen::DoConstantT(LConstantT* instr) {
1721  Handle<Object> object = instr->value(isolate());
1722  AllowDeferredHandleDereference smi_check;
1723  __ li(ToRegister(instr->result()), object);
1724 }
1725 
1726 
1727 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1728  Register result = ToRegister(instr->result());
1729  Register map = ToRegister(instr->value());
1730  __ EnumLength(result, map);
1731 }
1732 
1733 
1734 void LCodeGen::DoDateField(LDateField* instr) {
1735  Register object = ToRegister(instr->date());
1736  Register result = ToRegister(instr->result());
1737  Register scratch = ToRegister(instr->temp());
1738  Smi* index = instr->index();
1739  Label runtime, done;
1740  DCHECK(object.is(a0));
1741  DCHECK(result.is(v0));
1742  DCHECK(!scratch.is(scratch0()));
1743  DCHECK(!scratch.is(object));
1744 
1745  __ SmiTst(object, at);
1746  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
1747  __ GetObjectType(object, scratch, scratch);
1748  DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
1749 
1750  if (index->value() == 0) {
1751  __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
1752  } else {
1753  if (index->value() < JSDate::kFirstUncachedField) {
1754  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1755  __ li(scratch, Operand(stamp));
1756  __ ld(scratch, MemOperand(scratch));
1757  __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1758  __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1759  __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
1760  kPointerSize * index->value()));
1761  __ jmp(&done);
1762  }
1763  __ bind(&runtime);
1764  __ PrepareCallCFunction(2, scratch);
1765  __ li(a1, Operand(index));
1766  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1767  __ bind(&done);
1768  }
1769 }
1770 
1771 
1772 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1773  LOperand* index,
1774  String::Encoding encoding) {
1775  if (index->IsConstantOperand()) {
1776  int offset = ToInteger32(LConstantOperand::cast(index));
1777  if (encoding == String::TWO_BYTE_ENCODING) {
1778  offset *= kUC16Size;
1779  }
1780  STATIC_ASSERT(kCharSize == 1);
1781  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1782  }
1783  Register scratch = scratch0();
1784  DCHECK(!scratch.is(string));
1785  DCHECK(!scratch.is(ToRegister(index)));
1786  if (encoding == String::ONE_BYTE_ENCODING) {
1787  __ Daddu(scratch, string, ToRegister(index));
1788  } else {
1789  STATIC_ASSERT(kUC16Size == 2);
1790  __ dsll(scratch, ToRegister(index), 1);
1791  __ Daddu(scratch, string, scratch);
1792  }
1793  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1794 }
1795 
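The operand built above addresses character `index` of a sequential string: one-byte strings add the index directly, two-byte strings scale it by kUC16Size first (the `dsll ..., 1`). A sketch of the offset arithmetic, with a hypothetical header size standing in for SeqString::kHeaderSize:

    #include <cstdio>

    int main() {
      const int kHeaderSize = 24;  // hypothetical stand-in, not the real value
      const int kUC16Size = 2;
      int index = 5;
      std::printf("one-byte offset: %d\n", kHeaderSize + index);              // 29
      std::printf("two-byte offset: %d\n", kHeaderSize + index * kUC16Size);  // 34
      return 0;
    }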
1796 
1797 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1798  String::Encoding encoding = instr->hydrogen()->encoding();
1799  Register string = ToRegister(instr->string());
1800  Register result = ToRegister(instr->result());
1801 
1802  if (FLAG_debug_code) {
1803  Register scratch = scratch0();
1804  __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1805  __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1806 
1807  __ And(scratch, scratch,
1808  Operand(kStringRepresentationMask | kStringEncodingMask));
1809  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1810  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1811  __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1812  ? one_byte_seq_type : two_byte_seq_type));
1813  __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1814  }
1815 
1816  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1817  if (encoding == String::ONE_BYTE_ENCODING) {
1818  __ lbu(result, operand);
1819  } else {
1820  __ lhu(result, operand);
1821  }
1822 }
1823 
1824 
1825 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1826  String::Encoding encoding = instr->hydrogen()->encoding();
1827  Register string = ToRegister(instr->string());
1828  Register value = ToRegister(instr->value());
1829 
1830  if (FLAG_debug_code) {
1831  Register scratch = scratch0();
1832  Register index = ToRegister(instr->index());
1833  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1834  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1835  int encoding_mask =
1836  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1837  ? one_byte_seq_type : two_byte_seq_type;
1838  __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1839  }
1840 
1841  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1842  if (encoding == String::ONE_BYTE_ENCODING) {
1843  __ sb(value, operand);
1844  } else {
1845  __ sh(value, operand);
1846  }
1847 }
1848 
1849 
1850 void LCodeGen::DoAddI(LAddI* instr) {
1851  LOperand* left = instr->left();
1852  LOperand* right = instr->right();
1853  LOperand* result = instr->result();
1854  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1855 
1856  if (!can_overflow) {
1857  if (right->IsStackSlot()) {
1858  Register right_reg = EmitLoadRegister(right, at);
1859  __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg));
1860  } else {
1861  DCHECK(right->IsRegister() || right->IsConstantOperand());
1862  __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1863  }
1864  } else { // can_overflow.
1865  Register overflow = scratch0();
1866  Register scratch = scratch1();
1867  if (right->IsStackSlot() ||
1868  right->IsConstantOperand()) {
1869  Register right_reg = EmitLoadRegister(right, scratch);
1870  __ AdduAndCheckForOverflow(ToRegister(result),
1871  ToRegister(left),
1872  right_reg,
1873  overflow); // Reg at also used as scratch.
1874  } else {
1875  DCHECK(right->IsRegister());
1876  // The overflow-check macros do not support constant operands, so the
1877  // IsConstantOperand case is handled by the previous clause.
1878  __ AdduAndCheckForOverflow(ToRegister(result),
1879  ToRegister(left),
1880  ToRegister(right),
1881  overflow); // Reg at also used as scratch.
1882  }
1883  DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
1884  // If the value is not a smi, it must be an int32.
1885  if (!instr->hydrogen()->representation().IsSmi()) {
1886  DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt));
1887  DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt));
1888  }
1889  }
1890 }
1891 
1892 
1893 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1894  LOperand* left = instr->left();
1895  LOperand* right = instr->right();
1896  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1897  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1898  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1899  Register left_reg = ToRegister(left);
1900  Register right_reg = EmitLoadRegister(right, scratch0());
1901  Register result_reg = ToRegister(instr->result());
1902  Label return_right, done;
1903  Register scratch = scratch1();
1904  __ Slt(scratch, left_reg, Operand(right_reg));
1905  if (condition == ge) {
1906  __ Movz(result_reg, left_reg, scratch);
1907  __ Movn(result_reg, right_reg, scratch);
1908  } else {
1909  DCHECK(condition == le);
1910  __ Movn(result_reg, left_reg, scratch);
1911  __ Movz(result_reg, right_reg, scratch);
1912  }
1913  } else {
1914  DCHECK(instr->hydrogen()->representation().IsDouble());
1915  FPURegister left_reg = ToDoubleRegister(left);
1916  FPURegister right_reg = ToDoubleRegister(right);
1917  FPURegister result_reg = ToDoubleRegister(instr->result());
1918  Label check_nan_left, check_zero, return_left, return_right, done;
1919  __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1920  __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1921  __ Branch(&return_right);
1922 
1923  __ bind(&check_zero);
1924  // left == right != 0.
1925  __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1926  // At this point, both left and right are either 0 or -0.
1927  if (operation == HMathMinMax::kMathMin) {
1928  __ neg_d(left_reg, left_reg);
1929  __ sub_d(result_reg, left_reg, right_reg);
1930  __ neg_d(result_reg, result_reg);
1931  } else {
1932  __ add_d(result_reg, left_reg, right_reg);
1933  }
1934  __ Branch(&done);
1935 
1936  __ bind(&check_nan_left);
1937  // left == NaN.
1938  __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1939  __ bind(&return_right);
1940  if (!right_reg.is(result_reg)) {
1941  __ mov_d(result_reg, right_reg);
1942  }
1943  __ Branch(&done);
1944 
1945  __ bind(&return_left);
1946  if (!left_reg.is(result_reg)) {
1947  __ mov_d(result_reg, left_reg);
1948  }
1949  __ bind(&done);
1950  }
1951 }
1952 
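The zero handling above relies on IEEE-754 signed-zero arithmetic: for max, left + right is -0 only when both inputs are -0; for min, -((-left) - right) is -0 whenever either input is -0. A host-side sketch of the min case (an illustration of the arithmetic, not V8 code):

    #include <cstdio>

    // Mirrors the neg_d/sub_d/neg_d sequence above for the case where both
    // inputs are +/-0.
    static double MinOfZeros(double left, double right) {
      return -(-left - right);
    }

    int main() {
      std::printf("min(+0, -0) = %g\n", MinOfZeros(0.0, -0.0));   // -0
      std::printf("min(+0, +0) = %g\n", MinOfZeros(0.0, 0.0));    // 0
      std::printf("min(-0, -0) = %g\n", MinOfZeros(-0.0, -0.0));  // -0
      return 0;
    }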
1953 
1954 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1955  DoubleRegister left = ToDoubleRegister(instr->left());
1956  DoubleRegister right = ToDoubleRegister(instr->right());
1957  DoubleRegister result = ToDoubleRegister(instr->result());
1958  switch (instr->op()) {
1959  case Token::ADD:
1960  __ add_d(result, left, right);
1961  break;
1962  case Token::SUB:
1963  __ sub_d(result, left, right);
1964  break;
1965  case Token::MUL:
1966  __ mul_d(result, left, right);
1967  break;
1968  case Token::DIV:
1969  __ div_d(result, left, right);
1970  break;
1971  case Token::MOD: {
1972  // Save a0-a3 on the stack.
1973  RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1974  __ MultiPush(saved_regs);
1975 
1976  __ PrepareCallCFunction(0, 2, scratch0());
1977  __ MovToFloatParameters(left, right);
1978  __ CallCFunction(
1979  ExternalReference::mod_two_doubles_operation(isolate()),
1980  0, 2);
1981  // Move the result into the double result register.
1982  __ MovFromFloatResult(result);
1983 
1984  // Restore the saved registers.
1985  __ MultiPop(saved_regs);
1986  break;
1987  }
1988  default:
1989  UNREACHABLE();
1990  break;
1991  }
1992 }
1993 
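Token::MOD above has no single FPU instruction on this target, so it is lowered to a C call. Assuming mod_two_doubles_operation follows C library semantics, the host-side equivalent is fmod, whose result takes the sign of the dividend:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%g\n", std::fmod(5.5, 2.0));   // 1.5
      std::printf("%g\n", std::fmod(-5.5, 2.0));  // -1.5: sign follows dividend
      return 0;
    }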
1994 
1995 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1996  DCHECK(ToRegister(instr->context()).is(cp));
1997  DCHECK(ToRegister(instr->left()).is(a1));
1998  DCHECK(ToRegister(instr->right()).is(a0));
1999  DCHECK(ToRegister(instr->result()).is(v0));
2000 
2001  Handle<Code> code =
2002  CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2003  CallCode(code, RelocInfo::CODE_TARGET, instr);
2004  // Other architectures use a nop here to signal that there is no inlined,
2005  // patchable code. MIPS does not need the nop, since our marker
2006  // instruction (andi zero_reg) will never appear in normal code.
2007 }
2008 
2009 
2010 template<class InstrType>
2011 void LCodeGen::EmitBranch(InstrType instr,
2012  Condition condition,
2013  Register src1,
2014  const Operand& src2) {
2015  int left_block = instr->TrueDestination(chunk_);
2016  int right_block = instr->FalseDestination(chunk_);
2017 
2018  int next_block = GetNextEmittedBlock();
2019  if (right_block == left_block || condition == al) {
2020  EmitGoto(left_block);
2021  } else if (left_block == next_block) {
2022  __ Branch(chunk_->GetAssemblyLabel(right_block),
2023  NegateCondition(condition), src1, src2);
2024  } else if (right_block == next_block) {
2025  __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2026  } else {
2027  __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2028  __ Branch(chunk_->GetAssemblyLabel(right_block));
2029  }
2030 }
2031 
2032 
2033 template<class InstrType>
2034 void LCodeGen::EmitBranchF(InstrType instr,
2035  Condition condition,
2036  FPURegister src1,
2037  FPURegister src2) {
2038  int right_block = instr->FalseDestination(chunk_);
2039  int left_block = instr->TrueDestination(chunk_);
2040 
2041  int next_block = GetNextEmittedBlock();
2042  if (right_block == left_block) {
2043  EmitGoto(left_block);
2044  } else if (left_block == next_block) {
2045  __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2046  NegateCondition(condition), src1, src2);
2047  } else if (right_block == next_block) {
2048  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2049  condition, src1, src2);
2050  } else {
2051  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2052  condition, src1, src2);
2053  __ Branch(chunk_->GetAssemblyLabel(right_block));
2054  }
2055 }
2056 
2057 
2058 template<class InstrType>
2059 void LCodeGen::EmitFalseBranch(InstrType instr,
2060  Condition condition,
2061  Register src1,
2062  const Operand& src2) {
2063  int false_block = instr->FalseDestination(chunk_);
2064  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2065 }
2066 
2067 
2068 template<class InstrType>
2069 void LCodeGen::EmitFalseBranchF(InstrType instr,
2070  Condition condition,
2071  FPURegister src1,
2072  FPURegister src2) {
2073  int false_block = instr->FalseDestination(chunk_);
2074  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2075  condition, src1, src2);
2076 }
2077 
2078 
2079 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2080  __ stop("LDebugBreak");
2081 }
2082 
2083 
2084 void LCodeGen::DoBranch(LBranch* instr) {
2085  Representation r = instr->hydrogen()->value()->representation();
2086  if (r.IsInteger32() || r.IsSmi()) {
2087  DCHECK(!info()->IsStub());
2088  Register reg = ToRegister(instr->value());
2089  EmitBranch(instr, ne, reg, Operand(zero_reg));
2090  } else if (r.IsDouble()) {
2091  DCHECK(!info()->IsStub());
2092  DoubleRegister reg = ToDoubleRegister(instr->value());
2093  // Test the double value. Zero and NaN are false.
2094  EmitBranchF(instr, nue, reg, kDoubleRegZero);
2095  } else {
2096  DCHECK(r.IsTagged());
2097  Register reg = ToRegister(instr->value());
2098  HType type = instr->hydrogen()->value()->type();
2099  if (type.IsBoolean()) {
2100  DCHECK(!info()->IsStub());
2101  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2102  EmitBranch(instr, eq, reg, Operand(at));
2103  } else if (type.IsSmi()) {
2104  DCHECK(!info()->IsStub());
2105  EmitBranch(instr, ne, reg, Operand(zero_reg));
2106  } else if (type.IsJSArray()) {
2107  DCHECK(!info()->IsStub());
2108  EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2109  } else if (type.IsHeapNumber()) {
2110  DCHECK(!info()->IsStub());
2111  DoubleRegister dbl_scratch = double_scratch0();
2112  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2113  // Test the double value. Zero and NaN are false.
2114  EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
2115  } else if (type.IsString()) {
2116  DCHECK(!info()->IsStub());
2117  __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2118  EmitBranch(instr, ne, at, Operand(zero_reg));
2119  } else {
2120  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2121  // Avoid deopts in the case where we've never executed this path before.
2122  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2123 
2124  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2125  // undefined -> false.
2126  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2127  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2128  }
2129  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2130  // Boolean -> its value.
2131  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2132  __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2133  __ LoadRoot(at, Heap::kFalseValueRootIndex);
2134  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2135  }
2136  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2137  // 'null' -> false.
2138  __ LoadRoot(at, Heap::kNullValueRootIndex);
2139  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2140  }
2141 
2142  if (expected.Contains(ToBooleanStub::SMI)) {
2143  // Smis: 0 -> false, all other -> true.
2144  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2145  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2146  } else if (expected.NeedsMap()) {
2147  // If we need a map later and have a Smi -> deopt.
2148  __ SmiTst(reg, at);
2149  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
2150  }
2151 
2152  const Register map = scratch0();
2153  if (expected.NeedsMap()) {
2154  __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2155  if (expected.CanBeUndetectable()) {
2156  // Undetectable -> false.
2157  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2158  __ And(at, at, Operand(1 << Map::kIsUndetectable));
2159  __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2160  }
2161  }
2162 
2163  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2164  // spec object -> true.
2165  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2166  __ Branch(instr->TrueLabel(chunk_),
2167  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2168  }
2169 
2170  if (expected.Contains(ToBooleanStub::STRING)) {
2171  // String value -> false iff empty.
2172  Label not_string;
2173  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2174  __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2175  __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2176  __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2177  __ Branch(instr->FalseLabel(chunk_));
2178  __ bind(&not_string);
2179  }
2180 
2181  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2182  // Symbol value -> true.
2183  const Register scratch = scratch1();
2184  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2185  __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2186  }
2187 
2188  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2189  // heap number -> false iff +0, -0, or NaN.
2190  DoubleRegister dbl_scratch = double_scratch0();
2191  Label not_heap_number;
2192  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2193  __ Branch(&not_heap_number, ne, map, Operand(at));
2194  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2195  __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2196  ne, dbl_scratch, kDoubleRegZero);
2197  // Falls through if dbl_scratch == 0.
2198  __ Branch(instr->FalseLabel(chunk_));
2199  __ bind(&not_heap_number);
2200  }
2201 
2202  if (!expected.IsGeneric()) {
2203  // We've seen something for the first time -> deopt.
2204  // This can only happen if we are not generic already.
2205  DeoptimizeIf(al, instr, "unexpected object", zero_reg,
2206  Operand(zero_reg));
2207  }
2208  }
2209  }
2210 }
2211 
2212 
2213 void LCodeGen::EmitGoto(int block) {
2214  if (!IsNextEmittedBlock(block)) {
2215  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2216  }
2217 }
2218 
2219 
2220 void LCodeGen::DoGoto(LGoto* instr) {
2221  EmitGoto(instr->block_id());
2222 }
2223 
2224 
2225 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2226  Condition cond = kNoCondition;
2227  switch (op) {
2228  case Token::EQ:
2229  case Token::EQ_STRICT:
2230  cond = eq;
2231  break;
2232  case Token::NE:
2233  case Token::NE_STRICT:
2234  cond = ne;
2235  break;
2236  case Token::LT:
2237  cond = is_unsigned ? lo : lt;
2238  break;
2239  case Token::GT:
2240  cond = is_unsigned ? hi : gt;
2241  break;
2242  case Token::LTE:
2243  cond = is_unsigned ? ls : le;
2244  break;
2245  case Token::GTE:
2246  cond = is_unsigned ? hs : ge;
2247  break;
2248  case Token::IN:
2249  case Token::INSTANCEOF:
2250  default:
2251  UNREACHABLE();
2252  }
2253  return cond;
2254 }
2255 
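The unsigned condition codes above (lo/hi/ls/hs) matter because the same bit pattern orders differently under signed and unsigned comparison, which is why uint32-marked values select the unsigned set. For example:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t a = -1;  // bit pattern 0xFFFFFFFF
      int32_t b = 1;
      std::printf("signed   a < b: %d\n", a < b);  // 1 (lt)
      std::printf("unsigned a < b: %d\n",
                  static_cast<uint32_t>(a) < static_cast<uint32_t>(b));  // 0 (lo)
      return 0;
    }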
2256 
2257 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2258  LOperand* left = instr->left();
2259  LOperand* right = instr->right();
2260  bool is_unsigned =
2261  instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2262  instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2263  Condition cond = TokenToCondition(instr->op(), is_unsigned);
2264 
2265  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2266  // We can statically evaluate the comparison.
2267  double left_val = ToDouble(LConstantOperand::cast(left));
2268  double right_val = ToDouble(LConstantOperand::cast(right));
2269  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2270  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2271  EmitGoto(next_block);
2272  } else {
2273  if (instr->is_double()) {
2274  // Compare left and right as doubles and load the
2275  // resulting flags into the normal status register.
2276  FPURegister left_reg = ToDoubleRegister(left);
2277  FPURegister right_reg = ToDoubleRegister(right);
2278 
2279  // If a NaN is involved, i.e. the result is unordered,
2280  // jump to false block label.
2281  __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2282  left_reg, right_reg);
2283 
2284  EmitBranchF(instr, cond, left_reg, right_reg);
2285  } else {
2286  Register cmp_left;
2287  Operand cmp_right = Operand((int64_t)0);
2288  if (right->IsConstantOperand()) {
2289  int32_t value = ToInteger32(LConstantOperand::cast(right));
2290  if (instr->hydrogen_value()->representation().IsSmi()) {
2291  cmp_left = ToRegister(left);
2292  cmp_right = Operand(Smi::FromInt(value));
2293  } else {
2294  cmp_left = ToRegister(left);
2295  cmp_right = Operand(value);
2296  }
2297  } else if (left->IsConstantOperand()) {
2298  int32_t value = ToInteger32(LConstantOperand::cast(left));
2299  if (instr->hydrogen_value()->representation().IsSmi()) {
2300  cmp_left = ToRegister(right);
2301  cmp_right = Operand(Smi::FromInt(value));
2302  } else {
2303  cmp_left = ToRegister(right);
2304  cmp_right = Operand(value);
2305  }
2306  // We commuted the operands, so commute the condition.
2307  cond = CommuteCondition(cond);
2308  } else {
2309  cmp_left = ToRegister(left);
2310  cmp_right = Operand(ToRegister(right));
2311  }
2312 
2313  EmitBranch(instr, cond, cmp_left, cmp_right);
2314  }
2315  }
2316 }
2317 
2318 
2319 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2320  Register left = ToRegister(instr->left());
2321  Register right = ToRegister(instr->right());
2322 
2323  EmitBranch(instr, eq, left, Operand(right));
2324 }
2325 
2326 
2327 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2328  if (instr->hydrogen()->representation().IsTagged()) {
2329  Register input_reg = ToRegister(instr->object());
2330  __ li(at, Operand(factory()->the_hole_value()));
2331  EmitBranch(instr, eq, input_reg, Operand(at));
2332  return;
2333  }
2334 
2335  DoubleRegister input_reg = ToDoubleRegister(instr->object());
2336  EmitFalseBranchF(instr, eq, input_reg, input_reg);
2337 
2338  Register scratch = scratch0();
2339  __ FmoveHigh(scratch, input_reg);
2340  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2341 }
2342 
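The hole test above exploits the fact that the hole is stored as one specific NaN bit pattern: anything equal to itself is not a NaN and is ruled out first, after which only the upper 32 bits need comparing against kHoleNanUpper32. A sketch of extracting those upper bits (the FmoveHigh above); the hole's exact payload is V8-internal and not reproduced here:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    static uint32_t UpperHalf(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // host-side stand-in for FmoveHigh
      return static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      std::printf("NaN != NaN: %d, upper bits: 0x%08x\n",
                  nan != nan, UpperHalf(nan));
      return 0;
    }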
2343 
2344 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2345  Representation rep = instr->hydrogen()->value()->representation();
2346  DCHECK(!rep.IsInteger32());
2347  Register scratch = ToRegister(instr->temp());
2348 
2349  if (rep.IsDouble()) {
2350  DoubleRegister value = ToDoubleRegister(instr->value());
2351  EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2352  __ FmoveHigh(scratch, value);
2353  // Only use the low 32 bits of the value.
2354  __ dsll32(scratch, scratch, 0);
2355  __ dsrl32(scratch, scratch, 0);
2356  __ li(at, 0x80000000);
2357  } else {
2358  Register value = ToRegister(instr->value());
2359  __ CheckMap(value,
2360  scratch,
2361  Heap::kHeapNumberMapRootIndex,
2362  instr->FalseLabel(chunk()),
2363  DO_SMI_CHECK);
2364  __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2365  EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2366  __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2367  __ mov(at, zero_reg);
2368  }
2369  EmitBranch(instr, eq, scratch, Operand(at));
2370 }
2371 
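-0 compares equal to +0, so the code above has to inspect the sign bit directly: for doubles it checks the upper 32 bits of the bit pattern against 0x80000000, and for heap numbers it also requires a zero mantissa word. A host-side sketch of that encoding:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double minus_zero = -0.0;
      uint64_t bits;
      std::memcpy(&bits, &minus_zero, sizeof(bits));
      // Upper word holds only the sign bit; the lower word is 0.
      std::printf("upper: 0x%08x lower: 0x%08x\n",
                  static_cast<uint32_t>(bits >> 32),
                  static_cast<uint32_t>(bits & 0xFFFFFFFFu));
      return 0;
    }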
2372 
2373 Condition LCodeGen::EmitIsObject(Register input,
2374  Register temp1,
2375  Register temp2,
2376  Label* is_not_object,
2377  Label* is_object) {
2378  __ JumpIfSmi(input, is_not_object);
2379 
2380  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2381  __ Branch(is_object, eq, input, Operand(temp2));
2382 
2383  // Load map.
2384  __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2385  // Undetectable objects behave like undefined.
2386  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2387  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2388  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2389 
2390  // Load instance type and check that it is in object type range.
2391  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2392  __ Branch(is_not_object,
2393  lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2394 
2395  return le;
2396 }
2397 
2398 
2399 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2400  Register reg = ToRegister(instr->value());
2401  Register temp1 = ToRegister(instr->temp());
2402  Register temp2 = scratch0();
2403 
2404  Condition true_cond =
2405  EmitIsObject(reg, temp1, temp2,
2406  instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2407 
2408  EmitBranch(instr, true_cond, temp2,
2409  Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2410 }
2411 
2412 
2413 Condition LCodeGen::EmitIsString(Register input,
2414  Register temp1,
2415  Label* is_not_string,
2416  SmiCheck check_needed = INLINE_SMI_CHECK) {
2417  if (check_needed == INLINE_SMI_CHECK) {
2418  __ JumpIfSmi(input, is_not_string);
2419  }
2420  __ GetObjectType(input, temp1, temp1);
2421 
2422  return lt;
2423 }
2424 
2425 
2426 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2427  Register reg = ToRegister(instr->value());
2428  Register temp1 = ToRegister(instr->temp());
2429 
2430  SmiCheck check_needed =
2431  instr->hydrogen()->value()->type().IsHeapObject()
2432  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2433  Condition true_cond =
2434  EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2435 
2436  EmitBranch(instr, true_cond, temp1,
2437  Operand(FIRST_NONSTRING_TYPE));
2438 }
2439 
2440 
2441 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2442  Register input_reg = EmitLoadRegister(instr->value(), at);
2443  __ And(at, input_reg, kSmiTagMask);
2444  EmitBranch(instr, eq, at, Operand(zero_reg));
2445 }
2446 
2447 
2448 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2449  Register input = ToRegister(instr->value());
2450  Register temp = ToRegister(instr->temp());
2451 
2452  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2453  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2454  }
2455  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2456  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2457  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2458  EmitBranch(instr, ne, at, Operand(zero_reg));
2459 }
2460 
2461 
2462 Condition LCodeGen::ComputeCompareCondition(Token::Value op) {
2463  switch (op) {
2464  case Token::EQ_STRICT:
2465  case Token::EQ:
2466  return eq;
2467  case Token::LT:
2468  return lt;
2469  case Token::GT:
2470  return gt;
2471  case Token::LTE:
2472  return le;
2473  case Token::GTE:
2474  return ge;
2475  default:
2476  UNREACHABLE();
2477  return kNoCondition;
2478  }
2479 }
2480 
2481 
2482 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2483  DCHECK(ToRegister(instr->context()).is(cp));
2484  Token::Value op = instr->op();
2485 
2486  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2487  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2488 
2489  Condition condition = ComputeCompareCondition(op);
2490 
2491  EmitBranch(instr, condition, v0, Operand(zero_reg));
2492 }
2493 
2494 
2495 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2496  InstanceType from = instr->from();
2497  InstanceType to = instr->to();
2498  if (from == FIRST_TYPE) return to;
2499  DCHECK(from == to || to == LAST_TYPE);
2500  return from;
2501 }
2502 
2503 
2504 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2505  InstanceType from = instr->from();
2506  InstanceType to = instr->to();
2507  if (from == to) return eq;
2508  if (to == LAST_TYPE) return hs;
2509  if (from == FIRST_TYPE) return ls;
2510  UNREACHABLE();
2511  return eq;
2512 }
2513 
2514 
2515 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2516  Register scratch = scratch0();
2517  Register input = ToRegister(instr->value());
2518 
2519  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2520  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2521  }
2522 
2523  __ GetObjectType(input, scratch, scratch);
2524  EmitBranch(instr,
2525  BranchCondition(instr->hydrogen()),
2526  scratch,
2527  Operand(TestType(instr->hydrogen())));
2528 }
2529 
2530 
2531 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2532  Register input = ToRegister(instr->value());
2533  Register result = ToRegister(instr->result());
2534 
2535  __ AssertString(input);
2536 
2537  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
2538  __ IndexFromHash(result, result);
2539 }
2540 
2541 
2542 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2543  LHasCachedArrayIndexAndBranch* instr) {
2544  Register input = ToRegister(instr->value());
2545  Register scratch = scratch0();
2546 
2547  __ lwu(scratch,
2548  FieldMemOperand(input, String::kHashFieldOffset));
2549  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2550  EmitBranch(instr, eq, at, Operand(zero_reg));
2551 }
2552 
2553 
2554 // Branches to a label or falls through with the answer in flags. Trashes
2555 // the temp registers, but not the input.
2556 void LCodeGen::EmitClassOfTest(Label* is_true,
2557  Label* is_false,
2558  Handle<String>class_name,
2559  Register input,
2560  Register temp,
2561  Register temp2) {
2562  DCHECK(!input.is(temp));
2563  DCHECK(!input.is(temp2));
2564  DCHECK(!temp.is(temp2));
2565 
2566  __ JumpIfSmi(input, is_false);
2567 
2568  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2569  // Assuming the following assertions, we can use the same compares to test
2570  // for both being a function type and being in the object type range.
2571  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2572  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2573  FIRST_SPEC_OBJECT_TYPE + 1);
2574  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2575  LAST_SPEC_OBJECT_TYPE - 1);
2576  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2577 
2578  __ GetObjectType(input, temp, temp2);
2579  __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2580  __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2581  __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2582  } else {
2583  // Faster code path to avoid two compares: subtract lower bound from the
2584  // actual type and do a signed compare with the width of the type range.
2585  __ GetObjectType(input, temp, temp2);
2586  __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2587  __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2588  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2589  }
2590 
2591  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2592  // Check if the constructor in the map is a function.
2593  __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2594 
2595  // Objects with a non-function constructor have class 'Object'.
2596  __ GetObjectType(temp, temp2, temp2);
2597  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2598  __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2599  } else {
2600  __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2601  }
2602 
2603  // temp now contains the constructor function. Grab the
2604  // instance class name from there.
2605  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2606  __ ld(temp, FieldMemOperand(temp,
2607  SharedFunctionInfo::kInstanceClassNameOffset));
2608  // The class name we are testing against is internalized since it's a literal.
2609  // The name in the constructor is internalized because of the way the context
2610  // is booted. This routine isn't expected to work for random API-created
2611  // classes and it doesn't have to because you can't access it with natives
2612  // syntax. Since both sides are internalized it is sufficient to use an
2613  // identity comparison.
2614 
2615  // End with the address of this class_name instance in temp register.
2616  // On MIPS, the caller must do the comparison with Handle<String>class_name.
2617 }
2618 
2619 
2620 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2621  Register input = ToRegister(instr->value());
2622  Register temp = scratch0();
2623  Register temp2 = ToRegister(instr->temp());
2624  Handle<String> class_name = instr->hydrogen()->class_name();
2625 
2626  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2627  class_name, input, temp, temp2);
2628 
2629  EmitBranch(instr, eq, temp, Operand(class_name));
2630 }
2631 
2632 
2633 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2634  Register reg = ToRegister(instr->value());
2635  Register temp = ToRegister(instr->temp());
2636 
2637  __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2638  EmitBranch(instr, eq, temp, Operand(instr->map()));
2639 }
2640 
2641 
2642 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2643  DCHECK(ToRegister(instr->context()).is(cp));
2644  Label true_label, done;
2645  DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0.
2646  DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1.
2647  Register result = ToRegister(instr->result());
2648  DCHECK(result.is(v0));
2649 
2650  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2651  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2652 
2653  __ Branch(&true_label, eq, result, Operand(zero_reg));
2654  __ li(result, Operand(factory()->false_value()));
2655  __ Branch(&done);
2656  __ bind(&true_label);
2657  __ li(result, Operand(factory()->true_value()));
2658  __ bind(&done);
2659 }
2660 
2661 
2662 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2663  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2664  public:
2665  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2666  LInstanceOfKnownGlobal* instr)
2667  : LDeferredCode(codegen), instr_(instr) { }
2668  virtual void Generate() OVERRIDE {
2669  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2670  }
2671  virtual LInstruction* instr() OVERRIDE { return instr_; }
2672  Label* map_check() { return &map_check_; }
2673 
2674  private:
2675  LInstanceOfKnownGlobal* instr_;
2676  Label map_check_;
2677  };
2678 
2679  DeferredInstanceOfKnownGlobal* deferred;
2680  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2681 
2682  Label done, false_result;
2683  Register object = ToRegister(instr->value());
2684  Register temp = ToRegister(instr->temp());
2685  Register result = ToRegister(instr->result());
2686 
2687  DCHECK(object.is(a0));
2688  DCHECK(result.is(v0));
2689 
2691  // A Smi is not an instance of anything.
2691  __ JumpIfSmi(object, &false_result);
2692 
2693  // This is the inlined call site instanceof cache. The two occurrences of the
2694  // hole value will be patched to the last map/result pair generated by the
2695  // instanceof stub.
2696  Label cache_miss;
2697  Register map = temp;
2698  __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
2699 
2700  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2701  __ bind(deferred->map_check()); // Label for calculating code patching.
2702  // We use Factory::the_hole_value() on purpose instead of loading from the
2703  // root array to force relocation to be able to later patch with
2704  // the cached map.
2705  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2706  __ li(at, Operand(Handle<Object>(cell)));
2707  __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
2708  __ BranchShort(&cache_miss, ne, map, Operand(at));
2709  // We use Factory::the_hole_value() on purpose instead of loading from the
2710  // root array to force relocation to be able to later patch
2711  // with true or false. The distance from map check has to be constant.
2712  __ li(result, Operand(factory()->the_hole_value()));
2713  __ Branch(&done);
2714 
2715  // The inlined call site cache did not match. Check null and string before
2716  // calling the deferred code.
2717  __ bind(&cache_miss);
2718  // Null is not an instance of anything.
2719  __ LoadRoot(temp, Heap::kNullValueRootIndex);
2720  __ Branch(&false_result, eq, object, Operand(temp));
2721 
2722  // String values are not instances of anything.
2723  Condition cc = __ IsObjectStringType(object, temp, temp);
2724  __ Branch(&false_result, cc, temp, Operand(zero_reg));
2725 
2726  // Go to the deferred code.
2727  __ Branch(deferred->entry());
2728 
2729  __ bind(&false_result);
2730  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2731 
2732  // Here result has either true or false. Deferred code also produces true or
2733  // false object.
2734  __ bind(deferred->exit());
2735  __ bind(&done);
2736 }
2737 
2738 
2739 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2740  Label* map_check) {
2741  Register result = ToRegister(instr->result());
2742  DCHECK(result.is(v0));
2743 
2744  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2745  flags = static_cast<InstanceofStub::Flags>(
2746  flags | InstanceofStub::kArgsInRegisters);
2747  flags = static_cast<InstanceofStub::Flags>(
2748  flags | InstanceofStub::kCallSiteInlineCheck);
2749  flags = static_cast<InstanceofStub::Flags>(
2750  flags | InstanceofStub::kReturnTrueFalseObject);
2751  InstanceofStub stub(isolate(), flags);
2752 
2753  PushSafepointRegistersScope scope(this);
2754  LoadContextFromDeferred(instr->context());
2755 
2756  // Get the temp register reserved by the instruction. This needs to be a4,
2757  // as its slot in the pushed safepoint registers is used to communicate the
2758  // offset to the location of the map check.
2759  Register temp = ToRegister(instr->temp());
2760  DCHECK(temp.is(a4));
2761  __ li(InstanceofStub::right(), instr->function());
2762  static const int kAdditionalDelta = 13;
2763  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2764  Label before_push_delta;
2765  __ bind(&before_push_delta);
2766  {
2767  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2768  __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE);
2769  __ StoreToSafepointRegisterSlot(temp, temp);
2770  }
2771  CallCodeGeneric(stub.GetCode(),
2772  RelocInfo::CODE_TARGET,
2773  instr,
2774  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2775  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2776  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2777  // Put the result value into the result register slot and
2778  // restore all registers.
2779  __ StoreToSafepointRegisterSlot(result, result);
2780 }
2781 
2782 
2783 void LCodeGen::DoCmpT(LCmpT* instr) {
2784  DCHECK(ToRegister(instr->context()).is(cp));
2785  Token::Value op = instr->op();
2786 
2787  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2788  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2789  // On MIPS there is no need for a "no inlined smi code" marker (nop).
2790 
2791  Condition condition = ComputeCompareCondition(op);
2792  // A minor optimization that relies on LoadRoot always emitting one
2793  // instruction.
2794  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2795  Label done, check;
2796  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2797  __ bind(&check);
2798  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2799  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2800  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2801  __ bind(&done);
2802 }
2803 
2804 
2805 void LCodeGen::DoReturn(LReturn* instr) {
2806  if (FLAG_trace && info()->IsOptimizing()) {
2807  // Push the return value on the stack as the parameter.
2808  // Runtime::TraceExit returns its parameter in v0. Since we're leaving the
2809  // code managed by the register allocator and tearing down the frame, it's
2810  // safe to write to the context register.
2811  __ push(v0);
2812  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2813  __ CallRuntime(Runtime::kTraceExit, 1);
2814  }
2815  if (info()->saves_caller_doubles()) {
2816  RestoreCallerDoubles();
2817  }
2818  int no_frame_start = -1;
2819  if (NeedsEagerFrame()) {
2820  __ mov(sp, fp);
2821  no_frame_start = masm_->pc_offset();
2822  __ Pop(ra, fp);
2823  }
2824  if (instr->has_constant_parameter_count()) {
2825  int parameter_count = ToInteger32(instr->constant_parameter_count());
2826  int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2827  if (sp_delta != 0) {
2828  __ Daddu(sp, sp, Operand(sp_delta));
2829  }
2830  } else {
2831  Register reg = ToRegister(instr->parameter_count());
2832  // The argument count parameter is a smi.
2833  __ SmiUntag(reg);
2834  __ dsll(at, reg, kPointerSizeLog2);
2835  __ Daddu(sp, sp, at);
2836  }
2837 
2838  __ Jump(ra);
2839 
2840  if (no_frame_start != -1) {
2841  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2842  }
2843 }
2844 
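The stack adjustment above pops the arguments together with the implicit receiver, hence parameter_count + 1. A sketch of the arithmetic, assuming the 8-byte pointers of a 64-bit target:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;  // 64-bit target
      int parameter_count = 2;
      // The +1 accounts for the receiver pushed below the declared parameters.
      std::printf("sp_delta: %d bytes\n",
                  (parameter_count + 1) * kPointerSize);  // 24
      return 0;
    }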
2845 
2846 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2847  Register result = ToRegister(instr->result());
2848  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2849  __ ld(result, FieldMemOperand(at, Cell::kValueOffset));
2850  if (instr->hydrogen()->RequiresHoleCheck()) {
2851  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2852  DeoptimizeIf(eq, instr, "hole", result, Operand(at));
2853  }
2854 }
2855 
2856 
2857 template <class T>
2858 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2859  DCHECK(FLAG_vector_ics);
2860  Register vector = ToRegister(instr->temp_vector());
2861  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2862  __ li(vector, instr->hydrogen()->feedback_vector());
2863  // No need to allocate this register.
2864  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
2865  __ li(VectorLoadICDescriptor::SlotRegister(),
2866  Operand(Smi::FromInt(instr->hydrogen()->slot())));
2867 }
2868 
2869 
2870 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2871  DCHECK(ToRegister(instr->context()).is(cp));
2872  DCHECK(ToRegister(instr->global_object())
2873  .is(LoadDescriptor::ReceiverRegister()));
2874  DCHECK(ToRegister(instr->result()).is(v0));
2875 
2876  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2877  if (FLAG_vector_ics) {
2878  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2879  }
2880  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2881  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
2882  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2883 }
2884 
2885 
2886 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2887  Register value = ToRegister(instr->value());
2888  Register cell = scratch0();
2889 
2890  // Load the cell.
2891  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2892 
2893  // If the cell we are storing to contains the hole it could have
2894  // been deleted from the property dictionary. In that case, we need
2895  // to update the property details in the property dictionary to mark
2896  // it as no longer deleted.
2897  if (instr->hydrogen()->RequiresHoleCheck()) {
2898  // We use a temp to check the payload.
2899  Register payload = ToRegister(instr->temp());
2900  __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
2901  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2902  DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
2903  }
2904 
2905  // Store the value.
2906  __ sd(value, FieldMemOperand(cell, Cell::kValueOffset));
2907  // Cells are always rescanned, so no write barrier here.
2908 }
2909 
2910 
2911 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2912  Register context = ToRegister(instr->context());
2913  Register result = ToRegister(instr->result());
2914 
2915  __ ld(result, ContextOperand(context, instr->slot_index()));
2916  if (instr->hydrogen()->RequiresHoleCheck()) {
2917  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2918 
2919  if (instr->hydrogen()->DeoptimizesOnHole()) {
2920  DeoptimizeIf(eq, instr, "hole", result, Operand(at));
2921  } else {
2922  Label is_not_hole;
2923  __ Branch(&is_not_hole, ne, result, Operand(at));
2924  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2925  __ bind(&is_not_hole);
2926  }
2927  }
2928 }
2929 
2930 
2931 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2932  Register context = ToRegister(instr->context());
2933  Register value = ToRegister(instr->value());
2934  Register scratch = scratch0();
2935  MemOperand target = ContextOperand(context, instr->slot_index());
2936 
2937  Label skip_assignment;
2938 
2939  if (instr->hydrogen()->RequiresHoleCheck()) {
2940  __ ld(scratch, target);
2941  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2942 
2943  if (instr->hydrogen()->DeoptimizesOnHole()) {
2944  DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
2945  } else {
2946  __ Branch(&skip_assignment, ne, scratch, Operand(at));
2947  }
2948  }
2949 
2950  __ sd(value, target);
2951  if (instr->hydrogen()->NeedsWriteBarrier()) {
2952  SmiCheck check_needed =
2953  instr->hydrogen()->value()->type().IsHeapObject()
2954  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2955  __ RecordWriteContextSlot(context,
2956  target.offset(),
2957  value,
2958  scratch0(),
2959  GetRAState(),
2960  kSaveFPRegs,
2961  EMIT_REMEMBERED_SET,
2962  check_needed);
2963  }
2964 
2965  __ bind(&skip_assignment);
2966 }
2967 
2968 
2969 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2970  HObjectAccess access = instr->hydrogen()->access();
2971  int offset = access.offset();
2972  Register object = ToRegister(instr->object());
2973  if (access.IsExternalMemory()) {
2974  Register result = ToRegister(instr->result());
2975  MemOperand operand = MemOperand(object, offset);
2976  __ Load(result, operand, access.representation());
2977  return;
2978  }
2979 
2980  if (instr->hydrogen()->representation().IsDouble()) {
2981  DoubleRegister result = ToDoubleRegister(instr->result());
2982  __ ldc1(result, FieldMemOperand(object, offset));
2983  return;
2984  }
2985 
2986  Register result = ToRegister(instr->result());
2987  if (!access.IsInobject()) {
2988  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2989  object = result;
2990  }
2991 
2992  Representation representation = access.representation();
2993  if (representation.IsSmi() && SmiValuesAre32Bits() &&
2994  instr->hydrogen()->representation().IsInteger32()) {
2995  if (FLAG_debug_code) {
2996  // Verify this is really a Smi.
2997  Register scratch = scratch0();
2998  __ Load(scratch, FieldMemOperand(object, offset), representation);
2999  __ AssertSmi(scratch);
3000  }
3001 
3002  // Read int value directly from upper half of the smi.
3003  STATIC_ASSERT(kSmiTag == 0);
3004  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3005  offset += kPointerSize / 2;
3006  representation = Representation::Integer32();
3007  }
3008  __ Load(result, FieldMemOperand(object, offset), representation);
3009 }
3010 
3011 
3012 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3013  DCHECK(ToRegister(instr->context()).is(cp));
3014  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3015  DCHECK(ToRegister(instr->result()).is(v0));
3016 
3017  // Name is always in a2.
3018  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
3019  if (FLAG_vector_ics) {
3020  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3021  }
3022  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3023  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3024 }
3025 
3026 
3027 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3028  Register scratch = scratch0();
3029  Register function = ToRegister(instr->function());
3030  Register result = ToRegister(instr->result());
3031 
3032  // Get the prototype or initial map from the function.
3033  __ ld(result,
3034  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3035 
3036  // Check that the function has a prototype or an initial map.
3037  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3038  DeoptimizeIf(eq, instr, "hole", result, Operand(at));
3039 
3040  // If the function does not have an initial map, we're done.
3041  Label done;
3042  __ GetObjectType(result, scratch, scratch);
3043  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
3044 
3045  // Get the prototype from the initial map.
3046  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
3047 
3048  // All done.
3049  __ bind(&done);
3050 }
3051 
3052 
3053 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3054  Register result = ToRegister(instr->result());
3055  __ LoadRoot(result, instr->index());
3056 }
3057 
3058 
3059 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3060  Register arguments = ToRegister(instr->arguments());
3061  Register result = ToRegister(instr->result());
3062  // There are two words between the frame pointer and the last argument.
3063  // Subtracting from length accounts for one of them; add one more.
3064  if (instr->length()->IsConstantOperand()) {
3065  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3066  if (instr->index()->IsConstantOperand()) {
3067  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3068  int index = (const_length - const_index) + 1;
3069  __ ld(result, MemOperand(arguments, index * kPointerSize));
3070  } else {
3071  Register index = ToRegister(instr->index());
3072  __ li(at, Operand(const_length + 1));
3073  __ Dsubu(result, at, index);
3074  __ dsll(at, result, kPointerSizeLog2);
3075  __ Daddu(at, arguments, at);
3076  __ ld(result, MemOperand(at));
3077  }
3078  } else if (instr->index()->IsConstantOperand()) {
3079  Register length = ToRegister(instr->length());
3080  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3081  int loc = const_index - 1;
3082  if (loc != 0) {
3083  __ Dsubu(result, length, Operand(loc));
3084  __ dsll(at, result, kPointerSizeLog2);
3085  __ Daddu(at, arguments, at);
3086  __ ld(result, MemOperand(at));
3087  } else {
3088  __ dsll(at, length, kPointerSizeLog2);
3089  __ Daddu(at, arguments, at);
3090  __ ld(result, MemOperand(at));
3091  }
3092  } else {
3093  Register length = ToRegister(instr->length());
3094  Register index = ToRegister(instr->index());
3095  __ Dsubu(result, length, index);
3096  __ Daddu(result, result, 1);
3097  __ dsll(at, result, kPointerSizeLog2);
3098  __ Daddu(at, arguments, at);
3099  __ ld(result, MemOperand(at));
3100  }
3101 }
3102 
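All three paths above compute the same element address: argument `index` sits (length - index + 1) slots above the arguments pointer, where the +1 covers the second of the two words mentioned in the comment. A sketch of the offset arithmetic:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;  // 64-bit MIPS
      int length = 3;
      for (int index = 0; index < length; index++) {
        std::printf("arg %d -> offset %d\n",
                    index, (length - index + 1) * kPointerSize);
      }
      return 0;
    }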
3103 
3104 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3105  Register external_pointer = ToRegister(instr->elements());
3106  Register key = no_reg;
3107  ElementsKind elements_kind = instr->elements_kind();
3108  bool key_is_constant = instr->key()->IsConstantOperand();
3109  int constant_key = 0;
3110  if (key_is_constant) {
3111  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3112  if (constant_key & 0xF0000000) {
3113  Abort(kArrayIndexConstantValueTooBig);
3114  }
3115  } else {
3116  key = ToRegister(instr->key());
3117  }
3118  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3119  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3120  ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3121  : element_size_shift;
3122  int base_offset = instr->base_offset();
3123 
3124  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3125  elements_kind == FLOAT32_ELEMENTS ||
3126  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3127  elements_kind == FLOAT64_ELEMENTS) {
3128  int base_offset = instr->base_offset();
3129  FPURegister result = ToDoubleRegister(instr->result());
3130  if (key_is_constant) {
3131  __ Daddu(scratch0(), external_pointer,
3132  constant_key << element_size_shift);
3133  } else {
3134  if (shift_size < 0) {
3135  if (shift_size == -32) {
3136  __ dsra32(scratch0(), key, 0);
3137  } else {
3138  __ dsra(scratch0(), key, -shift_size);
3139  }
3140  } else {
3141  __ dsll(scratch0(), key, shift_size);
3142  }
3143  __ Daddu(scratch0(), scratch0(), external_pointer);
3144  }
3145  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3146  elements_kind == FLOAT32_ELEMENTS) {
3147  __ lwc1(result, MemOperand(scratch0(), base_offset));
3148  __ cvt_d_s(result, result);
3149  } else { // i.e. 64-bit float elements (EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS).
3150  __ ldc1(result, MemOperand(scratch0(), base_offset));
3151  }
3152  } else {
3153  Register result = ToRegister(instr->result());
3154  MemOperand mem_operand = PrepareKeyedOperand(
3155  key, external_pointer, key_is_constant, constant_key,
3156  element_size_shift, shift_size, base_offset);
3157  switch (elements_kind) {
3158  case EXTERNAL_INT8_ELEMENTS:
3159  case INT8_ELEMENTS:
3160  __ lb(result, mem_operand);
3161  break;
3162  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3163  case EXTERNAL_UINT8_ELEMENTS:
3164  case UINT8_ELEMENTS:
3165  case UINT8_CLAMPED_ELEMENTS:
3166  __ lbu(result, mem_operand);
3167  break;
3168  case EXTERNAL_INT16_ELEMENTS:
3169  case INT16_ELEMENTS:
3170  __ lh(result, mem_operand);
3171  break;
3172  case EXTERNAL_UINT16_ELEMENTS:
3173  case UINT16_ELEMENTS:
3174  __ lhu(result, mem_operand);
3175  break;
3176  case EXTERNAL_INT32_ELEMENTS:
3177  case INT32_ELEMENTS:
3178  __ lw(result, mem_operand);
3179  break;
3180  case EXTERNAL_UINT32_ELEMENTS:
3181  case UINT32_ELEMENTS:
3182  __ lw(result, mem_operand);
3183  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3184  DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
3185  Operand(0x80000000));
3186  }
3187  break;
3188  case FLOAT32_ELEMENTS:
3189  case FLOAT64_ELEMENTS:
3190  case EXTERNAL_FLOAT32_ELEMENTS:
3191  case EXTERNAL_FLOAT64_ELEMENTS:
3192  case FAST_DOUBLE_ELEMENTS:
3193  case FAST_ELEMENTS:
3194  case FAST_SMI_ELEMENTS:
3195  case FAST_HOLEY_DOUBLE_ELEMENTS:
3196  case FAST_HOLEY_ELEMENTS:
3197  case FAST_HOLEY_SMI_ELEMENTS:
3198  case DICTIONARY_ELEMENTS:
3199  case SLOPPY_ARGUMENTS_ELEMENTS:
3200  UNREACHABLE();
3201  break;
3202  }
3203  }
3204 }
3205 
3206 
3207 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3208  Register elements = ToRegister(instr->elements());
3209  bool key_is_constant = instr->key()->IsConstantOperand();
3210  Register key = no_reg;
3211  DoubleRegister result = ToDoubleRegister(instr->result());
3212  Register scratch = scratch0();
3213 
3214  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3215 
3216  int base_offset = instr->base_offset();
3217  if (key_is_constant) {
3218  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3219  if (constant_key & 0xF0000000) {
3220  Abort(kArrayIndexConstantValueTooBig);
3221  }
3222  base_offset += constant_key * kDoubleSize;
3223  }
3224  __ Daddu(scratch, elements, Operand(base_offset));
3225 
3226  if (!key_is_constant) {
3227  key = ToRegister(instr->key());
3228  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3229  ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3230  : element_size_shift;
3231  if (shift_size > 0) {
3232  __ dsll(at, key, shift_size);
3233  } else if (shift_size == -32) {
3234  __ dsra32(at, key, 0);
3235  } else {
3236  __ dsra(at, key, -shift_size);
3237  }
3238  __ Daddu(scratch, scratch, at);
3239  }
3240 
3241  __ ldc1(result, MemOperand(scratch));
3242 
3243  if (instr->hydrogen()->RequiresHoleCheck()) {
3244  __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3245  DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
3246  }
3247 }
3248 
3249 
3250 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3251  HLoadKeyed* hinstr = instr->hydrogen();
3252  Register elements = ToRegister(instr->elements());
3253  Register result = ToRegister(instr->result());
3254  Register scratch = scratch0();
3255  Register store_base = scratch;
3256  int offset = instr->base_offset();
3257 
3258  if (instr->key()->IsConstantOperand()) {
3259  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3260  offset += ToInteger32(const_operand) * kPointerSize;
3261  store_base = elements;
3262  } else {
3263  Register key = ToRegister(instr->key());
3264  // Even though the HLoadKeyed instruction forces the input
3265  // representation for the key to be an integer, the input gets replaced
3266  // during bound check elimination with the index argument to the bounds
3267  // check, which can be tagged, so that case must be handled here, too.
3268  if (instr->hydrogen()->key()->representation().IsSmi()) {
3269  __ SmiScale(scratch, key, kPointerSizeLog2);
3270  __ daddu(scratch, elements, scratch);
3271  } else {
3272  __ dsll(scratch, key, kPointerSizeLog2);
3273  __ daddu(scratch, elements, scratch);
3274  }
3275  }
3276 
3277  Representation representation = hinstr->representation();
3278  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3279  hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3280  DCHECK(!hinstr->RequiresHoleCheck());
3281  if (FLAG_debug_code) {
3282  Register temp = scratch1();
3283  __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
3284  __ AssertSmi(temp);
3285  }
3286 
3287  // Read int value directly from upper half of the smi.
3288  STATIC_ASSERT(kSmiTag == 0);
3289  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3290  offset += kPointerSize / 2;
3291  }
3292 
3293  __ Load(result, MemOperand(store_base, offset), representation);
3294 
3295  // Check for the hole value.
3296  if (hinstr->RequiresHoleCheck()) {
3297  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3298  __ SmiTst(result, scratch);
3299  DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
3300  } else {
3301  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3302  DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
3303  }
3304  }
3305 }
3306 
3307 
3308 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3309  if (instr->is_typed_elements()) {
3310  DoLoadKeyedExternalArray(instr);
3311  } else if (instr->hydrogen()->representation().IsDouble()) {
3312  DoLoadKeyedFixedDoubleArray(instr);
3313  } else {
3314  DoLoadKeyedFixedArray(instr);
3315  }
3316 }
3317 
3318 
3319 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3320  Register base,
3321  bool key_is_constant,
3322  int constant_key,
3323  int element_size,
3324  int shift_size,
3325  int base_offset) {
3326  if (key_is_constant) {
3327  return MemOperand(base, (constant_key << element_size) + base_offset);
3328  }
3329 
3330  if (base_offset == 0) {
3331  if (shift_size >= 0) {
3332  __ dsll(scratch0(), key, shift_size);
3333  __ Daddu(scratch0(), base, scratch0());
3334  return MemOperand(scratch0());
3335  } else {
3336  if (shift_size == -32) {
3337  __ dsra32(scratch0(), key, 0);
3338  } else {
3339  __ dsra(scratch0(), key, -shift_size);
3340  }
3341  __ Daddu(scratch0(), base, scratch0());
3342  return MemOperand(scratch0());
3343  }
3344  }
3345 
3346  if (shift_size >= 0) {
3347  __ dsll(scratch0(), key, shift_size);
3348  __ Daddu(scratch0(), base, scratch0());
3349  return MemOperand(scratch0(), base_offset);
3350  } else {
3351  if (shift_size == -32) {
3352  __ dsra32(scratch0(), key, 0);
3353  } else {
3354  __ dsra(scratch0(), key, -shift_size);
3355  }
3356  __ Daddu(scratch0(), base, scratch0());
3357  return MemOperand(scratch0(), base_offset);
3358  }
3359 }
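// A negative shift_size above means the key is still a tagged smi whose
// payload sits in the upper 32 bits: shifting right by -shift_size untags
// the key and scales it by the element size in a single instruction, with
// dsra32 handling the case where the net shift is exactly 32 bits.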
3360 
3361 
3362 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3363  DCHECK(ToRegister(instr->context()).is(cp));
3364  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3365  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3366 
3367  if (FLAG_vector_ics) {
3368  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3369  }
3370 
3371  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3372  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3373 }
3374 
3375 
3376 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3377  Register scratch = scratch0();
3378  Register temp = scratch1();
3379  Register result = ToRegister(instr->result());
3380 
3381  if (instr->hydrogen()->from_inlined()) {
3382  __ Dsubu(result, sp, 2 * kPointerSize);
3383  } else {
3384  // Check if the calling frame is an arguments adaptor frame.
3385  Label done, adapted;
3386  __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3387  __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3388  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3389 
3390  // Result is the frame pointer for the frame if not adapted and for the real
3391  // frame below the adaptor frame if adapted.
3392  __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3393  __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3394  }
3395 }
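// Movn/Movz form a branchless select: after the Xor, temp is zero exactly
// when the caller frame's context slot holds the ARGUMENTS_ADAPTOR marker,
// so result becomes fp for ordinary frames and the caller's (adaptor)
// frame pointer otherwise.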
3396 
3397 
3398 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3399  Register elem = ToRegister(instr->elements());
3400  Register result = ToRegister(instr->result());
3401 
3402  Label done;
3403 
3404  // If there is no arguments adaptor frame, the number of arguments is fixed.
3405  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
3406  __ Branch(&done, eq, fp, Operand(elem));
3407 
3408  // Arguments adaptor frame present. Get argument length from there.
3409  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3410  __ ld(result,
3411  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3412  __ SmiUntag(result);
3413 
3414  // Argument length is in result register.
3415  __ bind(&done);
3416 }
3417 
3418 
3419 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3420  Register receiver = ToRegister(instr->receiver());
3421  Register function = ToRegister(instr->function());
3422  Register result = ToRegister(instr->result());
3423  Register scratch = scratch0();
3424 
3425  // If the receiver is null or undefined, we have to pass the global
3426  // object as a receiver to normal functions. Values have to be
3427  // passed unchanged to builtins and strict-mode functions.
3428  Label global_object, result_in_receiver;
3429 
3430  if (!instr->hydrogen()->known_function()) {
3431  // Do not transform the receiver to object for strict mode functions.
3432  __ ld(scratch,
3433  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3434 
3435  // Do not transform the receiver to object for builtins.
3436  int32_t strict_mode_function_mask =
3437  1 << SharedFunctionInfo::kStrictModeBitWithinByte;
3438  int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
3439 
3440  __ lbu(at,
3441  FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
3442  __ And(at, at, Operand(strict_mode_function_mask));
3443  __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3444  __ lbu(at,
3445  FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
3446  __ And(at, at, Operand(native_mask));
3447  __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3448  }
3449 
3450  // Normal function. Replace undefined or null with global receiver.
3451  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3452  __ Branch(&global_object, eq, receiver, Operand(scratch));
3453  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3454  __ Branch(&global_object, eq, receiver, Operand(scratch));
3455 
3456  // Deoptimize if the receiver is not a JS object.
3457  __ SmiTst(receiver, scratch);
3458  DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
3459 
3460  __ GetObjectType(receiver, scratch, scratch);
3461  DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
3462  Operand(FIRST_SPEC_OBJECT_TYPE));
3463  __ Branch(&result_in_receiver);
3464 
3465  __ bind(&global_object);
3466  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
3467  __ ld(result,
3468  ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3469  __ ld(result,
3470  FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3471 
3472  if (result.is(receiver)) {
3473  __ bind(&result_in_receiver);
3474  } else {
3475  Label result_ok;
3476  __ Branch(&result_ok);
3477  __ bind(&result_in_receiver);
3478  __ mov(result, receiver);
3479  __ bind(&result_ok);
3480  }
3481 }
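// Summary of the receiver wrapping rules implemented above (ES5 10.4.3):
// strict-mode and native functions receive the value unchanged, null and
// undefined are replaced by the global proxy, and any other non-object
// receiver deoptimizes so the generic call path can box it.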
3482 
3483 
3484 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3485  Register receiver = ToRegister(instr->receiver());
3486  Register function = ToRegister(instr->function());
3487  Register length = ToRegister(instr->length());
3488  Register elements = ToRegister(instr->elements());
3489  Register scratch = scratch0();
3490  DCHECK(receiver.is(a0)); // Used for parameter count.
3491  DCHECK(function.is(a1)); // Required by InvokeFunction.
3492  DCHECK(ToRegister(instr->result()).is(v0));
3493 
3494  // Copy the arguments to this function possibly from the
3495  // adaptor frame below it.
3496  const uint32_t kArgumentsLimit = 1 * KB;
3497  DeoptimizeIf(hi, instr, "too many arguments", length,
3498  Operand(kArgumentsLimit));
3499 
3500  // Push the receiver and use the register to keep the original
3501  // number of arguments.
3502  __ push(receiver);
3503  __ Move(receiver, length);
3504  // The arguments are at a one pointer size offset from elements.
3505  __ Daddu(elements, elements, Operand(1 * kPointerSize));
3506 
3507  // Loop through the arguments pushing them onto the execution
3508  // stack.
3509  Label invoke, loop;
3510  // length is a small non-negative integer, due to the test above.
3511  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3512  __ dsll(scratch, length, kPointerSizeLog2);
3513  __ bind(&loop);
3514  __ Daddu(scratch, elements, scratch);
3515  __ ld(scratch, MemOperand(scratch));
3516  __ push(scratch);
3517  __ Dsubu(length, length, Operand(1));
3518  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3519  __ dsll(scratch, length, kPointerSizeLog2);
3520 
3521  __ bind(&invoke);
3522  DCHECK(instr->HasPointerMap());
3523  LPointerMap* pointers = instr->pointer_map();
3524  SafepointGenerator safepoint_generator(
3525  this, pointers, Safepoint::kLazyDeopt);
3526  // The number of arguments is stored in receiver which is a0, as expected
3527  // by InvokeFunction.
3528  ParameterCount actual(receiver);
3529  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3530 }
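// The copy loop keeps the dsll in the branch delay slot, so the scaled
// offset for the next iteration is computed for free on each pass; the
// kArgumentsLimit deopt above guarantees length is a small non-negative
// integer, so the countdown terminates.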
3531 
3532 
3533 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3534  LOperand* argument = instr->value();
3535  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3536  Abort(kDoPushArgumentNotImplementedForDoubleType);
3537  } else {
3538  Register argument_reg = EmitLoadRegister(argument, at);
3539  __ push(argument_reg);
3540  }
3541 }
3542 
3543 
3544 void LCodeGen::DoDrop(LDrop* instr) {
3545  __ Drop(instr->count());
3546 }
3547 
3548 
3549 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3550  Register result = ToRegister(instr->result());
3551  __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3552 }
3553 
3554 
3555 void LCodeGen::DoContext(LContext* instr) {
3556  // If there is a non-return use, the context must be moved to a register.
3557  Register result = ToRegister(instr->result());
3558  if (info()->IsOptimizing()) {
3559  __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3560  } else {
3561  // If there is no frame, the context must be in cp.
3562  DCHECK(result.is(cp));
3563  }
3564 }
3565 
3566 
3567 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3568  DCHECK(ToRegister(instr->context()).is(cp));
3569  __ li(scratch0(), instr->hydrogen()->pairs());
3570  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3571  // The context is the first argument.
3572  __ Push(cp, scratch0(), scratch1());
3573  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3574 }
3575 
3576 
3577 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3578  int formal_parameter_count,
3579  int arity,
3580  LInstruction* instr,
3581  A1State a1_state) {
3582  bool dont_adapt_arguments =
3583  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3584  bool can_invoke_directly =
3585  dont_adapt_arguments || formal_parameter_count == arity;
3586 
3587  LPointerMap* pointers = instr->pointer_map();
3588 
3589  if (can_invoke_directly) {
3590  if (a1_state == A1_UNINITIALIZED) {
3591  __ li(a1, function);
3592  }
3593 
3594  // Change context.
3595  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3596 
3597  // Set a0 to arguments count if adaptation is not needed. Assumes that a0
3598  // is available to write to at this point.
3599  if (dont_adapt_arguments) {
3600  __ li(a0, Operand(arity));
3601  }
3602 
3603  // Invoke function.
3604  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3605  __ Call(at);
3606 
3607  // Set up deoptimization.
3608  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3609  } else {
3610  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3611  ParameterCount count(arity);
3612  ParameterCount expected(formal_parameter_count);
3613  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3614  }
3615 }
3616 
3617 
3618 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3619  DCHECK(instr->context() != NULL);
3620  DCHECK(ToRegister(instr->context()).is(cp));
3621  Register input = ToRegister(instr->value());
3622  Register result = ToRegister(instr->result());
3623  Register scratch = scratch0();
3624 
3625  // Deoptimize if not a heap number.
3626  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3627  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3628  DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
3629 
3630  Label done;
3631  Register exponent = scratch0();
3632  scratch = no_reg;
3633  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3634  // Check the sign of the argument. If the argument is positive, just
3635  // return it.
3636  __ Move(result, input);
3637  __ And(at, exponent, Operand(HeapNumber::kSignMask));
3638  __ Branch(&done, eq, at, Operand(zero_reg));
3639 
3640  // Input is negative. Reverse its sign.
3641  // Preserve the value of all registers.
3642  {
3643  PushSafepointRegistersScope scope(this);
3644 
3645  // Registers were saved at the safepoint, so we can use
3646  // many scratch registers.
3647  Register tmp1 = input.is(a1) ? a0 : a1;
3648  Register tmp2 = input.is(a2) ? a0 : a2;
3649  Register tmp3 = input.is(a3) ? a0 : a3;
3650  Register tmp4 = input.is(a4) ? a0 : a4;
3651 
3652  // exponent: floating point exponent value.
3653 
3654  Label allocated, slow;
3655  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3656  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3657  __ Branch(&allocated);
3658 
3659  // Slow case: Call the runtime system to do the number allocation.
3660  __ bind(&slow);
3661 
3662  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3663  instr->context());
3664  // Set the pointer to the new heap number in tmp.
3665  if (!tmp1.is(v0))
3666  __ mov(tmp1, v0);
3667  // Restore input_reg after call to runtime.
3668  __ LoadFromSafepointRegisterSlot(input, input);
3669  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3670 
3671  __ bind(&allocated);
3672  // exponent: floating point exponent value.
3673  // tmp1: allocated heap number.
3674  __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3675  __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3676  __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3677  __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3678 
3679  __ StoreToSafepointRegisterSlot(tmp1, result);
3680  }
3681 
3682  __ bind(&done);
3683 }
3684 
3685 
3686 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3687  Register input = ToRegister(instr->value());
3688  Register result = ToRegister(instr->result());
3689  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3690  Label done;
3691  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3692  __ mov(result, input);
3693  __ dsubu(result, zero_reg, input);
3694  // Overflow if result is still negative, i.e. 0x80000000.
3695  DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
3696  __ bind(&done);
3697 }
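// The mov executes in the branch delay slot, so non-negative inputs pass
// through unchanged; negative inputs fall through to the negation. The
// deopt is meant to catch kMinInt, the one value whose absolute value is
// not representable in 32 bits (the comment above mirrors the 32-bit port).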
3698 
3699 
3700 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3701  // Class for deferred case.
3702  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3703  public:
3704  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3705  : LDeferredCode(codegen), instr_(instr) { }
3706  virtual void Generate() OVERRIDE {
3707  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3708  }
3709  virtual LInstruction* instr() OVERRIDE { return instr_; }
3710  private:
3711  LMathAbs* instr_;
3712  };
3713 
3714  Representation r = instr->hydrogen()->value()->representation();
3715  if (r.IsDouble()) {
3716  FPURegister input = ToDoubleRegister(instr->value());
3717  FPURegister result = ToDoubleRegister(instr->result());
3718  __ abs_d(result, input);
3719  } else if (r.IsSmiOrInteger32()) {
3720  EmitIntegerMathAbs(instr);
3721  } else {
3722  // Representation is tagged.
3723  DeferredMathAbsTaggedHeapNumber* deferred =
3724  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3725  Register input = ToRegister(instr->value());
3726  // Smi check.
3727  __ JumpIfNotSmi(input, deferred->entry());
3728  // If smi, handle it directly.
3729  EmitIntegerMathAbs(instr);
3730  __ bind(deferred->exit());
3731  }
3732 }
3733 
3734 
3735 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3736  DoubleRegister input = ToDoubleRegister(instr->value());
3737  Register result = ToRegister(instr->result());
3738  Register scratch1 = scratch0();
3739  Register except_flag = ToRegister(instr->temp());
3740 
3741  __ EmitFPUTruncate(kRoundToMinusInf,
3742  result,
3743  input,
3744  scratch1,
3745  double_scratch0(),
3746  except_flag);
3747 
3748  // Deopt if the operation did not succeed.
3749  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
3750  Operand(zero_reg));
3751 
3752  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3753  // Test for -0.
3754  Label done;
3755  __ Branch(&done, ne, result, Operand(zero_reg));
3756  __ mfhc1(scratch1, input); // Get exponent/sign bits.
3757  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3758  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
3759  __ bind(&done);
3760  }
3761 }
3762 
3763 
3764 void LCodeGen::DoMathRound(LMathRound* instr) {
3765  DoubleRegister input = ToDoubleRegister(instr->value());
3766  Register result = ToRegister(instr->result());
3767  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3768  Register scratch = scratch0();
3769  Label done, check_sign_on_zero;
3770 
3771  // Extract exponent bits.
3772  __ mfhc1(result, input);
3773  __ Ext(scratch,
3774  result,
3775  HeapNumber::kExponentShift,
3776  HeapNumber::kExponentBits);
3777 
3778  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3779  Label skip1;
3780  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3781  __ mov(result, zero_reg);
3782  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3783  __ Branch(&check_sign_on_zero);
3784  } else {
3785  __ Branch(&done);
3786  }
3787  __ bind(&skip1);
3788 
3789  // The following conversion will not work with numbers
3790  // outside of ]-2^32, 2^32[.
3791  DeoptimizeIf(ge, instr, "overflow", scratch,
3792  Operand(HeapNumber::kExponentBias + 32));
3793 
3794  // Save the original sign for later comparison.
3795  __ And(scratch, result, Operand(HeapNumber::kSignMask));
3796 
3797  __ Move(double_scratch0(), 0.5);
3798  __ add_d(double_scratch0(), input, double_scratch0());
3799 
3800  // Check sign of the result: if the sign changed, the input
3801  // value was in ]-0.5, 0[ and the result should be -0.
3802  __ mfhc1(result, double_scratch0());
3803  // mfhc1 sign-extends, clear the upper bits.
3804  __ dsll32(result, result, 0);
3805  __ dsrl32(result, result, 0);
3806  __ Xor(result, result, Operand(scratch));
3807  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3808  // ARM uses 'mi' here, which is 'lt'
3809  DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
3810  } else {
3811  Label skip2;
3812  // ARM uses 'mi' here, which is 'lt'
3813  // Negating it results in 'ge'
3814  __ Branch(&skip2, ge, result, Operand(zero_reg));
3815  __ mov(result, zero_reg);
3816  __ Branch(&done);
3817  __ bind(&skip2);
3818  }
3819 
3820  Register except_flag = scratch;
3821  __ EmitFPUTruncate(kRoundToMinusInf,
3822  result,
3823  double_scratch0(),
3824  at,
3825  double_scratch1,
3826  except_flag);
3827 
3828  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
3829  Operand(zero_reg));
3830 
3831  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3832  // Test for -0.
3833  __ Branch(&done, ne, result, Operand(zero_reg));
3834  __ bind(&check_sign_on_zero);
3835  __ mfhc1(scratch, input); // Get exponent/sign bits.
3836  __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3837  DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
3838  }
3839  __ bind(&done);
3840 }
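// Math.round is implemented here as floor(x + 0.5) with explicit fix-ups:
// magnitudes below 0.5 short-circuit to +/-0, magnitudes of 2^32 or more
// deoptimize, and the sign of the biased sum is re-checked so inputs in
// ]-0.5, 0[ yield -0 (or deoptimize when -0 must bail out).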
3841 
3842 
3843 void LCodeGen::DoMathFround(LMathFround* instr) {
3844  DoubleRegister input = ToDoubleRegister(instr->value());
3845  DoubleRegister result = ToDoubleRegister(instr->result());
3846  __ cvt_s_d(result, input);
3847  __ cvt_d_s(result, result);
3848 }
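// Math.fround via the classic double->single->double round trip: cvt_s_d
// performs the IEEE narrowing (rounding, overflow to infinity), and
// cvt_d_s widens the result back losslessly, so the register ends up
// holding exactly the nearest float32 value.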
3849 
3850 
3851 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3852  DoubleRegister input = ToDoubleRegister(instr->value());
3853  DoubleRegister result = ToDoubleRegister(instr->result());
3854  __ sqrt_d(result, input);
3855 }
3856 
3857 
3858 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3859  DoubleRegister input = ToDoubleRegister(instr->value());
3860  DoubleRegister result = ToDoubleRegister(instr->result());
3861  DoubleRegister temp = ToDoubleRegister(instr->temp());
3862 
3863  DCHECK(!input.is(result));
3864 
3865  // Note that according to ECMA-262 15.8.2.13:
3866  // Math.pow(-Infinity, 0.5) == Infinity
3867  // Math.sqrt(-Infinity) == NaN
3868  Label done;
3869  __ Move(temp, -V8_INFINITY);
3870  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3871  // Set up Infinity in the delay slot.
3872  // result is overwritten if the branch is not taken.
3873  __ neg_d(result, temp);
3874 
3875  // Add +0 to convert -0 to +0.
3876  __ add_d(result, input, kDoubleRegZero);
3877  __ sqrt_d(result, result);
3878  __ bind(&done);
3879 }
3880 
3881 
3882 void LCodeGen::DoPower(LPower* instr) {
3883  Representation exponent_type = instr->hydrogen()->right()->representation();
3884  // Having marked this as a call, we can use any registers.
3885  // Just make sure that the input/output registers are the expected ones.
3886  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3887  DCHECK(!instr->right()->IsDoubleRegister() ||
3888  ToDoubleRegister(instr->right()).is(f4));
3889  DCHECK(!instr->right()->IsRegister() ||
3890  ToRegister(instr->right()).is(tagged_exponent));
3891  DCHECK(ToDoubleRegister(instr->left()).is(f2));
3892  DCHECK(ToDoubleRegister(instr->result()).is(f0));
3893 
3894  if (exponent_type.IsSmi()) {
3895  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3896  __ CallStub(&stub);
3897  } else if (exponent_type.IsTagged()) {
3898  Label no_deopt;
3899  __ JumpIfSmi(tagged_exponent, &no_deopt);
3900  DCHECK(!a7.is(tagged_exponent));
3901  __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3902  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3903  DeoptimizeIf(ne, instr, "not a heap number", a7, Operand(at));
3904  __ bind(&no_deopt);
3905  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3906  __ CallStub(&stub);
3907  } else if (exponent_type.IsInteger32()) {
3908  MathPowStub stub(isolate(), MathPowStub::INTEGER);
3909  __ CallStub(&stub);
3910  } else {
3911  DCHECK(exponent_type.IsDouble());
3912  MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3913  __ CallStub(&stub);
3914  }
3915 }
3916 
3917 
3918 void LCodeGen::DoMathExp(LMathExp* instr) {
3919  DoubleRegister input = ToDoubleRegister(instr->value());
3920  DoubleRegister result = ToDoubleRegister(instr->result());
3921  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3922  DoubleRegister double_scratch2 = double_scratch0();
3923  Register temp1 = ToRegister(instr->temp1());
3924  Register temp2 = ToRegister(instr->temp2());
3925 
3926  MathExpGenerator::EmitMathExp(
3927  masm(), input, result, double_scratch1, double_scratch2,
3928  temp1, temp2, scratch0());
3929 }
3930 
3931 
3932 void LCodeGen::DoMathLog(LMathLog* instr) {
3933  __ PrepareCallCFunction(0, 1, scratch0());
3934  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3935  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3936  0, 1);
3937  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3938 }
3939 
3940 
3941 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3942  Register input = ToRegister(instr->value());
3943  Register result = ToRegister(instr->result());
3944  __ Clz(result, input);
3945 }
3946 
3947 
3948 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3949  DCHECK(ToRegister(instr->context()).is(cp));
3950  DCHECK(ToRegister(instr->function()).is(a1));
3951  DCHECK(instr->HasPointerMap());
3952 
3953  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3954  if (known_function.is_null()) {
3955  LPointerMap* pointers = instr->pointer_map();
3956  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3957  ParameterCount count(instr->arity());
3958  __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
3959  } else {
3960  CallKnownFunction(known_function,
3961  instr->hydrogen()->formal_parameter_count(),
3962  instr->arity(),
3963  instr,
3964  A1_CONTAINS_TARGET);
3965  }
3966 }
3967 
3968 
3969 void LCodeGen::DoTailCallThroughMegamorphicCache(
3970  LTailCallThroughMegamorphicCache* instr) {
3971  Register receiver = ToRegister(instr->receiver());
3972  Register name = ToRegister(instr->name());
3973  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3974  DCHECK(name.is(LoadDescriptor::NameRegister()));
3975  DCHECK(receiver.is(a1));
3976  DCHECK(name.is(a2));
3977 
3978  Register scratch = a3;
3979  Register extra = a4;
3980  Register extra2 = a5;
3981  Register extra3 = a6;
3982 
3983  // Important for the tail-call.
3984  bool must_teardown_frame = NeedsEagerFrame();
3985 
3986  // The probe will tail call to a handler if found.
3987  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3988  must_teardown_frame, receiver, name,
3989  scratch, extra, extra2, extra3);
3990 
3991  // Tail call to miss if we ended up here.
3992  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
3993  LoadIC::GenerateMiss(masm());
3994 }
3995 
3996 
3997 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3998  DCHECK(ToRegister(instr->result()).is(v0));
3999 
4000  LPointerMap* pointers = instr->pointer_map();
4001  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4002 
4003  if (instr->target()->IsConstantOperand()) {
4004  LConstantOperand* target = LConstantOperand::cast(instr->target());
4005  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4006  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4007  __ Call(code, RelocInfo::CODE_TARGET);
4008  } else {
4009  DCHECK(instr->target()->IsRegister());
4010  Register target = ToRegister(instr->target());
4011  generator.BeforeCall(__ CallSize(target));
4012  __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4013  __ Call(target);
4014  }
4015  generator.AfterCall();
4016 }
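// For register targets, Code::kHeaderSize - kHeapObjectTag turns the tagged
// Code object pointer into the address of its first instruction; constant
// targets skip this because Call(code, CODE_TARGET) resolves the entry
// point through the relocation info instead.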
4017 
4018 
4019 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4020  DCHECK(ToRegister(instr->function()).is(a1));
4021  DCHECK(ToRegister(instr->result()).is(v0));
4022 
4023  if (instr->hydrogen()->pass_argument_count()) {
4024  __ li(a0, Operand(instr->arity()));
4025  }
4026 
4027  // Change context.
4028  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4029 
4030  // Load the code entry address
4031  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4032  __ Call(at);
4033 
4034  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4035 }
4036 
4037 
4038 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4039  DCHECK(ToRegister(instr->context()).is(cp));
4040  DCHECK(ToRegister(instr->function()).is(a1));
4041  DCHECK(ToRegister(instr->result()).is(v0));
4042 
4043  int arity = instr->arity();
4044  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4045  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4046 }
4047 
4048 
4049 void LCodeGen::DoCallNew(LCallNew* instr) {
4050  DCHECK(ToRegister(instr->context()).is(cp));
4051  DCHECK(ToRegister(instr->constructor()).is(a1));
4052  DCHECK(ToRegister(instr->result()).is(v0));
4053 
4054  __ li(a0, Operand(instr->arity()));
4055  // No cell in a2 for construct type feedback in optimized code
4056  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
4057  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4058  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4059 }
4060 
4061 
4062 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4063  DCHECK(ToRegister(instr->context()).is(cp));
4064  DCHECK(ToRegister(instr->constructor()).is(a1));
4065  DCHECK(ToRegister(instr->result()).is(v0));
4066 
4067  __ li(a0, Operand(instr->arity()));
4068  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
4069  ElementsKind kind = instr->hydrogen()->elements_kind();
4070  AllocationSiteOverrideMode override_mode =
4071  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4072  ? DISABLE_ALLOCATION_SITES
4073  : DONT_OVERRIDE;
4074 
4075  if (instr->arity() == 0) {
4076  ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4077  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4078  } else if (instr->arity() == 1) {
4079  Label done;
4080  if (IsFastPackedElementsKind(kind)) {
4081  Label packed_case;
4082  // We might need a change here,
4083  // look at the first argument.
4084  __ ld(a5, MemOperand(sp, 0));
4085  __ Branch(&packed_case, eq, a5, Operand(zero_reg));
4086 
4087  ElementsKind holey_kind = GetHoleyElementsKind(kind);
4088  ArraySingleArgumentConstructorStub stub(isolate(),
4089  holey_kind,
4090  override_mode);
4091  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4092  __ jmp(&done);
4093  __ bind(&packed_case);
4094  }
4095 
4096  ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4097  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4098  __ bind(&done);
4099  } else {
4100  ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4101  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4102  }
4103 }
4104 
4105 
4106 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4107  CallRuntime(instr->function(), instr->arity(), instr);
4108 }
4109 
4110 
4111 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4112  Register function = ToRegister(instr->function());
4113  Register code_object = ToRegister(instr->code_object());
4114  __ Daddu(code_object, code_object,
4115  Operand(Code::kHeaderSize - kHeapObjectTag));
4116  __ sd(code_object,
4117  FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4118 }
4119 
4120 
4121 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4122  Register result = ToRegister(instr->result());
4123  Register base = ToRegister(instr->base_object());
4124  if (instr->offset()->IsConstantOperand()) {
4125  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4126  __ Daddu(result, base, Operand(ToInteger32(offset)));
4127  } else {
4128  Register offset = ToRegister(instr->offset());
4129  __ Daddu(result, base, offset);
4130  }
4131 }
4132 
4133 
4134 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4135  Representation representation = instr->representation();
4136 
4137  Register object = ToRegister(instr->object());
4138  Register scratch2 = scratch1();
4139  Register scratch1 = scratch0();
4140  HObjectAccess access = instr->hydrogen()->access();
4141  int offset = access.offset();
4142  if (access.IsExternalMemory()) {
4143  Register value = ToRegister(instr->value());
4144  MemOperand operand = MemOperand(object, offset);
4145  __ Store(value, operand, representation);
4146  return;
4147  }
4148 
4149  __ AssertNotSmi(object);
4150 
4151  DCHECK(!representation.IsSmi() ||
4152  !instr->value()->IsConstantOperand() ||
4153  IsSmi(LConstantOperand::cast(instr->value())));
4154  if (representation.IsDouble()) {
4155  DCHECK(access.IsInobject());
4156  DCHECK(!instr->hydrogen()->has_transition());
4157  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4158  DoubleRegister value = ToDoubleRegister(instr->value());
4159  __ sdc1(value, FieldMemOperand(object, offset));
4160  return;
4161  }
4162 
4163  if (instr->hydrogen()->has_transition()) {
4164  Handle<Map> transition = instr->hydrogen()->transition_map();
4165  AddDeprecationDependency(transition);
4166  __ li(scratch1, Operand(transition));
4167  __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4168  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4169  Register temp = ToRegister(instr->temp());
4170  // Update the write barrier for the map field.
4171  __ RecordWriteForMap(object,
4172  scratch1,
4173  temp,
4174  GetRAState(),
4175  kSaveFPRegs);
4176  }
4177  }
4178 
4179  // Do the store.
4180  Register destination = object;
4181  if (!access.IsInobject()) {
4182  destination = scratch1;
4183  __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
4184  }
4185  Register value = ToRegister(instr->value());
4186  if (representation.IsSmi() && SmiValuesAre32Bits() &&
4187  instr->hydrogen()->value()->representation().IsInteger32()) {
4188  DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4189  if (FLAG_debug_code) {
4190  __ Load(scratch2, FieldMemOperand(destination, offset), representation);
4191  __ AssertSmi(scratch2);
4192  }
4193 
4194  // Store int value directly to upper half of the smi.
4195  offset += kPointerSize / 2;
4196  representation = Representation::Integer32();
4197  }
4198 
4199  MemOperand operand = FieldMemOperand(destination, offset);
4200  __ Store(value, operand, representation);
4201  if (instr->hydrogen()->NeedsWriteBarrier()) {
4202  // Update the write barrier for the object for in-object properties.
4203  __ RecordWriteField(destination,
4204  offset,
4205  value,
4206  scratch2,
4207  GetRAState(),
4208  kSaveFPRegs,
4209  EMIT_REMEMBERED_SET,
4210  instr->hydrogen()->SmiCheckForWriteBarrier(),
4211  instr->hydrogen()->PointersToHereCheckForValue());
4212  }
4213 }
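// RecordWriteField is the generational write barrier: it registers the
// stored slot so the GC can find old-to-new pointers. Double and smi
// stores cannot introduce heap references, which is why the double path
// returns before any barrier is emitted.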
4214 
4215 
4216 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4217  DCHECK(ToRegister(instr->context()).is(cp));
4218  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4219  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4220 
4221  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
4222  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4223  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4224 }
4225 
4226 
4227 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4228  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4229  Operand operand((int64_t)0);
4230  Register reg;
4231  if (instr->index()->IsConstantOperand()) {
4232  operand = ToOperand(instr->index());
4233  reg = ToRegister(instr->length());
4234  cc = CommuteCondition(cc);
4235  } else {
4236  reg = ToRegister(instr->index());
4237  operand = ToOperand(instr->length());
4238  }
4239  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4240  Label done;
4241  __ Branch(&done, NegateCondition(cc), reg, operand);
4242  __ stop("eliminated bounds check failed");
4243  __ bind(&done);
4244  } else {
4245  DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
4246  }
4247 }
4248 
4249 
4250 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4251  Register external_pointer = ToRegister(instr->elements());
4252  Register key = no_reg;
4253  ElementsKind elements_kind = instr->elements_kind();
4254  bool key_is_constant = instr->key()->IsConstantOperand();
4255  int constant_key = 0;
4256  if (key_is_constant) {
4257  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4258  if (constant_key & 0xF0000000) {
4259  Abort(kArrayIndexConstantValueTooBig);
4260  }
4261  } else {
4262  key = ToRegister(instr->key());
4263  }
4264  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4265  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4266  ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4267  : element_size_shift;
4268  int base_offset = instr->base_offset();
4269 
4270  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4271  elements_kind == FLOAT32_ELEMENTS ||
4272  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4273  elements_kind == FLOAT64_ELEMENTS) {
4274  Register address = scratch0();
4275  FPURegister value(ToDoubleRegister(instr->value()));
4276  if (key_is_constant) {
4277  if (constant_key != 0) {
4278  __ Daddu(address, external_pointer,
4279  Operand(constant_key << element_size_shift));
4280  } else {
4281  address = external_pointer;
4282  }
4283  } else {
4284  if (shift_size < 0) {
4285  if (shift_size == -32) {
4286  __ dsra32(address, key, 0);
4287  } else {
4288  __ dsra(address, key, -shift_size);
4289  }
4290  } else {
4291  __ dsll(address, key, shift_size);
4292  }
4293  __ Daddu(address, external_pointer, address);
4294  }
4295 
4296  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4297  elements_kind == FLOAT32_ELEMENTS) {
4298  __ cvt_s_d(double_scratch0(), value);
4299  __ swc1(double_scratch0(), MemOperand(address, base_offset));
4300  } else { // Storing doubles, not floats.
4301  __ sdc1(value, MemOperand(address, base_offset));
4302  }
4303  } else {
4304  Register value(ToRegister(instr->value()));
4305  MemOperand mem_operand = PrepareKeyedOperand(
4306  key, external_pointer, key_is_constant, constant_key,
4307  element_size_shift, shift_size,
4308  base_offset);
4309  switch (elements_kind) {
4310  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4311  case EXTERNAL_INT8_ELEMENTS:
4312  case EXTERNAL_UINT8_ELEMENTS:
4313  case UINT8_ELEMENTS:
4314  case UINT8_CLAMPED_ELEMENTS:
4315  case INT8_ELEMENTS:
4316  __ sb(value, mem_operand);
4317  break;
4318  case EXTERNAL_INT16_ELEMENTS:
4319  case EXTERNAL_UINT16_ELEMENTS:
4320  case INT16_ELEMENTS:
4321  case UINT16_ELEMENTS:
4322  __ sh(value, mem_operand);
4323  break;
4324  case EXTERNAL_INT32_ELEMENTS:
4325  case EXTERNAL_UINT32_ELEMENTS:
4326  case INT32_ELEMENTS:
4327  case UINT32_ELEMENTS:
4328  __ sw(value, mem_operand);
4329  break;
4330  case FLOAT32_ELEMENTS:
4331  case FLOAT64_ELEMENTS:
4332  case EXTERNAL_FLOAT32_ELEMENTS:
4333  case EXTERNAL_FLOAT64_ELEMENTS:
4334  case FAST_DOUBLE_ELEMENTS:
4335  case FAST_ELEMENTS:
4336  case FAST_SMI_ELEMENTS:
4337  case FAST_HOLEY_DOUBLE_ELEMENTS:
4338  case FAST_HOLEY_ELEMENTS:
4339  case FAST_HOLEY_SMI_ELEMENTS:
4340  case DICTIONARY_ELEMENTS:
4341  case SLOPPY_ARGUMENTS_ELEMENTS:
4342  UNREACHABLE();
4343  break;
4344  }
4345  }
4346 }
4347 
4348 
4349 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4350  DoubleRegister value = ToDoubleRegister(instr->value());
4351  Register elements = ToRegister(instr->elements());
4352  Register scratch = scratch0();
4353  DoubleRegister double_scratch = double_scratch0();
4354  bool key_is_constant = instr->key()->IsConstantOperand();
4355  int base_offset = instr->base_offset();
4356  Label not_nan, done;
4357 
4358  // Calculate the effective address of the slot in the array to store the
4359  // double value.
4360  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4361  if (key_is_constant) {
4362  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4363  if (constant_key & 0xF0000000) {
4364  Abort(kArrayIndexConstantValueTooBig);
4365  }
4366  __ Daddu(scratch, elements,
4367  Operand((constant_key << element_size_shift) + base_offset));
4368  } else {
4369  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4370  ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4371  : element_size_shift;
4372  __ Daddu(scratch, elements, Operand(base_offset));
4373  DCHECK((shift_size == 3) || (shift_size == -29));
4374  if (shift_size == 3) {
4375  __ dsll(at, ToRegister(instr->key()), 3);
4376  } else if (shift_size == -29) {
4377  __ dsra(at, ToRegister(instr->key()), 29);
4378  }
4379  __ Daddu(scratch, scratch, at);
4380  }
4381 
4382  if (instr->NeedsCanonicalization()) {
4383  Label is_nan;
4384  // Check for NaN. All NaNs must be canonicalized.
4385  __ BranchF(NULL, &is_nan, eq, value, value);
4386  __ Branch(&not_nan);
4387 
4388  // Only load canonical NaN if the comparison above set the overflow.
4389  __ bind(&is_nan);
4390  __ LoadRoot(at, Heap::kNanValueRootIndex);
4391  __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
4392  __ sdc1(double_scratch, MemOperand(scratch, 0));
4393  __ Branch(&done);
4394  }
4395 
4396  __ bind(&not_nan);
4397  __ sdc1(value, MemOperand(scratch, 0));
4398  __ bind(&done);
4399 }
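// NaN canonicalization: BranchF's second label is taken when the compare
// is unordered, i.e. when value is NaN, in which case the canonical NaN
// from the root list is stored instead. This keeps arbitrary NaN bit
// patterns (e.g. the hole NaN) from leaking into user-visible arrays.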
4400 
4401 
4402 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4403  Register value = ToRegister(instr->value());
4404  Register elements = ToRegister(instr->elements());
4405  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4406  : no_reg;
4407  Register scratch = scratch0();
4408  Register store_base = scratch;
4409  int offset = instr->base_offset();
4410 
4411  // Do the store.
4412  if (instr->key()->IsConstantOperand()) {
4413  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4414  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4415  offset += ToInteger32(const_operand) * kPointerSize;
4416  store_base = elements;
4417  } else {
4418  // Even though the HLoadKeyed instruction forces the input
4419  // representation for the key to be an integer, the input gets replaced
4420  // during bound check elimination with the index argument to the bounds
4421  // check, which can be tagged, so that case must be handled here, too.
4422  if (instr->hydrogen()->key()->representation().IsSmi()) {
4423  __ SmiScale(scratch, key, kPointerSizeLog2);
4424  __ daddu(store_base, elements, scratch);
4425  } else {
4426  __ dsll(scratch, key, kPointerSizeLog2);
4427  __ daddu(store_base, elements, scratch);
4428  }
4429  }
4430 
4431  Representation representation = instr->hydrogen()->value()->representation();
4432  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4433  DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4434  DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
4435  if (FLAG_debug_code) {
4436  Register temp = scratch1();
4437  __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
4438  __ AssertSmi(temp);
4439  }
4440 
4441  // Store int value directly to upper half of the smi.
4442  STATIC_ASSERT(kSmiTag == 0);
4443  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4444  offset += kPointerSize / 2;
4445  representation = Representation::Integer32();
4446  }
4447 
4448  __ Store(value, MemOperand(store_base, offset), representation);
4449 
4450  if (instr->hydrogen()->NeedsWriteBarrier()) {
4451  SmiCheck check_needed =
4452  instr->hydrogen()->value()->type().IsHeapObject()
4453  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4454  // Compute address of modified element and store it into key register.
4455  __ Daddu(key, store_base, Operand(offset));
4456  __ RecordWrite(elements,
4457  key,
4458  value,
4459  GetRAState(),
4460  kSaveFPRegs,
4461  EMIT_REMEMBERED_SET,
4462  check_needed,
4463  instr->hydrogen()->PointersToHereCheckForValue());
4464  }
4465 }
4466 
4467 
4468 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4469  // By cases: external, fast double
4470  if (instr->is_typed_elements()) {
4471  DoStoreKeyedExternalArray(instr);
4472  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4473  DoStoreKeyedFixedDoubleArray(instr);
4474  } else {
4475  DoStoreKeyedFixedArray(instr);
4476  }
4477 }
4478 
4479 
4480 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4481  DCHECK(ToRegister(instr->context()).is(cp));
4482  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4483  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4484  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4485 
4486  Handle<Code> ic =
4487  CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4488  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4489 }
4490 
4491 
4492 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4493  Register object_reg = ToRegister(instr->object());
4494  Register scratch = scratch0();
4495 
4496  Handle<Map> from_map = instr->original_map();
4497  Handle<Map> to_map = instr->transitioned_map();
4498  ElementsKind from_kind = instr->from_kind();
4499  ElementsKind to_kind = instr->to_kind();
4500 
4501  Label not_applicable;
4502  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4503  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4504 
4505  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4506  Register new_map_reg = ToRegister(instr->new_map_temp());
4507  __ li(new_map_reg, Operand(to_map));
4508  __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4509  // Write barrier.
4510  __ RecordWriteForMap(object_reg,
4511  new_map_reg,
4512  scratch,
4513  GetRAState(),
4514  kDontSaveFPRegs);
4515  } else {
4516  DCHECK(object_reg.is(a0));
4517  DCHECK(ToRegister(instr->context()).is(cp));
4518  PushSafepointRegistersScope scope(this);
4519  __ li(a1, Operand(to_map));
4520  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4521  TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4522  __ CallStub(&stub);
4523  RecordSafepointWithRegisters(
4524  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4525  }
4526  __ bind(&not_applicable);
4527 }
4528 
4529 
4530 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4531  Register object = ToRegister(instr->object());
4532  Register temp = ToRegister(instr->temp());
4533  Label no_memento_found;
4534  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4535  ne, &no_memento_found);
4536  DeoptimizeIf(al, instr, "memento found");
4537  __ bind(&no_memento_found);
4538 }
4539 
4540 
4541 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4542  DCHECK(ToRegister(instr->context()).is(cp));
4543  DCHECK(ToRegister(instr->left()).is(a1));
4544  DCHECK(ToRegister(instr->right()).is(a0));
4545  StringAddStub stub(isolate(),
4546  instr->hydrogen()->flags(),
4547  instr->hydrogen()->pretenure_flag());
4548  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4549 }
4550 
4551 
4552 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4553  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4554  public:
4555  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4556  : LDeferredCode(codegen), instr_(instr) { }
4557  virtual void Generate() OVERRIDE {
4558  codegen()->DoDeferredStringCharCodeAt(instr_);
4559  }
4560  virtual LInstruction* instr() OVERRIDE { return instr_; }
4561  private:
4562  LStringCharCodeAt* instr_;
4563  };
4564 
4565  DeferredStringCharCodeAt* deferred =
4566  new(zone()) DeferredStringCharCodeAt(this, instr);
4567  StringCharLoadGenerator::Generate(masm(),
4568  ToRegister(instr->string()),
4569  ToRegister(instr->index()),
4570  ToRegister(instr->result()),
4571  deferred->entry());
4572  __ bind(deferred->exit());
4573 }
4574 
4575 
4576 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4577  Register string = ToRegister(instr->string());
4578  Register result = ToRegister(instr->result());
4579  Register scratch = scratch0();
4580 
4581  // TODO(3095996): Get rid of this. For now, we need to make the
4582  // result register contain a valid pointer because it is already
4583  // contained in the register pointer map.
4584  __ mov(result, zero_reg);
4585 
4586  PushSafepointRegistersScope scope(this);
4587  __ push(string);
4588  // Push the index as a smi. This is safe because of the checks in
4589  // DoStringCharCodeAt above.
4590  if (instr->index()->IsConstantOperand()) {
4591  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4592  __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4593  __ push(scratch);
4594  } else {
4595  Register index = ToRegister(instr->index());
4596  __ SmiTag(index);
4597  __ push(index);
4598  }
4599  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4600  instr->context());
4601  __ AssertSmi(v0);
4602  __ SmiUntag(v0);
4603  __ StoreToSafepointRegisterSlot(v0, result);
4604 }
4605 
4606 
4607 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4608  class DeferredStringCharFromCode FINAL : public LDeferredCode {
4609  public:
4610  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4611  : LDeferredCode(codegen), instr_(instr) { }
4612  virtual void Generate() OVERRIDE {
4613  codegen()->DoDeferredStringCharFromCode(instr_);
4614  }
4615  virtual LInstruction* instr() OVERRIDE { return instr_; }
4616  private:
4617  LStringCharFromCode* instr_;
4618  };
4619 
4620  DeferredStringCharFromCode* deferred =
4621  new(zone()) DeferredStringCharFromCode(this, instr);
4622 
4623  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4624  Register char_code = ToRegister(instr->char_code());
4625  Register result = ToRegister(instr->result());
4626  Register scratch = scratch0();
4627  DCHECK(!char_code.is(result));
4628 
4629  __ Branch(deferred->entry(), hi,
4630  char_code, Operand(String::kMaxOneByteCharCode));
4631  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4632  __ dsll(scratch, char_code, kPointerSizeLog2);
4633  __ Daddu(result, result, scratch);
4634  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4635  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4636  __ Branch(deferred->entry(), eq, result, Operand(scratch));
4637  __ bind(deferred->exit());
4638 }
4639 
4640 
4641 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4642  Register char_code = ToRegister(instr->char_code());
4643  Register result = ToRegister(instr->result());
4644 
4645  // TODO(3095996): Get rid of this. For now, we need to make the
4646  // result register contain a valid pointer because it is already
4647  // contained in the register pointer map.
4648  __ mov(result, zero_reg);
4649 
4650  PushSafepointRegistersScope scope(this);
4651  __ SmiTag(char_code);
4652  __ push(char_code);
4653  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4654  __ StoreToSafepointRegisterSlot(v0, result);
4655 }
4656 
4657 
4658 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4659  LOperand* input = instr->value();
4660  DCHECK(input->IsRegister() || input->IsStackSlot());
4661  LOperand* output = instr->result();
4662  DCHECK(output->IsDoubleRegister());
4663  FPURegister single_scratch = double_scratch0().low();
4664  if (input->IsStackSlot()) {
4665  Register scratch = scratch0();
4666  __ ld(scratch, ToMemOperand(input));
4667  __ mtc1(scratch, single_scratch);
4668  } else {
4669  __ mtc1(ToRegister(input), single_scratch);
4670  }
4671  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4672 }
4673 
4674 
4675 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4676  LOperand* input = instr->value();
4677  LOperand* output = instr->result();
4678 
4679  FPURegister dbl_scratch = double_scratch0();
4680  __ mtc1(ToRegister(input), dbl_scratch);
4681  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22?
4682 }
4683 
4684 
4685 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4686  class DeferredNumberTagU FINAL : public LDeferredCode {
4687  public:
4688  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4689  : LDeferredCode(codegen), instr_(instr) { }
4690  virtual void Generate() OVERRIDE {
4691  codegen()->DoDeferredNumberTagIU(instr_,
4692  instr_->value(),
4693  instr_->temp1(),
4694  instr_->temp2(),
4695  UNSIGNED_INT32);
4696  }
4697  virtual LInstruction* instr() OVERRIDE { return instr_; }
4698  private:
4699  LNumberTagU* instr_;
4700  };
4701 
4702  Register input = ToRegister(instr->value());
4703  Register result = ToRegister(instr->result());
4704 
4705  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4706  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4707  __ SmiTag(result, input);
4708  __ bind(deferred->exit());
4709 }
4710 
4711 
4712 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4713  LOperand* value,
4714  LOperand* temp1,
4715  LOperand* temp2,
4716  IntegerSignedness signedness) {
4717  Label done, slow;
4718  Register src = ToRegister(value);
4719  Register dst = ToRegister(instr->result());
4720  Register tmp1 = scratch0();
4721  Register tmp2 = ToRegister(temp1);
4722  Register tmp3 = ToRegister(temp2);
4723  DoubleRegister dbl_scratch = double_scratch0();
4724 
4725  if (signedness == SIGNED_INT32) {
4726  // There was overflow, so bits 30 and 31 of the original integer
4727  // disagree. Try to allocate a heap number in new space and store
4728  // the value in there. If that fails, call the runtime system.
4729  if (dst.is(src)) {
4730  __ SmiUntag(src, dst);
4731  __ Xor(src, src, Operand(0x80000000));
4732  }
4733  __ mtc1(src, dbl_scratch);
4734  __ cvt_d_w(dbl_scratch, dbl_scratch);
4735  } else {
4736  __ mtc1(src, dbl_scratch);
4737  __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4738  }
4739 
4740  if (FLAG_inline_new) {
4741  __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4742  __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
4743  __ Branch(&done);
4744  }
4745 
4746  // Slow case: Call the runtime system to do the number allocation.
4747  __ bind(&slow);
4748  {
4749  // TODO(3095996): Put a valid pointer value in the stack slot where the
4750  // result register is stored, as this register is in the pointer map, but
4751  // contains an integer value.
4752  __ mov(dst, zero_reg);
4753  // Preserve the value of all registers.
4754  PushSafepointRegistersScope scope(this);
4755 
4756  // NumberTagI and NumberTagD use the context from the frame, rather than
4757  // the environment's HContext or HInlinedContext value.
4758  // They only call Runtime::kAllocateHeapNumber.
4759  // The corresponding HChange instructions are added in a phase that does
4760  // not have easy access to the local context.
4761  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4762  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4763  RecordSafepointWithRegisters(
4764  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4765  __ StoreToSafepointRegisterSlot(v0, dst);
4766  }
4767 
4768  // Done. Put the value in dbl_scratch into the value of the allocated heap
4769  // number.
4770  __ bind(&done);
4771  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4772 }
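// Slow-path contract sketch: dst is zeroed before the safepoint because it
// holds a raw integer rather than a tagged pointer, the runtime call
// returns the fresh heap number in v0, and the double value is only
// written into the box after control rejoins at the done label.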
4773 
4774 
4775 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4776  class DeferredNumberTagD FINAL : public LDeferredCode {
4777  public:
4778  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4779  : LDeferredCode(codegen), instr_(instr) { }
4780  virtual void Generate() OVERRIDE {
4781  codegen()->DoDeferredNumberTagD(instr_);
4782  }
4783  virtual LInstruction* instr() OVERRIDE { return instr_; }
4784  private:
4785  LNumberTagD* instr_;
4786  };
4787 
4788  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4789  Register scratch = scratch0();
4790  Register reg = ToRegister(instr->result());
4791  Register temp1 = ToRegister(instr->temp());
4792  Register temp2 = ToRegister(instr->temp2());
4793 
4794  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4795  if (FLAG_inline_new) {
4796  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4797  // We want the untagged address first for performance
4798  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4799  DONT_TAG_RESULT);
4800  } else {
4801  __ Branch(deferred->entry());
4802  }
4803  __ bind(deferred->exit());
4804  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4805  // Now that we have finished with the object's real address, tag it.
4806  __ Daddu(reg, reg, kHeapObjectTag);
4807 }
4808 
4809 
4810 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4811  // TODO(3095996): Get rid of this. For now, we need to make the
4812  // result register contain a valid pointer because it is already
4813  // contained in the register pointer map.
4814  Register reg = ToRegister(instr->result());
4815  __ mov(reg, zero_reg);
4816 
4817  PushSafepointRegistersScope scope(this);
4818  // NumberTagI and NumberTagD use the context from the frame, rather than
4819  // the environment's HContext or HInlinedContext value.
4820  // They only call Runtime::kAllocateHeapNumber.
4821  // The corresponding HChange instructions are added in a phase that does
4822  // not have easy access to the local context.
4823  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4824  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4825  RecordSafepointWithRegisters(
4826  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4827  __ Dsubu(v0, v0, kHeapObjectTag);
4828  __ StoreToSafepointRegisterSlot(v0, reg);
4829 }
4830 
4831 
4832 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4833  HChange* hchange = instr->hydrogen();
4834  Register input = ToRegister(instr->value());
4835  Register output = ToRegister(instr->result());
4836  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4837  hchange->value()->CheckFlag(HValue::kUint32)) {
4838  __ And(at, input, Operand(0x80000000));
4839  DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
4840  }
4841  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4842  !hchange->value()->CheckFlag(HValue::kUint32)) {
4843  __ SmiTagCheckOverflow(output, input, at);
4844  DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
4845  } else {
4846  __ SmiTag(output, input);
4847  }
4848 }
4849 
4850 
4851 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4852  Register scratch = scratch0();
4853  Register input = ToRegister(instr->value());
4854  Register result = ToRegister(instr->result());
4855  if (instr->needs_check()) {
4856  STATIC_ASSERT(kHeapObjectTag == 1);
4857  // If the input is a HeapObject, value of scratch won't be zero.
4858  __ And(scratch, input, Operand(kHeapObjectTag));
4859  __ SmiUntag(result, input);
4860  DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
4861  } else {
4862  __ SmiUntag(result, input);
4863  }
4864 }
4865 
4866 
4867 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4868  DoubleRegister result_reg,
4869  NumberUntagDMode mode) {
4870  bool can_convert_undefined_to_nan =
4871  instr->hydrogen()->can_convert_undefined_to_nan();
4872  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4873 
4874  Register scratch = scratch0();
4875  Label convert, load_smi, done;
4876  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4877  // Smi check.
4878  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4879  // Heap number map check.
4880  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4881  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4882  if (can_convert_undefined_to_nan) {
4883  __ Branch(&convert, ne, scratch, Operand(at));
4884  } else {
4885  DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
4886  }
4887  // Load heap number.
4888  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4889  if (deoptimize_on_minus_zero) {
4890  __ mfc1(at, result_reg);
4891  __ Branch(&done, ne, at, Operand(zero_reg));
4892  __ mfhc1(scratch, result_reg); // Get exponent/sign bits.
4893  DeoptimizeIf(eq, instr, "minus zero", scratch,
4894  Operand(HeapNumber::kSignMask));
4895  }
4896  __ Branch(&done);
4897  if (can_convert_undefined_to_nan) {
4898  __ bind(&convert);
4899  // Convert undefined (and hole) to NaN.
4900  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4901  DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
4902  Operand(at));
4903  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4904  __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4905  __ Branch(&done);
4906  }
4907  } else {
4908  __ SmiUntag(scratch, input_reg);
4909  DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4910  }
4911  // Smi to double register conversion
4912  __ bind(&load_smi);
4913  // scratch: untagged value of input_reg
4914  __ mtc1(scratch, result_reg);
4915  __ cvt_d_w(result_reg, result_reg);
4916  __ bind(&done);
4917 }
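// Conversion summary for EmitNumberUntagD: smis are untagged and converted
// with mtc1/cvt_d_w, heap numbers are loaded directly from their value
// field, undefined becomes NaN only when the instruction permits it, and
// every other input deoptimizes.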
4918 
4919 
4920 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4921  Register input_reg = ToRegister(instr->value());
4922  Register scratch1 = scratch0();
4923  Register scratch2 = ToRegister(instr->temp());
4924  DoubleRegister double_scratch = double_scratch0();
4925  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4926 
4927  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4928  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4929 
4930  Label done;
4931 
4932  // The input is a tagged HeapObject.
4933  // Heap number map check.
4934  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4935  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4936  // This 'at' value and scratch1 map value are used for tests in both clauses
4937  // of the if.
4938 
4939  if (instr->truncating()) {
4940  // Performs a truncating conversion of a floating point number as used by
4941  // the JS bitwise operations.
4942  Label no_heap_number, check_bools, check_false;
4943  // Check HeapNumber map.
4944  __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4945  __ mov(scratch2, input_reg); // In delay slot.
4946  __ TruncateHeapNumberToI(input_reg, scratch2);
4947  __ Branch(&done);
4948 
4949  // Check for Oddballs. Undefined/False is converted to zero and True to one
4950  // for truncating conversions.
4951  __ bind(&no_heap_number);
4952  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4953  __ Branch(&check_bools, ne, input_reg, Operand(at));
4954  DCHECK(ToRegister(instr->result()).is(input_reg));
4955  __ Branch(USE_DELAY_SLOT, &done);
4956  __ mov(input_reg, zero_reg); // In delay slot.
4957 
4958  __ bind(&check_bools);
4959  __ LoadRoot(at, Heap::kTrueValueRootIndex);
4960  __ Branch(&check_false, ne, scratch2, Operand(at));
4961  __ Branch(USE_DELAY_SLOT, &done);
4962  __ li(input_reg, Operand(1)); // In delay slot.
4963 
4964  __ bind(&check_false);
4965  __ LoadRoot(at, Heap::kFalseValueRootIndex);
4966  DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
4967  Operand(at));
4968  __ Branch(USE_DELAY_SLOT, &done);
4969  __ mov(input_reg, zero_reg); // In delay slot.
4970  } else {
4971  DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
4972 
4973  // Load the double value.
4974  __ ldc1(double_scratch,
4975  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4976 
4977  Register except_flag = scratch2;
4978  __ EmitFPUTruncate(kRoundToZero,
4979  input_reg,
4980  double_scratch,
4981  scratch1,
4982  double_scratch2,
4983  except_flag,
4984  kCheckForInexactConversion);
4985 
4986  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
4987  Operand(zero_reg));
4988 
4989  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4990  __ Branch(&done, ne, input_reg, Operand(zero_reg));
4991 
4992  __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
4993  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4994  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
4995  }
4996  }
4997  __ bind(&done);
4998 }
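
// Illustrative sketch (not part of the original file): for heap numbers the
// truncating path above implements the double part of JS ToInt32, as used
// by the bitwise operators: NaN and infinities become 0 and everything else
// wraps modulo 2^32. A host-side equivalent, assuming <cmath> and
// <cstdint> are available:
static int32_t TruncateToInt32Example(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;
  double t = std::trunc(value);             // drop the fractional part
  double m = std::fmod(t, 4294967296.0);    // wrap modulo 2^32
  if (m < 0) m += 4294967296.0;             // normalize into [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}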
4999 
5000 
5001 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5002  class DeferredTaggedToI FINAL : public LDeferredCode {
5003  public:
5004  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5005  : LDeferredCode(codegen), instr_(instr) { }
5006  virtual void Generate() OVERRIDE {
5007  codegen()->DoDeferredTaggedToI(instr_);
5008  }
5009  virtual LInstruction* instr() OVERRIDE { return instr_; }
5010  private:
5011  LTaggedToI* instr_;
5012  };
5013 
5014  LOperand* input = instr->value();
5015  DCHECK(input->IsRegister());
5016  DCHECK(input->Equals(instr->result()));
5017 
5018  Register input_reg = ToRegister(input);
5019 
5020  if (instr->hydrogen()->value()->representation().IsSmi()) {
5021  __ SmiUntag(input_reg);
5022  } else {
5023  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5024 
5025  // Let the deferred code handle the HeapObject case.
5026  __ JumpIfNotSmi(input_reg, deferred->entry());
5027 
5028  // Smi to int32 conversion.
5029  __ SmiUntag(input_reg);
5030  __ bind(deferred->exit());
5031  }
5032 }
5033 
5034 
5035 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5036  LOperand* input = instr->value();
5037  DCHECK(input->IsRegister());
5038  LOperand* result = instr->result();
5039  DCHECK(result->IsDoubleRegister());
5040 
5041  Register input_reg = ToRegister(input);
5042  DoubleRegister result_reg = ToDoubleRegister(result);
5043 
5044  HValue* value = instr->hydrogen()->value();
5045  NumberUntagDMode mode = value->representation().IsSmi()
5046  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5047 
5048  EmitNumberUntagD(instr, input_reg, result_reg, mode);
5049 }
5050 
5051 
5052 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5053  Register result_reg = ToRegister(instr->result());
5054  Register scratch1 = scratch0();
5055  DoubleRegister double_input = ToDoubleRegister(instr->value());
5056 
5057  if (instr->truncating()) {
5058  __ TruncateDoubleToI(result_reg, double_input);
5059  } else {
5060  Register except_flag = LCodeGen::scratch1();
5061 
5062  __ EmitFPUTruncate(kRoundToMinusInf,
5063  result_reg,
5064  double_input,
5065  scratch1,
5066  double_scratch0(),
5067  except_flag,
5068  kCheckForInexactConversion);
5069 
5070  // Deopt if the operation did not succeed (except_flag != 0).
5071  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
5072  Operand(zero_reg));
5073 
5074  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5075  Label done;
5076  __ Branch(&done, ne, result_reg, Operand(zero_reg));
5077  __ mfhc1(scratch1, double_input); // Get exponent/sign bits.
5078  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5079  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
5080  __ bind(&done);
5081  }
5082  }
5083 }
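
// Illustrative sketch (not part of the original file): the non-truncating
// path deopts on "lost precision or NaN", i.e. whenever the input is not
// exactly representable as an int32. The inexact check performed by
// EmitFPUTruncate corresponds to this host-side test, assuming <cmath>,
// <cstdint> and <limits> are available:
static bool DoubleIsInt32Example(double value, int32_t* out) {
  double floored = std::floor(value);  // kRoundToMinusInf, as above
  if (floored != value) return false;  // NaN or inexact -> deopt
  if (floored < std::numeric_limits<int32_t>::min() ||
      floored > std::numeric_limits<int32_t>::max()) return false;
  *out = static_cast<int32_t>(floored);
  return true;  // -0.0 still needs the separate sign-bit check above
}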
5084 
5085 
5086 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5087  Register result_reg = ToRegister(instr->result());
5088  Register scratch1 = LCodeGen::scratch0();
5089  DoubleRegister double_input = ToDoubleRegister(instr->value());
5090 
5091  if (instr->truncating()) {
5092  __ TruncateDoubleToI(result_reg, double_input);
5093  } else {
5094  Register except_flag = LCodeGen::scratch1();
5095 
5096  __ EmitFPUTruncate(kRoundToMinusInf,
5097  result_reg,
5098  double_input,
5099  scratch1,
5100  double_scratch0(),
5101  except_flag,
5102  kCheckForInexactConversion);
5103 
5104  // Deopt if the operation did not succeed (except_flag != 0).
5105  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
5106  Operand(zero_reg));
5107 
5108  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5109  Label done;
5110  __ Branch(&done, ne, result_reg, Operand(zero_reg));
5111  __ mfhc1(scratch1, double_input); // Get exponent/sign bits.
5112  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5113  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
5114  __ bind(&done);
5115  }
5116  }
5117  __ SmiTag(result_reg, result_reg);
5118 }
5119 
5120 
5121 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5122  LOperand* input = instr->value();
5123  __ SmiTst(ToRegister(input), at);
5124  DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
5125 }
5126 
5127 
5128 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5129  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5130  LOperand* input = instr->value();
5131  __ SmiTst(ToRegister(input), at);
5132  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
5133  }
5134 }
5135 
5136 
5137 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5138  Register input = ToRegister(instr->value());
5139  Register scratch = scratch0();
5140 
5141  __ GetObjectType(input, scratch, scratch);
5142 
5143  if (instr->hydrogen()->is_interval_check()) {
5144  InstanceType first;
5145  InstanceType last;
5146  instr->hydrogen()->GetCheckInterval(&first, &last);
5147 
5148  // If there is only one type in the interval check for equality.
5149  if (first == last) {
5150  DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
5151  } else {
5152  DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
5153  // Omit check for the last type.
5154  if (last != LAST_TYPE) {
5155  DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
5156  }
5157  }
5158  } else {
5159  uint8_t mask;
5160  uint8_t tag;
5161  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5162 
5163  if (base::bits::IsPowerOfTwo32(mask)) {
5164  DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5165  __ And(at, scratch, mask);
5166  DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
5167  Operand(zero_reg));
5168  } else {
5169  __ And(scratch, scratch, Operand(mask));
5170  DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
5171  }
5172  }
5173 }
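
// Illustrative sketch (not part of the original file): both non-interval
// branches above test (instance_type & mask) == tag. When mask is a single
// bit and tag is either zero or that same bit, the And result can be
// compared directly against zero, which saves loading tag as an operand.
static bool CheckMaskAndTagExample(uint8_t instance_type, uint8_t mask,
                                   uint8_t tag) {
  return (instance_type & mask) == tag;  // deopt when this is false
}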
5174 
5175 
5176 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5177  Register reg = ToRegister(instr->value());
5178  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5179  AllowDeferredHandleDereference smi_check;
5180  if (isolate()->heap()->InNewSpace(*object)) {
5181  Register reg = ToRegister(instr->value());
5182  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5183  __ li(at, Operand(Handle<Object>(cell)));
5184  __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
5185  DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
5186  } else {
5187  DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
5188  }
5189 }
5190 
5191 
5192 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5193  {
5194  PushSafepointRegistersScope scope(this);
5195  __ push(object);
5196  __ mov(cp, zero_reg);
5197  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5198  RecordSafepointWithRegisters(
5199  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5200  __ StoreToSafepointRegisterSlot(v0, scratch0());
5201  }
5202  __ SmiTst(scratch0(), at);
5203  DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
5204 }
5205 
5206 
5207 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5208  class DeferredCheckMaps FINAL : public LDeferredCode {
5209  public:
5210  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5211  : LDeferredCode(codegen), instr_(instr), object_(object) {
5212  SetExit(check_maps());
5213  }
5214  virtual void Generate() OVERRIDE {
5215  codegen()->DoDeferredInstanceMigration(instr_, object_);
5216  }
5217  Label* check_maps() { return &check_maps_; }
5218  virtual LInstruction* instr() OVERRIDE { return instr_; }
5219  private:
5220  LCheckMaps* instr_;
5221  Label check_maps_;
5222  Register object_;
5223  };
5224 
5225  if (instr->hydrogen()->IsStabilityCheck()) {
5226  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5227  for (int i = 0; i < maps->size(); ++i) {
5228  AddStabilityDependency(maps->at(i).handle());
5229  }
5230  return;
5231  }
5232 
5233  Register map_reg = scratch0();
5234  LOperand* input = instr->value();
5235  DCHECK(input->IsRegister());
5236  Register reg = ToRegister(input);
5237  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5238 
5239  DeferredCheckMaps* deferred = NULL;
5240  if (instr->hydrogen()->HasMigrationTarget()) {
5241  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5242  __ bind(deferred->check_maps());
5243  }
5244 
5245  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5246  Label success;
5247  for (int i = 0; i < maps->size() - 1; i++) {
5248  Handle<Map> map = maps->at(i).handle();
5249  __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5250  }
5251  Handle<Map> map = maps->at(maps->size() - 1).handle();
5252  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5253  if (instr->hydrogen()->HasMigrationTarget()) {
5254  __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5255  } else {
5256  DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
5257  }
5258 
5259  __ bind(&success);
5260 }
5261 
5262 
5263 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5264  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5265  Register result_reg = ToRegister(instr->result());
5266  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5267  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5268 }
5269 
5270 
5271 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5272  Register unclamped_reg = ToRegister(instr->unclamped());
5273  Register result_reg = ToRegister(instr->result());
5274  __ ClampUint8(result_reg, unclamped_reg);
5275 }
5276 
5277 
5278 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5279  Register scratch = scratch0();
5280  Register input_reg = ToRegister(instr->unclamped());
5281  Register result_reg = ToRegister(instr->result());
5282  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5283  Label is_smi, done, heap_number;
5284 
5285  // Both smi and heap number cases are handled.
5286  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5287 
5288  // Check for heap number
5289  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5290  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5291 
5292  // Check for undefined. Undefined is converted to zero for clamping
5293  // conversions.
5294  DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
5295  Operand(factory()->undefined_value()));
5296  __ mov(result_reg, zero_reg);
5297  __ jmp(&done);
5298 
5299  // Heap number
5300  __ bind(&heap_number);
5301  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5302  HeapNumber::kValueOffset));
5303  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5304  __ jmp(&done);
5305 
5306  __ bind(&is_smi);
5307  __ ClampUint8(result_reg, scratch);
5308 
5309  __ bind(&done);
5310 }
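
// Illustrative sketch (not part of the original file): the semantics of
// ClampDoubleToUint8 as used above for UINT8_CLAMPED element stores. NaN
// and non-positive inputs clamp to 0, large inputs clamp to 255, and ties
// round to even under the default FP environment; assumes <cmath> and
// <cstdint> are available:
static uint8_t ClampDoubleToUint8Example(double value) {
  if (!(value > 0.0)) return 0;   // NaN and non-positive values
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));  // nearest-even
}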
5311 
5312 
5313 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5314  DoubleRegister value_reg = ToDoubleRegister(instr->value());
5315  Register result_reg = ToRegister(instr->result());
5316  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5317  __ FmoveHigh(result_reg, value_reg);
5318  } else {
5319  __ FmoveLow(result_reg, value_reg);
5320  }
5321 }
5322 
5323 
5324 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5325  Register hi_reg = ToRegister(instr->hi());
5326  Register lo_reg = ToRegister(instr->lo());
5327  DoubleRegister result_reg = ToDoubleRegister(instr->result());
5328  __ Move(result_reg, lo_reg, hi_reg);
5329 }
5330 
5331 
5332 void LCodeGen::DoAllocate(LAllocate* instr) {
5333  class DeferredAllocate FINAL : public LDeferredCode {
5334  public:
5335  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5336  : LDeferredCode(codegen), instr_(instr) { }
5337  virtual void Generate() OVERRIDE {
5338  codegen()->DoDeferredAllocate(instr_);
5339  }
5340  virtual LInstruction* instr() OVERRIDE { return instr_; }
5341  private:
5342  LAllocate* instr_;
5343  };
5344 
5345  DeferredAllocate* deferred =
5346  new(zone()) DeferredAllocate(this, instr);
5347 
5348  Register result = ToRegister(instr->result());
5349  Register scratch = ToRegister(instr->temp1());
5350  Register scratch2 = ToRegister(instr->temp2());
5351 
5352  // Allocate memory for the object.
5353  AllocationFlags flags = TAG_OBJECT;
5354  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5355  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5356  }
5357  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5358  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5359  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5360  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5361  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5362  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5363  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5364  }
5365  if (instr->size()->IsConstantOperand()) {
5366  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5367  if (size <= Page::kMaxRegularHeapObjectSize) {
5368  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5369  } else {
5370  __ jmp(deferred->entry());
5371  }
5372  } else {
5373  Register size = ToRegister(instr->size());
5374  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5375  }
5376 
5377  __ bind(deferred->exit());
5378 
5379  if (instr->hydrogen()->MustPrefillWithFiller()) {
5380  STATIC_ASSERT(kHeapObjectTag == 1);
5381  if (instr->size()->IsConstantOperand()) {
5382  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5383  __ li(scratch, Operand(size - kHeapObjectTag));
5384  } else {
5385  __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5386  }
5387  __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5388  Label loop;
5389  __ bind(&loop);
5390  __ Dsubu(scratch, scratch, Operand(kPointerSize));
5391  __ Daddu(at, result, Operand(scratch));
5392  __ sd(scratch2, MemOperand(at));
5393  __ Branch(&loop, ge, scratch, Operand(zero_reg));
5394  }
5395 }
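
// Illustrative sketch (not part of the original file): __ Allocate above is
// inline bump-pointer allocation; the deferred entry is only reached when
// the bump would cross the space limit (or the size is not a constant small
// enough for inline handling). Roughly, assuming <cstddef> is available:
static char* BumpAllocateExample(char** top, char* limit, size_t size) {
  char* result = *top;
  if (size > static_cast<size_t>(limit - result)) {
    return nullptr;  // corresponds to branching to deferred->entry()
  }
  *top = result + size;
  return result + 1;  // TAG_OBJECT: returned pointer carries kHeapObjectTag
}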
5396 
5397 
5398 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5399  Register result = ToRegister(instr->result());
5400 
5401  // TODO(3095996): Get rid of this. For now, we need to make the
5402  // result register contain a valid pointer because it is already
5403  // contained in the register pointer map.
5404  __ mov(result, zero_reg);
5405 
5406  PushSafepointRegistersScope scope(this);
5407  if (instr->size()->IsRegister()) {
5408  Register size = ToRegister(instr->size());
5409  DCHECK(!size.is(result));
5410  __ SmiTag(size);
5411  __ push(size);
5412  } else {
5413  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5414  if (size >= 0 && size <= Smi::kMaxValue) {
5415  __ li(v0, Operand(Smi::FromInt(size)));
5416  __ Push(v0);
5417  } else {
5418  // We should never get here at runtime => abort
5419  __ stop("invalid allocation size");
5420  return;
5421  }
5422  }
5423 
5424  int flags = AllocateDoubleAlignFlag::encode(
5425  instr->hydrogen()->MustAllocateDoubleAligned());
5426  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5427  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5428  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5429  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5430  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5431  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5432  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5433  } else {
5434  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5435  }
5436  __ li(v0, Operand(Smi::FromInt(flags)));
5437  __ Push(v0);
5438 
5439  CallRuntimeFromDeferred(
5440  Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5441  __ StoreToSafepointRegisterSlot(v0, result);
5442 }
5443 
5444 
5445 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5446  DCHECK(ToRegister(instr->value()).is(a0));
5447  DCHECK(ToRegister(instr->result()).is(v0));
5448  __ push(a0);
5449  CallRuntime(Runtime::kToFastProperties, 1, instr);
5450 }
5451 
5452 
5453 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5454  DCHECK(ToRegister(instr->context()).is(cp));
5455  Label materialized;
5456  // Registers will be used as follows:
5457  // a7 = literals array.
5458  // a1 = regexp literal.
5459  // a0 = regexp literal clone.
5460  // a2 and a4-a6 are used as temporaries.
5461  int literal_offset =
5462  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5463  __ li(a7, instr->hydrogen()->literals());
5464  __ ld(a1, FieldMemOperand(a7, literal_offset));
5465  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5466  __ Branch(&materialized, ne, a1, Operand(at));
5467 
5468  // Create regexp literal using runtime function
5469  // Result will be in v0.
5470  __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5471  __ li(a5, Operand(instr->hydrogen()->pattern()));
5472  __ li(a4, Operand(instr->hydrogen()->flags()));
5473  __ Push(a7, a6, a5, a4);
5474  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5475  __ mov(a1, v0);
5476 
5477  __ bind(&materialized);
5478  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5479  Label allocated, runtime_allocate;
5480 
5481  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5482  __ jmp(&allocated);
5483 
5484  __ bind(&runtime_allocate);
5485  __ li(a0, Operand(Smi::FromInt(size)));
5486  __ Push(a1, a0);
5487  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5488  __ pop(a1);
5489 
5490  __ bind(&allocated);
5491  // Copy the content into the newly allocated memory.
5492  // (Unroll copy loop once for better throughput).
5493  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5494  __ ld(a3, FieldMemOperand(a1, i));
5495  __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
5496  __ sd(a3, FieldMemOperand(v0, i));
5497  __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
5498  }
5499  if ((size % (2 * kPointerSize)) != 0) {
5500  __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
5501  __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
5502  }
5503 }
5504 
5505 
5506 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5507  DCHECK(ToRegister(instr->context()).is(cp));
5508  // Use the fast case closure allocation code that allocates in new
5509  // space for nested functions that don't need literals cloning.
5510  bool pretenure = instr->hydrogen()->pretenure();
5511  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5512  FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5513  instr->hydrogen()->kind());
5514  __ li(a2, Operand(instr->hydrogen()->shared_info()));
5515  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5516  } else {
5517  __ li(a2, Operand(instr->hydrogen()->shared_info()));
5518  __ li(a1, Operand(pretenure ? factory()->true_value()
5519  : factory()->false_value()));
5520  __ Push(cp, a2, a1);
5521  CallRuntime(Runtime::kNewClosure, 3, instr);
5522  }
5523 }
5524 
5525 
5526 void LCodeGen::DoTypeof(LTypeof* instr) {
5527  DCHECK(ToRegister(instr->result()).is(v0));
5528  Register input = ToRegister(instr->value());
5529  __ push(input);
5530  CallRuntime(Runtime::kTypeof, 1, instr);
5531 }
5532 
5533 
5534 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5535  Register input = ToRegister(instr->value());
5536 
5537  Register cmp1 = no_reg;
5538  Operand cmp2 = Operand(no_reg);
5539 
5540  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5541  instr->FalseLabel(chunk_),
5542  input,
5543  instr->type_literal(),
5544  &cmp1,
5545  &cmp2);
5546 
5547  DCHECK(cmp1.is_valid());
5548  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5549 
5550  if (final_branch_condition != kNoCondition) {
5551  EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5552  }
5553 }
5554 
5555 
5556 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5557  Label* false_label,
5558  Register input,
5559  Handle<String> type_name,
5560  Register* cmp1,
5561  Operand* cmp2) {
5562  // This function utilizes the delay slot heavily. This is used to load
5563  // values that are always usable without depending on the type of the input
5564  // register.
5565  Condition final_branch_condition = kNoCondition;
5566  Register scratch = scratch0();
5567  Factory* factory = isolate()->factory();
5568  if (String::Equals(type_name, factory->number_string())) {
5569  __ JumpIfSmi(input, true_label);
5570  __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5571  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5572  *cmp1 = input;
5573  *cmp2 = Operand(at);
5574  final_branch_condition = eq;
5575 
5576  } else if (String::Equals(type_name, factory->string_string())) {
5577  __ JumpIfSmi(input, false_label);
5578  __ GetObjectType(input, input, scratch);
5579  __ Branch(USE_DELAY_SLOT, false_label,
5580  ge, scratch, Operand(FIRST_NONSTRING_TYPE));
5581  // input is an object so we can load the BitFieldOffset even if we take the
5582  // other branch.
5583  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5584  __ And(at, at, 1 << Map::kIsUndetectable);
5585  *cmp1 = at;
5586  *cmp2 = Operand(zero_reg);
5587  final_branch_condition = eq;
5588 
5589  } else if (String::Equals(type_name, factory->symbol_string())) {
5590  __ JumpIfSmi(input, false_label);
5591  __ GetObjectType(input, input, scratch);
5592  *cmp1 = scratch;
5593  *cmp2 = Operand(SYMBOL_TYPE);
5594  final_branch_condition = eq;
5595 
5596  } else if (String::Equals(type_name, factory->boolean_string())) {
5597  __ LoadRoot(at, Heap::kTrueValueRootIndex);
5598  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5599  __ LoadRoot(at, Heap::kFalseValueRootIndex);
5600  *cmp1 = at;
5601  *cmp2 = Operand(input);
5602  final_branch_condition = eq;
5603 
5604  } else if (String::Equals(type_name, factory->undefined_string())) {
5605  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5606  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5607  // The first instruction of JumpIfSmi is an And - it is safe in the delay
5608  // slot.
5609  __ JumpIfSmi(input, false_label);
5610  // Check for undetectable objects => true.
5611  __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5612  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5613  __ And(at, at, 1 << Map::kIsUndetectable);
5614  *cmp1 = at;
5615  *cmp2 = Operand(zero_reg);
5616  final_branch_condition = ne;
5617 
5618  } else if (String::Equals(type_name, factory->function_string())) {
5619  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5620  __ JumpIfSmi(input, false_label);
5621  __ GetObjectType(input, scratch, input);
5622  __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
5623  *cmp1 = input;
5624  *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
5625  final_branch_condition = eq;
5626 
5627  } else if (String::Equals(type_name, factory->object_string())) {
5628  __ JumpIfSmi(input, false_label);
5629  __ LoadRoot(at, Heap::kNullValueRootIndex);
5630  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5631  Register map = input;
5632  __ GetObjectType(input, map, scratch);
5633  __ Branch(false_label,
5634  lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
5635  __ Branch(USE_DELAY_SLOT, false_label,
5636  gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
5637  // map is still valid, so the BitField can be loaded in delay slot.
5638  // Check for undetectable objects => false.
5639  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
5640  __ And(at, at, 1 << Map::kIsUndetectable);
5641  *cmp1 = at;
5642  *cmp2 = Operand(zero_reg);
5643  final_branch_condition = eq;
5644 
5645  } else {
5646  *cmp1 = at;
5647  *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5648  __ Branch(false_label);
5649  }
5650 
5651  return final_branch_condition;
5652 }
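
// Summary (not part of the original file) of the typeof dispatch encoded
// above, for reference:
//   "number"    -> smi or heap number
//   "string"    -> string instance type that is not undetectable
//   "symbol"    -> SYMBOL_TYPE
//   "boolean"   -> the true or false oddball
//   "undefined" -> the undefined oddball, or any undetectable object
//   "function"  -> JS_FUNCTION_TYPE or JS_FUNCTION_PROXY_TYPE
//   "object"    -> null, or a non-callable spec object not undetectable
// Any other literal falls straight through to the false label.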
5653 
5654 
5655 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5656  Register temp1 = ToRegister(instr->temp());
5657 
5658  EmitIsConstructCall(temp1, scratch0());
5659 
5660  EmitBranch(instr, eq, temp1,
5661  Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5662 }
5663 
5664 
5665 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5666  DCHECK(!temp1.is(temp2));
5667  // Get the frame pointer for the calling frame.
5668  __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5669 
5670  // Skip the arguments adaptor frame if it exists.
5671  Label check_frame_marker;
5672  __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5673  __ Branch(&check_frame_marker, ne, temp2,
5674  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5675  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5676 
5677  // Check the marker in the calling frame.
5678  __ bind(&check_frame_marker);
5679  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5680 }
5681 
5682 
5683 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5684  if (!info()->IsStub()) {
5685  // Ensure that we have enough space after the previous lazy-bailout
5686  // instruction for patching the code here.
5687  int current_pc = masm()->pc_offset();
5688  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5689  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5690  DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5691  while (padding_size > 0) {
5692  __ nop();
5693  padding_size -= Assembler::kInstrSize;
5694  }
5695  }
5696  }
5697  last_lazy_deopt_pc_ = masm()->pc_offset();
5698 }
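
// Illustrative sketch (not part of the original file): the nop padding
// above keeps a patchable gap between consecutive lazy-deopt points, in
// whole MIPS instructions. The nop count works out to:
static int LazyDeoptPaddingExample(int current_pc, int last_lazy_deopt_pc,
                                   int space_needed, int instr_size) {
  int gap = last_lazy_deopt_pc + space_needed - current_pc;
  return gap > 0 ? gap / instr_size : 0;  // gap is a multiple of instr_size
}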
5699 
5700 
5701 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5702  last_lazy_deopt_pc_ = masm()->pc_offset();
5703  DCHECK(instr->HasEnvironment());
5704  LEnvironment* env = instr->environment();
5705  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5706  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5707 }
5708 
5709 
5710 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5711  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5712  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5713  // needed return address), even though the implementation of LAZY and EAGER is
5714  // now identical. When LAZY is eventually completely folded into EAGER, remove
5715  // the special case below.
5716  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5717  type = Deoptimizer::LAZY;
5718  }
5719 
5720  DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
5721  Operand(zero_reg));
5722 }
5723 
5724 
5725 void LCodeGen::DoDummy(LDummy* instr) {
5726  // Nothing to see here, move on!
5727 }
5728 
5729 
5730 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5731  // Nothing to see here, move on!
5732 }
5733 
5734 
5735 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5736  PushSafepointRegistersScope scope(this);
5737  LoadContextFromDeferred(instr->context());
5738  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5739  RecordSafepointWithLazyDeopt(
5740  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5741  DCHECK(instr->HasEnvironment());
5742  LEnvironment* env = instr->environment();
5743  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5744 }
5745 
5746 
5747 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5748  class DeferredStackCheck FINAL : public LDeferredCode {
5749  public:
5750  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5751  : LDeferredCode(codegen), instr_(instr) { }
5752  virtual void Generate() OVERRIDE {
5753  codegen()->DoDeferredStackCheck(instr_);
5754  }
5755  virtual LInstruction* instr() OVERRIDE { return instr_; }
5756  private:
5757  LStackCheck* instr_;
5758  };
5759 
5760  DCHECK(instr->HasEnvironment());
5761  LEnvironment* env = instr->environment();
5762  // There is no LLazyBailout instruction for stack-checks. We have to
5763  // prepare for lazy deoptimization explicitly here.
5764  if (instr->hydrogen()->is_function_entry()) {
5765  // Perform stack overflow check.
5766  Label done;
5767  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5768  __ Branch(&done, hs, sp, Operand(at));
5769  DCHECK(instr->context()->IsRegister());
5770  DCHECK(ToRegister(instr->context()).is(cp));
5771  CallCode(isolate()->builtins()->StackCheck(),
5772  RelocInfo::CODE_TARGET,
5773  instr);
5774  __ bind(&done);
5775  } else {
5776  DCHECK(instr->hydrogen()->is_backwards_branch());
5777  // Perform stack overflow check if this goto needs it before jumping.
5778  DeferredStackCheck* deferred_stack_check =
5779  new(zone()) DeferredStackCheck(this, instr);
5780  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5781  __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5782  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5783  __ bind(instr->done_label());
5784  deferred_stack_check->SetExit(instr->done_label());
5785  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5786  // Don't record a deoptimization index for the safepoint here.
5787  // This will be done explicitly when emitting call and the safepoint in
5788  // the deferred code.
5789  }
5790 }
5791 
5792 
5793 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5794  // This is a pseudo-instruction that ensures that the environment here is
5795  // properly registered for deoptimization and records the assembler's PC
5796  // offset.
5797  LEnvironment* environment = instr->environment();
5798 
5799  // If the environment were already registered, we would have no way of
5800  // backpatching it with the spill slot operands.
5801  DCHECK(!environment->HasBeenRegistered());
5802  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5803 
5804  GenerateOsrPrologue();
5805 }
5806 
5807 
5808 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5809  Register result = ToRegister(instr->result());
5810  Register object = ToRegister(instr->object());
5811  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5812  DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
5813 
5814  Register null_value = a5;
5815  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5816  DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
5817 
5818  __ And(at, object, kSmiTagMask);
5819  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
5820 
5821  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5822  __ GetObjectType(object, a1, a1);
5823  DeoptimizeIf(le, instr, "not a JavaScript object", a1,
5824  Operand(LAST_JS_PROXY_TYPE));
5825 
5826  Label use_cache, call_runtime;
5827  DCHECK(object.is(a0));
5828  __ CheckEnumCache(null_value, &call_runtime);
5829 
5830  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
5831  __ Branch(&use_cache);
5832 
5833  // Get the set of properties to enumerate.
5834  __ bind(&call_runtime);
5835  __ push(object);
5836  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5837 
5838  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5839  DCHECK(result.is(v0));
5840  __ LoadRoot(at, Heap::kMetaMapRootIndex);
5841  DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
5842  __ bind(&use_cache);
5843 }
5844 
5845 
5846 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5847  Register map = ToRegister(instr->map());
5848  Register result = ToRegister(instr->result());
5849  Label load_cache, done;
5850  __ EnumLength(result, map);
5851  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5852  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5853  __ jmp(&done);
5854 
5855  __ bind(&load_cache);
5856  __ LoadInstanceDescriptors(map, result);
5857  __ ld(result,
5858  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5859  __ ld(result,
5860  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5861  DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
5862 
5863  __ bind(&done);
5864 }
5865 
5866 
5867 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5868  Register object = ToRegister(instr->value());
5869  Register map = ToRegister(instr->map());
5870  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5871  DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
5872 }
5873 
5874 
5875 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5876  Register result,
5877  Register object,
5878  Register index) {
5879  PushSafepointRegistersScope scope(this);
5880  __ Push(object, index);
5881  __ mov(cp, zero_reg);
5882  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5883  RecordSafepointWithRegisters(
5884  instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5885  __ StoreToSafepointRegisterSlot(v0, result);
5886 }
5887 
5888 
5889 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5890  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5891  public:
5892  DeferredLoadMutableDouble(LCodeGen* codegen,
5893  LLoadFieldByIndex* instr,
5894  Register result,
5895  Register object,
5896  Register index)
5897  : LDeferredCode(codegen),
5898  instr_(instr),
5899  result_(result),
5900  object_(object),
5901  index_(index) {
5902  }
5903  virtual void Generate() OVERRIDE {
5904  codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5905  }
5906  virtual LInstruction* instr() OVERRIDE { return instr_; }
5907  private:
5908  LLoadFieldByIndex* instr_;
5909  Register result_;
5910  Register object_;
5911  Register index_;
5912  };
5913 
5914  Register object = ToRegister(instr->object());
5915  Register index = ToRegister(instr->index());
5916  Register result = ToRegister(instr->result());
5917  Register scratch = scratch0();
5918 
5919  DeferredLoadMutableDouble* deferred;
5920  deferred = new(zone()) DeferredLoadMutableDouble(
5921  this, instr, result, object, index);
5922 
5923  Label out_of_object, done;
5924 
5925  __ And(scratch, index, Operand(Smi::FromInt(1)));
5926  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5927  __ dsra(index, index, 1);
5928 
5929  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5930  __ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot.
5931  __ Daddu(scratch, object, scratch);
5932  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5933 
5934  __ Branch(&done);
5935 
5936  __ bind(&out_of_object);
5937  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5938  // Index is equal to negated out of object property index plus 1.
5939  __ Dsubu(scratch, result, scratch);
5940  __ ld(result, FieldMemOperand(scratch,
5941  FixedArray::kHeaderSize - kPointerSize));
5942  __ bind(deferred->exit());
5943  __ bind(&done);
5944 }
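
// Illustrative sketch (not part of the original file): the branches above
// decode a smi-encoded field index. The low payload bit (tested against
// Smi::FromInt(1)) marks a mutable heap-number field, which takes the
// deferred runtime path; the remaining bits form a signed index whose
// negative range addresses the out-of-object properties array. On an
// already-untagged value:
static bool IsMutableDoubleFieldExample(int encoded) {
  return (encoded & 1) != 0;  // -> deferred->entry() above
}
static int FieldIndexExample(int encoded) {
  return encoded >> 1;  // negative => out-of-object backing store
}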
5945 
5946 
5947 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5948  Register context = ToRegister(instr->context());
5949  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5950 }
5951 
5952 
5953 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5954  Handle<ScopeInfo> scope_info = instr->scope_info();
5955  __ li(at, scope_info);
5956  __ Push(at, ToRegister(instr->function()));
5957  CallRuntime(Runtime::kPushBlockContext, 2, instr);
5958  RecordSafepoint(Safepoint::kNoLazyDeopt);
5959 }
5960 
5961 
5962 #undef __
5963 
5964 } } // namespace v8::internal