lithium-codegen-mips.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/v8.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"


namespace v8 {
namespace internal {

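// Records a safepoint after every call emitted through this wrapper: the
// pointer map tells the GC which stack slots and registers hold tagged
// values at the call site.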
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const OVERRIDE {}

  virtual void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ lw(a2, MemOperand(sp, receiver_offset));
      __ Branch(&ok, ne, a2, Operand(at));

      __ lw(a2, GlobalObjectOperand());
      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));

      __ sw(a2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
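      // The store is offset by 2 * kPointerSize because a0 and a1 were
      // pushed below the slot area after sp was lowered; the loop walks a0
      // from the top of that area down to sp.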
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // The context is returned in both v0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = t9;

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->reason);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
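      // (Materializing a full 32-bit address on MIPS takes a two-instruction
      // lui/ori pair, while these small table offsets fit a single li.)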
      __ li(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        if (needs_frame.is_bound()) {
          __ Branch(&needs_frame);
        } else {
          __ bind(&needs_frame);
          Comment(";;; call deopt with frame");
          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          DCHECK(info()->IsStub());
          __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
          __ push(at);
          __ Addu(fp, sp,
                  Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
          __ bind(&call_deopt_entry);
          // Add the base address to the offset previously loaded in
          // entry_offset.
          __ Addu(entry_offset, entry_offset,
                  Operand(ExternalReference::ForDeoptEntry(base)));
          __ Call(entry_offset);
        }
      } else {
        // The last entry can fall through into `call_deopt_entry`, avoiding a
        // branch.
        bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();

        if (need_branch) __ Branch(&call_deopt_entry);
      }
    }

    if (!call_deopt_entry.is_bound()) {
      Comment(";;; call deopt");
      __ bind(&call_deopt_entry);

      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      }

      // Add the base address to the offset previously loaded in entry_offset.
      __ Addu(entry_offset, entry_offset,
              Operand(ExternalReference::ForDeoptEntry(base)));
      __ Call(entry_offset);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
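  // On 32-bit targets a Smi is the value shifted left by one (tag bit 0),
  // so the reinterpret_cast below yields the tagged bit pattern as an int32.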
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
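  // e.g. index == -1 maps to offset 0 from sp, index == -2 to kPointerSize.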
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::BailoutType bailout_type,
                            const char* detail, Register src1,
                            const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(reason);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            const char* detail, Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
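  // Worked example: divisor == 8 (or -8) gives mask == 7. A negative
  // dividend is negated, masked, and negated again on the way out:
  // -13 mod 8  ->  -(13 & 7) == -5, matching JS semantics where the
  // result takes the sign of the dividend.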
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

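  // n mod d == n - trunc(n / d) * d. TruncatingDiv computes the quotient
  // with a precomputed multiplicative inverse of the constant divisor, and
  // the two instructions after it reconstruct the remainder from it.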
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
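  // (The MIPS div instruction writes the quotient to LO and the remainder
  // to HI; the Mod macro reads the remainder back with mfhi, so the checks
  // below overlap with the divide's latency.)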
  __ Mod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Subu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
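  // An arithmetic right shift rounds toward -infinity, while the division
  // must truncate toward zero, so negative dividends are biased by
  // (2^shift - 1) first: the srl/sra pair below extracts either the sign
  // bit (shift == 1) or the low bits of the sign extension as that bias.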
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
  } else {
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  }
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.

  // dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Subu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
  }

  // Dividing by -1 is basically negation, unless we overflow.
  __ Xor(scratch, scratch, result);
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(ge, instr, "overflow", scratch, Operand(zero_reg));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor));
  __ Branch(&done);
  __ bind(&no_overflow);
  __ sra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = scratch0();
  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
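  // Flooring differs from truncation only for inexact negative quotients:
  // if the remainder is non-zero and its sign differs from the divisor's
  // (checked below via remainder XOR divisor), subtract 1. For example,
  // -7 flooring-div 2: truncation gives -3 remainder -1; the signs differ,
  // so the result becomes -4 == floor(-3.5).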
  Label done;
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

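        // The two lines above compute |constant| without a branch: for a
        // negative constant, mask is all ones, so (constant - 1) ^ ~0
        // equals -constant; for a non-negative constant, mask is zero and
        // the value passes through unchanged.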
        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(scratch, left, shift);
          __ Addu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(scratch, result, result, right);
      } else {
        __ Mul(scratch, result, left, right);
      }
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
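            // SmiTag shifts left by one, so shift by shift_count - 1 first
            // and let SmiTagCheckOverflow perform the final shift; it sets
            // scratch negative if the combined shift overflowed the Smi
            // range.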
1675  if (shift_count != 1) {
1676  __ sll(result, left, shift_count - 1);
1677  __ SmiTagCheckOverflow(result, result, scratch);
1678  } else {
1679  __ SmiTagCheckOverflow(result, left, scratch);
1680  }
1681  DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
1682  } else {
1683  __ sll(result, left, shift_count);
1684  }
1685  } else {
1686  __ Move(result, left);
1687  }
1688  break;
1689  default:
1690  UNREACHABLE();
1691  break;
1692  }
1693  }
1694 }
1695 
1696 
1697 void LCodeGen::DoSubI(LSubI* instr) {
1698  LOperand* left = instr->left();
1699  LOperand* right = instr->right();
1700  LOperand* result = instr->result();
1701  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1702 
1703  if (!can_overflow) {
1704  if (right->IsStackSlot()) {
1705  Register right_reg = EmitLoadRegister(right, at);
1706  __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1707  } else {
1708  DCHECK(right->IsRegister() || right->IsConstantOperand());
1709  __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1710  }
1711  } else { // can_overflow.
1712  Register overflow = scratch0();
1713  Register scratch = scratch1();
1714  if (right->IsStackSlot() || right->IsConstantOperand()) {
1715  Register right_reg = EmitLoadRegister(right, scratch);
1716  __ SubuAndCheckForOverflow(ToRegister(result),
1717  ToRegister(left),
1718  right_reg,
1719  overflow); // Reg at also used as scratch.
1720  } else {
1721  DCHECK(right->IsRegister());
1722  // Because the overflow-check macros do not support constant operands,
1723  // the IsConstantOperand case is handled by the previous if clause.
1724  __ SubuAndCheckForOverflow(ToRegister(result),
1725  ToRegister(left),
1726  ToRegister(right),
1727  overflow); // Reg at also used as scratch.
1728  }
1729  DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
1730  }
1731 }
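
// Rough model (an assumption about the macro's contract, not its actual
// implementation) of what SubuAndCheckForOverflow leaves in the overflow
// register: a value whose sign bit is set exactly when the signed
// subtraction wrapped, so DeoptimizeIf(lt, ..., overflow, zero_reg) fires
// precisely on overflow.
static inline int32_t ModelSubuAndCheckForOverflow(int32_t left,
                                                   int32_t right,
                                                   int32_t* overflow) {
  int32_t result = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                        static_cast<uint32_t>(right));
  // Signed overflow iff the operands have different signs and the result's
  // sign differs from the minuend's sign.
  *overflow = (left ^ right) & (left ^ result);
  return result;
}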
1732 
1733 
1734 void LCodeGen::DoConstantI(LConstantI* instr) {
1735  __ li(ToRegister(instr->result()), Operand(instr->value()));
1736 }
1737 
1738 
1739 void LCodeGen::DoConstantS(LConstantS* instr) {
1740  __ li(ToRegister(instr->result()), Operand(instr->value()));
1741 }
1742 
1743 
1744 void LCodeGen::DoConstantD(LConstantD* instr) {
1745  DCHECK(instr->result()->IsDoubleRegister());
1746  DoubleRegister result = ToDoubleRegister(instr->result());
1747  double v = instr->value();
1748  __ Move(result, v);
1749 }
1750 
1751 
1752 void LCodeGen::DoConstantE(LConstantE* instr) {
1753  __ li(ToRegister(instr->result()), Operand(instr->value()));
1754 }
1755 
1756 
1757 void LCodeGen::DoConstantT(LConstantT* instr) {
1758  Handle<Object> object = instr->value(isolate());
1759  AllowDeferredHandleDereference smi_check;
1760  __ li(ToRegister(instr->result()), object);
1761 }
1762 
1763 
1764 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1765  Register result = ToRegister(instr->result());
1766  Register map = ToRegister(instr->value());
1767  __ EnumLength(result, map);
1768 }
1769 
1770 
1771 void LCodeGen::DoDateField(LDateField* instr) {
1772  Register object = ToRegister(instr->date());
1773  Register result = ToRegister(instr->result());
1774  Register scratch = ToRegister(instr->temp());
1775  Smi* index = instr->index();
1776  Label runtime, done;
1777  DCHECK(object.is(a0));
1778  DCHECK(result.is(v0));
1779  DCHECK(!scratch.is(scratch0()));
1780  DCHECK(!scratch.is(object));
1781 
1782  __ SmiTst(object, at);
1783  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
1784  __ GetObjectType(object, scratch, scratch);
1785  DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
1786 
1787  if (index->value() == 0) {
1788  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1789  } else {
1790  if (index->value() < JSDate::kFirstUncachedField) {
1791  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1792  __ li(scratch, Operand(stamp));
1793  __ lw(scratch, MemOperand(scratch));
1794  __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1795  __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1796  __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1797  kPointerSize * index->value()));
1798  __ jmp(&done);
1799  }
1800  __ bind(&runtime);
1801  __ PrepareCallCFunction(2, scratch);
1802  __ li(a1, Operand(index));
1803  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1804  __ bind(&done);
1805  }
1806 }
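
// Schematic of the date-field fast path above, with hypothetical types for
// illustration only: a cached field is trusted while the object's stamp
// matches the isolate-wide date cache stamp; otherwise (or for fields at
// and beyond kFirstUncachedField) the value is recomputed via the runtime,
// as the &runtime path does.
struct DateSketch { int cache_stamp; double fields[10]; };
static inline double GetDateFieldSketch(const DateSketch& date, int index,
                                        int first_uncached, int isolate_stamp,
                                        double (*runtime_get)(int)) {
  if (index == 0) return date.fields[0];  // The date value is never stale.
  if (index < first_uncached && date.cache_stamp == isolate_stamp) {
    return date.fields[index];            // Cache is still valid.
  }
  return runtime_get(index);              // Stale or uncached field.
}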
1807 
1808 
1809 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1810  LOperand* index,
1811  String::Encoding encoding) {
1812  if (index->IsConstantOperand()) {
1813  int offset = ToInteger32(LConstantOperand::cast(index));
1814  if (encoding == String::TWO_BYTE_ENCODING) {
1815  offset *= kUC16Size;
1816  }
1817  STATIC_ASSERT(kCharSize == 1);
1818  return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1819  }
1820  Register scratch = scratch0();
1821  DCHECK(!scratch.is(string));
1822  DCHECK(!scratch.is(ToRegister(index)));
1823  if (encoding == String::ONE_BYTE_ENCODING) {
1824  __ Addu(scratch, string, ToRegister(index));
1825  } else {
1826  STATIC_ASSERT(kUC16Size == 2);
1827  __ sll(scratch, ToRegister(index), 1);
1828  __ Addu(scratch, string, scratch);
1829  }
1830  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1831 }
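
// Summary of the address arithmetic the helper above encodes, assuming
// V8's tagged heap pointers (FieldMemOperand subtracts kHeapObjectTag):
//   address = string - kHeapObjectTag + SeqString::kHeaderSize
//             + index * (1 or 2 bytes per character).
// Sketch with hypothetical parameter names:
static inline uintptr_t SeqStringCharAddressSketch(uintptr_t tagged_string,
                                                   int index, bool two_byte,
                                                   int header_size, int tag) {
  int char_size = two_byte ? 2 : 1;  // kUC16Size == 2, kCharSize == 1.
  return tagged_string - tag + header_size +
         static_cast<uintptr_t>(index) * char_size;
}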
1832 
1833 
1834 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1835  String::Encoding encoding = instr->hydrogen()->encoding();
1836  Register string = ToRegister(instr->string());
1837  Register result = ToRegister(instr->result());
1838 
1839  if (FLAG_debug_code) {
1840  Register scratch = scratch0();
1841  __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1842  __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1843 
1844  __ And(scratch, scratch,
1845  Operand(kStringRepresentationMask | kStringEncodingMask));
1846  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1847  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1848  __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1849  ? one_byte_seq_type : two_byte_seq_type));
1850  __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1851  }
1852 
1853  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1854  if (encoding == String::ONE_BYTE_ENCODING) {
1855  __ lbu(result, operand);
1856  } else {
1857  __ lhu(result, operand);
1858  }
1859 }
1860 
1861 
1862 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1863  String::Encoding encoding = instr->hydrogen()->encoding();
1864  Register string = ToRegister(instr->string());
1865  Register value = ToRegister(instr->value());
1866 
1867  if (FLAG_debug_code) {
1868  Register scratch = scratch0();
1869  Register index = ToRegister(instr->index());
1870  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1871  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1872  int encoding_mask =
1873  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1874  ? one_byte_seq_type : two_byte_seq_type;
1875  __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1876  }
1877 
1878  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1879  if (encoding == String::ONE_BYTE_ENCODING) {
1880  __ sb(value, operand);
1881  } else {
1882  __ sh(value, operand);
1883  }
1884 }
1885 
1886 
1887 void LCodeGen::DoAddI(LAddI* instr) {
1888  LOperand* left = instr->left();
1889  LOperand* right = instr->right();
1890  LOperand* result = instr->result();
1891  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1892 
1893  if (!can_overflow) {
1894  if (right->IsStackSlot()) {
1895  Register right_reg = EmitLoadRegister(right, at);
1896  __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1897  } else {
1898  DCHECK(right->IsRegister() || right->IsConstantOperand());
1899  __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1900  }
1901  } else { // can_overflow.
1902  Register overflow = scratch0();
1903  Register scratch = scratch1();
1904  if (right->IsStackSlot() ||
1905  right->IsConstantOperand()) {
1906  Register right_reg = EmitLoadRegister(right, scratch);
1907  __ AdduAndCheckForOverflow(ToRegister(result),
1908  ToRegister(left),
1909  right_reg,
1910  overflow); // Reg at also used as scratch.
1911  } else {
1912  DCHECK(right->IsRegister());
1913  // Because the overflow-check macros do not support constant operands,
1914  // the IsConstantOperand case is handled by the previous if clause.
1915  __ AdduAndCheckForOverflow(ToRegister(result),
1916  ToRegister(left),
1917  ToRegister(right),
1918  overflow); // Reg at also used as scratch.
1919  }
1920  DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
1921  }
1922 }
1923 
1924 
1925 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1926  LOperand* left = instr->left();
1927  LOperand* right = instr->right();
1928  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1929  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1930  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1931  Register left_reg = ToRegister(left);
1932  Register right_reg = EmitLoadRegister(right, scratch0());
1933  Register result_reg = ToRegister(instr->result());
1934  Label return_right, done;
1935  Register scratch = scratch1();
1936  __ Slt(scratch, left_reg, Operand(right_reg));
1937  if (condition == ge) {
1938  __ Movz(result_reg, left_reg, scratch);
1939  __ Movn(result_reg, right_reg, scratch);
1940  } else {
1941  DCHECK(condition == le);
1942  __ Movn(result_reg, left_reg, scratch);
1943  __ Movz(result_reg, right_reg, scratch);
1944  }
1945  } else {
1946  DCHECK(instr->hydrogen()->representation().IsDouble());
1947  FPURegister left_reg = ToDoubleRegister(left);
1948  FPURegister right_reg = ToDoubleRegister(right);
1949  FPURegister result_reg = ToDoubleRegister(instr->result());
1950  Label check_nan_left, check_zero, return_left, return_right, done;
1951  __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1952  __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1953  __ Branch(&return_right);
1954 
1955  __ bind(&check_zero);
1956  // left == right != 0.
1957  __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1958  // At this point, both left and right are either 0 or -0.
1959  if (operation == HMathMinMax::kMathMin) {
1960  __ neg_d(left_reg, left_reg);
1961  __ sub_d(result_reg, left_reg, right_reg);
1962  __ neg_d(result_reg, result_reg);
1963  } else {
1964  __ add_d(result_reg, left_reg, right_reg);
1965  }
1966  __ Branch(&done);
1967 
1968  __ bind(&check_nan_left);
1969  // left == NaN.
1970  __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1971  __ bind(&return_right);
1972  if (!right_reg.is(result_reg)) {
1973  __ mov_d(result_reg, right_reg);
1974  }
1975  __ Branch(&done);
1976 
1977  __ bind(&return_left);
1978  if (!left_reg.is(result_reg)) {
1979  __ mov_d(result_reg, left_reg);
1980  }
1981  __ bind(&done);
1982  }
1983 }
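
// Why the check_zero case above (left == right == +/-0) computes the
// minimum as -((-left) - right): for signed zeros, IEEE-754 addition
// returns -0 only for (-0) + (-0), which is exactly Math.max's answer,
// while the negated subtraction yields -0 whenever either input is -0,
// which is Math.min's. Illustrative check (assumes <cmath> for
// std::signbit):
static inline bool MinusZeroMinMaxTrickHolds() {
  double l = -0.0, r = 0.0;
  double min = -((-l) - r);  // -0.0, sign bit set, like Math.min(-0, 0).
  double max = l + r;        // +0.0, sign bit clear, like Math.max(-0, 0).
  return std::signbit(min) && !std::signbit(max);
}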
1984 
1985 
1986 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1987  DoubleRegister left = ToDoubleRegister(instr->left());
1988  DoubleRegister right = ToDoubleRegister(instr->right());
1989  DoubleRegister result = ToDoubleRegister(instr->result());
1990  switch (instr->op()) {
1991  case Token::ADD:
1992  __ add_d(result, left, right);
1993  break;
1994  case Token::SUB:
1995  __ sub_d(result, left, right);
1996  break;
1997  case Token::MUL:
1998  __ mul_d(result, left, right);
1999  break;
2000  case Token::DIV:
2001  __ div_d(result, left, right);
2002  break;
2003  case Token::MOD: {
2004  // Save a0-a3 on the stack.
2005  RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
2006  __ MultiPush(saved_regs);
2007 
2008  __ PrepareCallCFunction(0, 2, scratch0());
2009  __ MovToFloatParameters(left, right);
2010  __ CallCFunction(
2011  ExternalReference::mod_two_doubles_operation(isolate()),
2012  0, 2);
2013  // Move the result in the double result register.
2014  __ MovFromFloatResult(result);
2015 
2016  // Restore saved register.
2017  __ MultiPop(saved_regs);
2018  break;
2019  }
2020  default:
2021  UNREACHABLE();
2022  break;
2023  }
2024 }
2025 
2026 
2027 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2028  DCHECK(ToRegister(instr->context()).is(cp));
2029  DCHECK(ToRegister(instr->left()).is(a1));
2030  DCHECK(ToRegister(instr->right()).is(a0));
2031  DCHECK(ToRegister(instr->result()).is(v0));
2032 
2033  Handle<Code> code =
2034  CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2035  CallCode(code, RelocInfo::CODE_TARGET, instr);
2036  // Other arch use a nop here, to signal that there is no inlined
2037  // patchable code. Mips does not need the nop, since our marker
2038  // instruction (andi zero_reg) will never be used in normal code.
2039 }
2040 
2041 
2042 template<class InstrType>
2043 void LCodeGen::EmitBranch(InstrType instr,
2044  Condition condition,
2045  Register src1,
2046  const Operand& src2) {
2047  int left_block = instr->TrueDestination(chunk_);
2048  int right_block = instr->FalseDestination(chunk_);
2049 
2050  int next_block = GetNextEmittedBlock();
2051  if (right_block == left_block || condition == al) {
2052  EmitGoto(left_block);
2053  } else if (left_block == next_block) {
2054  __ Branch(chunk_->GetAssemblyLabel(right_block),
2055  NegateCondition(condition), src1, src2);
2056  } else if (right_block == next_block) {
2057  __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2058  } else {
2059  __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2060  __ Branch(chunk_->GetAssemblyLabel(right_block));
2061  }
2062 }
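
// Block-layout sketch (hypothetical emitters standing in for __ Branch) of
// the successor selection above: when one successor is the block emitted
// next, a single conditional branch on the possibly negated condition
// suffices and the other edge falls through, saving an unconditional jump.
static inline void EmitTwoWayBranchSketch(
    int true_block, int false_block, int next_block,
    void (*emit_cond_branch)(int target, bool negate_condition),
    void (*emit_jump)(int target)) {
  if (true_block == false_block) {
    emit_jump(true_block);                  // Both edges agree: plain goto.
  } else if (true_block == next_block) {
    emit_cond_branch(false_block, true);    // Branch on !cond, fall into true.
  } else if (false_block == next_block) {
    emit_cond_branch(true_block, false);    // Branch on cond, fall into false.
  } else {
    emit_cond_branch(true_block, false);    // No fall-through available:
    emit_jump(false_block);                 // conditional branch plus jump.
  }
}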
2063 
2064 
2065 template<class InstrType>
2066 void LCodeGen::EmitBranchF(InstrType instr,
2067  Condition condition,
2068  FPURegister src1,
2069  FPURegister src2) {
2070  int right_block = instr->FalseDestination(chunk_);
2071  int left_block = instr->TrueDestination(chunk_);
2072 
2073  int next_block = GetNextEmittedBlock();
2074  if (right_block == left_block) {
2075  EmitGoto(left_block);
2076  } else if (left_block == next_block) {
2077  __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2078  NegateCondition(condition), src1, src2);
2079  } else if (right_block == next_block) {
2080  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2081  condition, src1, src2);
2082  } else {
2083  __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2084  condition, src1, src2);
2085  __ Branch(chunk_->GetAssemblyLabel(right_block));
2086  }
2087 }
2088 
2089 
2090 template<class InstrType>
2091 void LCodeGen::EmitFalseBranch(InstrType instr,
2092  Condition condition,
2093  Register src1,
2094  const Operand& src2) {
2095  int false_block = instr->FalseDestination(chunk_);
2096  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2097 }
2098 
2099 
2100 template<class InstrType>
2101 void LCodeGen::EmitFalseBranchF(InstrType instr,
2102  Condition condition,
2103  FPURegister src1,
2104  FPURegister src2) {
2105  int false_block = instr->FalseDestination(chunk_);
2106  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2107  condition, src1, src2);
2108 }
2109 
2110 
2111 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2112  __ stop("LDebugBreak");
2113 }
2114 
2115 
2116 void LCodeGen::DoBranch(LBranch* instr) {
2117  Representation r = instr->hydrogen()->value()->representation();
2118  if (r.IsInteger32() || r.IsSmi()) {
2119  DCHECK(!info()->IsStub());
2120  Register reg = ToRegister(instr->value());
2121  EmitBranch(instr, ne, reg, Operand(zero_reg));
2122  } else if (r.IsDouble()) {
2123  DCHECK(!info()->IsStub());
2124  DoubleRegister reg = ToDoubleRegister(instr->value());
2125  // Test the double value. Zero and NaN are false.
2126  EmitBranchF(instr, nue, reg, kDoubleRegZero);
2127  } else {
2128  DCHECK(r.IsTagged());
2129  Register reg = ToRegister(instr->value());
2130  HType type = instr->hydrogen()->value()->type();
2131  if (type.IsBoolean()) {
2132  DCHECK(!info()->IsStub());
2133  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2134  EmitBranch(instr, eq, reg, Operand(at));
2135  } else if (type.IsSmi()) {
2136  DCHECK(!info()->IsStub());
2137  EmitBranch(instr, ne, reg, Operand(zero_reg));
2138  } else if (type.IsJSArray()) {
2139  DCHECK(!info()->IsStub());
2140  EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2141  } else if (type.IsHeapNumber()) {
2142  DCHECK(!info()->IsStub());
2143  DoubleRegister dbl_scratch = double_scratch0();
2144  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2145  // Test the double value. Zero and NaN are false.
2146  EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
2147  } else if (type.IsString()) {
2148  DCHECK(!info()->IsStub());
2149  __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2150  EmitBranch(instr, ne, at, Operand(zero_reg));
2151  } else {
2152  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2153  // Avoid deopts in the case where we've never executed this path before.
2154  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2155 
2156  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2157  // undefined -> false.
2158  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2159  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2160  }
2161  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2162  // Boolean -> its value.
2163  __ LoadRoot(at, Heap::kTrueValueRootIndex);
2164  __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2165  __ LoadRoot(at, Heap::kFalseValueRootIndex);
2166  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2167  }
2168  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2169  // 'null' -> false.
2170  __ LoadRoot(at, Heap::kNullValueRootIndex);
2171  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2172  }
2173 
2174  if (expected.Contains(ToBooleanStub::SMI)) {
2175  // Smis: 0 -> false, all other -> true.
2176  __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2177  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2178  } else if (expected.NeedsMap()) {
2179  // If we need a map later and have a Smi -> deopt.
2180  __ SmiTst(reg, at);
2181  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
2182  }
2183 
2184  const Register map = scratch0();
2185  if (expected.NeedsMap()) {
2186  __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2187  if (expected.CanBeUndetectable()) {
2188  // Undetectable -> false.
2189  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2190  __ And(at, at, Operand(1 << Map::kIsUndetectable));
2191  __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2192  }
2193  }
2194 
2195  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2196  // spec object -> true.
2197  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2198  __ Branch(instr->TrueLabel(chunk_),
2199  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2200  }
2201 
2202  if (expected.Contains(ToBooleanStub::STRING)) {
2203  // String value -> false iff empty.
2204  Label not_string;
2205  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2206  __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2207  __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2208  __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2209  __ Branch(instr->FalseLabel(chunk_));
2210  __ bind(&not_string);
2211  }
2212 
2213  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2214  // Symbol value -> true.
2215  const Register scratch = scratch1();
2216  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2217  __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2218  }
2219 
2220  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2221  // heap number -> false iff +0, -0, or NaN.
2222  DoubleRegister dbl_scratch = double_scratch0();
2223  Label not_heap_number;
2224  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2225  __ Branch(&not_heap_number, ne, map, Operand(at));
2226  __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2227  __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2228  ne, dbl_scratch, kDoubleRegZero);
2229  // Falls through if dbl_scratch == 0.
2230  __ Branch(instr->FalseLabel(chunk_));
2231  __ bind(&not_heap_number);
2232  }
2233 
2234  if (!expected.IsGeneric()) {
2235  // We've seen something for the first time -> deopt.
2236  // This can only happen if we are not generic already.
2237  DeoptimizeIf(al, instr, "unexpected object", zero_reg,
2238  Operand(zero_reg));
2239  }
2240  }
2241  }
2242 }
2243 
2244 
2245 void LCodeGen::EmitGoto(int block) {
2246  if (!IsNextEmittedBlock(block)) {
2247  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2248  }
2249 }
2250 
2251 
2252 void LCodeGen::DoGoto(LGoto* instr) {
2253  EmitGoto(instr->block_id());
2254 }
2255 
2256 
2257 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2258  Condition cond = kNoCondition;
2259  switch (op) {
2260  case Token::EQ:
2261  case Token::EQ_STRICT:
2262  cond = eq;
2263  break;
2264  case Token::NE:
2265  case Token::NE_STRICT:
2266  cond = ne;
2267  break;
2268  case Token::LT:
2269  cond = is_unsigned ? lo : lt;
2270  break;
2271  case Token::GT:
2272  cond = is_unsigned ? hi : gt;
2273  break;
2274  case Token::LTE:
2275  cond = is_unsigned ? ls : le;
2276  break;
2277  case Token::GTE:
2278  cond = is_unsigned ? hs : ge;
2279  break;
2280  case Token::IN:
2281  case Token::INSTANCEOF:
2282  default:
2283  UNREACHABLE();
2284  }
2285  return cond;
2286 }
2287 
2288 
2289 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2290  LOperand* left = instr->left();
2291  LOperand* right = instr->right();
2292  bool is_unsigned =
2293  instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2294  instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2295  Condition cond = TokenToCondition(instr->op(), is_unsigned);
2296 
2297  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2298  // We can statically evaluate the comparison.
2299  double left_val = ToDouble(LConstantOperand::cast(left));
2300  double right_val = ToDouble(LConstantOperand::cast(right));
2301  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2302  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2303  EmitGoto(next_block);
2304  } else {
2305  if (instr->is_double()) {
2306  // Compare left and right as doubles and load the
2307  // resulting flags into the normal status register.
2308  FPURegister left_reg = ToDoubleRegister(left);
2309  FPURegister right_reg = ToDoubleRegister(right);
2310 
2311  // If a NaN is involved, i.e. the result is unordered,
2312  // jump to false block label.
2313  __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2314  left_reg, right_reg);
2315 
2316  EmitBranchF(instr, cond, left_reg, right_reg);
2317  } else {
2318  Register cmp_left;
2319  Operand cmp_right = Operand(0);
2320 
2321  if (right->IsConstantOperand()) {
2322  int32_t value = ToInteger32(LConstantOperand::cast(right));
2323  if (instr->hydrogen_value()->representation().IsSmi()) {
2324  cmp_left = ToRegister(left);
2325  cmp_right = Operand(Smi::FromInt(value));
2326  } else {
2327  cmp_left = ToRegister(left);
2328  cmp_right = Operand(value);
2329  }
2330  } else if (left->IsConstantOperand()) {
2331  int32_t value = ToInteger32(LConstantOperand::cast(left));
2332  if (instr->hydrogen_value()->representation().IsSmi()) {
2333  cmp_left = ToRegister(right);
2334  cmp_right = Operand(Smi::FromInt(value));
2335  } else {
2336  cmp_left = ToRegister(right);
2337  cmp_right = Operand(value);
2338  }
2339  // We commuted the operands, so commute the condition.
2340  cond = CommuteCondition(cond);
2341  } else {
2342  cmp_left = ToRegister(left);
2343  cmp_right = Operand(ToRegister(right));
2344  }
2345 
2346  EmitBranch(instr, cond, cmp_left, cmp_right);
2347  }
2348  }
2349 }
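
// Why CommuteCondition is needed above: the constant operand must end up on
// the right so it can be encoded as an immediate, and swapping the operands
// of a relation means swapping, not negating, the condition ("5 < x" is
// "x > 5", not "x >= 5"). Illustrative check:
static inline bool CommutedCompareAgrees(int32_t constant, int32_t x) {
  return (constant < x) == (x > constant);
}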
2350 
2351 
2352 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2353  Register left = ToRegister(instr->left());
2354  Register right = ToRegister(instr->right());
2355 
2356  EmitBranch(instr, eq, left, Operand(right));
2357 }
2358 
2359 
2360 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2361  if (instr->hydrogen()->representation().IsTagged()) {
2362  Register input_reg = ToRegister(instr->object());
2363  __ li(at, Operand(factory()->the_hole_value()));
2364  EmitBranch(instr, eq, input_reg, Operand(at));
2365  return;
2366  }
2367 
2368  DoubleRegister input_reg = ToDoubleRegister(instr->object());
2369  EmitFalseBranchF(instr, eq, input_reg, input_reg);
2370 
2371  Register scratch = scratch0();
2372  __ FmoveHigh(scratch, input_reg);
2373  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2374 }
2375 
2376 
2377 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2378  Representation rep = instr->hydrogen()->value()->representation();
2379  DCHECK(!rep.IsInteger32());
2380  Register scratch = ToRegister(instr->temp());
2381 
2382  if (rep.IsDouble()) {
2383  DoubleRegister value = ToDoubleRegister(instr->value());
2384  EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2385  __ FmoveHigh(scratch, value);
2386  __ li(at, 0x80000000);
2387  } else {
2388  Register value = ToRegister(instr->value());
2389  __ CheckMap(value,
2390  scratch,
2391  Heap::kHeapNumberMapRootIndex,
2392  instr->FalseLabel(chunk()),
2393  DO_SMI_CHECK);
2394  __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2395  EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2396  __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2397  __ mov(at, zero_reg);
2398  }
2399  EmitBranch(instr, eq, scratch, Operand(at));
2400 }
2401 
2402 
2403 Condition LCodeGen::EmitIsObject(Register input,
2404  Register temp1,
2405  Register temp2,
2406  Label* is_not_object,
2407  Label* is_object) {
2408  __ JumpIfSmi(input, is_not_object);
2409 
2410  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2411  __ Branch(is_object, eq, input, Operand(temp2));
2412 
2413  // Load map.
2414  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2415  // Undetectable objects behave like undefined.
2416  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2417  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2418  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2419 
2420  // Load instance type and check that it is in object type range.
2421  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2422  __ Branch(is_not_object,
2423  lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2424 
2425  return le;
2426 }
2427 
2428 
2429 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2430  Register reg = ToRegister(instr->value());
2431  Register temp1 = ToRegister(instr->temp());
2432  Register temp2 = scratch0();
2433 
2434  Condition true_cond =
2435  EmitIsObject(reg, temp1, temp2,
2436  instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2437 
2438  EmitBranch(instr, true_cond, temp2,
2439  Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2440 }
2441 
2442 
2443 Condition LCodeGen::EmitIsString(Register input,
2444  Register temp1,
2445  Label* is_not_string,
2446  SmiCheck check_needed = INLINE_SMI_CHECK) {
2447  if (check_needed == INLINE_SMI_CHECK) {
2448  __ JumpIfSmi(input, is_not_string);
2449  }
2450  __ GetObjectType(input, temp1, temp1);
2451 
2452  return lt;
2453 }
2454 
2455 
2456 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2457  Register reg = ToRegister(instr->value());
2458  Register temp1 = ToRegister(instr->temp());
2459 
2460  SmiCheck check_needed =
2461  instr->hydrogen()->value()->type().IsHeapObject()
2462  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2463  Condition true_cond =
2464  EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2465 
2466  EmitBranch(instr, true_cond, temp1,
2467  Operand(FIRST_NONSTRING_TYPE));
2468 }
2469 
2470 
2471 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2472  Register input_reg = EmitLoadRegister(instr->value(), at);
2473  __ And(at, input_reg, kSmiTagMask);
2474  EmitBranch(instr, eq, at, Operand(zero_reg));
2475 }
2476 
2477 
2478 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2479  Register input = ToRegister(instr->value());
2480  Register temp = ToRegister(instr->temp());
2481 
2482  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2483  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2484  }
2485  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2486  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2487  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2488  EmitBranch(instr, ne, at, Operand(zero_reg));
2489 }
2490 
2491 
2492 static Condition ComputeCompareCondition(Token::Value op) {
2493  switch (op) {
2494  case Token::EQ_STRICT:
2495  case Token::EQ:
2496  return eq;
2497  case Token::LT:
2498  return lt;
2499  case Token::GT:
2500  return gt;
2501  case Token::LTE:
2502  return le;
2503  case Token::GTE:
2504  return ge;
2505  default:
2506  UNREACHABLE();
2507  return kNoCondition;
2508  }
2509 }
2510 
2511 
2512 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2513  DCHECK(ToRegister(instr->context()).is(cp));
2514  Token::Value op = instr->op();
2515 
2516  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2517  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2518 
2519  Condition condition = ComputeCompareCondition(op);
2520 
2521  EmitBranch(instr, condition, v0, Operand(zero_reg));
2522 }
2523 
2524 
2525 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2526  InstanceType from = instr->from();
2527  InstanceType to = instr->to();
2528  if (from == FIRST_TYPE) return to;
2529  DCHECK(from == to || to == LAST_TYPE);
2530  return from;
2531 }
2532 
2533 
2534 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2535  InstanceType from = instr->from();
2536  InstanceType to = instr->to();
2537  if (from == to) return eq;
2538  if (to == LAST_TYPE) return hs;
2539  if (from == FIRST_TYPE) return ls;
2540  UNREACHABLE();
2541  return eq;
2542 }
2543 
2544 
2545 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2546  Register scratch = scratch0();
2547  Register input = ToRegister(instr->value());
2548 
2549  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2550  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2551  }
2552 
2553  __ GetObjectType(input, scratch, scratch);
2554  EmitBranch(instr,
2555  BranchCondition(instr->hydrogen()),
2556  scratch,
2557  Operand(TestType(instr->hydrogen())));
2558 }
2559 
2560 
2561 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2562  Register input = ToRegister(instr->value());
2563  Register result = ToRegister(instr->result());
2564 
2565  __ AssertString(input);
2566 
2567  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2568  __ IndexFromHash(result, result);
2569 }
2570 
2571 
2572 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2573  LHasCachedArrayIndexAndBranch* instr) {
2574  Register input = ToRegister(instr->value());
2575  Register scratch = scratch0();
2576 
2577  __ lw(scratch,
2578  FieldMemOperand(input, String::kHashFieldOffset));
2579  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2580  EmitBranch(instr, eq, at, Operand(zero_reg));
2581 }
2582 
2583 
2584 // Branches to a label or falls through with the answer in flags. Trashes
2585 // the temp registers, but not the input.
2586 void LCodeGen::EmitClassOfTest(Label* is_true,
2587  Label* is_false,
2588  Handle<String> class_name,
2589  Register input,
2590  Register temp,
2591  Register temp2) {
2592  DCHECK(!input.is(temp));
2593  DCHECK(!input.is(temp2));
2594  DCHECK(!temp.is(temp2));
2595 
2596  __ JumpIfSmi(input, is_false);
2597 
2598  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2599  // Assuming the following assertions, we can use the same compares to test
2600  // for both being a function type and being in the object type range.
2601  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2602  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2603  FIRST_SPEC_OBJECT_TYPE + 1);
2604  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2605  LAST_SPEC_OBJECT_TYPE - 1);
2606  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2607 
2608  __ GetObjectType(input, temp, temp2);
2609  __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2610  __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2611  __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2612  } else {
2613  // Faster code path to avoid two compares: subtract lower bound from the
2614  // actual type and do a signed compare with the width of the type range.
2615  __ GetObjectType(input, temp, temp2);
2616  __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2617  __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2618  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2619  }
2620 
2621  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2622  // Check if the constructor in the map is a function.
2623  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2624 
2625  // Objects with a non-function constructor have class 'Object'.
2626  __ GetObjectType(temp, temp2, temp2);
2627  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2628  __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2629  } else {
2630  __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2631  }
2632 
2633  // temp now contains the constructor function. Grab the
2634  // instance class name from there.
2635  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2636  __ lw(temp, FieldMemOperand(temp,
2637  SharedFunctionInfo::kInstanceClassNameOffset));
2638  // The class name we are testing against is internalized since it's a literal.
2639  // The name in the constructor is internalized because of the way the context
2640  // is booted. This routine isn't expected to work for random API-created
2641  // classes and it doesn't have to because you can't access it with natives
2642  // syntax. Since both sides are internalized it is sufficient to use an
2643  // identity comparison.
2644 
2645  // End with the address of this class_name instance in temp register.
2646  // On MIPS, the caller must do the comparison with Handle<String> class_name.
2647 }
2648 
2649 
2650 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2651  Register input = ToRegister(instr->value());
2652  Register temp = scratch0();
2653  Register temp2 = ToRegister(instr->temp());
2654  Handle<String> class_name = instr->hydrogen()->class_name();
2655 
2656  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2657  class_name, input, temp, temp2);
2658 
2659  EmitBranch(instr, eq, temp, Operand(class_name));
2660 }
2661 
2662 
2663 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2664  Register reg = ToRegister(instr->value());
2665  Register temp = ToRegister(instr->temp());
2666 
2667  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2668  EmitBranch(instr, eq, temp, Operand(instr->map()));
2669 }
2670 
2671 
2672 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2673  DCHECK(ToRegister(instr->context()).is(cp));
2674  Label true_label, done;
2675  DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0.
2676  DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1.
2677  Register result = ToRegister(instr->result());
2678  DCHECK(result.is(v0));
2679 
2680  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2681  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2682 
2683  __ Branch(&true_label, eq, result, Operand(zero_reg));
2684  __ li(result, Operand(factory()->false_value()));
2685  __ Branch(&done);
2686  __ bind(&true_label);
2687  __ li(result, Operand(factory()->true_value()));
2688  __ bind(&done);
2689 }
2690 
2691 
2692 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2693  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2694  public:
2695  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2696  LInstanceOfKnownGlobal* instr)
2697  : LDeferredCode(codegen), instr_(instr) { }
2698  virtual void Generate() OVERRIDE {
2699  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2700  }
2701  virtual LInstruction* instr() OVERRIDE { return instr_; }
2702  Label* map_check() { return &map_check_; }
2703 
2704  private:
2705  LInstanceOfKnownGlobal* instr_;
2706  Label map_check_;
2707  };
2708 
2709  DeferredInstanceOfKnownGlobal* deferred;
2710  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2711 
2712  Label done, false_result;
2713  Register object = ToRegister(instr->value());
2714  Register temp = ToRegister(instr->temp());
2715  Register result = ToRegister(instr->result());
2716 
2717  DCHECK(object.is(a0));
2718  DCHECK(result.is(v0));
2719 
2720  // A Smi is not an instance of anything.
2721  __ JumpIfSmi(object, &false_result);
2722 
2723  // This is the inlined call site instanceof cache. The two occurrences of the
2724  // hole value will be patched to the last map/result pair generated by the
2725  // instanceof stub.
2726  Label cache_miss;
2727  Register map = temp;
2728  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2729 
2730  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2731  __ bind(deferred->map_check()); // Label for calculating code patching.
2732  // We use Factory::the_hole_value() on purpose instead of loading from the
2733  // root array to force relocation to be able to later patch with
2734  // the cached map.
2735  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2736  __ li(at, Operand(Handle<Object>(cell)));
2738  __ BranchShort(&cache_miss, ne, map, Operand(at));
2739  // We use Factory::the_hole_value() on purpose instead of loading from the
2740  // root array to force relocation to be able to later patch
2741  // with true or false. The distance from map check has to be constant.
2742  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2743  __ Branch(&done);
2744 
2745  // The inlined call site cache did not match. Check null and string before
2746  // calling the deferred code.
2747  __ bind(&cache_miss);
2748  // Null is not an instance of anything.
2749  __ LoadRoot(temp, Heap::kNullValueRootIndex);
2750  __ Branch(&false_result, eq, object, Operand(temp));
2751 
2752  // String values are not instances of anything.
2753  Condition cc = __ IsObjectStringType(object, temp, temp);
2754  __ Branch(&false_result, cc, temp, Operand(zero_reg));
2755 
2756  // Go to the deferred code.
2757  __ Branch(deferred->entry());
2758 
2759  __ bind(&false_result);
2760  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2761 
2762  // Here result has either true or false. Deferred code also produces true or
2763  // false object.
2764  __ bind(deferred->exit());
2765  __ bind(&done);
2766 }
2767 
2768 
2769 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2770  Label* map_check) {
2771  Register result = ToRegister(instr->result());
2772  DCHECK(result.is(v0));
2773 
2774  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2775  flags = static_cast<InstanceofStub::Flags>(
2776  flags | InstanceofStub::kArgsInRegisters);
2777  flags = static_cast<InstanceofStub::Flags>(
2778  flags | InstanceofStub::kCallSiteInlineCheck);
2779  flags = static_cast<InstanceofStub::Flags>(
2780  flags | InstanceofStub::kReturnTrueFalseObject);
2781  InstanceofStub stub(isolate(), flags);
2782 
2783  PushSafepointRegistersScope scope(this);
2784  LoadContextFromDeferred(instr->context());
2785 
2786  // Get the temp register reserved by the instruction. This needs to be t0,
2787  // since its slot in the pushed safepoint register area is used to
2788  // communicate the offset to the location of the map check.
2789  Register temp = ToRegister(instr->temp());
2790  DCHECK(temp.is(t0));
2791  __ li(InstanceofStub::right(), instr->function());
2792  static const int kAdditionalDelta = 7;
2793  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2794  Label before_push_delta;
2795  __ bind(&before_push_delta);
2796  {
2797  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2798  __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2799  __ StoreToSafepointRegisterSlot(temp, temp);
2800  }
2801  CallCodeGeneric(stub.GetCode(),
2802  RelocInfo::CODE_TARGET,
2803  instr,
2804  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2805  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2806  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2807  // Put the result value into the result register slot and
2808  // restore all registers.
2809  __ StoreToSafepointRegisterSlot(result, result);
2810 }
2811 
2812 
2813 void LCodeGen::DoCmpT(LCmpT* instr) {
2814  DCHECK(ToRegister(instr->context()).is(cp));
2815  Token::Value op = instr->op();
2816 
2817  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2818  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2819  // On MIPS there is no need for a "no inlined smi code" marker (nop).
2820 
2821  Condition condition = ComputeCompareCondition(op);
2822  // A minor optimization that relies on LoadRoot always emitting one
2823  // instruction.
2824  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2825  Label done, check;
2826  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2827  __ bind(&check);
2828  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2829  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2830  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2831  __ bind(&done);
2832 }
2833 
2834 
2835 void LCodeGen::DoReturn(LReturn* instr) {
2836  if (FLAG_trace && info()->IsOptimizing()) {
2837  // Push the return value on the stack as the parameter.
2838  // Runtime::TraceExit returns its parameter in v0. Since we're leaving the
2839  // code managed by the register allocator and tearing down the frame, it's
2840  // safe to write to the context register.
2841  __ push(v0);
2842  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2843  __ CallRuntime(Runtime::kTraceExit, 1);
2844  }
2845  if (info()->saves_caller_doubles()) {
2846  RestoreCallerDoubles();
2847  }
2848  int no_frame_start = -1;
2849  if (NeedsEagerFrame()) {
2850  __ mov(sp, fp);
2851  no_frame_start = masm_->pc_offset();
2852  __ Pop(ra, fp);
2853  }
2854  if (instr->has_constant_parameter_count()) {
2855  int parameter_count = ToInteger32(instr->constant_parameter_count());
2856  int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2857  if (sp_delta != 0) {
2858  __ Addu(sp, sp, Operand(sp_delta));
2859  }
2860  } else {
2861  Register reg = ToRegister(instr->parameter_count());
2862  // The argument count parameter is a smi.
2863  __ SmiUntag(reg);
2864  __ sll(at, reg, kPointerSizeLog2);
2865  __ Addu(sp, sp, at);
2866  }
2867 
2868  __ Jump(ra);
2869 
2870  if (no_frame_start != -1) {
2871  info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2872  }
2873 }
2874 
2875 
2876 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2877  Register result = ToRegister(instr->result());
2878  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2879  __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
2880  if (instr->hydrogen()->RequiresHoleCheck()) {
2881  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2882  DeoptimizeIf(eq, instr, "hole", result, Operand(at));
2883  }
2884 }
2885 
2886 
2887 template <class T>
2888 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2889  DCHECK(FLAG_vector_ics);
2890  Register vector = ToRegister(instr->temp_vector());
2891  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2892  __ li(vector, instr->hydrogen()->feedback_vector());
2893  // No need to allocate this register.
2894  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
2895  __ li(VectorLoadICDescriptor::SlotRegister(),
2896  Operand(Smi::FromInt(instr->hydrogen()->slot())));
2897 }
2898 
2899 
2900 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2901  DCHECK(ToRegister(instr->context()).is(cp));
2902  DCHECK(ToRegister(instr->global_object())
2903  .is(LoadDescriptor::ReceiverRegister()));
2904  DCHECK(ToRegister(instr->result()).is(v0));
2905 
2906  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2907  if (FLAG_vector_ics) {
2908  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2909  }
2910  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2911  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
2912  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2913 }
2914 
2915 
2916 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2917  Register value = ToRegister(instr->value());
2918  Register cell = scratch0();
2919 
2920  // Load the cell.
2921  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2922 
2923  // If the cell we are storing to contains the hole it could have
2924  // been deleted from the property dictionary. In that case, we need
2925  // to update the property details in the property dictionary to mark
2926  // it as no longer deleted.
2927  if (instr->hydrogen()->RequiresHoleCheck()) {
2928  // We use a temp to check the payload.
2929  Register payload = ToRegister(instr->temp());
2930  __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
2931  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2932  DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
2933  }
2934 
2935  // Store the value.
2936  __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
2937  // Cells are always rescanned, so no write barrier here.
2938 }
2939 
2940 
2941 
2942 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2943  Register context = ToRegister(instr->context());
2944  Register result = ToRegister(instr->result());
2945 
2946  __ lw(result, ContextOperand(context, instr->slot_index()));
2947  if (instr->hydrogen()->RequiresHoleCheck()) {
2948  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2949 
2950  if (instr->hydrogen()->DeoptimizesOnHole()) {
2951  DeoptimizeIf(eq, instr, "hole", result, Operand(at));
2952  } else {
2953  Label is_not_hole;
2954  __ Branch(&is_not_hole, ne, result, Operand(at));
2955  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2956  __ bind(&is_not_hole);
2957  }
2958  }
2959 }
2960 
2961 
2962 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2963  Register context = ToRegister(instr->context());
2964  Register value = ToRegister(instr->value());
2965  Register scratch = scratch0();
2966  MemOperand target = ContextOperand(context, instr->slot_index());
2967 
2968  Label skip_assignment;
2969 
2970  if (instr->hydrogen()->RequiresHoleCheck()) {
2971  __ lw(scratch, target);
2972  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2973 
2974  if (instr->hydrogen()->DeoptimizesOnHole()) {
2975  DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
2976  } else {
2977  __ Branch(&skip_assignment, ne, scratch, Operand(at));
2978  }
2979  }
2980 
2981  __ sw(value, target);
2982  if (instr->hydrogen()->NeedsWriteBarrier()) {
2983  SmiCheck check_needed =
2984  instr->hydrogen()->value()->type().IsHeapObject()
2985  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2986  __ RecordWriteContextSlot(context,
2987  target.offset(),
2988  value,
2989  scratch0(),
2990  GetRAState(),
2991  kSaveFPRegs,
2992  EMIT_REMEMBERED_SET,
2993  check_needed);
2994  }
2995 
2996  __ bind(&skip_assignment);
2997 }
2998 
2999 
3000 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3001  HObjectAccess access = instr->hydrogen()->access();
3002  int offset = access.offset();
3003  Register object = ToRegister(instr->object());
3004 
3005  if (access.IsExternalMemory()) {
3006  Register result = ToRegister(instr->result());
3007  MemOperand operand = MemOperand(object, offset);
3008  __ Load(result, operand, access.representation());
3009  return;
3010  }
3011 
3012  if (instr->hydrogen()->representation().IsDouble()) {
3013  DoubleRegister result = ToDoubleRegister(instr->result());
3014  __ ldc1(result, FieldMemOperand(object, offset));
3015  return;
3016  }
3017 
3018  Register result = ToRegister(instr->result());
3019  if (!access.IsInobject()) {
3020  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3021  object = result;
3022  }
3023  MemOperand operand = FieldMemOperand(object, offset);
3024  __ Load(result, operand, access.representation());
3025 }
3026 
3027 
3028 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3029  DCHECK(ToRegister(instr->context()).is(cp));
3030  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3031  DCHECK(ToRegister(instr->result()).is(v0));
3032 
3033  // Name is always in a2.
3034  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
3035  if (FLAG_vector_ics) {
3036  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3037  }
3038  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3039  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3040 }
3041 
3042 
3043 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3044  Register scratch = scratch0();
3045  Register function = ToRegister(instr->function());
3046  Register result = ToRegister(instr->result());
3047 
3048  // Get the prototype or initial map from the function.
3049  __ lw(result,
3050  FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3051 
3052  // Check that the function has a prototype or an initial map.
3053  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3054  DeoptimizeIf(eq, instr, "hole", result, Operand(at));
3055 
3056  // If the function does not have an initial map, we're done.
3057  Label done;
3058  __ GetObjectType(result, scratch, scratch);
3059  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
3060 
3061  // Get the prototype from the initial map.
3062  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3063 
3064  // All done.
3065  __ bind(&done);
3066 }
3067 
3068 
3069 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3070  Register result = ToRegister(instr->result());
3071  __ LoadRoot(result, instr->index());
3072 }
3073 
3074 
3075 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3076  Register arguments = ToRegister(instr->arguments());
3077  Register result = ToRegister(instr->result());
3078  // There are two words between the frame pointer and the last argument.
3079  // Subtracting the index from the length accounts for one of them; add one more.
3080  if (instr->length()->IsConstantOperand()) {
3081  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3082  if (instr->index()->IsConstantOperand()) {
3083  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3084  int index = (const_length - const_index) + 1;
3085  __ lw(result, MemOperand(arguments, index * kPointerSize));
3086  } else {
3087  Register index = ToRegister(instr->index());
3088  __ li(at, Operand(const_length + 1));
3089  __ Subu(result, at, index);
3090  __ sll(at, result, kPointerSizeLog2);
3091  __ Addu(at, arguments, at);
3092  __ lw(result, MemOperand(at));
3093  }
3094  } else if (instr->index()->IsConstantOperand()) {
3095  Register length = ToRegister(instr->length());
3096  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3097  int loc = const_index - 1;
3098  if (loc != 0) {
3099  __ Subu(result, length, Operand(loc));
3100  __ sll(at, result, kPointerSizeLog2);
3101  __ Addu(at, arguments, at);
3102  __ lw(result, MemOperand(at));
3103  } else {
3104  __ sll(at, length, kPointerSizeLog2);
3105  __ Addu(at, arguments, at);
3106  __ lw(result, MemOperand(at));
3107  }
3108  } else {
3109  Register length = ToRegister(instr->length());
3110  Register index = ToRegister(instr->index());
3111  __ Subu(result, length, index);
3112  __ Addu(result, result, 1);
3113  __ sll(at, result, kPointerSizeLog2);
3114  __ Addu(at, arguments, at);
3115  __ lw(result, MemOperand(at));
3116  }
3117 }
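
// Sketch of the slot arithmetic above, with hypothetical parameter names:
// given the two words between the frame pointer and the last argument,
// argument `index` is read from (length - index) + 1 pointers above the
// arguments base.
static inline uintptr_t ArgumentSlotSketch(uintptr_t arguments_base,
                                           int length, int index,
                                           int pointer_size) {
  return arguments_base +
         static_cast<uintptr_t>((length - index) + 1) * pointer_size;
}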
3118 
3119 
3120 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3121  Register external_pointer = ToRegister(instr->elements());
3122  Register key = no_reg;
3123  ElementsKind elements_kind = instr->elements_kind();
3124  bool key_is_constant = instr->key()->IsConstantOperand();
3125  int constant_key = 0;
3126  if (key_is_constant) {
3127  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3128  if (constant_key & 0xF0000000) {
3129  Abort(kArrayIndexConstantValueTooBig);
3130  }
3131  } else {
3132  key = ToRegister(instr->key());
3133  }
3134  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3135  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3136  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3137  int base_offset = instr->base_offset();
3138 
3139  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3140  elements_kind == FLOAT32_ELEMENTS ||
3141  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3142  elements_kind == FLOAT64_ELEMENTS) {
3143  int base_offset = instr->base_offset();
3144  FPURegister result = ToDoubleRegister(instr->result());
3145  if (key_is_constant) {
3146  __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
3147  } else {
3148  __ sll(scratch0(), key, shift_size);
3149  __ Addu(scratch0(), scratch0(), external_pointer);
3150  }
3151  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3152  elements_kind == FLOAT32_ELEMENTS) {
3153  __ lwc1(result, MemOperand(scratch0(), base_offset));
3154  __ cvt_d_s(result, result);
3155  } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
3156  __ ldc1(result, MemOperand(scratch0(), base_offset));
3157  }
3158  } else {
3159  Register result = ToRegister(instr->result());
3160  MemOperand mem_operand = PrepareKeyedOperand(
3161  key, external_pointer, key_is_constant, constant_key,
3162  element_size_shift, shift_size, base_offset);
3163  switch (elements_kind) {
3164  case EXTERNAL_INT8_ELEMENTS:
3165  case INT8_ELEMENTS:
3166  __ lb(result, mem_operand);
3167  break;
3168  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3169  case EXTERNAL_UINT8_ELEMENTS:
3170  case UINT8_ELEMENTS:
3171  case UINT8_CLAMPED_ELEMENTS:
3172  __ lbu(result, mem_operand);
3173  break;
3174  case EXTERNAL_INT16_ELEMENTS:
3175  case INT16_ELEMENTS:
3176  __ lh(result, mem_operand);
3177  break;
3178  case EXTERNAL_UINT16_ELEMENTS:
3179  case UINT16_ELEMENTS:
3180  __ lhu(result, mem_operand);
3181  break;
3182  case EXTERNAL_INT32_ELEMENTS:
3183  case INT32_ELEMENTS:
3184  __ lw(result, mem_operand);
3185  break;
3186  case EXTERNAL_UINT32_ELEMENTS:
3187  case UINT32_ELEMENTS:
3188  __ lw(result, mem_operand);
3189  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3190  DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
3191  Operand(0x80000000));
3192  }
3193  break;
3194  case FLOAT32_ELEMENTS:
3195  case FLOAT64_ELEMENTS:
3196  case EXTERNAL_FLOAT32_ELEMENTS:
3197  case EXTERNAL_FLOAT64_ELEMENTS:
3198  case FAST_DOUBLE_ELEMENTS:
3199  case FAST_ELEMENTS:
3200  case FAST_SMI_ELEMENTS:
3201  case FAST_HOLEY_DOUBLE_ELEMENTS:
3202  case FAST_HOLEY_ELEMENTS:
3203  case FAST_HOLEY_SMI_ELEMENTS:
3204  case DICTIONARY_ELEMENTS:
3205  case SLOPPY_ARGUMENTS_ELEMENTS:
3206  UNREACHABLE();
3207  break;
3208  }
3209  }
3210 }
3211 
3212 
3213 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3214  Register elements = ToRegister(instr->elements());
3215  bool key_is_constant = instr->key()->IsConstantOperand();
3216  Register key = no_reg;
3217  DoubleRegister result = ToDoubleRegister(instr->result());
3218  Register scratch = scratch0();
3219 
3220  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3221 
3222  int base_offset = instr->base_offset();
3223  if (key_is_constant) {
3224  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3225  if (constant_key & 0xF0000000) {
3226  Abort(kArrayIndexConstantValueTooBig);
3227  }
3228  base_offset += constant_key * kDoubleSize;
3229  }
3230  __ Addu(scratch, elements, Operand(base_offset));
3231 
3232  if (!key_is_constant) {
3233  key = ToRegister(instr->key());
3234  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3235  ? (element_size_shift - kSmiTagSize) : element_size_shift;
3236  __ sll(at, key, shift_size);
3237  __ Addu(scratch, scratch, at);
3238  }
3239 
3240  __ ldc1(result, MemOperand(scratch));
3241 
3242  if (instr->hydrogen()->RequiresHoleCheck()) {
3243  __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
3244  DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
3245  }
3246 }
3247 
3248 
3249 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3250  Register elements = ToRegister(instr->elements());
3251  Register result = ToRegister(instr->result());
3252  Register scratch = scratch0();
3253  Register store_base = scratch;
3254  int offset = instr->base_offset();
3255 
3256  if (instr->key()->IsConstantOperand()) {
3257  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3258  offset += ToInteger32(const_operand) * kPointerSize;
3259  store_base = elements;
3260  } else {
3261  Register key = ToRegister(instr->key());
3262  // Even though the HLoadKeyed instruction forces the input
3263  // representation for the key to be an integer, the input gets replaced
3264  // during bound check elimination with the index argument to the bounds
3265  // check, which can be tagged, so that case must be handled here, too.
3266  if (instr->hydrogen()->key()->representation().IsSmi()) {
3267  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3268  __ addu(scratch, elements, scratch);
3269  } else {
3270  __ sll(scratch, key, kPointerSizeLog2);
3271  __ addu(scratch, elements, scratch);
3272  }
3273  }
3274  __ lw(result, MemOperand(store_base, offset));
3275 
3276  // Check for the hole value.
3277  if (instr->hydrogen()->RequiresHoleCheck()) {
3278  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3279  __ SmiTst(result, scratch);
3280  DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
3281  } else {
3282  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3283  DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
3284  }
3285  }
3286 }
3287 
3288 
3289 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3290  if (instr->is_typed_elements()) {
3291  DoLoadKeyedExternalArray(instr);
3292  } else if (instr->hydrogen()->representation().IsDouble()) {
3293  DoLoadKeyedFixedDoubleArray(instr);
3294  } else {
3295  DoLoadKeyedFixedArray(instr);
3296  }
3297 }
3298 
3299 
3300 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3301  Register base,
3302  bool key_is_constant,
3303  int constant_key,
3304  int element_size,
3305  int shift_size,
3306  int base_offset) {
3307  if (key_is_constant) {
3308  return MemOperand(base, (constant_key << element_size) + base_offset);
3309  }
3310 
3311  if (base_offset == 0) {
3312  if (shift_size >= 0) {
3313  __ sll(scratch0(), key, shift_size);
3314  __ Addu(scratch0(), base, scratch0());
3315  return MemOperand(scratch0());
3316  } else {
3317  DCHECK_EQ(-1, shift_size);
3318  __ srl(scratch0(), key, 1);
3319  __ Addu(scratch0(), base, scratch0());
3320  return MemOperand(scratch0());
3321  }
3322  }
3323 
3324  if (shift_size >= 0) {
3325  __ sll(scratch0(), key, shift_size);
3326  __ Addu(scratch0(), base, scratch0());
3327  return MemOperand(scratch0(), base_offset);
3328  } else {
3329  DCHECK_EQ(-1, shift_size);
3330  __ sra(scratch0(), key, 1);
3331  __ Addu(scratch0(), base, scratch0());
3332  return MemOperand(scratch0(), base_offset);
3333  }
3334 }
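
// Effective-address summary for the helper above (hypothetical sketch): a
// constant key folds entirely into the displacement, a register key is
// scaled by shift_size, and shift_size == -1 marks a smi key (tag in bit 0)
// that is untagged with a single right shift instead of being scaled.
static inline uintptr_t KeyedAddressSketch(uintptr_t base, int32_t key,
                                           int shift_size, int base_offset) {
  intptr_t scaled = shift_size >= 0
                        ? static_cast<intptr_t>(key) << shift_size
                        : static_cast<intptr_t>(key) >> 1;  // Untag smi key.
  return base + scaled + base_offset;
}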
3335 
3336 
3337 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3338  DCHECK(ToRegister(instr->context()).is(cp));
3339  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3340  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3341 
3342  if (FLAG_vector_ics) {
3343  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3344  }
3345 
3346  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3347  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3348 }
3349 
3350 
3351 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3352  Register scratch = scratch0();
3353  Register temp = scratch1();
3354  Register result = ToRegister(instr->result());
3355 
3356  if (instr->hydrogen()->from_inlined()) {
3357  __ Subu(result, sp, 2 * kPointerSize);
3358  } else {
3359  // Check if the calling frame is an arguments adaptor frame.
3360  Label done, adapted;
3361  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3362  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3363  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3364 
3365  // Result is the frame pointer for the frame if not adapted and for the real
3366  // frame below the adaptor frame if adapted.
3367  __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3368  __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3369  }
3370 }
3371 
3372 
3373 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3374  Register elem = ToRegister(instr->elements());
3375  Register result = ToRegister(instr->result());
3376 
3377  Label done;
3378 
3379  // If no arguments adaptor frame the number of arguments is fixed.
3380  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3381  __ Branch(&done, eq, fp, Operand(elem));
3382 
3383  // Arguments adaptor frame present. Get argument length from there.
3384  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3385  __ lw(result,
3386  MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3387  __ SmiUntag(result);
3388 
3389  // Argument length is in result register.
3390  __ bind(&done);
3391 }
3392 
3393 
3394 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3395  Register receiver = ToRegister(instr->receiver());
3396  Register function = ToRegister(instr->function());
3397  Register result = ToRegister(instr->result());
3398  Register scratch = scratch0();
3399 
3400  // If the receiver is null or undefined, we have to pass the global
3401  // object as a receiver to normal functions. Values have to be
3402  // passed unchanged to builtins and strict-mode functions.
3403  Label global_object, result_in_receiver;
3404 
3405  if (!instr->hydrogen()->known_function()) {
3406  // Do not transform the receiver to object for strict mode
3407  // functions.
3408  __ lw(scratch,
3409  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3410  __ lw(scratch,
3411  FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3412 
3413  // Do not transform the receiver to object for builtins.
3414  int32_t strict_mode_function_mask =
3415  1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3416  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3417  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3418  __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3419  }
3420 
3421  // Normal function. Replace undefined or null with global receiver.
3422  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3423  __ Branch(&global_object, eq, receiver, Operand(scratch));
3424  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3425  __ Branch(&global_object, eq, receiver, Operand(scratch));
3426 
3427  // Deoptimize if the receiver is not a JS object.
3428  __ SmiTst(receiver, scratch);
3429  DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
3430 
3431  __ GetObjectType(receiver, scratch, scratch);
3432  DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
3433  Operand(FIRST_SPEC_OBJECT_TYPE));
3434 
3435  __ Branch(&result_in_receiver);
3436  __ bind(&global_object);
3437  __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3438  __ lw(result,
3439  ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3440  __ lw(result,
3441  FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3442 
3443  if (result.is(receiver)) {
3444  __ bind(&result_in_receiver);
3445  } else {
3446  Label result_ok;
3447  __ Branch(&result_ok);
3448  __ bind(&result_in_receiver);
3449  __ mov(result, receiver);
3450  __ bind(&result_ok);
3451  }
3452 }
3453 
3454 
3455 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3456  Register receiver = ToRegister(instr->receiver());
3457  Register function = ToRegister(instr->function());
3458  Register length = ToRegister(instr->length());
3459  Register elements = ToRegister(instr->elements());
3460  Register scratch = scratch0();
3461  DCHECK(receiver.is(a0)); // Used for parameter count.
3462  DCHECK(function.is(a1)); // Required by InvokeFunction.
3463  DCHECK(ToRegister(instr->result()).is(v0));
3464 
3465  // Copy the arguments to this function possibly from the
3466  // adaptor frame below it.
3467  const uint32_t kArgumentsLimit = 1 * KB;
3468  DeoptimizeIf(hi, instr, "too many arguments", length,
3469  Operand(kArgumentsLimit));
3470 
3471  // Push the receiver and use the register to keep the original
3472  // number of arguments.
3473  __ push(receiver);
3474  __ Move(receiver, length);
3475  // The arguments are at a one pointer size offset from elements.
3476  __ Addu(elements, elements, Operand(1 * kPointerSize));
3477 
3478  // Loop through the arguments pushing them onto the execution
3479  // stack.
3480  Label invoke, loop;
3481  // length is a small non-negative integer, due to the test above.
3482  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3483  __ sll(scratch, length, 2);
3484  __ bind(&loop);
3485  __ Addu(scratch, elements, scratch);
3486  __ lw(scratch, MemOperand(scratch));
3487  __ push(scratch);
3488  __ Subu(length, length, Operand(1));
3489  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3490  __ sll(scratch, length, 2);
3491 
3492  __ bind(&invoke);
3493  DCHECK(instr->HasPointerMap());
3494  LPointerMap* pointers = instr->pointer_map();
3495  SafepointGenerator safepoint_generator(
3496  this, pointers, Safepoint::kLazyDeopt);
3497  // The number of arguments is stored in receiver which is a0, as expected
3498  // by InvokeFunction.
3499  ParameterCount actual(receiver);
3500  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3501 }
3502 
3503 
3504 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3505  LOperand* argument = instr->value();
3506  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3507  Abort(kDoPushArgumentNotImplementedForDoubleType);
3508  } else {
3509  Register argument_reg = EmitLoadRegister(argument, at);
3510  __ push(argument_reg);
3511  }
3512 }
3513 
3514 
3515 void LCodeGen::DoDrop(LDrop* instr) {
3516  __ Drop(instr->count());
3517 }
3518 
3519 
3520 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3521  Register result = ToRegister(instr->result());
3522  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3523 }
3524 
3525 
3526 void LCodeGen::DoContext(LContext* instr) {
3527  // If there is a non-return use, the context must be moved to a register.
3528  Register result = ToRegister(instr->result());
3529  if (info()->IsOptimizing()) {
3530  __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3531  } else {
3532  // If there is no frame, the context must be in cp.
3533  DCHECK(result.is(cp));
3534  }
3535 }
3536 
3537 
3538 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3539  DCHECK(ToRegister(instr->context()).is(cp));
3540  __ li(scratch0(), instr->hydrogen()->pairs());
3541  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3542  // The context is the first argument.
3543  __ Push(cp, scratch0(), scratch1());
3544  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3545 }
3546 
3547 
3548 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3549  int formal_parameter_count,
3550  int arity,
3551  LInstruction* instr,
3552  A1State a1_state) {
3553  bool dont_adapt_arguments =
3554  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3555  bool can_invoke_directly =
3556  dont_adapt_arguments || formal_parameter_count == arity;
3557 
3558  LPointerMap* pointers = instr->pointer_map();
3559 
3560  if (can_invoke_directly) {
3561  if (a1_state == A1_UNINITIALIZED) {
3562  __ li(a1, function);
3563  }
3564 
3565  // Change context.
3566  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3567 
3568  // Set a0 to the arguments count if adaptation is not needed. Assumes a0
3569  // is available to write to at this point.
3570  if (dont_adapt_arguments) {
3571  __ li(a0, Operand(arity));
3572  }
3573 
3574  // Invoke function.
3575  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3576  __ Call(at);
3577 
3578  // Set up deoptimization.
3579  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3580  } else {
3581  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3582  ParameterCount count(arity);
3583  ParameterCount expected(formal_parameter_count);
3584  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3585  }
3586 }
3587 
3588 
3589 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3590  DCHECK(instr->context() != NULL);
3591  DCHECK(ToRegister(instr->context()).is(cp));
3592  Register input = ToRegister(instr->value());
3593  Register result = ToRegister(instr->result());
3594  Register scratch = scratch0();
3595 
3596  // Deoptimize if not a heap number.
3597  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3598  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3599  DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
3600 
3601  Label done;
3602  Register exponent = scratch0();
3603  scratch = no_reg;
3604  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3605  // Check the sign of the argument. If the argument is positive, just
3606  // return it.
3607  __ Move(result, input);
3608  __ And(at, exponent, Operand(HeapNumber::kSignMask));
3609  __ Branch(&done, eq, at, Operand(zero_reg));
3610 
3611  // Input is negative. Reverse its sign.
3612  // Preserve the value of all registers.
3613  {
3614  PushSafepointRegistersScope scope(this);
3615 
3616  // Registers were saved at the safepoint, so we can use
3617  // many scratch registers.
3618  Register tmp1 = input.is(a1) ? a0 : a1;
3619  Register tmp2 = input.is(a2) ? a0 : a2;
3620  Register tmp3 = input.is(a3) ? a0 : a3;
3621  Register tmp4 = input.is(t0) ? a0 : t0;
3622 
3623  // exponent: floating point exponent value.
3624 
3625  Label allocated, slow;
3626  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3627  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3628  __ Branch(&allocated);
3629 
3630  // Slow case: Call the runtime system to do the number allocation.
3631  __ bind(&slow);
3632 
3633  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3634  instr->context());
3635  // Set the pointer to the new heap number in tmp.
3636  if (!tmp1.is(v0))
3637  __ mov(tmp1, v0);
3638  // Restore input_reg after call to runtime.
3639  __ LoadFromSafepointRegisterSlot(input, input);
3640  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3641 
3642  __ bind(&allocated);
3643  // exponent: floating point exponent value.
3644  // tmp1: allocated heap number.
3645  __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3646  __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3647  __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3648  __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3649 
3650  __ StoreToSafepointRegisterSlot(tmp1, result);
3651  }
3652 
3653  __ bind(&done);
3654 }
3655 
3656 
3657 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3658  Register input = ToRegister(instr->value());
3659  Register result = ToRegister(instr->result());
3660  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3661  Label done;
3662  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3663  __ mov(result, input);
3664  __ subu(result, zero_reg, input);
3665  // Overflow if result is still negative, i.e. 0x80000000.
3666  DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
3667  __ bind(&done);
3668 }
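
The deoptimization after the subu guards the single int32 input with no representable absolute value: negating INT32_MIN wraps back to INT32_MIN, which is still negative. A host-side demonstration (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t x = INT32_MIN;  // 0x80000000
      // Negate via unsigned arithmetic to show the two's-complement wrap
      // without invoking signed-overflow undefined behavior.
      int32_t negated = static_cast<int32_t>(0u - static_cast<uint32_t>(x));
      std::printf("%d -> %d (still negative: %d)\n", x, negated, negated < 0);
      return 0;
    }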
3669 
3670 
3671 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3672  // Class for deferred case.
3673  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3674  public:
3675  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3676  : LDeferredCode(codegen), instr_(instr) { }
3677  virtual void Generate() OVERRIDE {
3678  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3679  }
3680  virtual LInstruction* instr() OVERRIDE { return instr_; }
3681  private:
3682  LMathAbs* instr_;
3683  };
3684 
3685  Representation r = instr->hydrogen()->value()->representation();
3686  if (r.IsDouble()) {
3687  FPURegister input = ToDoubleRegister(instr->value());
3688  FPURegister result = ToDoubleRegister(instr->result());
3689  __ abs_d(result, input);
3690  } else if (r.IsSmiOrInteger32()) {
3691  EmitIntegerMathAbs(instr);
3692  } else {
3693  // Representation is tagged.
3694  DeferredMathAbsTaggedHeapNumber* deferred =
3695  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3696  Register input = ToRegister(instr->value());
3697  // Smi check.
3698  __ JumpIfNotSmi(input, deferred->entry());
3699  // If smi, handle it directly.
3700  EmitIntegerMathAbs(instr);
3701  __ bind(deferred->exit());
3702  }
3703 }
3704 
3705 
3706 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3707  DoubleRegister input = ToDoubleRegister(instr->value());
3708  Register result = ToRegister(instr->result());
3709  Register scratch1 = scratch0();
3710  Register except_flag = ToRegister(instr->temp());
3711 
3712  __ EmitFPUTruncate(kRoundToMinusInf,
3713  result,
3714  input,
3715  scratch1,
3716  double_scratch0(),
3717  except_flag);
3718 
3719  // Deopt if the operation did not succeed.
3720  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
3721  Operand(zero_reg));
3722 
3723  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3724  // Test for -0.
3725  Label done;
3726  __ Branch(&done, ne, result, Operand(zero_reg));
3727  __ Mfhc1(scratch1, input);
3728  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3729  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
3730  __ bind(&done);
3731  }
3732 }
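
The kBailoutOnMinusZero test is needed because floor yields an integer result of zero only for inputs in [+0, 1) and for -0 itself, and -0 cannot be represented as an int32; the sign bit is read with Mfhc1 because IEEE-754 comparison treats the two zeros as equal. Host-side illustration (not V8 code):

    #include <cmath>
    #include <cstdio>

    int main() {
      double plus_zero = 0.0, minus_zero = -0.0;
      std::printf("equal: %d\n", plus_zero == minus_zero);           // 1
      // std::signbit plays the role of the Mfhc1 + kSignMask test above.
      std::printf("signbit(-0.0): %d\n", std::signbit(minus_zero));  // 1
      return 0;
    }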
3733 
3734 
3735 void LCodeGen::DoMathRound(LMathRound* instr) {
3736  DoubleRegister input = ToDoubleRegister(instr->value());
3737  Register result = ToRegister(instr->result());
3738  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3739  Register scratch = scratch0();
3740  Label done, check_sign_on_zero;
3741 
3742  // Extract exponent bits.
3743  __ Mfhc1(result, input);
3744  __ Ext(scratch,
3745  result,
3746  HeapNumber::kExponentShift,
3747  HeapNumber::kExponentBits);
3748 
3749  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3750  Label skip1;
3751  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3752  __ mov(result, zero_reg);
3753  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3754  __ Branch(&check_sign_on_zero);
3755  } else {
3756  __ Branch(&done);
3757  }
3758  __ bind(&skip1);
3759 
3760  // The following conversion will not work with numbers
3761  // outside of ]-2^32, 2^32[.
3762  DeoptimizeIf(ge, instr, "overflow", scratch,
3763  Operand(HeapNumber::kExponentBias + 32));
3764 
3765  // Save the original sign for later comparison.
3766  __ And(scratch, result, Operand(HeapNumber::kSignMask));
3767 
3768  __ Move(double_scratch0(), 0.5);
3769  __ add_d(double_scratch0(), input, double_scratch0());
3770 
3771  // Check sign of the result: if the sign changed, the input
3772  // value was in ]-0.5, 0[ and the result should be -0.
3773  __ Mfhc1(result, double_scratch0());
3774  __ Xor(result, result, Operand(scratch));
3775  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3776  // ARM uses 'mi' here, which is 'lt'
3777  DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
3778  } else {
3779  Label skip2;
3780  // ARM uses 'mi' here, which is 'lt'
3781  // Negating it results in 'ge'
3782  __ Branch(&skip2, ge, result, Operand(zero_reg));
3783  __ mov(result, zero_reg);
3784  __ Branch(&done);
3785  __ bind(&skip2);
3786  }
3787 
3788  Register except_flag = scratch;
3789  __ EmitFPUTruncate(kRoundToMinusInf,
3790  result,
3791  double_scratch0(),
3792  at,
3793  double_scratch1,
3794  except_flag);
3795 
3796  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
3797  Operand(zero_reg));
3798 
3799  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3800  // Test for -0.
3801  __ Branch(&done, ne, result, Operand(zero_reg));
3802  __ bind(&check_sign_on_zero);
3803  __ Mfhc1(scratch, input);
3804  __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3805  DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
3806  }
3807  __ bind(&done);
3808 }
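
The sequence implements JS Math.round as "add 0.5, then round toward minus infinity", with the exponent test short-circuiting inputs in ]-0.5, +0.5[ and the sign checks preserving -0. A simplified host-side sketch of the numeric core (not V8 code; it ignores the -0 bookkeeping done above):

    #include <cmath>
    #include <cstdio>

    // Math.round semantics, minus the -0 handling.
    double JsRound(double x) { return std::floor(x + 0.5); }

    int main() {
      // 3, -2 (halfway cases round toward +infinity), 0 (JS would give -0)
      std::printf("%g %g %g\n", JsRound(2.5), JsRound(-2.5), JsRound(-0.4));
      return 0;
    }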
3809 
3810 
3811 void LCodeGen::DoMathFround(LMathFround* instr) {
3812  DoubleRegister input = ToDoubleRegister(instr->value());
3813  DoubleRegister result = ToDoubleRegister(instr->result());
3814  __ cvt_s_d(result.low(), input);
3815  __ cvt_d_s(result, result.low());
3816 }
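
cvt_s_d followed by cvt_d_s is exactly Math.fround: round the double to the nearest float32, then widen back. The host equivalent is a pair of casts (a sketch, not V8 code):

    #include <cstdio>

    double Fround(double x) {  // narrow to float, widen back to double
      return static_cast<double>(static_cast<float>(x));
    }

    int main() {
      std::printf("%.17g\n", Fround(0.1));  // 0.10000000149011612
      return 0;
    }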
3817 
3818 
3819 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3820  DoubleRegister input = ToDoubleRegister(instr->value());
3821  DoubleRegister result = ToDoubleRegister(instr->result());
3822  __ sqrt_d(result, input);
3823 }
3824 
3825 
3826 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3827  DoubleRegister input = ToDoubleRegister(instr->value());
3828  DoubleRegister result = ToDoubleRegister(instr->result());
3829  DoubleRegister temp = ToDoubleRegister(instr->temp());
3830 
3831  DCHECK(!input.is(result));
3832 
3833  // Note that according to ECMA-262 15.8.2.13:
3834  // Math.pow(-Infinity, 0.5) == Infinity
3835  // Math.sqrt(-Infinity) == NaN
3836  Label done;
3837  __ Move(temp, -V8_INFINITY);
3838  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3839  // Set up Infinity in the delay slot.
3840  // result is overwritten if the branch is not taken.
3841  __ neg_d(result, temp);
3842 
3843  // Add +0 to convert -0 to +0.
3844  __ add_d(result, input, kDoubleRegZero);
3845  __ sqrt_d(result, result);
3846  __ bind(&done);
3847 }
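
The -Infinity special case exists because ES5 15.8.2.13 defines Math.pow(-Infinity, 0.5) as +Infinity even though sqrt(-Infinity) is NaN, and the "add +0" step maps -0 to +0 before the square root. Host-side sketch (not V8 code):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    double PowHalf(double x) {
      const double inf = std::numeric_limits<double>::infinity();
      if (x == -inf) return inf;   // per ES5 15.8.2.13
      return std::sqrt(x + 0.0);   // -0 + 0 == +0, so the result is +0, not -0
    }

    int main() {
      std::printf("%g %g\n", PowHalf(-INFINITY), std::sqrt(-INFINITY));  // inf nan
      return 0;
    }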
3848 
3849 
3850 void LCodeGen::DoPower(LPower* instr) {
3851  Representation exponent_type = instr->hydrogen()->right()->representation();
3852  // Having marked this as a call, we can use any registers.
3853  // Just make sure that the input/output registers are the expected ones.
3854  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3855  DCHECK(!instr->right()->IsDoubleRegister() ||
3856  ToDoubleRegister(instr->right()).is(f4));
3857  DCHECK(!instr->right()->IsRegister() ||
3858  ToRegister(instr->right()).is(tagged_exponent));
3859  DCHECK(ToDoubleRegister(instr->left()).is(f2));
3860  DCHECK(ToDoubleRegister(instr->result()).is(f0));
3861 
3862  if (exponent_type.IsSmi()) {
3863  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3864  __ CallStub(&stub);
3865  } else if (exponent_type.IsTagged()) {
3866  Label no_deopt;
3867  __ JumpIfSmi(tagged_exponent, &no_deopt);
3868  DCHECK(!t3.is(tagged_exponent));
3869  __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3870  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3871  DeoptimizeIf(ne, instr, "not a heap number", t3, Operand(at));
3872  __ bind(&no_deopt);
3873  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3874  __ CallStub(&stub);
3875  } else if (exponent_type.IsInteger32()) {
3876  MathPowStub stub(isolate(), MathPowStub::INTEGER);
3877  __ CallStub(&stub);
3878  } else {
3879  DCHECK(exponent_type.IsDouble());
3880  MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3881  __ CallStub(&stub);
3882  }
3883 }
3884 
3885 
3886 void LCodeGen::DoMathExp(LMathExp* instr) {
3887  DoubleRegister input = ToDoubleRegister(instr->value());
3888  DoubleRegister result = ToDoubleRegister(instr->result());
3889  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3890  DoubleRegister double_scratch2 = double_scratch0();
3891  Register temp1 = ToRegister(instr->temp1());
3892  Register temp2 = ToRegister(instr->temp2());
3893 
3894  MathExpGenerator::EmitMathExp(
3895  masm(), input, result, double_scratch1, double_scratch2,
3896  temp1, temp2, scratch0());
3897 }
3898 
3899 
3900 void LCodeGen::DoMathLog(LMathLog* instr) {
3901  __ PrepareCallCFunction(0, 1, scratch0());
3902  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3903  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3904  0, 1);
3905  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3906 }
3907 
3908 
3909 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3910  Register input = ToRegister(instr->value());
3911  Register result = ToRegister(instr->result());
3912  __ Clz(result, input);
3913 }
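
Clz counts leading zero bits, and the MIPS clz instruction conveniently returns 32 for a zero input, matching Math.clz32(0). On the host, GCC/Clang's __builtin_clz leaves zero undefined, so a sketch must special-case it (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int32_t Clz32(uint32_t x) {
      return x == 0 ? 32 : __builtin_clz(x);  // __builtin_clz(0) is undefined
    }

    int main() {
      std::printf("%d %d %d\n", Clz32(0), Clz32(1), Clz32(0x80000000u));  // 32 31 0
      return 0;
    }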
3914 
3915 
3916 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3917  DCHECK(ToRegister(instr->context()).is(cp));
3918  DCHECK(ToRegister(instr->function()).is(a1));
3919  DCHECK(instr->HasPointerMap());
3920 
3921  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3922  if (known_function.is_null()) {
3923  LPointerMap* pointers = instr->pointer_map();
3924  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3925  ParameterCount count(instr->arity());
3926  __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
3927  } else {
3928  CallKnownFunction(known_function,
3929  instr->hydrogen()->formal_parameter_count(),
3930  instr->arity(),
3931  instr,
3932  A1_CONTAINS_TARGET);
3933  }
3934 }
3935 
3936 
3937 void LCodeGen::DoTailCallThroughMegamorphicCache(
3938  LTailCallThroughMegamorphicCache* instr) {
3939  Register receiver = ToRegister(instr->receiver());
3940  Register name = ToRegister(instr->name());
3941  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3942  DCHECK(name.is(LoadDescriptor::NameRegister()));
3943  DCHECK(receiver.is(a1));
3944  DCHECK(name.is(a2));
3945 
3946  Register scratch = a3;
3947  Register extra = t0;
3948  Register extra2 = t1;
3949  Register extra3 = t2;
3950 
3951  // Important for the tail-call.
3952  bool must_teardown_frame = NeedsEagerFrame();
3953 
3954  // The probe will tail call to a handler if found.
3955  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3956  must_teardown_frame, receiver, name,
3957  scratch, extra, extra2, extra3);
3958 
3959  // Tail call to miss if we ended up here.
3960  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
3961  LoadIC::GenerateMiss(masm());
3962 }
3963 
3964 
3965 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3966  DCHECK(ToRegister(instr->result()).is(v0));
3967 
3968  LPointerMap* pointers = instr->pointer_map();
3969  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3970 
3971  if (instr->target()->IsConstantOperand()) {
3972  LConstantOperand* target = LConstantOperand::cast(instr->target());
3973  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3974  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3975  __ Call(code, RelocInfo::CODE_TARGET);
3976  } else {
3977  DCHECK(instr->target()->IsRegister());
3978  Register target = ToRegister(instr->target());
3979  generator.BeforeCall(__ CallSize(target));
3980  __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3981  __ Call(target);
3982  }
3983  generator.AfterCall();
3984 }
3985 
3986 
3987 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3988  DCHECK(ToRegister(instr->function()).is(a1));
3989  DCHECK(ToRegister(instr->result()).is(v0));
3990 
3991  if (instr->hydrogen()->pass_argument_count()) {
3992  __ li(a0, Operand(instr->arity()));
3993  }
3994 
3995  // Change context.
3996  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3997 
3998  // Load the code entry address
3999  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4000  __ Call(at);
4001 
4002  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4003 }
4004 
4005 
4006 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4007  DCHECK(ToRegister(instr->context()).is(cp));
4008  DCHECK(ToRegister(instr->function()).is(a1));
4009  DCHECK(ToRegister(instr->result()).is(v0));
4010 
4011  int arity = instr->arity();
4012  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4013  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4014 }
4015 
4016 
4017 void LCodeGen::DoCallNew(LCallNew* instr) {
4018  DCHECK(ToRegister(instr->context()).is(cp));
4019  DCHECK(ToRegister(instr->constructor()).is(a1));
4020  DCHECK(ToRegister(instr->result()).is(v0));
4021 
4022  __ li(a0, Operand(instr->arity()));
4023  // No cell in a2 for construct type feedback in optimized code
4024  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
4025  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4026  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4027 }
4028 
4029 
4030 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4031  DCHECK(ToRegister(instr->context()).is(cp));
4032  DCHECK(ToRegister(instr->constructor()).is(a1));
4033  DCHECK(ToRegister(instr->result()).is(v0));
4034 
4035  __ li(a0, Operand(instr->arity()));
4036  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
4037  ElementsKind kind = instr->hydrogen()->elements_kind();
4038  AllocationSiteOverrideMode override_mode =
4039  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4040  ? DISABLE_ALLOCATION_SITES
4041  : DONT_OVERRIDE;
4042 
4043  if (instr->arity() == 0) {
4044  ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4045  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4046  } else if (instr->arity() == 1) {
4047  Label done;
4048  if (IsFastPackedElementsKind(kind)) {
4049  Label packed_case;
4050  // We might need a change here,
4051  // look at the first argument.
4052  __ lw(t1, MemOperand(sp, 0));
4053  __ Branch(&packed_case, eq, t1, Operand(zero_reg));
4054 
4055  ElementsKind holey_kind = GetHoleyElementsKind(kind);
4056  ArraySingleArgumentConstructorStub stub(isolate(),
4057  holey_kind,
4058  override_mode);
4059  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4060  __ jmp(&done);
4061  __ bind(&packed_case);
4062  }
4063 
4064  ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4065  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4066  __ bind(&done);
4067  } else {
4068  ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4069  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4070  }
4071 }
4072 
4073 
4074 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4075  CallRuntime(instr->function(), instr->arity(), instr);
4076 }
4077 
4078 
4079 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4080  Register function = ToRegister(instr->function());
4081  Register code_object = ToRegister(instr->code_object());
4082  __ Addu(code_object, code_object,
4083  Operand(Code::kHeaderSize - kHeapObjectTag));
4084  __ sw(code_object,
4085  FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4086 }
4087 
4088 
4089 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4090  Register result = ToRegister(instr->result());
4091  Register base = ToRegister(instr->base_object());
4092  if (instr->offset()->IsConstantOperand()) {
4093  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4094  __ Addu(result, base, Operand(ToInteger32(offset)));
4095  } else {
4096  Register offset = ToRegister(instr->offset());
4097  __ Addu(result, base, offset);
4098  }
4099 }
4100 
4101 
4102 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4103  Representation representation = instr->representation();
4104 
4105  Register object = ToRegister(instr->object());
4106  Register scratch = scratch0();
4107  HObjectAccess access = instr->hydrogen()->access();
4108  int offset = access.offset();
4109 
4110  if (access.IsExternalMemory()) {
4111  Register value = ToRegister(instr->value());
4112  MemOperand operand = MemOperand(object, offset);
4113  __ Store(value, operand, representation);
4114  return;
4115  }
4116 
4117  __ AssertNotSmi(object);
4118 
4119  DCHECK(!representation.IsSmi() ||
4120  !instr->value()->IsConstantOperand() ||
4121  IsSmi(LConstantOperand::cast(instr->value())));
4122  if (representation.IsDouble()) {
4123  DCHECK(access.IsInobject());
4124  DCHECK(!instr->hydrogen()->has_transition());
4125  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4126  DoubleRegister value = ToDoubleRegister(instr->value());
4127  __ sdc1(value, FieldMemOperand(object, offset));
4128  return;
4129  }
4130 
4131  if (instr->hydrogen()->has_transition()) {
4132  Handle<Map> transition = instr->hydrogen()->transition_map();
4133  AddDeprecationDependency(transition);
4134  __ li(scratch, Operand(transition));
4135  __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4136  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4137  Register temp = ToRegister(instr->temp());
4138  // Update the write barrier for the map field.
4139  __ RecordWriteForMap(object,
4140  scratch,
4141  temp,
4142  GetRAState(),
4143  kSaveFPRegs);
4144  }
4145  }
4146 
4147  // Do the store.
4148  Register value = ToRegister(instr->value());
4149  if (access.IsInobject()) {
4150  MemOperand operand = FieldMemOperand(object, offset);
4151  __ Store(value, operand, representation);
4152  if (instr->hydrogen()->NeedsWriteBarrier()) {
4153  // Update the write barrier for the object for in-object properties.
4154  __ RecordWriteField(object,
4155  offset,
4156  value,
4157  scratch,
4158  GetRAState(),
4159  kSaveFPRegs,
4160  EMIT_REMEMBERED_SET,
4161  instr->hydrogen()->SmiCheckForWriteBarrier(),
4162  instr->hydrogen()->PointersToHereCheckForValue());
4163  }
4164  } else {
4165  __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4166  MemOperand operand = FieldMemOperand(scratch, offset);
4167  __ Store(value, operand, representation);
4168  if (instr->hydrogen()->NeedsWriteBarrier()) {
4169  // Update the write barrier for the properties array.
4170  // object is used as a scratch register.
4171  __ RecordWriteField(scratch,
4172  offset,
4173  value,
4174  object,
4175  GetRAState(),
4176  kSaveFPRegs,
4177  EMIT_REMEMBERED_SET,
4178  instr->hydrogen()->SmiCheckForWriteBarrier(),
4179  instr->hydrogen()->PointersToHereCheckForValue());
4180  }
4181  }
4182 }
4183 
4184 
4185 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4186  DCHECK(ToRegister(instr->context()).is(cp));
4187  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4188  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4189 
4190  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
4191  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4192  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4193 }
4194 
4195 
4196 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4197  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4198  Operand operand(0);
4199  Register reg;
4200  if (instr->index()->IsConstantOperand()) {
4201  operand = ToOperand(instr->index());
4202  reg = ToRegister(instr->length());
4203  cc = CommuteCondition(cc);
4204  } else {
4205  reg = ToRegister(instr->index());
4206  operand = ToOperand(instr->length());
4207  }
4208  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4209  Label done;
4210  __ Branch(&done, NegateCondition(cc), reg, operand);
4211  __ stop("eliminated bounds check failed");
4212  __ bind(&done);
4213  } else {
4214  DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
4215  }
4216 }
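
When the index is a compile-time constant it has to sit on the Operand side of the comparison, so the condition is commuted: "deopt if index >= length" becomes "deopt if length <= index". A sketch of the equivalence with plain unsigned comparisons (not V8 code; assumes allow_equality is false, i.e. the hs case):

    #include <cstdio>

    bool OutOfBounds(unsigned index, unsigned length) {
      return index >= length;           // register index: condition hs
    }
    bool OutOfBoundsCommuted(unsigned length, unsigned index) {
      return length <= index;           // constant index: commuted condition
    }

    int main() {
      std::printf("%d %d\n", OutOfBounds(3, 3), OutOfBoundsCommuted(3, 3));  // 1 1
      return 0;
    }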
4217 
4218 
4219 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4220  Register external_pointer = ToRegister(instr->elements());
4221  Register key = no_reg;
4222  ElementsKind elements_kind = instr->elements_kind();
4223  bool key_is_constant = instr->key()->IsConstantOperand();
4224  int constant_key = 0;
4225  if (key_is_constant) {
4226  constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4227  if (constant_key & 0xF0000000) {
4228  Abort(kArrayIndexConstantValueTooBig);
4229  }
4230  } else {
4231  key = ToRegister(instr->key());
4232  }
4233  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4234  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4235  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4236  int base_offset = instr->base_offset();
4237 
4238  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4239  elements_kind == FLOAT32_ELEMENTS ||
4240  elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4241  elements_kind == FLOAT64_ELEMENTS) {
4242  Register address = scratch0();
4243  FPURegister value(ToDoubleRegister(instr->value()));
4244  if (key_is_constant) {
4245  if (constant_key != 0) {
4246  __ Addu(address, external_pointer,
4247  Operand(constant_key << element_size_shift));
4248  } else {
4249  address = external_pointer;
4250  }
4251  } else {
4252  __ sll(address, key, shift_size);
4253  __ Addu(address, external_pointer, address);
4254  }
4255 
4256  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4257  elements_kind == FLOAT32_ELEMENTS) {
4258  __ cvt_s_d(double_scratch0(), value);
4259  __ swc1(double_scratch0(), MemOperand(address, base_offset));
4260  } else { // Storing doubles, not floats.
4261  __ sdc1(value, MemOperand(address, base_offset));
4262  }
4263  } else {
4264  Register value(ToRegister(instr->value()));
4265  MemOperand mem_operand = PrepareKeyedOperand(
4266  key, external_pointer, key_is_constant, constant_key,
4267  element_size_shift, shift_size,
4268  base_offset);
4269  switch (elements_kind) {
4270  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4271  case EXTERNAL_INT8_ELEMENTS:
4272  case EXTERNAL_UINT8_ELEMENTS:
4273  case UINT8_ELEMENTS:
4274  case UINT8_CLAMPED_ELEMENTS:
4275  case INT8_ELEMENTS:
4276  __ sb(value, mem_operand);
4277  break;
4278  case EXTERNAL_INT16_ELEMENTS:
4279  case EXTERNAL_UINT16_ELEMENTS:
4280  case INT16_ELEMENTS:
4281  case UINT16_ELEMENTS:
4282  __ sh(value, mem_operand);
4283  break;
4284  case EXTERNAL_INT32_ELEMENTS:
4285  case EXTERNAL_UINT32_ELEMENTS:
4286  case INT32_ELEMENTS:
4287  case UINT32_ELEMENTS:
4288  __ sw(value, mem_operand);
4289  break;
4290  case FLOAT32_ELEMENTS:
4291  case FLOAT64_ELEMENTS:
4292  case EXTERNAL_FLOAT32_ELEMENTS:
4293  case EXTERNAL_FLOAT64_ELEMENTS:
4294  case FAST_DOUBLE_ELEMENTS:
4295  case FAST_ELEMENTS:
4296  case FAST_SMI_ELEMENTS:
4297  case FAST_HOLEY_DOUBLE_ELEMENTS:
4298  case FAST_HOLEY_ELEMENTS:
4299  case FAST_HOLEY_SMI_ELEMENTS:
4300  case DICTIONARY_ELEMENTS:
4301  case SLOPPY_ARGUMENTS_ELEMENTS:
4302  UNREACHABLE();
4303  break;
4304  }
4305  }
4306 }
4307 
4308 
4309 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4310  DoubleRegister value = ToDoubleRegister(instr->value());
4311  Register elements = ToRegister(instr->elements());
4312  Register scratch = scratch0();
4313  DoubleRegister double_scratch = double_scratch0();
4314  bool key_is_constant = instr->key()->IsConstantOperand();
4315  int base_offset = instr->base_offset();
4316  Label not_nan, done;
4317 
4318  // Calculate the effective address of the slot in the array to store the
4319  // double value.
4320  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4321  if (key_is_constant) {
4322  int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4323  if (constant_key & 0xF0000000) {
4324  Abort(kArrayIndexConstantValueTooBig);
4325  }
4326  __ Addu(scratch, elements,
4327  Operand((constant_key << element_size_shift) + base_offset));
4328  } else {
4329  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4330  ? (element_size_shift - kSmiTagSize) : element_size_shift;
4331  __ Addu(scratch, elements, Operand(base_offset));
4332  __ sll(at, ToRegister(instr->key()), shift_size);
4333  __ Addu(scratch, scratch, at);
4334  }
4335 
4336  if (instr->NeedsCanonicalization()) {
4337  Label is_nan;
4338  // Check for NaN. All NaNs must be canonicalized.
4339  __ BranchF(NULL, &is_nan, eq, value, value);
4340  __ Branch(&not_nan);
4341 
4342  // Only load canonical NaN if the comparison above set the overflow.
4343  __ bind(&is_nan);
4344  __ LoadRoot(at, Heap::kNanValueRootIndex);
4345  __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
4346  __ sdc1(double_scratch, MemOperand(scratch, 0));
4347  __ Branch(&done);
4348  }
4349 
4350  __ bind(&not_nan);
4351  __ sdc1(value, MemOperand(scratch, 0));
4352  __ bind(&done);
4353 }
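
Canonicalization replaces every NaN with the one canonical NaN before storing, so that arbitrary NaN payloads can never collide with the hole's reserved NaN bit pattern in a holey double array. Host-side sketch of the predicate (not V8 code):

    #include <limits>

    // Mirrors the BranchF(eq, value, value) test above: x != x iff x is NaN,
    // and any NaN is replaced by the canonical quiet NaN before the store.
    double Canonicalize(double x) {
      return x != x ? std::numeric_limits<double>::quiet_NaN() : x;
    }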
4354 
4355 
4356 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4357  Register value = ToRegister(instr->value());
4358  Register elements = ToRegister(instr->elements());
4359  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4360  : no_reg;
4361  Register scratch = scratch0();
4362  Register store_base = scratch;
4363  int offset = instr->base_offset();
4364 
4365  // Do the store.
4366  if (instr->key()->IsConstantOperand()) {
4367  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4368  LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4369  offset += ToInteger32(const_operand) * kPointerSize;
4370  store_base = elements;
4371  } else {
4372  // Even though the HLoadKeyed instruction forces the input
4373  // representation for the key to be an integer, the input gets replaced
4374  // during bound check elimination with the index argument to the bounds
4375  // check, which can be tagged, so that case must be handled here, too.
4376  if (instr->hydrogen()->key()->representation().IsSmi()) {
4377  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
4378  __ addu(scratch, elements, scratch);
4379  } else {
4380  __ sll(scratch, key, kPointerSizeLog2);
4381  __ addu(scratch, elements, scratch);
4382  }
4383  }
4384  __ sw(value, MemOperand(store_base, offset));
4385 
4386  if (instr->hydrogen()->NeedsWriteBarrier()) {
4387  SmiCheck check_needed =
4388  instr->hydrogen()->value()->type().IsHeapObject()
4389  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4390  // Compute address of modified element and store it into key register.
4391  __ Addu(key, store_base, Operand(offset));
4392  __ RecordWrite(elements,
4393  key,
4394  value,
4395  GetRAState(),
4396  kSaveFPRegs,
4397  EMIT_REMEMBERED_SET,
4398  check_needed,
4399  instr->hydrogen()->PointersToHereCheckForValue());
4400  }
4401 }
4402 
4403 
4404 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4405  // By cases: external array, fast double array, fixed array.
4406  if (instr->is_typed_elements()) {
4407  DoStoreKeyedExternalArray(instr);
4408  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4409  DoStoreKeyedFixedDoubleArray(instr);
4410  } else {
4411  DoStoreKeyedFixedArray(instr);
4412  }
4413 }
4414 
4415 
4416 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4417  DCHECK(ToRegister(instr->context()).is(cp));
4418  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4419  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4420  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4421 
4422  Handle<Code> ic =
4423  CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4424  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4425 }
4426 
4427 
4428 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4429  Register object_reg = ToRegister(instr->object());
4430  Register scratch = scratch0();
4431 
4432  Handle<Map> from_map = instr->original_map();
4433  Handle<Map> to_map = instr->transitioned_map();
4434  ElementsKind from_kind = instr->from_kind();
4435  ElementsKind to_kind = instr->to_kind();
4436 
4437  Label not_applicable;
4438  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4439  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4440 
4441  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4442  Register new_map_reg = ToRegister(instr->new_map_temp());
4443  __ li(new_map_reg, Operand(to_map));
4444  __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4445  // Write barrier.
4446  __ RecordWriteForMap(object_reg,
4447  new_map_reg,
4448  scratch,
4449  GetRAState(),
4450  kDontSaveFPRegs);
4451  } else {
4452  DCHECK(object_reg.is(a0));
4453  DCHECK(ToRegister(instr->context()).is(cp));
4454  PushSafepointRegistersScope scope(this);
4455  __ li(a1, Operand(to_map));
4456  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4457  TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4458  __ CallStub(&stub);
4459  RecordSafepointWithRegisters(
4460  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4461  }
4462  __ bind(&not_applicable);
4463 }
4464 
4465 
4466 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4467  Register object = ToRegister(instr->object());
4468  Register temp = ToRegister(instr->temp());
4469  Label no_memento_found;
4470  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4471  ne, &no_memento_found);
4472  DeoptimizeIf(al, instr);
4473  __ bind(&no_memento_found);
4474 }
4475 
4476 
4477 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4478  DCHECK(ToRegister(instr->context()).is(cp));
4479  DCHECK(ToRegister(instr->left()).is(a1));
4480  DCHECK(ToRegister(instr->right()).is(a0));
4481  StringAddStub stub(isolate(),
4482  instr->hydrogen()->flags(),
4483  instr->hydrogen()->pretenure_flag());
4484  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4485 }
4486 
4487 
4488 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4489  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4490  public:
4491  DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4492  : LDeferredCode(codegen), instr_(instr) { }
4493  virtual void Generate() OVERRIDE {
4494  codegen()->DoDeferredStringCharCodeAt(instr_);
4495  }
4496  virtual LInstruction* instr() OVERRIDE { return instr_; }
4497  private:
4498  LStringCharCodeAt* instr_;
4499  };
4500 
4501  DeferredStringCharCodeAt* deferred =
4502  new(zone()) DeferredStringCharCodeAt(this, instr);
4503  StringCharLoadGenerator::Generate(masm(),
4504  ToRegister(instr->string()),
4505  ToRegister(instr->index()),
4506  ToRegister(instr->result()),
4507  deferred->entry());
4508  __ bind(deferred->exit());
4509 }
4510 
4511 
4512 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4513  Register string = ToRegister(instr->string());
4514  Register result = ToRegister(instr->result());
4515  Register scratch = scratch0();
4516 
4517  // TODO(3095996): Get rid of this. For now, we need to make the
4518  // result register contain a valid pointer because it is already
4519  // contained in the register pointer map.
4520  __ mov(result, zero_reg);
4521 
4522  PushSafepointRegistersScope scope(this);
4523  __ push(string);
4524  // Push the index as a smi. This is safe because of the checks in
4525  // DoStringCharCodeAt above.
4526  if (instr->index()->IsConstantOperand()) {
4527  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4528  __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4529  __ push(scratch);
4530  } else {
4531  Register index = ToRegister(instr->index());
4532  __ SmiTag(index);
4533  __ push(index);
4534  }
4535  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4536  instr->context());
4537  __ AssertSmi(v0);
4538  __ SmiUntag(v0);
4539  __ StoreToSafepointRegisterSlot(v0, result);
4540 }
4541 
4542 
4543 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4544  class DeferredStringCharFromCode FINAL : public LDeferredCode {
4545  public:
4546  DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4547  : LDeferredCode(codegen), instr_(instr) { }
4548  virtual void Generate() OVERRIDE {
4549  codegen()->DoDeferredStringCharFromCode(instr_);
4550  }
4551  virtual LInstruction* instr() OVERRIDE { return instr_; }
4552  private:
4553  LStringCharFromCode* instr_;
4554  };
4555 
4556  DeferredStringCharFromCode* deferred =
4557  new(zone()) DeferredStringCharFromCode(this, instr);
4558 
4559  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4560  Register char_code = ToRegister(instr->char_code());
4561  Register result = ToRegister(instr->result());
4562  Register scratch = scratch0();
4563  DCHECK(!char_code.is(result));
4564 
4565  __ Branch(deferred->entry(), hi,
4566  char_code, Operand(String::kMaxOneByteCharCode));
4567  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4568  __ sll(scratch, char_code, kPointerSizeLog2);
4569  __ Addu(result, result, scratch);
4570  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4571  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4572  __ Branch(deferred->entry(), eq, result, Operand(scratch));
4573  __ bind(deferred->exit());
4574 }
4575 
4576 
4577 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4578  Register char_code = ToRegister(instr->char_code());
4579  Register result = ToRegister(instr->result());
4580 
4581  // TODO(3095996): Get rid of this. For now, we need to make the
4582  // result register contain a valid pointer because it is already
4583  // contained in the register pointer map.
4584  __ mov(result, zero_reg);
4585 
4586  PushSafepointRegistersScope scope(this);
4587  __ SmiTag(char_code);
4588  __ push(char_code);
4589  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4590  __ StoreToSafepointRegisterSlot(v0, result);
4591 }
4592 
4593 
4594 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4595  LOperand* input = instr->value();
4596  DCHECK(input->IsRegister() || input->IsStackSlot());
4597  LOperand* output = instr->result();
4598  DCHECK(output->IsDoubleRegister());
4599  FPURegister single_scratch = double_scratch0().low();
4600  if (input->IsStackSlot()) {
4601  Register scratch = scratch0();
4602  __ lw(scratch, ToMemOperand(input));
4603  __ mtc1(scratch, single_scratch);
4604  } else {
4605  __ mtc1(ToRegister(input), single_scratch);
4606  }
4607  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4608 }
4609 
4610 
4611 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4612  LOperand* input = instr->value();
4613  LOperand* output = instr->result();
4614 
4615  FPURegister dbl_scratch = double_scratch0();
4616  __ mtc1(ToRegister(input), dbl_scratch);
4617  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
4618 }
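
A dedicated Cvt_d_uw macro is needed because the FPU's plain cvt.d.w instruction interprets the register bits as a signed int32, so any value of 2^31 or above would convert to a negative double. Host-side demonstration of the pitfall (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t x = 0x80000000u;  // 2^31: valid uint32, out of int32 range
      // The int32_t narrowing is implementation-defined pre-C++20; it is
      // shown only to mimic the signed reinterpretation done by cvt.d.w.
      double as_signed = static_cast<double>(static_cast<int32_t>(x));
      double as_unsigned = static_cast<double>(x);
      std::printf("%g vs %g\n", as_signed, as_unsigned);
      // -2.14748e+09 vs 2.14748e+09
      return 0;
    }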
4619 
4620 
4621 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4622  class DeferredNumberTagI FINAL : public LDeferredCode {
4623  public:
4624  DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4625  : LDeferredCode(codegen), instr_(instr) { }
4626  virtual void Generate() OVERRIDE {
4627  codegen()->DoDeferredNumberTagIU(instr_,
4628  instr_->value(),
4629  instr_->temp1(),
4630  instr_->temp2(),
4631  SIGNED_INT32);
4632  }
4633  virtual LInstruction* instr() OVERRIDE { return instr_; }
4634  private:
4635  LNumberTagI* instr_;
4636  };
4637 
4638  Register src = ToRegister(instr->value());
4639  Register dst = ToRegister(instr->result());
4640  Register overflow = scratch0();
4641 
4642  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4643  __ SmiTagCheckOverflow(dst, src, overflow);
4644  __ BranchOnOverflow(deferred->entry(), overflow);
4645  __ bind(deferred->exit());
4646 }
4647 
4648 
4649 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4650  class DeferredNumberTagU FINAL : public LDeferredCode {
4651  public:
4652  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4653  : LDeferredCode(codegen), instr_(instr) { }
4654  virtual void Generate() OVERRIDE {
4655  codegen()->DoDeferredNumberTagIU(instr_,
4656  instr_->value(),
4657  instr_->temp1(),
4658  instr_->temp2(),
4659  UNSIGNED_INT32);
4660  }
4661  virtual LInstruction* instr() OVERRIDE { return instr_; }
4662  private:
4663  LNumberTagU* instr_;
4664  };
4665 
4666  Register input = ToRegister(instr->value());
4667  Register result = ToRegister(instr->result());
4668 
4669  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4670  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4671  __ SmiTag(result, input);
4672  __ bind(deferred->exit());
4673 }
4674 
4675 
4676 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4677  LOperand* value,
4678  LOperand* temp1,
4679  LOperand* temp2,
4680  IntegerSignedness signedness) {
4681  Label done, slow;
4682  Register src = ToRegister(value);
4683  Register dst = ToRegister(instr->result());
4684  Register tmp1 = scratch0();
4685  Register tmp2 = ToRegister(temp1);
4686  Register tmp3 = ToRegister(temp2);
4687  DoubleRegister dbl_scratch = double_scratch0();
4688 
4689  if (signedness == SIGNED_INT32) {
4690  // There was overflow, so bits 30 and 31 of the original integer
4691  // disagree. Try to allocate a heap number in new space and store
4692  // the value in there. If that fails, call the runtime system.
4693  if (dst.is(src)) {
4694  __ SmiUntag(src, dst);
4695  __ Xor(src, src, Operand(0x80000000));
4696  }
4697  __ mtc1(src, dbl_scratch);
4698  __ cvt_d_w(dbl_scratch, dbl_scratch);
4699  } else {
4700  __ mtc1(src, dbl_scratch);
4701  __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4702  }
4703 
4704  if (FLAG_inline_new) {
4705  __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4706  __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4707  __ Branch(&done);
4708  }
4709 
4710  // Slow case: Call the runtime system to do the number allocation.
4711  __ bind(&slow);
4712  {
4713  // TODO(3095996): Put a valid pointer value in the stack slot where the
4714  // result register is stored, as this register is in the pointer map, but
4715  // contains an integer value.
4716  __ mov(dst, zero_reg);
4717 
4718  // Preserve the value of all registers.
4719  PushSafepointRegistersScope scope(this);
4720 
4721  // NumberTagI and NumberTagD use the context from the frame, rather than
4722  // the environment's HContext or HInlinedContext value.
4723  // They only call Runtime::kAllocateHeapNumber.
4724  // The corresponding HChange instructions are added in a phase that does
4725  // not have easy access to the local context.
4726  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4727  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4728  RecordSafepointWithRegisters(
4729  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4730  __ Subu(v0, v0, kHeapObjectTag);
4731  __ StoreToSafepointRegisterSlot(v0, dst);
4732  }
4733 
4734 
4735  // Done. Put the value in dbl_scratch into the value of the allocated heap
4736  // number.
4737  __ bind(&done);
4738  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
4739  __ Addu(dst, dst, kHeapObjectTag);
4740 }
4741 
4742 
4743 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4744  class DeferredNumberTagD FINAL : public LDeferredCode {
4745  public:
4746  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4747  : LDeferredCode(codegen), instr_(instr) { }
4748  virtual void Generate() OVERRIDE {
4749  codegen()->DoDeferredNumberTagD(instr_);
4750  }
4751  virtual LInstruction* instr() OVERRIDE { return instr_; }
4752  private:
4753  LNumberTagD* instr_;
4754  };
4755 
4756  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4757  Register scratch = scratch0();
4758  Register reg = ToRegister(instr->result());
4759  Register temp1 = ToRegister(instr->temp());
4760  Register temp2 = ToRegister(instr->temp2());
4761 
4762  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4763  if (FLAG_inline_new) {
4764  __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4765  // We want the untagged address first for performance
4766  __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4767  DONT_TAG_RESULT);
4768  } else {
4769  __ Branch(deferred->entry());
4770  }
4771  __ bind(deferred->exit());
4772  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4773  // Now that we have finished with the object's real address, tag it.
4774  __ Addu(reg, reg, kHeapObjectTag);
4775 }
4776 
4777 
4778 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4779  // TODO(3095996): Get rid of this. For now, we need to make the
4780  // result register contain a valid pointer because it is already
4781  // contained in the register pointer map.
4782  Register reg = ToRegister(instr->result());
4783  __ mov(reg, zero_reg);
4784 
4785  PushSafepointRegistersScope scope(this);
4786  // NumberTagI and NumberTagD use the context from the frame, rather than
4787  // the environment's HContext or HInlinedContext value.
4788  // They only call Runtime::kAllocateHeapNumber.
4789  // The corresponding HChange instructions are added in a phase that does
4790  // not have easy access to the local context.
4791  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4792  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4793  RecordSafepointWithRegisters(
4794  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4795  __ Subu(v0, v0, kHeapObjectTag);
4796  __ StoreToSafepointRegisterSlot(v0, reg);
4797 }
4798 
4799 
4800 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4801  HChange* hchange = instr->hydrogen();
4802  Register input = ToRegister(instr->value());
4803  Register output = ToRegister(instr->result());
4804  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4805  hchange->value()->CheckFlag(HValue::kUint32)) {
4806  __ And(at, input, Operand(0xc0000000));
4807  DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
4808  }
4809  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4810  !hchange->value()->CheckFlag(HValue::kUint32)) {
4811  __ SmiTagCheckOverflow(output, input, at);
4812  DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
4813  } else {
4814  __ SmiTag(output, input);
4815  }
4816 }
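
On 32-bit V8 a smi is the payload shifted left by one (kSmiTag is 0, kSmiTagSize is 1), leaving 31 bits of signed payload. That explains both checks above: a kUint32 value with either of its top two bits set (the 0xc0000000 mask) cannot fit, and for signed inputs SmiTagCheckOverflow detects when bits 30 and 31 disagree. Host-side sketch (not V8 code):

    #include <cstdint>

    // 31-bit smi payload range on 32-bit V8.
    bool FitsSmi(int32_t v) { return v >= -(1 << 30) && v < (1 << 30); }

    int32_t SmiTag(int32_t v) {  // valid only when FitsSmi(v)
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    }

    // Mirrors SmiTagCheckOverflow: the new bit 31 differs from the original
    // sign bit exactly when input bits 30 and 31 disagreed.
    bool SmiTagOverflows(int32_t v) {
      uint32_t shifted = static_cast<uint32_t>(v) << 1;
      return ((static_cast<uint32_t>(v) ^ shifted) & 0x80000000u) != 0;
    }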
4817 
4818 
4819 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4820  Register scratch = scratch0();
4821  Register input = ToRegister(instr->value());
4822  Register result = ToRegister(instr->result());
4823  if (instr->needs_check()) {
4824  STATIC_ASSERT(kHeapObjectTag == 1);
4825  // If the input is a HeapObject, value of scratch won't be zero.
4826  __ And(scratch, input, Operand(kHeapObjectTag));
4827  __ SmiUntag(result, input);
4828  DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
4829  } else {
4830  __ SmiUntag(result, input);
4831  }
4832 }
4833 
4834 
4835 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4836  DoubleRegister result_reg,
4837  NumberUntagDMode mode) {
4838  bool can_convert_undefined_to_nan =
4839  instr->hydrogen()->can_convert_undefined_to_nan();
4840  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4841 
4842  Register scratch = scratch0();
4843  Label convert, load_smi, done;
4844  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4845  // Smi check.
4846  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4847  // Heap number map check.
4848  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4849  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4850  if (can_convert_undefined_to_nan) {
4851  __ Branch(&convert, ne, scratch, Operand(at));
4852  } else {
4853  DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
4854  }
4855  // Load heap number.
4856  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4857  if (deoptimize_on_minus_zero) {
4858  __ mfc1(at, result_reg.low());
4859  __ Branch(&done, ne, at, Operand(zero_reg));
4860  __ Mfhc1(scratch, result_reg);
4861  DeoptimizeIf(eq, instr, "minus zero", scratch,
4862  Operand(HeapNumber::kSignMask));
4863  }
4864  __ Branch(&done);
4865  if (can_convert_undefined_to_nan) {
4866  __ bind(&convert);
4867  // Convert undefined (and hole) to NaN.
4868  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4869  DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
4870  Operand(at));
4871  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4872  __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4873  __ Branch(&done);
4874  }
4875  } else {
4876  __ SmiUntag(scratch, input_reg);
4877  DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4878  }
4879  // Smi to double register conversion
4880  __ bind(&load_smi);
4881  // scratch: untagged value of input_reg
4882  __ mtc1(scratch, result_reg);
4883  __ cvt_d_w(result_reg, result_reg);
4884  __ bind(&done);
4885 }
4886 
4887 
4888 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4889  Register input_reg = ToRegister(instr->value());
4890  Register scratch1 = scratch0();
4891  Register scratch2 = ToRegister(instr->temp());
4892  DoubleRegister double_scratch = double_scratch0();
4893  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4894 
4895  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4896  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4897 
4898  Label done;
4899 
4900  // The input is a tagged HeapObject.
4901  // Heap number map check.
4902  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4903  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4904  // This 'at' value and scratch1 map value are used for tests in both clauses
4905  // of the if.
4906 
4907  if (instr->truncating()) {
4908  // Performs a truncating conversion of a floating point number as used by
4909  // the JS bitwise operations.
4910  Label no_heap_number, check_bools, check_false;
4911  // Check HeapNumber map.
4912  __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4913  __ mov(scratch2, input_reg); // In delay slot.
4914  __ TruncateHeapNumberToI(input_reg, scratch2);
4915  __ Branch(&done);
4916 
4917  // Check for Oddballs. Undefined/False is converted to zero and True to one
4918  // for truncating conversions.
4919  __ bind(&no_heap_number);
4920  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4921  __ Branch(&check_bools, ne, input_reg, Operand(at));
4922  DCHECK(ToRegister(instr->result()).is(input_reg));
4923  __ Branch(USE_DELAY_SLOT, &done);
4924  __ mov(input_reg, zero_reg); // In delay slot.
4925 
4926  __ bind(&check_bools);
4927  __ LoadRoot(at, Heap::kTrueValueRootIndex);
4928  __ Branch(&check_false, ne, scratch2, Operand(at));
4929  __ Branch(USE_DELAY_SLOT, &done);
4930  __ li(input_reg, Operand(1)); // In delay slot.
4931 
4932  __ bind(&check_false);
4933  __ LoadRoot(at, Heap::kFalseValueRootIndex);
4934  DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
4935  Operand(at));
4936  __ Branch(USE_DELAY_SLOT, &done);
4937  __ mov(input_reg, zero_reg); // In delay slot.
4938  } else {
4939  DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
4940 
4941  // Load the double value.
4942  __ ldc1(double_scratch,
4943  FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4944 
4945  Register except_flag = scratch2;
4946  __ EmitFPUTruncate(kRoundToZero,
4947  input_reg,
4948  double_scratch,
4949  scratch1,
4950  double_scratch2,
4951  except_flag,
4952  kCheckForInexactConversion);
4953 
4954  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
4955  Operand(zero_reg));
4956 
4957  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4958  __ Branch(&done, ne, input_reg, Operand(zero_reg));
4959 
4960  __ Mfhc1(scratch1, double_scratch);
4961  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4962  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
4963  }
4964  }
4965  __ bind(&done);
4966 }
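// The truncating clause above implements the oddball handling V8 applies to
// JS bitwise operations (a sketch of the intended mapping, not original
// source): heap number -> TruncateHeapNumberToI (ES ToInt32), undefined and
// false -> 0, true -> 1, any other non-smi -> deoptimize. The non-truncating
// clause instead requires an exact heap-number-to-int32 round trip and bails
// out on inexact conversions, NaN, and -0.0.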
4967 
4968 
4969 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4970  class DeferredTaggedToI FINAL : public LDeferredCode {
4971  public:
4972  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4973  : LDeferredCode(codegen), instr_(instr) { }
4974  virtual void Generate() OVERRIDE {
4975  codegen()->DoDeferredTaggedToI(instr_);
4976  }
4977  virtual LInstruction* instr() OVERRIDE { return instr_; }
4978  private:
4979  LTaggedToI* instr_;
4980  };
4981 
4982  LOperand* input = instr->value();
4983  DCHECK(input->IsRegister());
4984  DCHECK(input->Equals(instr->result()));
4985 
4986  Register input_reg = ToRegister(input);
4987 
4988  if (instr->hydrogen()->value()->representation().IsSmi()) {
4989  __ SmiUntag(input_reg);
4990  } else {
4991  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4992 
4993  // Let the deferred code handle the HeapObject case.
4994  __ JumpIfNotSmi(input_reg, deferred->entry());
4995 
4996  // Smi to int32 conversion.
4997  __ SmiUntag(input_reg);
4998  __ bind(deferred->exit());
4999  }
5000 }
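// Pattern note: DeferredTaggedToI follows the LDeferredCode idiom used
// throughout this file -- the smi fast path is emitted inline, the
// HeapObject slow path is emitted out of line by Generate(), and
// deferred->entry()/deferred->exit() are the labels linking the two.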
5001 
5002 
5003 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5004  LOperand* input = instr->value();
5005  DCHECK(input->IsRegister());
5006  LOperand* result = instr->result();
5007  DCHECK(result->IsDoubleRegister());
5008 
5009  Register input_reg = ToRegister(input);
5010  DoubleRegister result_reg = ToDoubleRegister(result);
5011 
5012  HValue* value = instr->hydrogen()->value();
5013  NumberUntagDMode mode = value->representation().IsSmi()
5014  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5015 
5016  EmitNumberUntagD(instr, input_reg, result_reg, mode);
5017 }
5018 
5019 
5020 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5021  Register result_reg = ToRegister(instr->result());
5022  Register scratch1 = scratch0();
5023  DoubleRegister double_input = ToDoubleRegister(instr->value());
5024 
5025  if (instr->truncating()) {
5026  __ TruncateDoubleToI(result_reg, double_input);
5027  } else {
5028  Register except_flag = LCodeGen::scratch1();
5029 
5030  __ EmitFPUTruncate(kRoundToMinusInf,
5031  result_reg,
5032  double_input,
5033  scratch1,
5034  double_scratch0(),
5035  except_flag,
5036  kCheckForInexactConversion);
5037 
5038  // Deopt if the operation did not succeed (except_flag != 0).
5039  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
5040  Operand(zero_reg));
5041 
5042  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5043  Label done;
5044  __ Branch(&done, ne, result_reg, Operand(zero_reg));
5045  __ Mfhc1(scratch1, double_input);
5046  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5047  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
5048  __ bind(&done);
5049  }
5050  }
5051 }
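// Why the sign-bit probe above is needed (illustrative, not original
// source): truncating -0.0 produces the integer 0, so the sign is only
// recoverable from the double's bit pattern:
//   double d = -0.0;
//   int32_t i = static_cast<int32_t>(d);   // i == 0, sign lost
//   // The sign survives in the high word, which Mfhc1 reads:
//   bool negative = (high_word_of(d) & HeapNumber::kSignMask) != 0;
// high_word_of() is a stand-in for the Mfhc1 move, not a real helper.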
5052 
5053 
5054 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5055  Register result_reg = ToRegister(instr->result());
5056  Register scratch1 = LCodeGen::scratch0();
5057  DoubleRegister double_input = ToDoubleRegister(instr->value());
5058 
5059  if (instr->truncating()) {
5060  __ TruncateDoubleToI(result_reg, double_input);
5061  } else {
5062  Register except_flag = LCodeGen::scratch1();
5063 
5064  __ EmitFPUTruncate(kRoundToMinusInf,
5065  result_reg,
5066  double_input,
5067  scratch1,
5068  double_scratch0(),
5069  except_flag,
5070  kCheckForInexactConversion);
5071 
5072  // Deopt if the operation did not succeed (except_flag != 0).
5073  DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
5074  Operand(zero_reg));
5075 
5076  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5077  Label done;
5078  __ Branch(&done, ne, result_reg, Operand(zero_reg));
5079  __ Mfhc1(scratch1, double_input);
5080  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5081  DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
5082  __ bind(&done);
5083  }
5084  }
5085  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
5086  DeoptimizeIf(lt, instr, "overflow", scratch1, Operand(zero_reg));
5087 }
5088 
5089 
5090 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5091  LOperand* input = instr->value();
5092  __ SmiTst(ToRegister(input), at);
5093  DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
5094 }
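// SmiTst computes `reg & kSmiTagMask` into `at` (illustrative, assuming the
// 32-bit tagging scheme): smis have tag bit 0 == 0 while heap object
// pointers have bit 0 == 1, so a non-zero result means "not a smi" and
// triggers the deopt above.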
5095 
5096 
5097 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5098  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5099  LOperand* input = instr->value();
5100  __ SmiTst(ToRegister(input), at);
5101  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
5102  }
5103 }
5104 
5105 
5106 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5107  Register input = ToRegister(instr->value());
5108  Register scratch = scratch0();
5109 
5110  __ GetObjectType(input, scratch, scratch);
5111 
5112  if (instr->hydrogen()->is_interval_check()) {
5113  InstanceType first;
5114  InstanceType last;
5115  instr->hydrogen()->GetCheckInterval(&first, &last);
5116 
5117  // If there is only one type in the interval check for equality.
5118  if (first == last) {
5119  DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
5120  } else {
5121  DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
5122  // Omit check for the last type.
5123  if (last != LAST_TYPE) {
5124  DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
5125  }
5126  }
5127  } else {
5128  uint8_t mask;
5129  uint8_t tag;
5130  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5131 
5132  if (base::bits::IsPowerOfTwo32(mask)) {
5133  DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5134  __ And(at, scratch, mask);
5135  DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
5136  Operand(zero_reg));
5137  } else {
5138  __ And(scratch, scratch, Operand(mask));
5139  DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
5140  }
5141  }
5142 }
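// The IsPowerOfTwo32 shortcut above (hypothetical values for illustration):
// when mask has a single bit set and tag is either 0 or that same bit,
// (type & mask) == tag collapses to one And plus a zero comparison. For
// mask == 0x10: tag == 0 deopts when (type & 0x10) != 0, while tag == 0x10
// deopts when (type & 0x10) == 0 -- hence the `tag == 0 ? ne : eq` flip.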
5143 
5144 
5145 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5146  Register reg = ToRegister(instr->value());
5147  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5148  AllowDeferredHandleDereference smi_check;
5149  if (isolate()->heap()->InNewSpace(*object)) {
5150  Register reg = ToRegister(instr->value());
5151  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5152  __ li(at, Operand(Handle<Object>(cell)));
5153  __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
5154  DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
5155  } else {
5156  DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
5157  }
5158 }
5159 
5160 
5161 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5162  {
5163  PushSafepointRegistersScope scope(this);
5164  __ push(object);
5165  __ mov(cp, zero_reg);
5166  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5167  RecordSafepointWithRegisters(
5168  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5169  __ StoreToSafepointRegisterSlot(v0, scratch0());
5170  }
5171  __ SmiTst(scratch0(), at);
5172  DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
5173 }
5174 
5175 
5176 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5177  class DeferredCheckMaps FINAL : public LDeferredCode {
5178  public:
5179  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5180  : LDeferredCode(codegen), instr_(instr), object_(object) {
5181  SetExit(check_maps());
5182  }
5183  virtual void Generate() OVERRIDE {
5184  codegen()->DoDeferredInstanceMigration(instr_, object_);
5185  }
5186  Label* check_maps() { return &check_maps_; }
5187  virtual LInstruction* instr() OVERRIDE { return instr_; }
5188  private:
5189  LCheckMaps* instr_;
5190  Label check_maps_;
5191  Register object_;
5192  };
5193 
5194  if (instr->hydrogen()->IsStabilityCheck()) {
5195  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5196  for (int i = 0; i < maps->size(); ++i) {
5197  AddStabilityDependency(maps->at(i).handle());
5198  }
5199  return;
5200  }
5201 
5202  Register map_reg = scratch0();
5203  LOperand* input = instr->value();
5204  DCHECK(input->IsRegister());
5205  Register reg = ToRegister(input);
5206  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5207 
5208  DeferredCheckMaps* deferred = NULL;
5209  if (instr->hydrogen()->HasMigrationTarget()) {
5210  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5211  __ bind(deferred->check_maps());
5212  }
5213 
5214  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5215  Label success;
5216  for (int i = 0; i < maps->size() - 1; i++) {
5217  Handle<Map> map = maps->at(i).handle();
5218  __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5219  }
5220  Handle<Map> map = maps->at(maps->size() - 1).handle();
5221  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5222  if (instr->hydrogen()->HasMigrationTarget()) {
5223  __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5224  } else {
5225  DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
5226  }
5227 
5228  __ bind(&success);
5229 }
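// Shape of the map check above: the first size-1 maps branch to `success`
// on a match, so only the final comparison can fail -- either into the
// migration deferred code (when a migration target exists) or straight
// into a "wrong map" deopt.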
5230 
5231 
5232 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5233  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5234  Register result_reg = ToRegister(instr->result());
5235  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5236  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5237 }
5238 
5239 
5240 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5241  Register unclamped_reg = ToRegister(instr->unclamped());
5242  Register result_reg = ToRegister(instr->result());
5243  __ ClampUint8(result_reg, unclamped_reg);
5244 }
5245 
5246 
5247 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5248  Register scratch = scratch0();
5249  Register input_reg = ToRegister(instr->unclamped());
5250  Register result_reg = ToRegister(instr->result());
5251  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5252  Label is_smi, done, heap_number;
5253 
5254  // Both smi and heap number cases are handled.
5255  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5256 
5257  // Check for heap number
5258  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5259  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5260 
5261  // Check for undefined. Undefined is converted to zero for clamping
5262  // conversions.
5263  DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
5264  Operand(factory()->undefined_value()));
5265  __ mov(result_reg, zero_reg);
5266  __ jmp(&done);
5267 
5268  // Heap number
5269  __ bind(&heap_number);
5270  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5271  HeapNumber::kValueOffset));
5272  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5273  __ jmp(&done);
5274 
5275  __ bind(&is_smi);
5276  __ ClampUint8(result_reg, scratch);
5277 
5278  __ bind(&done);
5279 }
5280 
5281 
5282 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5283  DoubleRegister value_reg = ToDoubleRegister(instr->value());
5284  Register result_reg = ToRegister(instr->result());
5285  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5286  __ FmoveHigh(result_reg, value_reg);
5287  } else {
5288  __ FmoveLow(result_reg, value_reg);
5289  }
5290 }
5291 
5292 
5293 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5294  Register hi_reg = ToRegister(instr->hi());
5295  Register lo_reg = ToRegister(instr->lo());
5296  DoubleRegister result_reg = ToDoubleRegister(instr->result());
5297  __ Move(result_reg, lo_reg, hi_reg);
5298 }
5299 
5300 
5301 void LCodeGen::DoAllocate(LAllocate* instr) {
5302  class DeferredAllocate FINAL : public LDeferredCode {
5303  public:
5304  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5305  : LDeferredCode(codegen), instr_(instr) { }
5306  virtual void Generate() OVERRIDE {
5307  codegen()->DoDeferredAllocate(instr_);
5308  }
5309  virtual LInstruction* instr() OVERRIDE { return instr_; }
5310  private:
5311  LAllocate* instr_;
5312  };
5313 
5314  DeferredAllocate* deferred =
5315  new(zone()) DeferredAllocate(this, instr);
5316 
5317  Register result = ToRegister(instr->result());
5318  Register scratch = ToRegister(instr->temp1());
5319  Register scratch2 = ToRegister(instr->temp2());
5320 
5321  // Allocate memory for the object.
5322  AllocationFlags flags = TAG_OBJECT;
5323  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5324  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5325  }
5326  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5327  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5328  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5329  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5330  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5331  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5332  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5333  }
5334  if (instr->size()->IsConstantOperand()) {
5335  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5336  if (size <= Page::kMaxRegularHeapObjectSize) {
5337  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5338  } else {
5339  __ jmp(deferred->entry());
5340  }
5341  } else {
5342  Register size = ToRegister(instr->size());
5343  __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5344  }
5345 
5346  __ bind(deferred->exit());
5347 
5348  if (instr->hydrogen()->MustPrefillWithFiller()) {
5349  STATIC_ASSERT(kHeapObjectTag == 1);
5350  if (instr->size()->IsConstantOperand()) {
5351  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5352  __ li(scratch, Operand(size - kHeapObjectTag));
5353  } else {
5354  __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5355  }
5356  __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5357  Label loop;
5358  __ bind(&loop);
5359  __ Subu(scratch, scratch, Operand(kPointerSize));
5360  __ Addu(at, result, Operand(scratch));
5361  __ sw(scratch2, MemOperand(at));
5362  __ Branch(&loop, ge, scratch, Operand(zero_reg));
5363  }
5364 }
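// Worked example of the filler loop (illustrative numbers): for size == 12,
// kPointerSize == 4 and kHeapObjectTag == 1, scratch starts at 11. Each
// iteration subtracts 4 before storing, so the filler map is written at
// tagged offsets 7, 3 and -1 from `result` -- i.e. untagged offsets 8, 4
// and 0, covering all three words -- and the loop exits once scratch drops
// below zero.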
5365 
5366 
5367 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5368  Register result = ToRegister(instr->result());
5369 
5370  // TODO(3095996): Get rid of this. For now, we need to make the
5371  // result register contain a valid pointer because it is already
5372  // contained in the register pointer map.
5373  __ mov(result, zero_reg);
5374 
5375  PushSafepointRegistersScope scope(this);
5376  if (instr->size()->IsRegister()) {
5377  Register size = ToRegister(instr->size());
5378  DCHECK(!size.is(result));
5379  __ SmiTag(size);
5380  __ push(size);
5381  } else {
5382  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5383  if (size >= 0 && size <= Smi::kMaxValue) {
5384  __ Push(Smi::FromInt(size));
5385  } else {
5386  // We should never get here at runtime => abort
5387  __ stop("invalid allocation size");
5388  return;
5389  }
5390  }
5391 
5392  int flags = AllocateDoubleAlignFlag::encode(
5393  instr->hydrogen()->MustAllocateDoubleAligned());
5394  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5395  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5396  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5397  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5398  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5399  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5400  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5401  } else {
5402  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5403  }
5404  __ Push(Smi::FromInt(flags));
5405 
5406  CallRuntimeFromDeferred(
5407  Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5408  __ StoreToSafepointRegisterSlot(v0, result);
5409 }
5410 
5411 
5412 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5413  DCHECK(ToRegister(instr->value()).is(a0));
5414  DCHECK(ToRegister(instr->result()).is(v0));
5415  __ push(a0);
5416  CallRuntime(Runtime::kToFastProperties, 1, instr);
5417 }
5418 
5419 
5420 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5421  DCHECK(ToRegister(instr->context()).is(cp));
5422  Label materialized;
5423  // Registers will be used as follows:
5424  // t3 = literals array.
5425  // a1 = regexp literal.
5426  // a0 = regexp literal clone.
5427  // a2 and t0-t2 are used as temporaries.
5428  int literal_offset =
5429  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5430  __ li(t3, instr->hydrogen()->literals());
5431  __ lw(a1, FieldMemOperand(t3, literal_offset));
5432  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5433  __ Branch(&materialized, ne, a1, Operand(at));
5434 
5435  // Create regexp literal using runtime function
5436  // Result will be in v0.
5437  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5438  __ li(t1, Operand(instr->hydrogen()->pattern()));
5439  __ li(t0, Operand(instr->hydrogen()->flags()));
5440  __ Push(t3, t2, t1, t0);
5441  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5442  __ mov(a1, v0);
5443 
5444  __ bind(&materialized);
5445  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5446  Label allocated, runtime_allocate;
5447 
5448  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5449  __ jmp(&allocated);
5450 
5451  __ bind(&runtime_allocate);
5452  __ li(a0, Operand(Smi::FromInt(size)));
5453  __ Push(a1, a0);
5454  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5455  __ pop(a1);
5456 
5457  __ bind(&allocated);
5458  // Copy the content into the newly allocated memory.
5459  // (Unroll copy loop once for better throughput).
5460  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5461  __ lw(a3, FieldMemOperand(a1, i));
5462  __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
5463  __ sw(a3, FieldMemOperand(v0, i));
5464  __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
5465  }
5466  if ((size % (2 * kPointerSize)) != 0) {
5467  __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
5468  __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
5469  }
5470 }
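// Unrolled-copy arithmetic (illustrative): with size == 20 and
// kPointerSize == 4, the loop runs at i == 0 and i == 8 (i == 16 fails
// i < size - kPointerSize), copying offsets 0..15 two words at a time; the
// trailing `if` then moves the odd word at offset 16.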
5471 
5472 
5473 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5474  DCHECK(ToRegister(instr->context()).is(cp));
5475  // Use the fast case closure allocation code that allocates in new
5476  // space for nested functions that don't need literals cloning.
5477  bool pretenure = instr->hydrogen()->pretenure();
5478  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5479  FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5480  instr->hydrogen()->kind());
5481  __ li(a2, Operand(instr->hydrogen()->shared_info()));
5482  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5483  } else {
5484  __ li(a2, Operand(instr->hydrogen()->shared_info()));
5485  __ li(a1, Operand(pretenure ? factory()->true_value()
5486  : factory()->false_value()));
5487  __ Push(cp, a2, a1);
5488  CallRuntime(Runtime::kNewClosure, 3, instr);
5489  }
5490 }
5491 
5492 
5493 void LCodeGen::DoTypeof(LTypeof* instr) {
5494  DCHECK(ToRegister(instr->result()).is(v0));
5495  Register input = ToRegister(instr->value());
5496  __ push(input);
5497  CallRuntime(Runtime::kTypeof, 1, instr);
5498 }
5499 
5500 
5501 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5502  Register input = ToRegister(instr->value());
5503 
5504  Register cmp1 = no_reg;
5505  Operand cmp2 = Operand(no_reg);
5506 
5507  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5508  instr->FalseLabel(chunk_),
5509  input,
5510  instr->type_literal(),
5511  &cmp1,
5512  &cmp2);
5513 
5514  DCHECK(cmp1.is_valid());
5515  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5516 
5517  if (final_branch_condition != kNoCondition) {
5518  EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5519  }
5520 }
5521 
5522 
5523 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5524  Label* false_label,
5525  Register input,
5526  Handle<String> type_name,
5527  Register* cmp1,
5528  Operand* cmp2) {
5529  // This function utilizes the delay slot heavily. This is used to load
5530  // values that are always usable without depending on the type of the input
5531  // register.
5532  Condition final_branch_condition = kNoCondition;
5533  Register scratch = scratch0();
5534  Factory* factory = isolate()->factory();
5535  if (String::Equals(type_name, factory->number_string())) {
5536  __ JumpIfSmi(input, true_label);
5537  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5538  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5539  *cmp1 = input;
5540  *cmp2 = Operand(at);
5541  final_branch_condition = eq;
5542 
5543  } else if (String::Equals(type_name, factory->string_string())) {
5544  __ JumpIfSmi(input, false_label);
5545  __ GetObjectType(input, input, scratch);
5546  __ Branch(USE_DELAY_SLOT, false_label,
5547  ge, scratch, Operand(FIRST_NONSTRING_TYPE));
5548  // input is an object so we can load the BitFieldOffset even if we take the
5549  // other branch.
5550  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5551  __ And(at, at, 1 << Map::kIsUndetectable);
5552  *cmp1 = at;
5553  *cmp2 = Operand(zero_reg);
5554  final_branch_condition = eq;
5555 
5556  } else if (String::Equals(type_name, factory->symbol_string())) {
5557  __ JumpIfSmi(input, false_label);
5558  __ GetObjectType(input, input, scratch);
5559  *cmp1 = scratch;
5560  *cmp2 = Operand(SYMBOL_TYPE);
5561  final_branch_condition = eq;
5562 
5563  } else if (String::Equals(type_name, factory->boolean_string())) {
5564  __ LoadRoot(at, Heap::kTrueValueRootIndex);
5565  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5566  __ LoadRoot(at, Heap::kFalseValueRootIndex);
5567  *cmp1 = at;
5568  *cmp2 = Operand(input);
5569  final_branch_condition = eq;
5570 
5571  } else if (String::Equals(type_name, factory->undefined_string())) {
5572  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5573  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5574  // The first instruction of JumpIfSmi is an And - it is safe in the delay
5575  // slot.
5576  __ JumpIfSmi(input, false_label);
5577  // Check for undetectable objects => true.
5578  __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5579  __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5580  __ And(at, at, 1 << Map::kIsUndetectable);
5581  *cmp1 = at;
5582  *cmp2 = Operand(zero_reg);
5583  final_branch_condition = ne;
5584 
5585  } else if (String::Equals(type_name, factory->function_string())) {
5586  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5587  __ JumpIfSmi(input, false_label);
5588  __ GetObjectType(input, scratch, input);
5589  __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
5590  *cmp1 = input;
5591  *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
5592  final_branch_condition = eq;
5593 
5594  } else if (String::Equals(type_name, factory->object_string())) {
5595  __ JumpIfSmi(input, false_label);
5596  __ LoadRoot(at, Heap::kNullValueRootIndex);
5597  __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5598  Register map = input;
5599  __ GetObjectType(input, map, scratch);
5600  __ Branch(false_label,
5601  lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
5602  __ Branch(USE_DELAY_SLOT, false_label,
5603  gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
5604  // map is still valid, so the BitField can be loaded in delay slot.
5605  // Check for undetectable objects => false.
5606  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
5607  __ And(at, at, 1 << Map::kIsUndetectable);
5608  *cmp1 = at;
5609  *cmp2 = Operand(zero_reg);
5610  final_branch_condition = eq;
5611 
5612  } else {
5613  *cmp1 = at;
5614  *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5615  __ Branch(false_label);
5616  }
5617 
5618  return final_branch_condition;
5619 }
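// Delay-slot usage in EmitTypeofIs (per the "utilizes the delay slot
// heavily" comment above): on MIPS the instruction after a branch executes
// whether or not the branch is taken, so a load that is harmless on both
// paths can ride along for free. Typical shape from this function:
//   __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
//   __ LoadRoot(at, Heap::kFalseValueRootIndex);  // Runs on either path.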
5620 
5621 
5622 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5623  Register temp1 = ToRegister(instr->temp());
5624 
5625  EmitIsConstructCall(temp1, scratch0());
5626 
5627  EmitBranch(instr, eq, temp1,
5628  Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5629 }
5630 
5631 
5632 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5633  DCHECK(!temp1.is(temp2));
5634  // Get the frame pointer for the calling frame.
5635  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5636 
5637  // Skip the arguments adaptor frame if it exists.
5638  Label check_frame_marker;
5639  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5640  __ Branch(&check_frame_marker, ne, temp2,
5641  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5642  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5643 
5644  // Check the marker in the calling frame.
5645  __ bind(&check_frame_marker);
5646  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5647 }
5648 
5649 
5650 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5651  if (!info()->IsStub()) {
5652  // Ensure that we have enough space after the previous lazy-bailout
5653  // instruction for patching the code here.
5654  int current_pc = masm()->pc_offset();
5655  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5656  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5657  DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5658  while (padding_size > 0) {
5659  __ nop();
5660  padding_size -= Assembler::kInstrSize;
5661  }
5662  }
5663  }
5664  last_lazy_deopt_pc_ = masm()->pc_offset();
5665 }
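// Padding arithmetic (illustrative numbers): if the last lazy-deopt point
// was recorded at pc offset 40, space_needed is 6 * Assembler::kInstrSize
// (24 bytes on MIPS) and the current offset is 52, then padding_size ==
// 40 + 24 - 52 == 12, so three 4-byte nops are emitted before the new
// point is recorded.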
5666 
5667 
5668 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5669  last_lazy_deopt_pc_ = masm()->pc_offset();
5670  DCHECK(instr->HasEnvironment());
5671  LEnvironment* env = instr->environment();
5672  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5673  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5674 }
5675 
5676 
5677 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5678  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5679  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5680  // needed return address), even though the implementation of LAZY and EAGER is
5681  // now identical. When LAZY is eventually completely folded into EAGER, remove
5682  // the special case below.
5683  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5684  type = Deoptimizer::LAZY;
5685  }
5686 
5687  DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
5688  Operand(zero_reg));
5689 }
5690 
5691 
5692 void LCodeGen::DoDummy(LDummy* instr) {
5693  // Nothing to see here, move on!
5694 }
5695 
5696 
5697 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5698  // Nothing to see here, move on!
5699 }
5700 
5701 
5702 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5703  PushSafepointRegistersScope scope(this);
5704  LoadContextFromDeferred(instr->context());
5705  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5706  RecordSafepointWithLazyDeopt(
5707  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5708  DCHECK(instr->HasEnvironment());
5709  LEnvironment* env = instr->environment();
5710  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5711 }
5712 
5713 
5714 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5715  class DeferredStackCheck FINAL : public LDeferredCode {
5716  public:
5717  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5718  : LDeferredCode(codegen), instr_(instr) { }
5719  virtual void Generate() OVERRIDE {
5720  codegen()->DoDeferredStackCheck(instr_);
5721  }
5722  virtual LInstruction* instr() OVERRIDE { return instr_; }
5723  private:
5724  LStackCheck* instr_;
5725  };
5726 
5727  DCHECK(instr->HasEnvironment());
5728  LEnvironment* env = instr->environment();
5729  // There is no LLazyBailout instruction for stack-checks. We have to
5730  // prepare for lazy deoptimization explicitly here.
5731  if (instr->hydrogen()->is_function_entry()) {
5732  // Perform stack overflow check.
5733  Label done;
5734  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5735  __ Branch(&done, hs, sp, Operand(at));
5736  DCHECK(instr->context()->IsRegister());
5737  DCHECK(ToRegister(instr->context()).is(cp));
5738  CallCode(isolate()->builtins()->StackCheck(),
5739  RelocInfo::CODE_TARGET,
5740  instr);
5741  __ bind(&done);
5742  } else {
5743  DCHECK(instr->hydrogen()->is_backwards_branch());
5744  // Perform stack overflow check if this goto needs it before jumping.
5745  DeferredStackCheck* deferred_stack_check =
5746  new(zone()) DeferredStackCheck(this, instr);
5747  __ LoadRoot(at, Heap::kStackLimitRootIndex);
5748  __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5749  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5750  __ bind(instr->done_label());
5751  deferred_stack_check->SetExit(instr->done_label());
5752  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5753  // Don't record a deoptimization index for the safepoint here.
5754  // This will be done explicitly when emitting call and the safepoint in
5755  // the deferred code.
5756  }
5757 }
5758 
5759 
5760 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5761  // This is a pseudo-instruction that ensures that the environment here is
5762  // properly registered for deoptimization and records the assembler's PC
5763  // offset.
5764  LEnvironment* environment = instr->environment();
5765 
5766  // If the environment were already registered, we would have no way of
5767  // backpatching it with the spill slot operands.
5768  DCHECK(!environment->HasBeenRegistered());
5769  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5770 
5771  GenerateOsrPrologue();
5772 }
5773 
5774 
5775 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5776  Register result = ToRegister(instr->result());
5777  Register object = ToRegister(instr->object());
5778  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5779  DeoptimizeIf(eq, instr, "undefined", object, Operand(at));
5780 
5781  Register null_value = t1;
5782  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5783  DeoptimizeIf(eq, instr, "null", object, Operand(null_value));
5784 
5785  __ And(at, object, kSmiTagMask);
5786  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
5787 
5788  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5789  __ GetObjectType(object, a1, a1);
5790  DeoptimizeIf(le, instr, "not a JavaScript object", a1,
5791  Operand(LAST_JS_PROXY_TYPE));
5792 
5793  Label use_cache, call_runtime;
5794  DCHECK(object.is(a0));
5795  __ CheckEnumCache(null_value, &call_runtime);
5796 
5797  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5798  __ Branch(&use_cache);
5799 
5800  // Get the set of properties to enumerate.
5801  __ bind(&call_runtime);
5802  __ push(object);
5803  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5804 
5805  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5806  DCHECK(result.is(v0));
5807  __ LoadRoot(at, Heap::kMetaMapRootIndex);
5808  DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
5809  __ bind(&use_cache);
5810 }
5811 
5812 
5813 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5814  Register map = ToRegister(instr->map());
5815  Register result = ToRegister(instr->result());
5816  Label load_cache, done;
5817  __ EnumLength(result, map);
5818  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5819  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5820  __ jmp(&done);
5821 
5822  __ bind(&load_cache);
5823  __ LoadInstanceDescriptors(map, result);
5824  __ lw(result,
5825  FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5826  __ lw(result,
5827  FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5828  DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));
5829 
5830  __ bind(&done);
5831 }
5832 
5833 
5834 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5835  Register object = ToRegister(instr->value());
5836  Register map = ToRegister(instr->map());
5837  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5838  DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
5839 }
5840 
5841 
5842 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5843  Register result,
5844  Register object,
5845  Register index) {
5846  PushSafepointRegistersScope scope(this);
5847  __ Push(object, index);
5848  __ mov(cp, zero_reg);
5849  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5850  RecordSafepointWithRegisters(
5851  instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5852  __ StoreToSafepointRegisterSlot(v0, result);
5853 }
5854 
5855 
5856 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5857  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5858  public:
5859  DeferredLoadMutableDouble(LCodeGen* codegen,
5860  LLoadFieldByIndex* instr,
5861  Register result,
5862  Register object,
5863  Register index)
5864  : LDeferredCode(codegen),
5865  instr_(instr),
5866  result_(result),
5867  object_(object),
5868  index_(index) {
5869  }
5870  virtual void Generate() OVERRIDE {
5871  codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5872  }
5873  virtual LInstruction* instr() OVERRIDE { return instr_; }
5874  private:
5875  LLoadFieldByIndex* instr_;
5876  Register result_;
5877  Register object_;
5878  Register index_;
5879  };
5880 
5881  Register object = ToRegister(instr->object());
5882  Register index = ToRegister(instr->index());
5883  Register result = ToRegister(instr->result());
5884  Register scratch = scratch0();
5885 
5886  DeferredLoadMutableDouble* deferred;
5887  deferred = new(zone()) DeferredLoadMutableDouble(
5888  this, instr, result, object, index);
5889 
5890  Label out_of_object, done;
5891 
5892  __ And(scratch, index, Operand(Smi::FromInt(1)));
5893  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5894  __ sra(index, index, 1);
5895 
5896  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5897  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5898 
5899  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5900  __ Addu(scratch, object, scratch);
5901  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5902 
5903  __ Branch(&done);
5904 
5905  __ bind(&out_of_object);
5906  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5907  // Index is equal to negated out of object property index plus 1.
5908  __ Subu(scratch, result, scratch);
5909  __ lw(result, FieldMemOperand(scratch,
5910  FixedArray::kHeaderSize - kPointerSize));
5911  __ bind(deferred->exit());
5912  __ bind(&done);
5913 }
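// Field-index encoding decoded above (a sketch of the convention, not
// original source): the incoming smi packs a "mutable double" flag in its
// low payload bit -- tested against Smi::FromInt(1) and routed to the
// deferred runtime call -- and sra then drops that bit. A non-negative
// index addresses in-object fields relative to JSObject::kHeaderSize; a
// negative one (negated index plus 1, per the comment above) addresses the
// out-of-object properties array loaded from kPropertiesOffset.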
5914 
5915 
5916 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5917  Register context = ToRegister(instr->context());
5918  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5919 }
5920 
5921 
5922 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5923  Handle<ScopeInfo> scope_info = instr->scope_info();
5924  __ li(at, scope_info);
5925  __ Push(at, ToRegister(instr->function()));
5926  CallRuntime(Runtime::kPushBlockContext, 2, instr);
5927  RecordSafepoint(Safepoint::kNoLazyDeopt);
5928 }
5929 
5930 
5931 #undef __
5932 
5933 } } // namespace v8::internal