// lithium-codegen-ia32.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_IA32

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const OVERRIDE {}

  virtual void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
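// Usage sketch (illustrative, simplified from call sites later in this
// file): the macro assembler invokes the CallWrapper hooks around the
// emitted call, so AfterCall() runs at exactly the pc that follows the call
// instruction and the safepoint is recorded there.
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   ParameterCount count(arity);  // hypothetical arity
//   __ InvokeFunction(edi, count, CALL_FUNCTION, generator);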


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}


#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif
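// Note: Windows grows the stack one page at a time through a guard page, so
// after a large esp adjustment each page must be touched in order (highest
// address first, as the loop above does) before the memory further down can
// be accessed safely.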


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(esp, count * kDoubleSize),
             XMMRegister::FromAllocationIndex(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
             MemOperand(esp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(ecx, Operand(esp, receiver_offset));

      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);

      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));

      __ mov(Operand(esp, receiver_offset), ecx);

      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Move(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }
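  // Note on the dynamic frame alignment test above: at this point esp points
  // at the return address, so the incoming arguments start at esp + 4. When
  // bit 2 of esp is set, esp + 4 is already a multiple of 2 * kPointerSize
  // and the padding is skipped; otherwise one word is pushed and the return
  // address, receiver, and arguments are shifted down by the copy loop.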

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    if (info()->saves_caller_doubles()) SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in eax. It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi,
                                    context_offset,
                                    eax,
                                    ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Move(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
           5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));
}
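// The word count used in the copy loop above (num_parameters + 5 +
// unoptimized frame slots) accounts for the five fixed words listed in the
// comment: alignment state, context, frame pointer, return address, and
// receiver.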


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->reason);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        DCHECK(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt, it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
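// On ia32 a Smi stores the integer shifted left by one with a zero tag bit,
// so the reinterpret_cast above simply yields (value << 1) as a raw word.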


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
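// Note that the recursive call above writes the outer environment first, so
// the finished translation lists frames from the outermost function down to
// the innermost one, each opened by its Begin*Frame command.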


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            const char* detail,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(reason);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            const char* detail) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, detail, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
1071 
1072 void LCodeGen::DoCallStub(LCallStub* instr) {
1073  DCHECK(ToRegister(instr->context()).is(esi));
1074  DCHECK(ToRegister(instr->result()).is(eax));
1075  switch (instr->hydrogen()->major_key()) {
1076  case CodeStub::RegExpExec: {
1077  RegExpExecStub stub(isolate());
1078  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1079  break;
1080  }
1081  case CodeStub::SubString: {
1082  SubStringStub stub(isolate());
1083  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1084  break;
1085  }
1086  case CodeStub::StringCompare: {
1087  StringCompareStub stub(isolate());
1088  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1089  break;
1090  }
1091  default:
1092  UNREACHABLE();
1093  }
1094 }
1095 
1096 
1097 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1099 }


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, "minus zero");
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}
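// Worked example (illustrative): for divisor +/-8 the mask is 7. A dividend
// of -13 takes the negative path: neg -> 13, and -> 5, neg -> -5, which is
// -13 % 8 under the truncating semantics of JavaScript's % operator, where
// the result takes the sign of the dividend and the divisor's sign is
// irrelevant.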


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, "minus zero");
    __ bind(&remainder_not_zero);
  }
}
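// The sequence above computes the remainder as x - (x / |d|) * |d|:
// TruncatingDiv leaves the truncating quotient in edx via a
// multiply-and-shift by a precomputed magic number, and |d| suffices
// throughout because a truncating remainder is independent of the divisor's
// sign.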


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, "minus zero");
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, "minus zero");
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}
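// The register constraints asserted at the top are dictated by x86 idiv:
// it divides the 64-bit value edx:eax (hence the cdq sign-extension of eax)
// and leaves the quotient in eax and the remainder in edx, so the dividend
// must arrive in eax and the modulus result is read from edx.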


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, "overflow");
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, "lost precision");
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}
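// The shift sequence above implements round-toward-zero division: the
// sar/shr pair materializes (dividend < 0 ? 2^shift - 1 : 0), which is added
// as a bias before the final arithmetic shift. Worked example
// (illustrative): dividend -13, divisor 8, shift 3: bias 7, -13 + 7 = -6,
// -6 >> 3 = -1 = trunc(-13 / 8). For shift == 1, shr alone moves the sign
// bit down to produce the bias 0 or 1.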


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, "lost precision");
  }
}
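// The "lost precision" check recomputes quotient * divisor and subtracts the
// dividend; the sub sets the zero flag, so the division was exact iff the
// flag is set. This mirrors the remainder mask test in the power-of-two case
// above.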


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, "minus zero");
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, "overflow");
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, "lost precision");
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sar(dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ neg(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, "minus zero");
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, "overflow");
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sar(dividend, shift);
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}
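// A plain arithmetic right shift already rounds toward negative infinity, so
// for a positive power-of-two divisor "sar" alone is a flooring division
// (e.g. -13 >> 3 = -2 = floor(-13 / 8)); no bias correction is needed here,
// unlike the truncating DoDivByPowerOf2I above.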


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}
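// The adjustment path relies on the identity floor(x / d) =
// trunc((x + 1) / d) - 1 for x < 0, d > 0 (mirrored for d < 0). Worked
// example (illustrative): x = -13, d = 8: trunc(-12 / 8) = -1, minus 1 gives
// -2 = floor(-13 / 8).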


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, "minus zero");
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, "overflow");
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  Label done;
  __ test(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xor_(remainder, divisor);
  __ sar(remainder, 31);
  __ add(result, remainder);
  __ bind(&done);
}
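// The tail above turns idiv's truncating quotient into a flooring one: for a
// non-zero remainder, xor followed by sar(31) yields -1 exactly when the
// remainder and divisor have opposite signs, and adding that -1 decrements
// the quotient. Worked example (illustrative): -7 / 2 gives q = -3, r = -1;
// r ^ d is negative, so q becomes -4 = floor(-3.5).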


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, "overflow");
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, "minus zero");
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr, "minus zero");
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr, "minus zero");
    }
    __ bind(&done);
  }
}
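// The lea forms above exploit x86 scaled addressing: lea left,
// [left + left*2] computes left * 3 in a single instruction, and similarly
// for 5 (left + left*4) and 9 (left + left*8). lea never sets the overflow
// flag and shl does not set it usefully for multi-bit shifts, which is why
// these forms are only used when the hydrogen instruction guarantees the
// product cannot overflow.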


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, "negative value");
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, "negative value");
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shr(ToRegister(left), shift_count);
        } else if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, "negative value");
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr, "overflow");
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
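// Two subtleties above: a constant logical shift right only needs a deopt
// check when the shift count is zero (a non-zero shr cannot leave the sign
// bit set, but a zero shift can pass through an unsigned value >= 2^31 that
// is unrepresentable as int32). In the Smi SHL case the final one-bit shift
// is done by SmiTag, which on ia32 adds the register to itself and therefore
// sets the overflow flag consumed by the DeoptimizeIf that follows.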


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, "overflow");
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  double v = instr->value();
  uint64_t int_val = bit_cast<uint64_t, double>(v);
  int32_t lower = static_cast<int32_t>(int_val);
  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
  DCHECK(instr->result()->IsDoubleRegister());

  XMMRegister res = ToDoubleRegister(instr->result());
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register temp = ToRegister(instr->temp());
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope2(masm(), SSE4_1);
      if (lower != 0) {
        __ Move(temp, Immediate(lower));
        __ movd(res, Operand(temp));
        __ Move(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      } else {
        __ xorps(res, res);
        __ Move(temp, Immediate(upper));
        __ pinsrd(res, Operand(temp), 1);
      }
    } else {
      __ Move(temp, Immediate(upper));
      __ movd(res, Operand(temp));
      __ psllq(res, 32);
      if (lower != 0) {
        XMMRegister xmm_scratch = double_scratch0();
        __ Move(temp, Immediate(lower));
        __ movd(xmm_scratch, Operand(temp));
        __ orps(res, xmm_scratch);
      }
    }
  }
}
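// Both non-zero paths assemble the 64-bit IEEE-754 bit pattern from its two
// 32-bit halves: with SSE4.1, movd writes the low word and pinsrd inserts
// the high word directly; without it, the high word is placed by movd,
// shifted up with psllq, and the low word is OR-ed in via a scratch XMM
// register. The int_val == 0 case is +0.0, whose bit pattern xorps produces
// for free.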


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  DCHECK(object.is(result));
  DCHECK(object.is(eax));

  __ test(object, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr, "Smi");
  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
  DeoptimizeIf(not_equal, instr, "not a date object");

  if (index->value() == 0) {
    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand::StaticVariable(stamp));
      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
1804 
1805 
1806 Operand LCodeGen::BuildSeqStringOperand(Register string,
1807  LOperand* index,
1808  String::Encoding encoding) {
1809  if (index->IsConstantOperand()) {
1810  int offset = ToRepresentation(LConstantOperand::cast(index),
1811  Representation::Integer32());
1812  if (encoding == String::TWO_BYTE_ENCODING) {
1813  offset *= kUC16Size;
1814  }
1815  STATIC_ASSERT(kCharSize == 1);
1816  return FieldOperand(string, SeqString::kHeaderSize + offset);
1817  }
1818  return FieldOperand(
1819  string, ToRegister(index),
1820  encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1821  SeqString::kHeaderSize);
1822 }
1823 
1824 
1825 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1826  String::Encoding encoding = instr->hydrogen()->encoding();
1827  Register result = ToRegister(instr->result());
1828  Register string = ToRegister(instr->string());
1829 
1830  if (FLAG_debug_code) {
1831  __ push(string);
1832  __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
1833  __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
1834 
1835  __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1836  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1837  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1838  __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1839  ? one_byte_seq_type : two_byte_seq_type));
1840  __ Check(equal, kUnexpectedStringType);
1841  __ pop(string);
1842  }
1843 
1844  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1845  if (encoding == String::ONE_BYTE_ENCODING) {
1846  __ movzx_b(result, operand);
1847  } else {
1848  __ movzx_w(result, operand);
1849  }
1850 }
1851 
1852 
1853 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1854  String::Encoding encoding = instr->hydrogen()->encoding();
1855  Register string = ToRegister(instr->string());
1856 
1857  if (FLAG_debug_code) {
1858  Register value = ToRegister(instr->value());
1859  Register index = ToRegister(instr->index());
1860  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1861  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1862  int encoding_mask =
1863  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1864  ? one_byte_seq_type : two_byte_seq_type;
1865  __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1866  }
1867 
1868  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1869  if (instr->value()->IsConstantOperand()) {
1870  int value = ToRepresentation(LConstantOperand::cast(instr->value()),
1871  Representation::Integer32());
1872  DCHECK_LE(0, value);
1873  if (encoding == String::ONE_BYTE_ENCODING) {
1874  DCHECK_LE(value, String::kMaxOneByteCharCode);
1875  __ mov_b(operand, static_cast<int8_t>(value));
1876  } else {
1877  DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1878  __ mov_w(operand, static_cast<int16_t>(value));
1879  }
1880  } else {
1881  Register value = ToRegister(instr->value());
1882  if (encoding == String::ONE_BYTE_ENCODING) {
1883  __ mov_b(operand, value);
1884  } else {
1885  __ mov_w(operand, value);
1886  }
1887  }
1888 }
1889 
1890 
1891 void LCodeGen::DoAddI(LAddI* instr) {
1892  LOperand* left = instr->left();
1893  LOperand* right = instr->right();
1894 
1895  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1896  if (right->IsConstantOperand()) {
1897  int32_t offset = ToRepresentation(LConstantOperand::cast(right),
1898  instr->hydrogen()->representation());
1899  __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
1900  } else {
1901  Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1902  __ lea(ToRegister(instr->result()), address);
1903  }
1904  } else {
1905  if (right->IsConstantOperand()) {
1906  __ add(ToOperand(left),
1907  ToImmediate(right, instr->hydrogen()->representation()));
1908  } else {
1909  __ add(ToRegister(left), ToOperand(right));
1910  }
1911  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1912  DeoptimizeIf(overflow, instr, "overflow");
1913  }
1914  }
1915 }
1916 
1917 
1918 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1919  LOperand* left = instr->left();
1920  LOperand* right = instr->right();
1921  DCHECK(left->Equals(instr->result()));
1922  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1923  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1924  Label return_left;
1925  Condition condition = (operation == HMathMinMax::kMathMin)
1926  ? less_equal
1927  : greater_equal;
1928  if (right->IsConstantOperand()) {
1929  Operand left_op = ToOperand(left);
1930  Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
1931  instr->hydrogen()->representation());
1932  __ cmp(left_op, immediate);
1933  __ j(condition, &return_left, Label::kNear);
1934  __ mov(left_op, immediate);
1935  } else {
1936  Register left_reg = ToRegister(left);
1937  Operand right_op = ToOperand(right);
1938  __ cmp(left_reg, right_op);
1939  __ j(condition, &return_left, Label::kNear);
1940  __ mov(left_reg, right_op);
1941  }
1942  __ bind(&return_left);
1943  } else {
1944  DCHECK(instr->hydrogen()->representation().IsDouble());
1945  Label check_nan_left, check_zero, return_left, return_right;
1946  Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1947  XMMRegister left_reg = ToDoubleRegister(left);
1948  XMMRegister right_reg = ToDoubleRegister(right);
1949  __ ucomisd(left_reg, right_reg);
1950  __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
1951  __ j(equal, &check_zero, Label::kNear); // left == right.
1952  __ j(condition, &return_left, Label::kNear);
1953  __ jmp(&return_right, Label::kNear);
1954 
1955  __ bind(&check_zero);
1956  XMMRegister xmm_scratch = double_scratch0();
1957  __ xorps(xmm_scratch, xmm_scratch);
1958  __ ucomisd(left_reg, xmm_scratch);
1959  __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
1960  // At this point, both left and right are either 0 or -0.
1961  if (operation == HMathMinMax::kMathMin) {
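// min(+0, -0) must be -0: since both operands are some form of zero,
// OR-ing the bit patterns sets the sign bit iff either operand is -0.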
1962  __ orpd(left_reg, right_reg);
1963  } else {
1964  // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
1965  __ addsd(left_reg, right_reg);
1966  }
1967  __ jmp(&return_left, Label::kNear);
1968 
1969  __ bind(&check_nan_left);
1970  __ ucomisd(left_reg, left_reg); // NaN check.
1971  __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1972  __ bind(&return_right);
1973  __ movaps(left_reg, right_reg);
1974 
1975  __ bind(&return_left);
1976  }
1977 }
1978 
1979 
1980 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1981  XMMRegister left = ToDoubleRegister(instr->left());
1982  XMMRegister right = ToDoubleRegister(instr->right());
1983  XMMRegister result = ToDoubleRegister(instr->result());
1984  switch (instr->op()) {
1985  case Token::ADD:
1986  __ addsd(left, right);
1987  break;
1988  case Token::SUB:
1989  __ subsd(left, right);
1990  break;
1991  case Token::MUL:
1992  __ mulsd(left, right);
1993  break;
1994  case Token::DIV:
1995  __ divsd(left, right);
1996  // Don't delete this mov. It may improve performance on some CPUs
1997  // when there is a mulsd depending on the result.
1998  __ movaps(left, left);
1999  break;
2000  case Token::MOD: {
2001  // Pass two doubles as arguments on the stack.
2002  __ PrepareCallCFunction(4, eax);
2003  __ movsd(Operand(esp, 0 * kDoubleSize), left);
2004  __ movsd(Operand(esp, 1 * kDoubleSize), right);
2005  __ CallCFunction(
2006  ExternalReference::mod_two_doubles_operation(isolate()),
2007  4);
2008 
2009  // Return value is in st(0) on ia32.
2010  // Store it into the result register.
2011  __ sub(Operand(esp), Immediate(kDoubleSize));
2012  __ fstp_d(Operand(esp, 0));
2013  __ movsd(result, Operand(esp, 0));
2014  __ add(Operand(esp), Immediate(kDoubleSize));
2015  break;
2016  }
2017  default:
2018  UNREACHABLE();
2019  break;
2020  }
2021 }
2022 
2023 
2024 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2025  DCHECK(ToRegister(instr->context()).is(esi));
2026  DCHECK(ToRegister(instr->left()).is(edx));
2027  DCHECK(ToRegister(instr->right()).is(eax));
2028  DCHECK(ToRegister(instr->result()).is(eax));
2029 
2030  Handle<Code> code =
2031  CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2032  CallCode(code, RelocInfo::CODE_TARGET, instr);
2033 }
2034 
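// Emits a conditional branch to the instruction's true/false destinations,
// omitting the jump whose target is the next block to be emitted, so that
// one side of the branch becomes a fall-through.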
2035 
2036 template<class InstrType>
2037 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2038  int left_block = instr->TrueDestination(chunk_);
2039  int right_block = instr->FalseDestination(chunk_);
2040 
2041  int next_block = GetNextEmittedBlock();
2042 
2043  if (right_block == left_block || cc == no_condition) {
2044  EmitGoto(left_block);
2045  } else if (left_block == next_block) {
2046  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2047  } else if (right_block == next_block) {
2048  __ j(cc, chunk_->GetAssemblyLabel(left_block));
2049  } else {
2050  __ j(cc, chunk_->GetAssemblyLabel(left_block));
2051  __ jmp(chunk_->GetAssemblyLabel(right_block));
2052  }
2053 }
2054 
2055 
2056 template<class InstrType>
2057 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2058  int false_block = instr->FalseDestination(chunk_);
2059  if (cc == no_condition) {
2060  __ jmp(chunk_->GetAssemblyLabel(false_block));
2061  } else {
2062  __ j(cc, chunk_->GetAssemblyLabel(false_block));
2063  }
2064 }
2065 
2066 
2067 void LCodeGen::DoBranch(LBranch* instr) {
2068  Representation r = instr->hydrogen()->value()->representation();
2069  if (r.IsSmiOrInteger32()) {
2070  Register reg = ToRegister(instr->value());
2071  __ test(reg, Operand(reg));
2072  EmitBranch(instr, not_zero);
2073  } else if (r.IsDouble()) {
2074  DCHECK(!info()->IsStub());
2075  XMMRegister reg = ToDoubleRegister(instr->value());
2076  XMMRegister xmm_scratch = double_scratch0();
2077  __ xorps(xmm_scratch, xmm_scratch);
2078  __ ucomisd(reg, xmm_scratch);
2079  EmitBranch(instr, not_equal);
2080  } else {
2081  DCHECK(r.IsTagged());
2082  Register reg = ToRegister(instr->value());
2083  HType type = instr->hydrogen()->value()->type();
2084  if (type.IsBoolean()) {
2085  DCHECK(!info()->IsStub());
2086  __ cmp(reg, factory()->true_value());
2087  EmitBranch(instr, equal);
2088  } else if (type.IsSmi()) {
2089  DCHECK(!info()->IsStub());
2090  __ test(reg, Operand(reg));
2091  EmitBranch(instr, not_equal);
2092  } else if (type.IsJSArray()) {
2093  DCHECK(!info()->IsStub());
2094  EmitBranch(instr, no_condition);
2095  } else if (type.IsHeapNumber()) {
2096  DCHECK(!info()->IsStub());
2097  XMMRegister xmm_scratch = double_scratch0();
2098  __ xorps(xmm_scratch, xmm_scratch);
2099  __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2100  EmitBranch(instr, not_equal);
2101  } else if (type.IsString()) {
2102  DCHECK(!info()->IsStub());
2103  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2104  EmitBranch(instr, not_equal);
2105  } else {
2106  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2107  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2108 
2109  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2110  // undefined -> false.
2111  __ cmp(reg, factory()->undefined_value());
2112  __ j(equal, instr->FalseLabel(chunk_));
2113  }
2114  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2115  // true -> true.
2116  __ cmp(reg, factory()->true_value());
2117  __ j(equal, instr->TrueLabel(chunk_));
2118  // false -> false.
2119  __ cmp(reg, factory()->false_value());
2120  __ j(equal, instr->FalseLabel(chunk_));
2121  }
2122  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2123  // 'null' -> false.
2124  __ cmp(reg, factory()->null_value());
2125  __ j(equal, instr->FalseLabel(chunk_));
2126  }
2127 
2128  if (expected.Contains(ToBooleanStub::SMI)) {
2129  // Smis: 0 -> false, all other -> true.
2130  __ test(reg, Operand(reg));
2131  __ j(equal, instr->FalseLabel(chunk_));
2132  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2133  } else if (expected.NeedsMap()) {
2134  // If we need a map later and have a Smi -> deopt.
2135  __ test(reg, Immediate(kSmiTagMask));
2136  DeoptimizeIf(zero, instr, "Smi");
2137  }
2138 
2139  Register map = no_reg; // Keep the compiler happy.
2140  if (expected.NeedsMap()) {
2141  map = ToRegister(instr->temp());
2142  DCHECK(!map.is(reg));
2143  __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2144 
2145  if (expected.CanBeUndetectable()) {
2146  // Undetectable -> false.
2147  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2148  1 << Map::kIsUndetectable);
2149  __ j(not_zero, instr->FalseLabel(chunk_));
2150  }
2151  }
2152 
2153  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2154  // spec object -> true.
2155  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2156  __ j(above_equal, instr->TrueLabel(chunk_));
2157  }
2158 
2159  if (expected.Contains(ToBooleanStub::STRING)) {
2160  // String value -> false iff empty.
2161  Label not_string;
2162  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2163  __ j(above_equal, &not_string, Label::kNear);
2164  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2165  __ j(not_zero, instr->TrueLabel(chunk_));
2166  __ jmp(instr->FalseLabel(chunk_));
2167  __ bind(&not_string);
2168  }
2169 
2170  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2171  // Symbol value -> true.
2172  __ CmpInstanceType(map, SYMBOL_TYPE);
2173  __ j(equal, instr->TrueLabel(chunk_));
2174  }
2175 
2176  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2177  // heap number -> false iff +0, -0, or NaN.
2178  Label not_heap_number;
2179  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2180  factory()->heap_number_map());
2181  __ j(not_equal, &not_heap_number, Label::kNear);
2182  XMMRegister xmm_scratch = double_scratch0();
2183  __ xorps(xmm_scratch, xmm_scratch);
2184  __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2185  __ j(zero, instr->FalseLabel(chunk_));
2186  __ jmp(instr->TrueLabel(chunk_));
2187  __ bind(&not_heap_number);
2188  }
2189 
2190  if (!expected.IsGeneric()) {
2191  // We've seen something for the first time -> deopt.
2192  // This can only happen if we are not generic already.
2193  DeoptimizeIf(no_condition, instr, "unexpected object");
2194  }
2195  }
2196  }
2197 }
2198 
2199 
2200 void LCodeGen::EmitGoto(int block) {
2201  if (!IsNextEmittedBlock(block)) {
2202  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2203  }
2204 }
2205 
2206 
2207 void LCodeGen::DoGoto(LGoto* instr) {
2208  EmitGoto(instr->block_id());
2209 }
2210 
2211 
2212 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2213  Condition cond = no_condition;
2214  switch (op) {
2215  case Token::EQ:
2216  case Token::EQ_STRICT:
2217  cond = equal;
2218  break;
2219  case Token::NE:
2220  case Token::NE_STRICT:
2221  cond = not_equal;
2222  break;
2223  case Token::LT:
2224  cond = is_unsigned ? below : less;
2225  break;
2226  case Token::GT:
2227  cond = is_unsigned ? above : greater;
2228  break;
2229  case Token::LTE:
2230  cond = is_unsigned ? below_equal : less_equal;
2231  break;
2232  case Token::GTE:
2233  cond = is_unsigned ? above_equal : greater_equal;
2234  break;
2235  case Token::IN:
2236  case Token::INSTANCEOF:
2237  default:
2238  UNREACHABLE();
2239  }
2240  return cond;
2241 }
2242 
2243 
2244 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2245  LOperand* left = instr->left();
2246  LOperand* right = instr->right();
2247  bool is_unsigned =
2248  instr->is_double() ||
2249  instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2250  instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2251  Condition cc = TokenToCondition(instr->op(), is_unsigned);
2252 
2253  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2254  // We can statically evaluate the comparison.
2255  double left_val = ToDouble(LConstantOperand::cast(left));
2256  double right_val = ToDouble(LConstantOperand::cast(right));
2257  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2258  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2259  EmitGoto(next_block);
2260  } else {
2261  if (instr->is_double()) {
2262  __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2263  // Don't base result on EFLAGS when a NaN is involved. Instead
2264  // jump to the false block.
2265  __ j(parity_even, instr->FalseLabel(chunk_));
2266  } else {
2267  if (right->IsConstantOperand()) {
2268  __ cmp(ToOperand(left),
2269  ToImmediate(right, instr->hydrogen()->representation()));
2270  } else if (left->IsConstantOperand()) {
2271  __ cmp(ToOperand(right),
2272  ToImmediate(left, instr->hydrogen()->representation()));
2273  // We commuted the operands, so commute the condition.
2274  cc = CommuteCondition(cc);
2275  } else {
2276  __ cmp(ToRegister(left), ToOperand(right));
2277  }
2278  }
2279  EmitBranch(instr, cc);
2280  }
2281 }
2282 
2283 
2284 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2285  Register left = ToRegister(instr->left());
2286 
2287  if (instr->right()->IsConstantOperand()) {
2288  Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2289  __ CmpObject(left, right);
2290  } else {
2291  Operand right = ToOperand(instr->right());
2292  __ cmp(left, right);
2293  }
2294  EmitBranch(instr, equal);
2295 }
2296 
2297 
2298 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2299  if (instr->hydrogen()->representation().IsTagged()) {
2300  Register input_reg = ToRegister(instr->object());
2301  __ cmp(input_reg, factory()->the_hole_value());
2302  EmitBranch(instr, equal);
2303  return;
2304  }
2305 
2306  XMMRegister input_reg = ToDoubleRegister(instr->object());
2307  __ ucomisd(input_reg, input_reg);
2308  EmitFalseBranch(instr, parity_odd);
2309 
2310  __ sub(esp, Immediate(kDoubleSize));
2311  __ movsd(MemOperand(esp, 0), input_reg);
2312 
2313  __ add(esp, Immediate(kDoubleSize));
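// The double was spilled immediately below esp and nothing has been
// pushed since, so its upper word can still be read at esp - offset.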
2314  int offset = sizeof(kHoleNanUpper32);
2315  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2316  EmitBranch(instr, equal);
2317 }
2318 
2319 
2320 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2321  Representation rep = instr->hydrogen()->value()->representation();
2322  DCHECK(!rep.IsInteger32());
2323  Register scratch = ToRegister(instr->temp());
2324 
2325  if (rep.IsDouble()) {
2326  XMMRegister value = ToDoubleRegister(instr->value());
2327  XMMRegister xmm_scratch = double_scratch0();
2328  __ xorps(xmm_scratch, xmm_scratch);
2329  __ ucomisd(xmm_scratch, value);
2330  EmitFalseBranch(instr, not_equal);
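// The value compares equal to +0; movmskpd extracts the sign bits, and
// bit 0 distinguishes -0 (sign bit set) from +0.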
2331  __ movmskpd(scratch, value);
2332  __ test(scratch, Immediate(1));
2333  EmitBranch(instr, not_zero);
2334  } else {
2335  Register value = ToRegister(instr->value());
2336  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2337  __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2338  __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2339  Immediate(0x1));
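// Subtracting 1 sets the overflow flag only for 0x80000000, the upper
// word of -0; any other exponent word branches to the false block.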
2340  EmitFalseBranch(instr, no_overflow);
2341  __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2342  Immediate(0x00000000));
2343  EmitBranch(instr, equal);
2344  }
2345 }
2346 
2347 
2348 Condition LCodeGen::EmitIsObject(Register input,
2349  Register temp1,
2350  Label* is_not_object,
2351  Label* is_object) {
2352  __ JumpIfSmi(input, is_not_object);
2353 
2354  __ cmp(input, isolate()->factory()->null_value());
2355  __ j(equal, is_object);
2356 
2357  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2358  // Undetectable objects behave like undefined.
2359  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2360  1 << Map::kIsUndetectable);
2361  __ j(not_zero, is_not_object);
2362 
2363  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2364  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2365  __ j(below, is_not_object);
2366  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2367  return below_equal;
2368 }
2369 
2370 
2371 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2372  Register reg = ToRegister(instr->value());
2373  Register temp = ToRegister(instr->temp());
2374 
2375  Condition true_cond = EmitIsObject(
2376  reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2377 
2378  EmitBranch(instr, true_cond);
2379 }
2380 
2381 
2382 Condition LCodeGen::EmitIsString(Register input,
2383  Register temp1,
2384  Label* is_not_string,
2385  SmiCheck check_needed = INLINE_SMI_CHECK) {
2386  if (check_needed == INLINE_SMI_CHECK) {
2387  __ JumpIfSmi(input, is_not_string);
2388  }
2389 
2390  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2391 
2392  return cond;
2393 }
2394 
2395 
2396 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2397  Register reg = ToRegister(instr->value());
2398  Register temp = ToRegister(instr->temp());
2399 
2400  SmiCheck check_needed =
2401  instr->hydrogen()->value()->type().IsHeapObject()
2402  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2403 
2404  Condition true_cond = EmitIsString(
2405  reg, temp, instr->FalseLabel(chunk_), check_needed);
2406 
2407  EmitBranch(instr, true_cond);
2408 }
2409 
2410 
2411 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2412  Operand input = ToOperand(instr->value());
2413 
2414  __ test(input, Immediate(kSmiTagMask));
2415  EmitBranch(instr, zero);
2416 }
2417 
2418 
2419 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2420  Register input = ToRegister(instr->value());
2421  Register temp = ToRegister(instr->temp());
2422 
2423  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2424  STATIC_ASSERT(kSmiTag == 0);
2425  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2426  }
2427  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2428  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2429  1 << Map::kIsUndetectable);
2430  EmitBranch(instr, not_zero);
2431 }
2432 
2433 
2434 Condition LCodeGen::ComputeCompareCondition(Token::Value op) {
2435  switch (op) {
2436  case Token::EQ_STRICT:
2437  case Token::EQ:
2438  return equal;
2439  case Token::LT:
2440  return less;
2441  case Token::GT:
2442  return greater;
2443  case Token::LTE:
2444  return less_equal;
2445  case Token::GTE:
2446  return greater_equal;
2447  default:
2448  UNREACHABLE();
2449  return no_condition;
2450  }
2451 }
2452 
2453 
2454 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2455  Token::Value op = instr->op();
2456 
2457  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2458  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2459 
2460  Condition condition = ComputeCompareCondition(op);
2461  __ test(eax, Operand(eax));
2462 
2463  EmitBranch(instr, condition);
2464 }
2465 
2466 
2467 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2468  InstanceType from = instr->from();
2469  InstanceType to = instr->to();
2470  if (from == FIRST_TYPE) return to;
2471  DCHECK(from == to || to == LAST_TYPE);
2472  return from;
2473 }
2474 
2475 
2476 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2477  InstanceType from = instr->from();
2478  InstanceType to = instr->to();
2479  if (from == to) return equal;
2480  if (to == LAST_TYPE) return above_equal;
2481  if (from == FIRST_TYPE) return below_equal;
2482  UNREACHABLE();
2483  return equal;
2484 }
2485 
2486 
2487 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2488  Register input = ToRegister(instr->value());
2489  Register temp = ToRegister(instr->temp());
2490 
2491  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2492  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2493  }
2494 
2495  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2496  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2497 }
2498 
2499 
2500 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2501  Register input = ToRegister(instr->value());
2502  Register result = ToRegister(instr->result());
2503 
2504  __ AssertString(input);
2505 
2506  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2507  __ IndexFromHash(result, result);
2508 }
2509 
2510 
2511 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2512  LHasCachedArrayIndexAndBranch* instr) {
2513  Register input = ToRegister(instr->value());
2514 
2515  __ test(FieldOperand(input, String::kHashFieldOffset),
2516  Immediate(String::kContainsCachedArrayIndexMask));
2517  EmitBranch(instr, equal);
2518 }
2519 
2520 
2521 // Branches to a label or falls through with the answer in the z flag. Trashes
2522 // the temp registers, but not the input.
2523 void LCodeGen::EmitClassOfTest(Label* is_true,
2524  Label* is_false,
2525  Handle<String> class_name,
2526  Register input,
2527  Register temp,
2528  Register temp2) {
2529  DCHECK(!input.is(temp));
2530  DCHECK(!input.is(temp2));
2531  DCHECK(!temp.is(temp2));
2532  __ JumpIfSmi(input, is_false);
2533 
2534  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2535  // Assuming the following assertions, we can use the same compares to test
2536  // for both being a function type and being in the object type range.
2537  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2538  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2539  FIRST_SPEC_OBJECT_TYPE + 1);
2540  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2541  LAST_SPEC_OBJECT_TYPE - 1);
2542  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2543  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2544  __ j(below, is_false);
2545  __ j(equal, is_true);
2546  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2547  __ j(equal, is_true);
2548  } else {
2549  // Faster code path to avoid two compares: subtract lower bound from the
2550  // actual type and do a signed compare with the width of the type range.
2551  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2552  __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2553  __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2554  __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2555  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2556  __ j(above, is_false);
2557  }
2558 
2559  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2560  // Check if the constructor in the map is a function.
2561  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2562  // Objects with a non-function constructor have class 'Object'.
2563  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2564  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2565  __ j(not_equal, is_true);
2566  } else {
2567  __ j(not_equal, is_false);
2568  }
2569 
2570  // temp now contains the constructor function. Grab the
2571  // instance class name from there.
2572  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2573  __ mov(temp, FieldOperand(temp,
2574  SharedFunctionInfo::kInstanceClassNameOffset));
2575  // The class name we are testing against is internalized since it's a literal.
2576  // The name in the constructor is internalized because of the way the context
2577  // is booted. This routine isn't expected to work for random API-created
2578  // classes and it doesn't have to because you can't access it with natives
2579  // syntax. Since both sides are internalized it is sufficient to use an
2580  // identity comparison.
2581  __ cmp(temp, class_name);
2582  // End with the answer in the z flag.
2583 }
2584 
2585 
2586 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2587  Register input = ToRegister(instr->value());
2588  Register temp = ToRegister(instr->temp());
2589  Register temp2 = ToRegister(instr->temp2());
2590 
2591  Handle<String> class_name = instr->hydrogen()->class_name();
2592 
2593  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2594  class_name, input, temp, temp2);
2595 
2596  EmitBranch(instr, equal);
2597 }
2598 
2599 
2600 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2601  Register reg = ToRegister(instr->value());
2602  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2603  EmitBranch(instr, equal);
2604 }
2605 
2606 
2607 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2608  // Object and function are in fixed registers defined by the stub.
2609  DCHECK(ToRegister(instr->context()).is(esi));
2610  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2611  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2612 
2613  Label true_value, done;
2614  __ test(eax, Operand(eax));
2615  __ j(zero, &true_value, Label::kNear);
2616  __ mov(ToRegister(instr->result()), factory()->false_value());
2617  __ jmp(&done, Label::kNear);
2618  __ bind(&true_value);
2619  __ mov(ToRegister(instr->result()), factory()->true_value());
2620  __ bind(&done);
2621 }
2622 
2623 
2624 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2625  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2626  public:
2627  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2628  LInstanceOfKnownGlobal* instr)
2629  : LDeferredCode(codegen), instr_(instr) { }
2630  virtual void Generate() OVERRIDE {
2631  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2632  }
2633  virtual LInstruction* instr() OVERRIDE { return instr_; }
2634  Label* map_check() { return &map_check_; }
2635  private:
2636  LInstanceOfKnownGlobal* instr_;
2637  Label map_check_;
2638  };
2639 
2640  DeferredInstanceOfKnownGlobal* deferred;
2641  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2642 
2643  Label done, false_result;
2644  Register object = ToRegister(instr->value());
2645  Register temp = ToRegister(instr->temp());
2646 
2647  // A Smi is not an instance of anything.
2648  __ JumpIfSmi(object, &false_result, Label::kNear);
2649 
2650  // This is the inlined call site instanceof cache. The two occurrences of the
2651  // hole value will be patched to the last map/result pair generated by the
2652  // instanceof stub.
2653  Label cache_miss;
2654  Register map = ToRegister(instr->temp());
2655  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2656  __ bind(deferred->map_check()); // Label for calculating code patching.
2657  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2658  __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
2659  __ j(not_equal, &cache_miss, Label::kNear);
2660  __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2661  __ jmp(&done, Label::kNear);
2662 
2663  // The inlined call site cache did not match. Check for null and string
2664  // before calling the deferred code.
2665  __ bind(&cache_miss);
2666  // Null is not an instance of anything.
2667  __ cmp(object, factory()->null_value());
2668  __ j(equal, &false_result, Label::kNear);
2669 
2670  // String values are not instances of anything.
2671  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2672  __ j(is_string, &false_result, Label::kNear);
2673 
2674  // Go to the deferred code.
2675  __ jmp(deferred->entry());
2676 
2677  __ bind(&false_result);
2678  __ mov(ToRegister(instr->result()), factory()->false_value());
2679 
2680  // Here result has either true or false. Deferred code also produces true or
2681  // false object.
2682  __ bind(deferred->exit());
2683  __ bind(&done);
2684 }
2685 
2686 
2687 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2688  Label* map_check) {
2689  PushSafepointRegistersScope scope(this);
2690 
2691  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2692  flags = static_cast<InstanceofStub::Flags>(
2693  flags | InstanceofStub::kArgsInRegisters);
2694  flags = static_cast<InstanceofStub::Flags>(
2695  flags | InstanceofStub::kCallSiteInlineCheck);
2696  flags = static_cast<InstanceofStub::Flags>(
2697  flags | InstanceofStub::kReturnTrueFalseObject);
2698  InstanceofStub stub(isolate(), flags);
2699 
2700  // Get the temp register reserved by the instruction. This needs to be a
2701  // register which is pushed last by PushSafepointRegisters as top of the
2702  // stack is used to pass the offset to the location of the map check to
2703  // the stub.
2704  Register temp = ToRegister(instr->temp());
2705  DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
2706  __ LoadHeapObject(InstanceofStub::right(), instr->function());
2707  static const int kAdditionalDelta = 13;
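// delta is the distance from the inlined map check to the stub call's
// return address; per the comments above, the stub uses it to locate and
// patch the cached map/result pair at the call site.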
2708  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2709  __ mov(temp, Immediate(delta));
2710  __ StoreToSafepointRegisterSlot(temp, temp);
2711  CallCodeGeneric(stub.GetCode(),
2712  RelocInfo::CODE_TARGET,
2713  instr,
2714  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2715  // Get the deoptimization index of the LLazyBailout-environment that
2716  // corresponds to this instruction.
2717  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2718  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2719 
2720  // Put the result value into the eax slot and restore all registers.
2721  __ StoreToSafepointRegisterSlot(eax, eax);
2722 }
2723 
2724 
2725 void LCodeGen::DoCmpT(LCmpT* instr) {
2726  Token::Value op = instr->op();
2727 
2728  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2729  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2730 
2731  Condition condition = ComputeCompareCondition(op);
2732  Label true_value, done;
2733  __ test(eax, Operand(eax));
2734  __ j(condition, &true_value, Label::kNear);
2735  __ mov(ToRegister(instr->result()), factory()->false_value());
2736  __ jmp(&done, Label::kNear);
2737  __ bind(&true_value);
2738  __ mov(ToRegister(instr->result()), factory()->true_value());
2739  __ bind(&done);
2740 }
2741 
2742 
2743 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
2744  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
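// One extra word for the return address, plus one for the alignment
// padding word when the frame was dynamically aligned.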
2745 
2746  if (instr->has_constant_parameter_count()) {
2747  int parameter_count = ToInteger32(instr->constant_parameter_count());
2748  if (dynamic_frame_alignment && FLAG_debug_code) {
2749  __ cmp(Operand(esp,
2750  (parameter_count + extra_value_count) * kPointerSize),
2751  Immediate(kAlignmentZapValue));
2752  __ Assert(equal, kExpectedAlignmentMarker);
2753  }
2754  __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
2755  } else {
2756  Register reg = ToRegister(instr->parameter_count());
2757  // The argument count parameter is a smi
2758  __ SmiUntag(reg);
2759  Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
2760  if (dynamic_frame_alignment && FLAG_debug_code) {
2761  DCHECK(extra_value_count == 2);
2762  __ cmp(Operand(esp, reg, times_pointer_size,
2763  extra_value_count * kPointerSize),
2764  Immediate(kAlignmentZapValue));
2765  __ Assert(equal, kExpectedAlignmentMarker);
2766  }
2767 
2768  // emit code to restore stack based on instr->parameter_count()
2769  __ pop(return_addr_reg); // save return address
2770  if (dynamic_frame_alignment) {
2771  __ inc(reg); // 1 more for alignment
2772  }
2773  __ shl(reg, kPointerSizeLog2);
2774  __ add(esp, reg);
2775  __ jmp(return_addr_reg);
2776  }
2777 }
2778 
2779 
2780 void LCodeGen::DoReturn(LReturn* instr) {
2781  if (FLAG_trace && info()->IsOptimizing()) {
2782  // Preserve the return value on the stack and rely on the runtime call
2783  // to return the value in the same register. We're leaving the code
2784  // managed by the register allocator and tearing down the frame, so it's
2785  // safe to write to the context register.
2786  __ push(eax);
2787  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2788  __ CallRuntime(Runtime::kTraceExit, 1);
2789  }
2790  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
2791  if (dynamic_frame_alignment_) {
2792  // Fetch the state of the dynamic frame alignment.
2793  __ mov(edx, Operand(ebp,
2794  JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2795  }
2796  int no_frame_start = -1;
2797  if (NeedsEagerFrame()) {
2798  __ mov(esp, ebp);
2799  __ pop(ebp);
2800  no_frame_start = masm_->pc_offset();
2801  }
2802  if (dynamic_frame_alignment_) {
2803  Label no_padding;
2804  __ cmp(edx, Immediate(kNoAlignmentPadding));
2805  __ j(equal, &no_padding, Label::kNear);
2806 
2807  EmitReturn(instr, true);
2808  __ bind(&no_padding);
2809  }
2810 
2811  EmitReturn(instr, false);
2812  if (no_frame_start != -1) {
2813  info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2814  }
2815 }
2816 
2817 
2818 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2819  Register result = ToRegister(instr->result());
2820  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
2821  if (instr->hydrogen()->RequiresHoleCheck()) {
2822  __ cmp(result, factory()->the_hole_value());
2823  DeoptimizeIf(equal, instr, "hole");
2824  }
2825 }
2826 
2827 
2828 template <class T>
2829 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2830  DCHECK(FLAG_vector_ics);
2831  Register vector = ToRegister(instr->temp_vector());
2832  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2833  __ mov(vector, instr->hydrogen()->feedback_vector());
2834  // No need to allocate this register.
2835  DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
2836  __ mov(VectorLoadICDescriptor::SlotRegister(),
2837  Immediate(Smi::FromInt(instr->hydrogen()->slot())));
2838 }
2839 
2840 
2841 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2842  DCHECK(ToRegister(instr->context()).is(esi));
2843  DCHECK(ToRegister(instr->global_object())
2844  .is(LoadDescriptor::ReceiverRegister()));
2845  DCHECK(ToRegister(instr->result()).is(eax));
2846 
2847  __ mov(LoadDescriptor::NameRegister(), instr->name());
2848  if (FLAG_vector_ics) {
2849  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2850  }
2851  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2852  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
2853  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2854 }
2855 
2856 
2857 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2858  Register value = ToRegister(instr->value());
2859  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
2860 
2861  // If the cell we are storing to contains the hole it could have
2862  // been deleted from the property dictionary. In that case, we need
2863  // to update the property details in the property dictionary to mark
2864  // it as no longer deleted. We deoptimize in that case.
2865  if (instr->hydrogen()->RequiresHoleCheck()) {
2866  __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
2867  DeoptimizeIf(equal, instr, "hole");
2868  }
2869 
2870  // Store the value.
2871  __ mov(Operand::ForCell(cell_handle), value);
2872  // Cells are always rescanned, so no write barrier here.
2873 }
2874 
2875 
2876 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2877  Register context = ToRegister(instr->context());
2878  Register result = ToRegister(instr->result());
2879  __ mov(result, ContextOperand(context, instr->slot_index()));
2880 
2881  if (instr->hydrogen()->RequiresHoleCheck()) {
2882  __ cmp(result, factory()->the_hole_value());
2883  if (instr->hydrogen()->DeoptimizesOnHole()) {
2884  DeoptimizeIf(equal, instr, "hole");
2885  } else {
2886  Label is_not_hole;
2887  __ j(not_equal, &is_not_hole, Label::kNear);
2888  __ mov(result, factory()->undefined_value());
2889  __ bind(&is_not_hole);
2890  }
2891  }
2892 }
2893 
2894 
2895 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2896  Register context = ToRegister(instr->context());
2897  Register value = ToRegister(instr->value());
2898 
2899  Label skip_assignment;
2900 
2901  Operand target = ContextOperand(context, instr->slot_index());
2902  if (instr->hydrogen()->RequiresHoleCheck()) {
2903  __ cmp(target, factory()->the_hole_value());
2904  if (instr->hydrogen()->DeoptimizesOnHole()) {
2905  DeoptimizeIf(equal, instr, "hole");
2906  } else {
2907  __ j(not_equal, &skip_assignment, Label::kNear);
2908  }
2909  }
2910 
2911  __ mov(target, value);
2912  if (instr->hydrogen()->NeedsWriteBarrier()) {
2913  SmiCheck check_needed =
2914  instr->hydrogen()->value()->type().IsHeapObject()
2915  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2916  Register temp = ToRegister(instr->temp());
2917  int offset = Context::SlotOffset(instr->slot_index());
2918  __ RecordWriteContextSlot(context,
2919  offset,
2920  value,
2921  temp,
2922  kSaveFPRegs,
2923  EMIT_REMEMBERED_SET,
2924  check_needed);
2925  }
2926 
2927  __ bind(&skip_assignment);
2928 }
2929 
2930 
2931 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2932  HObjectAccess access = instr->hydrogen()->access();
2933  int offset = access.offset();
2934 
2935  if (access.IsExternalMemory()) {
2936  Register result = ToRegister(instr->result());
2937  MemOperand operand = instr->object()->IsConstantOperand()
2938  ? MemOperand::StaticVariable(ToExternalReference(
2939  LConstantOperand::cast(instr->object())))
2940  : MemOperand(ToRegister(instr->object()), offset);
2941  __ Load(result, operand, access.representation());
2942  return;
2943  }
2944 
2945  Register object = ToRegister(instr->object());
2946  if (instr->hydrogen()->representation().IsDouble()) {
2947  XMMRegister result = ToDoubleRegister(instr->result());
2948  __ movsd(result, FieldOperand(object, offset));
2949  return;
2950  }
2951 
2952  Register result = ToRegister(instr->result());
2953  if (!access.IsInobject()) {
2954  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2955  object = result;
2956  }
2957  __ Load(result, FieldOperand(object, offset), access.representation());
2958 }
2959 
2960 
2961 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
2962  DCHECK(!operand->IsDoubleRegister());
2963  if (operand->IsConstantOperand()) {
2964  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
2965  AllowDeferredHandleDereference smi_check;
2966  if (object->IsSmi()) {
2967  __ Push(Handle<Smi>::cast(object));
2968  } else {
2969  __ PushHeapObject(Handle<HeapObject>::cast(object));
2970  }
2971  } else if (operand->IsRegister()) {
2972  __ push(ToRegister(operand));
2973  } else {
2974  __ push(ToOperand(operand));
2975  }
2976 }
2977 
2978 
2979 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2980  DCHECK(ToRegister(instr->context()).is(esi));
2981  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2982  DCHECK(ToRegister(instr->result()).is(eax));
2983 
2984  __ mov(LoadDescriptor::NameRegister(), instr->name());
2985  if (FLAG_vector_ics) {
2986  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2987  }
2988  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
2989  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2990 }
2991 
2992 
2993 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2994  Register function = ToRegister(instr->function());
2995  Register temp = ToRegister(instr->temp());
2996  Register result = ToRegister(instr->result());
2997 
2998  // Get the prototype or initial map from the function.
2999  __ mov(result,
3000  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3001 
3002  // Check that the function has a prototype or an initial map.
3003  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3004  DeoptimizeIf(equal, instr, "hole");
3005 
3006  // If the function does not have an initial map, we're done.
3007  Label done;
3008  __ CmpObjectType(result, MAP_TYPE, temp);
3009  __ j(not_equal, &done, Label::kNear);
3010 
3011  // Get the prototype from the initial map.
3012  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3013 
3014  // All done.
3015  __ bind(&done);
3016 }
3017 
3018 
3019 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3020  Register result = ToRegister(instr->result());
3021  __ LoadRoot(result, instr->index());
3022 }
3023 
3024 
3025 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3026  Register arguments = ToRegister(instr->arguments());
3027  Register result = ToRegister(instr->result());
3028  if (instr->length()->IsConstantOperand() &&
3029  instr->index()->IsConstantOperand()) {
3030  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3031  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3032  int index = (const_length - const_index) + 1;
3033  __ mov(result, Operand(arguments, index * kPointerSize));
3034  } else {
3035  Register length = ToRegister(instr->length());
3036  Operand index = ToOperand(instr->index());
3037  // There are two words between the frame pointer and the last argument.
3038  // Subtracting the index from the length accounts for one of them; add one
3038  // more for the other.
3039  __ sub(length, index);
3040  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3041  }
3042 }
3043 
3044 
3045 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3046  ElementsKind elements_kind = instr->elements_kind();
3047  LOperand* key = instr->key();
3048  if (!key->IsConstantOperand() &&
3049  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3050  elements_kind)) {
3051  __ SmiUntag(ToRegister(key));
3052  }
3053  Operand operand(BuildFastArrayOperand(
3054  instr->elements(),
3055  key,
3056  instr->hydrogen()->key()->representation(),
3057  elements_kind,
3058  instr->base_offset()));
3059  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3060  elements_kind == FLOAT32_ELEMENTS) {
3061  XMMRegister result(ToDoubleRegister(instr->result()));
3062  __ movss(result, operand);
3063  __ cvtss2sd(result, result);
3064  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3065  elements_kind == FLOAT64_ELEMENTS) {
3066  __ movsd(ToDoubleRegister(instr->result()), operand);
3067  } else {
3068  Register result(ToRegister(instr->result()));
3069  switch (elements_kind) {
3070  case EXTERNAL_INT8_ELEMENTS:
3071  case INT8_ELEMENTS:
3072  __ movsx_b(result, operand);
3073  break;
3074  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3075  case EXTERNAL_UINT8_ELEMENTS:
3076  case UINT8_ELEMENTS:
3077  case UINT8_CLAMPED_ELEMENTS:
3078  __ movzx_b(result, operand);
3079  break;
3080  case EXTERNAL_INT16_ELEMENTS:
3081  case INT16_ELEMENTS:
3082  __ movsx_w(result, operand);
3083  break;
3084  case EXTERNAL_UINT16_ELEMENTS:
3085  case UINT16_ELEMENTS:
3086  __ movzx_w(result, operand);
3087  break;
3088  case EXTERNAL_INT32_ELEMENTS:
3089  case INT32_ELEMENTS:
3090  __ mov(result, operand);
3091  break;
3092  case EXTERNAL_UINT32_ELEMENTS:
3093  case UINT32_ELEMENTS:
3094  __ mov(result, operand);
3095  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3096  __ test(result, Operand(result));
3097  DeoptimizeIf(negative, instr, "negative value");
3098  }
3099  break;
3100  case EXTERNAL_FLOAT32_ELEMENTS:
3101  case EXTERNAL_FLOAT64_ELEMENTS:
3102  case FLOAT32_ELEMENTS:
3103  case FLOAT64_ELEMENTS:
3104  case FAST_SMI_ELEMENTS:
3105  case FAST_ELEMENTS:
3106  case FAST_DOUBLE_ELEMENTS:
3107  case FAST_HOLEY_SMI_ELEMENTS:
3108  case FAST_HOLEY_ELEMENTS:
3109  case FAST_HOLEY_DOUBLE_ELEMENTS:
3110  case DICTIONARY_ELEMENTS:
3111  case SLOPPY_ARGUMENTS_ELEMENTS:
3112  UNREACHABLE();
3113  break;
3114  }
3115  }
3116 }
3117 
3118 
3119 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3120  if (instr->hydrogen()->RequiresHoleCheck()) {
3121  Operand hole_check_operand = BuildFastArrayOperand(
3122  instr->elements(), instr->key(),
3123  instr->hydrogen()->key()->representation(),
3124  FAST_DOUBLE_ELEMENTS,
3125  instr->base_offset() + sizeof(kHoleNanLower32));
3126  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3127  DeoptimizeIf(equal, instr, "hole");
3128  }
3129 
3130  Operand double_load_operand = BuildFastArrayOperand(
3131  instr->elements(),
3132  instr->key(),
3133  instr->hydrogen()->key()->representation(),
3134  FAST_DOUBLE_ELEMENTS,
3135  instr->base_offset());
3136  XMMRegister result = ToDoubleRegister(instr->result());
3137  __ movsd(result, double_load_operand);
3138 }
3139 
3140 
3141 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3142  Register result = ToRegister(instr->result());
3143 
3144  // Load the result.
3145  __ mov(result,
3146  BuildFastArrayOperand(instr->elements(), instr->key(),
3147  instr->hydrogen()->key()->representation(),
3148  FAST_ELEMENTS, instr->base_offset()));
3149 
3150  // Check for the hole value.
3151  if (instr->hydrogen()->RequiresHoleCheck()) {
3152  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3153  __ test(result, Immediate(kSmiTagMask));
3154  DeoptimizeIf(not_equal, instr, "not a Smi");
3155  } else {
3156  __ cmp(result, factory()->the_hole_value());
3157  DeoptimizeIf(equal, instr, "hole");
3158  }
3159  }
3160 }
3161 
3162 
3163 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3164  if (instr->is_typed_elements()) {
3165  DoLoadKeyedExternalArray(instr);
3166  } else if (instr->hydrogen()->representation().IsDouble()) {
3167  DoLoadKeyedFixedDoubleArray(instr);
3168  } else {
3169  DoLoadKeyedFixedArray(instr);
3170  }
3171 }
3172 
3173 
3174 Operand LCodeGen::BuildFastArrayOperand(
3175  LOperand* elements_pointer,
3176  LOperand* key,
3177  Representation key_representation,
3178  ElementsKind elements_kind,
3179  uint32_t base_offset) {
3180  Register elements_pointer_reg = ToRegister(elements_pointer);
3181  int element_shift_size = ElementsKindToShiftSize(elements_kind);
3182  int shift_size = element_shift_size;
3183  if (key->IsConstantOperand()) {
3184  int constant_value = ToInteger32(LConstantOperand::cast(key));
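// With a shift of up to 3 the scaled constant offset could overflow a
// 32-bit displacement, so conservatively abort if any of the top four
// bits are set.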
3185  if (constant_value & 0xF0000000) {
3186  Abort(kArrayIndexConstantValueTooBig);
3187  }
3188  return Operand(elements_pointer_reg,
3189  ((constant_value) << shift_size)
3190  + base_offset);
3191  } else {
3192  // Take the tag bit into account while computing the shift size.
3193  if (key_representation.IsSmi() && (shift_size >= 1)) {
3194  shift_size -= kSmiTagSize;
3195  }
3196  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3197  return Operand(elements_pointer_reg,
3198  ToRegister(key),
3199  scale_factor,
3200  base_offset);
3201  }
3202 }
3203 
3204 
3205 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3206  DCHECK(ToRegister(instr->context()).is(esi));
3207  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3208  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3209 
3210  if (FLAG_vector_ics) {
3211  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3212  }
3213 
3214  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3215  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3216 }
3217 
3218 
3219 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3220  Register result = ToRegister(instr->result());
3221 
3222  if (instr->hydrogen()->from_inlined()) {
3223  __ lea(result, Operand(esp, -2 * kPointerSize));
3224  } else {
3225  // Check for arguments adapter frame.
3226  Label done, adapted;
3227  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3228  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3229  __ cmp(Operand(result),
3230  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3231  __ j(equal, &adapted, Label::kNear);
3232 
3233  // No arguments adaptor frame.
3234  __ mov(result, Operand(ebp));
3235  __ jmp(&done, Label::kNear);
3236 
3237  // Arguments adaptor frame present.
3238  __ bind(&adapted);
3239  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3240 
3241  // Result is the frame pointer for the frame if not adapted and for the real
3242  // frame below the adaptor frame if adapted.
3243  __ bind(&done);
3244  }
3245 }
3246 
3247 
3248 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3249  Operand elem = ToOperand(instr->elements());
3250  Register result = ToRegister(instr->result());
3251 
3252  Label done;
3253 
3254  // If no arguments adaptor frame the number of arguments is fixed.
3255  __ cmp(ebp, elem);
3256  __ mov(result, Immediate(scope()->num_parameters()));
3257  __ j(equal, &done, Label::kNear);
3258 
3259  // Arguments adaptor frame present. Get argument length from there.
3260  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3261  __ mov(result, Operand(result,
3262  ArgumentsAdaptorFrameConstants::kLengthOffset));
3263  __ SmiUntag(result);
3264 
3265  // Argument length is in result register.
3266  __ bind(&done);
3267 }
3268 
3269 
3270 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3271  Register receiver = ToRegister(instr->receiver());
3272  Register function = ToRegister(instr->function());
3273 
3274  // If the receiver is null or undefined, we have to pass the global
3275  // object as a receiver to normal functions. Values have to be
3276  // passed unchanged to builtins and strict-mode functions.
3277  Label receiver_ok, global_object;
3278  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3279  Register scratch = ToRegister(instr->temp());
3280 
3281  if (!instr->hydrogen()->known_function()) {
3282  // Do not transform the receiver to object for strict mode
3283  // functions.
3284  __ mov(scratch,
3285  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3286  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3287  1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3288  __ j(not_equal, &receiver_ok, dist);
3289 
3290  // Do not transform the receiver to object for builtins.
3291  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3292  1 << SharedFunctionInfo::kNativeBitWithinByte);
3293  __ j(not_equal, &receiver_ok, dist);
3294  }
3295 
3296  // Normal function. Replace undefined or null with global receiver.
3297  __ cmp(receiver, factory()->null_value());
3298  __ j(equal, &global_object, Label::kNear);
3299  __ cmp(receiver, factory()->undefined_value());
3300  __ j(equal, &global_object, Label::kNear);
3301 
3302  // The receiver should be a JS object.
3303  __ test(receiver, Immediate(kSmiTagMask));
3304  DeoptimizeIf(equal, instr, "Smi");
3305  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3306  DeoptimizeIf(below, instr, "not a JavaScript object");
3307 
3308  __ jmp(&receiver_ok, Label::kNear);
3309  __ bind(&global_object);
3310  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3311  const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3312  __ mov(receiver, Operand(receiver, global_offset));
3313  const int proxy_offset = GlobalObject::kGlobalProxyOffset;
3314  __ mov(receiver, FieldOperand(receiver, proxy_offset));
3315  __ bind(&receiver_ok);
3316 }
3317 
3318 
3319 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3320  Register receiver = ToRegister(instr->receiver());
3321  Register function = ToRegister(instr->function());
3322  Register length = ToRegister(instr->length());
3323  Register elements = ToRegister(instr->elements());
3324  DCHECK(receiver.is(eax)); // Used for parameter count.
3325  DCHECK(function.is(edi)); // Required by InvokeFunction.
3326  DCHECK(ToRegister(instr->result()).is(eax));
3327 
3328  // Copy the arguments to this function possibly from the
3329  // adaptor frame below it.
3330  const uint32_t kArgumentsLimit = 1 * KB;
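// Capping the argument count bounds the push loop below and guards
// against overflowing the stack.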
3331  __ cmp(length, kArgumentsLimit);
3332  DeoptimizeIf(above, instr, "too many arguments");
3333 
3334  __ push(receiver);
3335  __ mov(receiver, length);
3336 
3337  // Loop through the arguments pushing them onto the execution
3338  // stack.
3339  Label invoke, loop;
3340  // length is a small non-negative integer, due to the test above.
3341  __ test(length, Operand(length));
3342  __ j(zero, &invoke, Label::kNear);
3343  __ bind(&loop);
3344  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3345  __ dec(length);
3346  __ j(not_zero, &loop);
3347 
3348  // Invoke the function.
3349  __ bind(&invoke);
3350  DCHECK(instr->HasPointerMap());
3351  LPointerMap* pointers = instr->pointer_map();
3352  SafepointGenerator safepoint_generator(
3353  this, pointers, Safepoint::kLazyDeopt);
3354  ParameterCount actual(eax);
3355  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3356 }
3357 
3358 
3359 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3360  __ int3();
3361 }
3362 
3363 
3364 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3365  LOperand* argument = instr->value();
3366  EmitPushTaggedOperand(argument);
3367 }
3368 
3369 
3370 void LCodeGen::DoDrop(LDrop* instr) {
3371  __ Drop(instr->count());
3372 }
3373 
3374 
3375 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3376  Register result = ToRegister(instr->result());
3377  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3378 }
3379 
3380 
3381 void LCodeGen::DoContext(LContext* instr) {
3382  Register result = ToRegister(instr->result());
3383  if (info()->IsOptimizing()) {
3384  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3385  } else {
3386  // If there is no frame, the context must be in esi.
3387  DCHECK(result.is(esi));
3388  }
3389 }
3390 
3391 
3392 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3393  DCHECK(ToRegister(instr->context()).is(esi));
3394  __ push(esi); // The context is the first argument.
3395  __ push(Immediate(instr->hydrogen()->pairs()));
3396  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3397  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3398 }
3399 
3400 
3401 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3402  int formal_parameter_count,
3403  int arity,
3404  LInstruction* instr,
3405  EDIState edi_state) {
3406  bool dont_adapt_arguments =
3407  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3408  bool can_invoke_directly =
3409  dont_adapt_arguments || formal_parameter_count == arity;
3410 
3411  if (can_invoke_directly) {
3412  if (edi_state == EDI_UNINITIALIZED) {
3413  __ LoadHeapObject(edi, function);
3414  }
3415 
3416  // Change context.
3417  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3418 
3419  // Set eax to arguments count if adaption is not needed. Assumes that eax
3420  // is available to write to at this point.
3421  if (dont_adapt_arguments) {
3422  __ mov(eax, arity);
3423  }
3424 
3425  // Invoke function directly.
3426  if (function.is_identical_to(info()->closure())) {
3427  __ CallSelf();
3428  } else {
3429  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3430  }
3431  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3432  } else {
3433  // We need to adapt arguments.
3434  LPointerMap* pointers = instr->pointer_map();
3435  SafepointGenerator generator(
3436  this, pointers, Safepoint::kLazyDeopt);
3437  ParameterCount count(arity);
3438  ParameterCount expected(formal_parameter_count);
3439  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3440  }
3441 }
3442 
3443 
3444 void LCodeGen::DoTailCallThroughMegamorphicCache(
3445  LTailCallThroughMegamorphicCache* instr) {
3446  Register receiver = ToRegister(instr->receiver());
3447  Register name = ToRegister(instr->name());
3448  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3449  DCHECK(name.is(LoadDescriptor::NameRegister()));
3450 
3451  Register scratch = ebx;
3452  Register extra = eax;
3453  DCHECK(!scratch.is(receiver) && !scratch.is(name));
3454  DCHECK(!extra.is(receiver) && !extra.is(name));
3455 
3456  // Important for the tail-call.
3457  bool must_teardown_frame = NeedsEagerFrame();
3458 
3459  // The probe will tail call to a handler if found.
3460  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3461  must_teardown_frame, receiver, name,
3462  scratch, extra);
3463 
3464  // Tail call to miss if we ended up here.
3465  if (must_teardown_frame) __ leave();
3466  LoadIC::GenerateMiss(masm());
3467 }
3468 
3469 
3470 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3471  DCHECK(ToRegister(instr->result()).is(eax));
3472 
3473  LPointerMap* pointers = instr->pointer_map();
3474  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3475 
3476  if (instr->target()->IsConstantOperand()) {
3477  LConstantOperand* target = LConstantOperand::cast(instr->target());
3478  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3479  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3480  __ call(code, RelocInfo::CODE_TARGET);
3481  } else {
3482  DCHECK(instr->target()->IsRegister());
3483  Register target = ToRegister(instr->target());
3484  generator.BeforeCall(__ CallSize(Operand(target)));
3485  __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3486  __ call(target);
3487  }
3488  generator.AfterCall();
3489 }
3490 
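// A minimal sketch (an editorial aside, not part of the original file): a
// Code object is a tagged heap pointer, so its first instruction lives past
// the header, minus the tag; the add above computes exactly that address:
static inline unsigned SketchCodeEntry(unsigned tagged_code,
                                       unsigned header_size,
                                       unsigned heap_object_tag) {
  return tagged_code + header_size - heap_object_tag;
}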
3491 
3492 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3493  DCHECK(ToRegister(instr->function()).is(edi));
3494  DCHECK(ToRegister(instr->result()).is(eax));
3495 
3496  if (instr->hydrogen()->pass_argument_count()) {
3497  __ mov(eax, instr->arity());
3498  }
3499 
3500  // Change context.
3501  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3502 
3503  bool is_self_call = false;
3504  if (instr->hydrogen()->function()->IsConstant()) {
3505  HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3506  Handle<JSFunction> jsfun =
3507  Handle<JSFunction>::cast(fun_const->handle(isolate()));
3508  is_self_call = jsfun.is_identical_to(info()->closure());
3509  }
3510 
3511  if (is_self_call) {
3512  __ CallSelf();
3513  } else {
3514  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3515  }
3516 
3517  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3518 }
3519 
3520 
3521 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3522  Register input_reg = ToRegister(instr->value());
3523  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3524  factory()->heap_number_map());
3525  DeoptimizeIf(not_equal, instr, "not a heap number");
3526 
3527  Label slow, allocated, done;
3528  Register tmp = input_reg.is(eax) ? ecx : eax;
3529  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3530 
3531  // Preserve the value of all registers.
3532  PushSafepointRegistersScope scope(this);
3533 
3534  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3535  // Check the sign of the argument. If the argument is positive, just
3536  // return it. We do not need to patch the stack since |input| and
3537  // |result| are the same register and |input| will be restored
3538  // unchanged by popping safepoint registers.
3539  __ test(tmp, Immediate(HeapNumber::kSignMask));
3540  __ j(zero, &done, Label::kNear);
3541 
3542  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3543  __ jmp(&allocated, Label::kNear);
3544 
3545  // Slow case: Call the runtime system to do the number allocation.
3546  __ bind(&slow);
3547  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3548  instr, instr->context());
3549  // Set the pointer to the new heap number in tmp.
3550  if (!tmp.is(eax)) __ mov(tmp, eax);
3551  // Restore input_reg after call to runtime.
3552  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3553 
3554  __ bind(&allocated);
3555  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3556  __ and_(tmp2, ~HeapNumber::kSignMask);
3557  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3558  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3559  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3560  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3561 
3562  __ bind(&done);
3563 }
3564 
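// A minimal sketch of the bit surgery above (an editorial aside, not part of
// the original file): abs of a boxed double clears the sign bit in the high
// (sign/exponent) word and copies the mantissa word verbatim:
static inline unsigned SketchAbsHighWord(unsigned hi_word) {
  return hi_word & ~0x80000000u;  // HeapNumber::kSignMask is bit 31
}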
3565 
3566 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3567  Register input_reg = ToRegister(instr->value());
3568  __ test(input_reg, Operand(input_reg));
3569  Label is_positive;
3570  __ j(not_sign, &is_positive, Label::kNear);
3571  __ neg(input_reg); // Sets flags.
3572  DeoptimizeIf(negative, instr, "overflow");
3573  __ bind(&is_positive);
3574 }
3575 
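// A minimal sketch (an editorial aside, not part of the original file): the
// only negative input whose negation is still negative is kMinInt (-2^31),
// whose magnitude is not representable in int32; that is exactly the case
// the "overflow" deopt above rejects:
static inline int SketchNegOverflows(int value) {
  return value == -2147483647 - 1;  // INT32_MIN
}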
3576 
3577 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3578  // Class for deferred case.
3579  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3580  public:
3581  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3582  LMathAbs* instr)
3583  : LDeferredCode(codegen), instr_(instr) { }
3584  virtual void Generate() OVERRIDE {
3585  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3586  }
3587  virtual LInstruction* instr() OVERRIDE { return instr_; }
3588  private:
3589  LMathAbs* instr_;
3590  };
3591 
3592  DCHECK(instr->value()->Equals(instr->result()));
3593  Representation r = instr->hydrogen()->value()->representation();
3594 
3595  if (r.IsDouble()) {
3596  XMMRegister scratch = double_scratch0();
3597  XMMRegister input_reg = ToDoubleRegister(instr->value());
3598  __ xorps(scratch, scratch);
3599  __ subsd(scratch, input_reg);
3600  __ andps(input_reg, scratch);
3601  } else if (r.IsSmiOrInteger32()) {
3602  EmitIntegerMathAbs(instr);
3603  } else { // Tagged case.
3604  DeferredMathAbsTaggedHeapNumber* deferred =
3605  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3606  Register input_reg = ToRegister(instr->value());
3607  // Smi check.
3608  __ JumpIfNotSmi(input_reg, deferred->entry());
3609  EmitIntegerMathAbs(instr);
3610  __ bind(deferred->exit());
3611  }
3612 }
3613 
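// A minimal sketch (an editorial aside, not part of the original file): for
// finite x, 0.0 - x flips only the sign bit, and ANDing the bit patterns of
// x and -x zeroes the sign (s & ~s) while keeping every other bit (they
// agree in both operands), giving |x| without a branch:
static inline unsigned long long SketchAbsBits(unsigned long long x_bits) {
  return x_bits & ~(1ull << 63);  // net effect of the xorps/subsd/andps
}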
3614 
3615 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3616  XMMRegister xmm_scratch = double_scratch0();
3617  Register output_reg = ToRegister(instr->result());
3618  XMMRegister input_reg = ToDoubleRegister(instr->value());
3619 
3620  if (CpuFeatures::IsSupported(SSE4_1)) {
3621  CpuFeatureScope scope(masm(), SSE4_1);
3622  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3623  // Deoptimize on negative zero.
3624  Label non_zero;
3625  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3626  __ ucomisd(input_reg, xmm_scratch);
3627  __ j(not_equal, &non_zero, Label::kNear);
3628  __ movmskpd(output_reg, input_reg);
3629  __ test(output_reg, Immediate(1));
3630  DeoptimizeIf(not_zero, instr, "minus zero");
3631  __ bind(&non_zero);
3632  }
3633  __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3634  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3635  // Overflow is signalled with minint.
3636  __ cmp(output_reg, 0x1);
3637  DeoptimizeIf(overflow, instr, "overflow");
3638  } else {
3639  Label negative_sign, done;
3640  // Deoptimize on unordered.
3641  __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
3642  __ ucomisd(input_reg, xmm_scratch);
3643  DeoptimizeIf(parity_even, instr, "NaN");
3644  __ j(below, &negative_sign, Label::kNear);
3645 
3646  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3647  // Check for negative zero.
3648  Label positive_sign;
3649  __ j(above, &positive_sign, Label::kNear);
3650  __ movmskpd(output_reg, input_reg);
3651  __ test(output_reg, Immediate(1));
3652  DeoptimizeIf(not_zero, instr, "minus zero");
3653  __ Move(output_reg, Immediate(0));
3654  __ jmp(&done, Label::kNear);
3655  __ bind(&positive_sign);
3656  }
3657 
3658  // Use truncating instruction (OK because input is positive).
3659  __ cvttsd2si(output_reg, Operand(input_reg));
3660  // Overflow is signalled with minint.
3661  __ cmp(output_reg, 0x1);
3662  DeoptimizeIf(overflow, instr, "overflow");
3663  __ jmp(&done, Label::kNear);
3664 
3665  // Non-zero negative reaches here.
3666  __ bind(&negative_sign);
3667  // Truncate, then compare and compensate.
3668  __ cvttsd2si(output_reg, Operand(input_reg));
3669  __ Cvtsi2sd(xmm_scratch, output_reg);
3670  __ ucomisd(input_reg, xmm_scratch);
3671  __ j(equal, &done, Label::kNear);
3672  __ sub(output_reg, Immediate(1));
3673  DeoptimizeIf(overflow, instr, "overflow");
3674 
3675  __ bind(&done);
3676  }
3677 }
3678 
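// A minimal sketch (an editorial aside, not part of the original file):
// cvttsd2si reports overflow and NaN by returning 0x80000000 (kMinInt), and
// "cmp reg, 1" computes reg - 1, which overflows only when reg == kMinInt,
// so the overflow deopts above fire exactly on the sentinel:
static inline int SketchCvttsd2siFailed(int result) {
  return result == -2147483647 - 1;  // INT32_MIN, the sentinel
}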
3679 
3680 void LCodeGen::DoMathRound(LMathRound* instr) {
3681  Register output_reg = ToRegister(instr->result());
3682  XMMRegister input_reg = ToDoubleRegister(instr->value());
3683  XMMRegister xmm_scratch = double_scratch0();
3684  XMMRegister input_temp = ToDoubleRegister(instr->temp());
3685  ExternalReference one_half = ExternalReference::address_of_one_half();
3686  ExternalReference minus_one_half =
3687  ExternalReference::address_of_minus_one_half();
3688 
3689  Label done, round_to_zero, below_one_half, do_not_compensate;
3690  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3691 
3692  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
3693  __ ucomisd(xmm_scratch, input_reg);
3694  __ j(above, &below_one_half, Label::kNear);
3695 
3696  // CVTTSD2SI rounds towards zero; since 0.5 <= x, it computes floor(0.5 + x).
3697  __ addsd(xmm_scratch, input_reg);
3698  __ cvttsd2si(output_reg, Operand(xmm_scratch));
3699  // Overflow is signalled with minint.
3700  __ cmp(output_reg, 0x1);
3701  DeoptimizeIf(overflow, instr, "overflow");
3702  __ jmp(&done, dist);
3703 
3704  __ bind(&below_one_half);
3705  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
3706  __ ucomisd(xmm_scratch, input_reg);
3707  __ j(below_equal, &round_to_zero, Label::kNear);
3708 
3709  // CVTTSD2SI rounds towards zero, so it computes ceil(x - (-0.5));
3710  // we then compare and compensate.
3711  __ movaps(input_temp, input_reg); // Do not alter input_reg.
3712  __ subsd(input_temp, xmm_scratch);
3713  __ cvttsd2si(output_reg, Operand(input_temp));
3714  // Catch minint due to overflow, and to prevent overflow when compensating.
3715  __ cmp(output_reg, 0x1);
3716  DeoptimizeIf(overflow, instr, "overflow");
3717 
3718  __ Cvtsi2sd(xmm_scratch, output_reg);
3719  __ ucomisd(xmm_scratch, input_temp);
3720  __ j(equal, &done, dist);
3721  __ sub(output_reg, Immediate(1));
3722  // No overflow because we already ruled out minint.
3723  __ jmp(&done, dist);
3724 
3725  __ bind(&round_to_zero);
3726  // We return 0 for the input range [+0, 0.5), or [-0.5, 0.5) if
3727  // we can ignore the difference between a result of -0 and +0.
3728  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3729  // If the sign is positive, we return +0.
3730  __ movmskpd(output_reg, input_reg);
3731  __ test(output_reg, Immediate(1));
3732  DeoptimizeIf(not_zero, instr, "minus zero");
3733  }
3734  __ Move(output_reg, Immediate(0));
3735  __ bind(&done);
3736 }
3737 
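// A minimal C sketch of the case split above (an editorial aside, not part
// of the original file); it ignores the sign of zero, which the
// kBailoutOnMinusZero deopt handles separately:
#include <math.h>
static inline double SketchMathRound(double x) {
  if (x >= 0.5) return floor(x + 0.5);  // trunc(x + 0.5) == floor here
  if (x > -0.5) return 0.0;             // the round_to_zero path
  return floor(x + 0.5);                // trunc, then -1 when inexact
}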
3738 
3739 void LCodeGen::DoMathFround(LMathFround* instr) {
3740  XMMRegister input_reg = ToDoubleRegister(instr->value());
3741  XMMRegister output_reg = ToDoubleRegister(instr->result());
3742  __ cvtsd2ss(output_reg, input_reg);
3743  __ cvtss2sd(output_reg, output_reg);
3744 }
3745 
3746 
3747 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3748  Operand input = ToOperand(instr->value());
3749  XMMRegister output = ToDoubleRegister(instr->result());
3750  __ sqrtsd(output, input);
3751 }
3752 
3753 
3754 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3755  XMMRegister xmm_scratch = double_scratch0();
3756  XMMRegister input_reg = ToDoubleRegister(instr->value());
3757  Register scratch = ToRegister(instr->temp());
3758  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3759 
3760  // Note that according to ECMA-262 15.8.2.13:
3761  // Math.pow(-Infinity, 0.5) == Infinity
3762  // Math.sqrt(-Infinity) == NaN
3763  Label done, sqrt;
3764  // Check base for -Infinity. According to IEEE-754, single-precision
3765  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
3766  __ mov(scratch, 0xFF800000);
3767  __ movd(xmm_scratch, scratch);
3768  __ cvtss2sd(xmm_scratch, xmm_scratch);
3769  __ ucomisd(input_reg, xmm_scratch);
3770  // Comparing -Infinity with NaN results in "unordered", which sets the
3771  // zero flag as if both were equal. However, it also sets the carry flag.
3772  __ j(not_equal, &sqrt, Label::kNear);
3773  __ j(carry, &sqrt, Label::kNear);
3774  // If input is -Infinity, return Infinity.
3775  __ xorps(input_reg, input_reg);
3776  __ subsd(input_reg, xmm_scratch);
3777  __ jmp(&done, Label::kNear);
3778 
3779  // Square root.
3780  __ bind(&sqrt);
3781  __ xorps(xmm_scratch, xmm_scratch);
3782  __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
3783  __ sqrtsd(input_reg, input_reg);
3784  __ bind(&done);
3785 }
3786 
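// A minimal C sketch of the special-casing above (an editorial aside, not
// part of the original file), per ES5 15.8.2.13; note sqrt(-0) would be -0,
// so the "+ 0.0" canonicalizes a -0 base to +0 first:
#include <math.h>
static inline double SketchPowHalf(double x) {
  if (x == -INFINITY) return INFINITY;  // Math.pow(-Infinity, 0.5) == Infinity
  return sqrt(x + 0.0);                 // -0 + 0 == +0 under default rounding
}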
3787 
3788 void LCodeGen::DoPower(LPower* instr) {
3789  Representation exponent_type = instr->hydrogen()->right()->representation();
3790  // Having marked this as a call, we can use any registers.
3791  // Just make sure that the input/output registers are the expected ones.
3792  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3793  DCHECK(!instr->right()->IsDoubleRegister() ||
3794  ToDoubleRegister(instr->right()).is(xmm1));
3795  DCHECK(!instr->right()->IsRegister() ||
3796  ToRegister(instr->right()).is(tagged_exponent));
3797  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3798  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3799 
3800  if (exponent_type.IsSmi()) {
3801  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3802  __ CallStub(&stub);
3803  } else if (exponent_type.IsTagged()) {
3804  Label no_deopt;
3805  __ JumpIfSmi(tagged_exponent, &no_deopt);
3806  DCHECK(!ecx.is(tagged_exponent));
3807  __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
3808  DeoptimizeIf(not_equal, instr, "not a heap number");
3809  __ bind(&no_deopt);
3810  MathPowStub stub(isolate(), MathPowStub::TAGGED);
3811  __ CallStub(&stub);
3812  } else if (exponent_type.IsInteger32()) {
3813  MathPowStub stub(isolate(), MathPowStub::INTEGER);
3814  __ CallStub(&stub);
3815  } else {
3816  DCHECK(exponent_type.IsDouble());
3817  MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3818  __ CallStub(&stub);
3819  }
3820 }
3821 
3822 
3823 void LCodeGen::DoMathLog(LMathLog* instr) {
3824  DCHECK(instr->value()->Equals(instr->result()));
3825  XMMRegister input_reg = ToDoubleRegister(instr->value());
3826  XMMRegister xmm_scratch = double_scratch0();
3827  Label positive, done, zero;
3828  __ xorps(xmm_scratch, xmm_scratch);
3829  __ ucomisd(input_reg, xmm_scratch);
3830  __ j(above, &positive, Label::kNear);
3831  __ j(not_carry, &zero, Label::kNear);
3832  ExternalReference nan =
3833  ExternalReference::address_of_canonical_non_hole_nan();
3834  __ movsd(input_reg, Operand::StaticVariable(nan));
3835  __ jmp(&done, Label::kNear);
3836  __ bind(&zero);
3837  ExternalReference ninf =
3838  ExternalReference::address_of_negative_infinity();
3839  __ movsd(input_reg, Operand::StaticVariable(ninf));
3840  __ jmp(&done, Label::kNear);
3841  __ bind(&positive);
3842  __ fldln2();
3843  __ sub(Operand(esp), Immediate(kDoubleSize));
3844  __ movsd(Operand(esp, 0), input_reg);
3845  __ fld_d(Operand(esp, 0));
3846  __ fyl2x();
3847  __ fstp_d(Operand(esp, 0));
3848  __ movsd(input_reg, Operand(esp, 0));
3849  __ add(Operand(esp), Immediate(kDoubleSize));
3850  __ bind(&done);
3851 }
3852 
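// A minimal C sketch (an editorial aside, not part of the original file):
// fldln2 pushes ln(2) and fyl2x computes st1 * log2(st0), so the x87
// sequence above evaluates ln(2) * log2(x) == ln(x):
#include <math.h>
static inline double SketchLn(double x) {
  return 0.6931471805599453 * log2(x);  // the constant is ln(2)
}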
3853 
3854 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3855  Register input = ToRegister(instr->value());
3856  Register result = ToRegister(instr->result());
3857  Label not_zero_input;
3858  __ bsr(result, input);
3859 
3860  __ j(not_zero, &not_zero_input);
3861  __ Move(result, Immediate(63)); // 63^31 == 32
3862 
3863  __ bind(&not_zero_input);
3864  __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
3865 }
3866 
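// A minimal sketch of the bsr trick above (an editorial aside, not part of
// the original file; assumes a GCC-style __builtin_clz). bsr returns the
// index of the highest set bit and is undefined for zero, hence the 63
// fixup, and x ^ 31 == 31 - x for any x in [0, 31]:
static inline int SketchClz32(unsigned value) {
  if (value == 0) return 63 ^ 31;       // == 32, the zero-input path
  int bsr = 31 - __builtin_clz(value);  // index of the highest set bit
  return bsr ^ 31;                      // == 31 - bsr, the leading-zero count
}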
3867 
3868 void LCodeGen::DoMathExp(LMathExp* instr) {
3869  XMMRegister input = ToDoubleRegister(instr->value());
3870  XMMRegister result = ToDoubleRegister(instr->result());
3871  XMMRegister temp0 = double_scratch0();
3872  Register temp1 = ToRegister(instr->temp1());
3873  Register temp2 = ToRegister(instr->temp2());
3874 
3875  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
3876 }
3877 
3878 
3879 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3880  DCHECK(ToRegister(instr->context()).is(esi));
3881  DCHECK(ToRegister(instr->function()).is(edi));
3882  DCHECK(instr->HasPointerMap());
3883 
3884  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3885  if (known_function.is_null()) {
3886  LPointerMap* pointers = instr->pointer_map();
3887  SafepointGenerator generator(
3888  this, pointers, Safepoint::kLazyDeopt);
3889  ParameterCount count(instr->arity());
3890  __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
3891  } else {
3892  CallKnownFunction(known_function,
3893  instr->hydrogen()->formal_parameter_count(),
3894  instr->arity(),
3895  instr,
3896  EDI_CONTAINS_TARGET);
3897  }
3898 }
3899 
3900 
3901 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3902  DCHECK(ToRegister(instr->context()).is(esi));
3903  DCHECK(ToRegister(instr->function()).is(edi));
3904  DCHECK(ToRegister(instr->result()).is(eax));
3905 
3906  int arity = instr->arity();
3907  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
3908  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3909 }
3910 
3911 
3912 void LCodeGen::DoCallNew(LCallNew* instr) {
3913  DCHECK(ToRegister(instr->context()).is(esi));
3914  DCHECK(ToRegister(instr->constructor()).is(edi));
3915  DCHECK(ToRegister(instr->result()).is(eax));
3916 
3917  // No cell in ebx for construct type feedback in optimized code
3918  __ mov(ebx, isolate()->factory()->undefined_value());
3919  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
3920  __ Move(eax, Immediate(instr->arity()));
3921  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3922 }
3923 
3924 
3925 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3926  DCHECK(ToRegister(instr->context()).is(esi));
3927  DCHECK(ToRegister(instr->constructor()).is(edi));
3928  DCHECK(ToRegister(instr->result()).is(eax));
3929 
3930  __ Move(eax, Immediate(instr->arity()));
3931  __ mov(ebx, isolate()->factory()->undefined_value());
3932  ElementsKind kind = instr->hydrogen()->elements_kind();
3933  AllocationSiteOverrideMode override_mode =
3934  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3935  ? DISABLE_ALLOCATION_SITES
3936  : DONT_OVERRIDE;
3937 
3938  if (instr->arity() == 0) {
3939  ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3940  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3941  } else if (instr->arity() == 1) {
3942  Label done;
3943  if (IsFastPackedElementsKind(kind)) {
3944  Label packed_case;
3945  // We might need the holey variant of |kind| here;
3946  // look at the first argument (the length) to decide.
3947  __ mov(ecx, Operand(esp, 0));
3948  __ test(ecx, ecx);
3949  __ j(zero, &packed_case, Label::kNear);
3950 
3951  ElementsKind holey_kind = GetHoleyElementsKind(kind);
3952  ArraySingleArgumentConstructorStub stub(isolate(),
3953  holey_kind,
3954  override_mode);
3955  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3956  __ jmp(&done, Label::kNear);
3957  __ bind(&packed_case);
3958  }
3959 
3960  ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3961  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3962  __ bind(&done);
3963  } else {
3964  ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3965  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3966  }
3967 }
3968 
3969 
3970 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3971  DCHECK(ToRegister(instr->context()).is(esi));
3972  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
3973 }
3974 
3975 
3976 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3977  Register function = ToRegister(instr->function());
3978  Register code_object = ToRegister(instr->code_object());
3979  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
3980  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
3981 }
3982 
3983 
3984 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3985  Register result = ToRegister(instr->result());
3986  Register base = ToRegister(instr->base_object());
3987  if (instr->offset()->IsConstantOperand()) {
3988  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3989  __ lea(result, Operand(base, ToInteger32(offset)));
3990  } else {
3991  Register offset = ToRegister(instr->offset());
3992  __ lea(result, Operand(base, offset, times_1, 0));
3993  }
3994 }
3995 
3996 
3997 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3998  Representation representation = instr->hydrogen()->field_representation();
3999 
4000  HObjectAccess access = instr->hydrogen()->access();
4001  int offset = access.offset();
4002 
4003  if (access.IsExternalMemory()) {
4004  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4005  MemOperand operand = instr->object()->IsConstantOperand()
4006  ? MemOperand::StaticVariable(
4007  ToExternalReference(LConstantOperand::cast(instr->object())))
4008  : MemOperand(ToRegister(instr->object()), offset);
4009  if (instr->value()->IsConstantOperand()) {
4010  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4011  __ mov(operand, Immediate(ToInteger32(operand_value)));
4012  } else {
4013  Register value = ToRegister(instr->value());
4014  __ Store(value, operand, representation);
4015  }
4016  return;
4017  }
4018 
4019  Register object = ToRegister(instr->object());
4020  __ AssertNotSmi(object);
4021 
4022  DCHECK(!representation.IsSmi() ||
4023  !instr->value()->IsConstantOperand() ||
4024  IsSmi(LConstantOperand::cast(instr->value())));
4025  if (representation.IsDouble()) {
4026  DCHECK(access.IsInobject());
4027  DCHECK(!instr->hydrogen()->has_transition());
4028  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4029  XMMRegister value = ToDoubleRegister(instr->value());
4030  __ movsd(FieldOperand(object, offset), value);
4031  return;
4032  }
4033 
4034  if (instr->hydrogen()->has_transition()) {
4035  Handle<Map> transition = instr->hydrogen()->transition_map();
4036  AddDeprecationDependency(transition);
4037  __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4038  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4039  Register temp = ToRegister(instr->temp());
4040  Register temp_map = ToRegister(instr->temp_map());
4041  // Update the write barrier for the map field.
4042  __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
4043  }
4044  }
4045 
4046  // Do the store.
4047  Register write_register = object;
4048  if (!access.IsInobject()) {
4049  write_register = ToRegister(instr->temp());
4050  __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4051  }
4052 
4053  MemOperand operand = FieldOperand(write_register, offset);
4054  if (instr->value()->IsConstantOperand()) {
4055  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4056  if (operand_value->IsRegister()) {
4057  Register value = ToRegister(operand_value);
4058  __ Store(value, operand, representation);
4059  } else if (representation.IsInteger32()) {
4060  Immediate immediate = ToImmediate(operand_value, representation);
4061  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4062  __ mov(operand, immediate);
4063  } else {
4064  Handle<Object> handle_value = ToHandle(operand_value);
4065  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4066  __ mov(operand, handle_value);
4067  }
4068  } else {
4069  Register value = ToRegister(instr->value());
4070  __ Store(value, operand, representation);
4071  }
4072 
4073  if (instr->hydrogen()->NeedsWriteBarrier()) {
4074  Register value = ToRegister(instr->value());
4075  Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4076  // Update the write barrier for the object for in-object properties.
4077  __ RecordWriteField(write_register,
4078  offset,
4079  value,
4080  temp,
4081  kSaveFPRegs,
4082  EMIT_REMEMBERED_SET,
4083  instr->hydrogen()->SmiCheckForWriteBarrier(),
4084  instr->hydrogen()->PointersToHereCheckForValue());
4085  }
4086 }
4087 
4088 
4089 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4090  DCHECK(ToRegister(instr->context()).is(esi));
4091  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4092  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4093 
4094  __ mov(StoreDescriptor::NameRegister(), instr->name());
4095  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4096  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4097 }
4098 
4099 
4100 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4101  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
4102  if (instr->index()->IsConstantOperand()) {
4103  __ cmp(ToOperand(instr->length()),
4104  ToImmediate(LConstantOperand::cast(instr->index()),
4105  instr->hydrogen()->length()->representation()));
4106  cc = CommuteCondition(cc);
4107  } else if (instr->length()->IsConstantOperand()) {
4108  __ cmp(ToOperand(instr->index()),
4109  ToImmediate(LConstantOperand::cast(instr->length()),
4110  instr->hydrogen()->index()->representation()));
4111  } else {
4112  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4113  }
4114  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4115  Label done;
4116  __ j(NegateCondition(cc), &done, Label::kNear);
4117  __ int3();
4118  __ bind(&done);
4119  } else {
4120  DeoptimizeIf(cc, instr, "out of bounds");
4121  }
4122 }
4123 
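// A minimal sketch of the predicate above (an editorial aside, not part of
// the original file). With a constant index the cmp reads (length, index)
// rather than (index, length), hence CommuteCondition; the check itself is
// unsigned:
static inline int SketchOutOfBounds(unsigned index, unsigned length,
                                    int allow_equality) {
  return allow_equality ? index > length : index >= length;
}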
4124 
4125 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4126  ElementsKind elements_kind = instr->elements_kind();
4127  LOperand* key = instr->key();
4128  if (!key->IsConstantOperand() &&
4129  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4130  elements_kind)) {
4131  __ SmiUntag(ToRegister(key));
4132  }
4133  Operand operand(BuildFastArrayOperand(
4134  instr->elements(),
4135  key,
4136  instr->hydrogen()->key()->representation(),
4137  elements_kind,
4138  instr->base_offset()));
4139  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4140  elements_kind == FLOAT32_ELEMENTS) {
4141  XMMRegister xmm_scratch = double_scratch0();
4142  __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4143  __ movss(operand, xmm_scratch);
4144  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4145  elements_kind == FLOAT64_ELEMENTS) {
4146  __ movsd(operand, ToDoubleRegister(instr->value()));
4147  } else {
4148  Register value = ToRegister(instr->value());
4149  switch (elements_kind) {
4150  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4151  case EXTERNAL_INT8_ELEMENTS:
4152  case EXTERNAL_UINT8_ELEMENTS:
4153  case UINT8_ELEMENTS:
4154  case INT8_ELEMENTS:
4155  case UINT8_CLAMPED_ELEMENTS:
4156  __ mov_b(operand, value);
4157  break;
4158  case EXTERNAL_INT16_ELEMENTS:
4159  case EXTERNAL_UINT16_ELEMENTS:
4160  case UINT16_ELEMENTS:
4161  case INT16_ELEMENTS:
4162  __ mov_w(operand, value);
4163  break;
4164  case EXTERNAL_INT32_ELEMENTS:
4165  case EXTERNAL_UINT32_ELEMENTS:
4166  case UINT32_ELEMENTS:
4167  case INT32_ELEMENTS:
4168  __ mov(operand, value);
4169  break;
4170  case EXTERNAL_FLOAT32_ELEMENTS:
4171  case EXTERNAL_FLOAT64_ELEMENTS:
4172  case FLOAT32_ELEMENTS:
4173  case FLOAT64_ELEMENTS:
4174  case FAST_SMI_ELEMENTS:
4175  case FAST_ELEMENTS:
4176  case FAST_DOUBLE_ELEMENTS:
4177  case FAST_HOLEY_SMI_ELEMENTS:
4178  case FAST_HOLEY_ELEMENTS:
4179  case FAST_HOLEY_DOUBLE_ELEMENTS:
4180  case DICTIONARY_ELEMENTS:
4181  case SLOPPY_ARGUMENTS_ELEMENTS:
4182  UNREACHABLE();
4183  break;
4184  }
4185  }
4186 }
4187 
4188 
4189 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4190  ExternalReference canonical_nan_reference =
4191  ExternalReference::address_of_canonical_non_hole_nan();
4192  Operand double_store_operand = BuildFastArrayOperand(
4193  instr->elements(),
4194  instr->key(),
4195  instr->hydrogen()->key()->representation(),
4196  FAST_DOUBLE_ELEMENTS,
4197  instr->base_offset());
4198 
4199  XMMRegister value = ToDoubleRegister(instr->value());
4200 
4201  if (instr->NeedsCanonicalization()) {
4202  Label have_value;
4203 
4204  __ ucomisd(value, value);
4205  __ j(parity_odd, &have_value, Label::kNear); // Jump if not NaN.
4206 
4207  __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
4208  __ bind(&have_value);
4209  }
4210 
4211  __ movsd(double_store_operand, value);
4212 }
4213 
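// A minimal sketch (an editorial aside, not part of the original file):
// ucomisd of a value against itself is unordered exactly for NaN, so the
// parity_odd jump keeps ordinary doubles and every NaN payload is replaced
// by the canonical non-hole NaN before the store:
static inline int SketchNeedsNaNCanonicalization(double value) {
  return value != value;  // true only for NaN
}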
4214 
4215 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4216  Register elements = ToRegister(instr->elements());
4217  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4218 
4219  Operand operand = BuildFastArrayOperand(
4220  instr->elements(),
4221  instr->key(),
4222  instr->hydrogen()->key()->representation(),
4223  FAST_ELEMENTS,
4224  instr->base_offset());
4225  if (instr->value()->IsRegister()) {
4226  __ mov(operand, ToRegister(instr->value()));
4227  } else {
4228  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4229  if (IsSmi(operand_value)) {
4230  Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4231  __ mov(operand, immediate);
4232  } else {
4233  DCHECK(!IsInteger32(operand_value));
4234  Handle<Object> handle_value = ToHandle(operand_value);
4235  __ mov(operand, handle_value);
4236  }
4237  }
4238 
4239  if (instr->hydrogen()->NeedsWriteBarrier()) {
4240  DCHECK(instr->value()->IsRegister());
4241  Register value = ToRegister(instr->value());
4242  DCHECK(!instr->key()->IsConstantOperand());
4243  SmiCheck check_needed =
4244  instr->hydrogen()->value()->type().IsHeapObject()
4245  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4246  // Compute address of modified element and store it into key register.
4247  __ lea(key, operand);
4248  __ RecordWrite(elements,
4249  key,
4250  value,
4251  kSaveFPRegs,
4252  EMIT_REMEMBERED_SET,
4253  check_needed,
4254  instr->hydrogen()->PointersToHereCheckForValue());
4255  }
4256 }
4257 
4258 
4259 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4260  // Dispatch by cases: external (typed) arrays, fast doubles, fast elements.
4261  if (instr->is_typed_elements()) {
4262  DoStoreKeyedExternalArray(instr);
4263  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4264  DoStoreKeyedFixedDoubleArray(instr);
4265  } else {
4266  DoStoreKeyedFixedArray(instr);
4267  }
4268 }
4269 
4270 
4271 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4272  DCHECK(ToRegister(instr->context()).is(esi));
4273  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4274  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4275  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4276 
4277  Handle<Code> ic =
4278  CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4279  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4280 }
4281 
4282 
4283 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4284  Register object = ToRegister(instr->object());
4285  Register temp = ToRegister(instr->temp());
4286  Label no_memento_found;
4287  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4288  DeoptimizeIf(equal, instr, "memento found");
4289  __ bind(&no_memento_found);
4290 }
4291 
4292 
4293 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4294  Register object_reg = ToRegister(instr->object());
4295 
4296  Handle<Map> from_map = instr->original_map();
4297  Handle<Map> to_map = instr->transitioned_map();
4298  ElementsKind from_kind = instr->from_kind();
4299  ElementsKind to_kind = instr->to_kind();
4300 
4301  Label not_applicable;
4302  bool is_simple_map_transition =
4303  IsSimpleMapChangeTransition(from_kind, to_kind);
4304  Label::Distance branch_distance =
4305  is_simple_map_transition ? Label::kNear : Label::kFar;
4306  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4307  __ j(not_equal, &not_applicable, branch_distance);
4308  if (is_simple_map_transition) {
4309  Register new_map_reg = ToRegister(instr->new_map_temp());
4310  __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4311  Immediate(to_map));
4312  // Write barrier.
4313  DCHECK_NE(instr->temp(), NULL);
4314  __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4315  ToRegister(instr->temp()),
4316  kDontSaveFPRegs);
4317  } else {
4318  DCHECK(ToRegister(instr->context()).is(esi));
4319  DCHECK(object_reg.is(eax));
4320  PushSafepointRegistersScope scope(this);
4321  __ mov(ebx, to_map);
4322  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4323  TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4324  __ CallStub(&stub);
4325  RecordSafepointWithLazyDeopt(instr,
4326  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4327  }
4328  __ bind(&not_applicable);
4329 }
4330 
4331 
4332 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4333  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4334  public:
4335  DeferredStringCharCodeAt(LCodeGen* codegen,
4336  LStringCharCodeAt* instr)
4337  : LDeferredCode(codegen), instr_(instr) { }
4338  virtual void Generate() OVERRIDE {
4339  codegen()->DoDeferredStringCharCodeAt(instr_);
4340  }
4341  virtual LInstruction* instr() OVERRIDE { return instr_; }
4342  private:
4343  LStringCharCodeAt* instr_;
4344  };
4345 
4346  DeferredStringCharCodeAt* deferred =
4347  new(zone()) DeferredStringCharCodeAt(this, instr);
4348 
4349  StringCharLoadGenerator::Generate(masm(),
4350  factory(),
4351  ToRegister(instr->string()),
4352  ToRegister(instr->index()),
4353  ToRegister(instr->result()),
4354  deferred->entry());
4355  __ bind(deferred->exit());
4356 }
4357 
4358 
4359 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4360  Register string = ToRegister(instr->string());
4361  Register result = ToRegister(instr->result());
4362 
4363  // TODO(3095996): Get rid of this. For now, we need to make the
4364  // result register contain a valid pointer because it is already
4365  // contained in the register pointer map.
4366  __ Move(result, Immediate(0));
4367 
4368  PushSafepointRegistersScope scope(this);
4369  __ push(string);
4370  // Push the index as a smi. This is safe because of the checks in
4371  // DoStringCharCodeAt above.
4372  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4373  if (instr->index()->IsConstantOperand()) {
4374  Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4375  Representation::Smi());
4376  __ push(immediate);
4377  } else {
4378  Register index = ToRegister(instr->index());
4379  __ SmiTag(index);
4380  __ push(index);
4381  }
4382  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
4383  instr, instr->context());
4384  __ AssertSmi(eax);
4385  __ SmiUntag(eax);
4386  __ StoreToSafepointRegisterSlot(result, eax);
4387 }
4388 
4389 
4390 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4391  class DeferredStringCharFromCode FINAL : public LDeferredCode {
4392  public:
4393  DeferredStringCharFromCode(LCodeGen* codegen,
4394  LStringCharFromCode* instr)
4395  : LDeferredCode(codegen), instr_(instr) { }
4396  virtual void Generate() OVERRIDE {
4397  codegen()->DoDeferredStringCharFromCode(instr_);
4398  }
4399  virtual LInstruction* instr() OVERRIDE { return instr_; }
4400  private:
4401  LStringCharFromCode* instr_;
4402  };
4403 
4404  DeferredStringCharFromCode* deferred =
4405  new(zone()) DeferredStringCharFromCode(this, instr);
4406 
4407  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4408  Register char_code = ToRegister(instr->char_code());
4409  Register result = ToRegister(instr->result());
4410  DCHECK(!char_code.is(result));
4411 
4412  __ cmp(char_code, String::kMaxOneByteCharCode);
4413  __ j(above, deferred->entry());
4414  __ Move(result, Immediate(factory()->single_character_string_cache()));
4415  __ mov(result, FieldOperand(result,
4416  char_code, times_pointer_size,
4417  FixedArray::kHeaderSize));
4418  __ cmp(result, factory()->undefined_value());
4419  __ j(equal, deferred->entry());
4420  __ bind(deferred->exit());
4421 }
4422 
4423 
4424 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4425  Register char_code = ToRegister(instr->char_code());
4426  Register result = ToRegister(instr->result());
4427 
4428  // TODO(3095996): Get rid of this. For now, we need to make the
4429  // result register contain a valid pointer because it is already
4430  // contained in the register pointer map.
4431  __ Move(result, Immediate(0));
4432 
4433  PushSafepointRegistersScope scope(this);
4434  __ SmiTag(char_code);
4435  __ push(char_code);
4436  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4437  __ StoreToSafepointRegisterSlot(result, eax);
4438 }
4439 
4440 
4441 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4442  DCHECK(ToRegister(instr->context()).is(esi));
4443  DCHECK(ToRegister(instr->left()).is(edx));
4444  DCHECK(ToRegister(instr->right()).is(eax));
4445  StringAddStub stub(isolate(),
4446  instr->hydrogen()->flags(),
4447  instr->hydrogen()->pretenure_flag());
4448  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4449 }
4450 
4451 
4452 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4453  LOperand* input = instr->value();
4454  LOperand* output = instr->result();
4455  DCHECK(input->IsRegister() || input->IsStackSlot());
4456  DCHECK(output->IsDoubleRegister());
4457  __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4458 }
4459 
4460 
4461 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4462  LOperand* input = instr->value();
4463  LOperand* output = instr->result();
4464  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4465 }
4466 
4467 
4468 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4469  class DeferredNumberTagI FINAL : public LDeferredCode {
4470  public:
4471  DeferredNumberTagI(LCodeGen* codegen,
4472  LNumberTagI* instr)
4473  : LDeferredCode(codegen), instr_(instr) { }
4474  virtual void Generate() OVERRIDE {
4475  codegen()->DoDeferredNumberTagIU(
4476  instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
4477  }
4478  virtual LInstruction* instr() OVERRIDE { return instr_; }
4479  private:
4480  LNumberTagI* instr_;
4481  };
4482 
4483  LOperand* input = instr->value();
4484  DCHECK(input->IsRegister() && input->Equals(instr->result()));
4485  Register reg = ToRegister(input);
4486 
4487  DeferredNumberTagI* deferred =
4488  new(zone()) DeferredNumberTagI(this, instr);
4489  __ SmiTag(reg);
4490  __ j(overflow, deferred->entry());
4491  __ bind(deferred->exit());
4492 }
4493 
4494 
4495 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4496  class DeferredNumberTagU FINAL : public LDeferredCode {
4497  public:
4498  DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4499  : LDeferredCode(codegen), instr_(instr) { }
4500  virtual void Generate() OVERRIDE {
4501  codegen()->DoDeferredNumberTagIU(
4502  instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
4503  }
4504  virtual LInstruction* instr() OVERRIDE { return instr_; }
4505  private:
4506  LNumberTagU* instr_;
4507  };
4508 
4509  LOperand* input = instr->value();
4510  DCHECK(input->IsRegister() && input->Equals(instr->result()));
4511  Register reg = ToRegister(input);
4512 
4513  DeferredNumberTagU* deferred =
4514  new(zone()) DeferredNumberTagU(this, instr);
4515  __ cmp(reg, Immediate(Smi::kMaxValue));
4516  __ j(above, deferred->entry());
4517  __ SmiTag(reg);
4518  __ bind(deferred->exit());
4519 }
4520 
4521 
4522 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4523  LOperand* value,
4524  LOperand* temp,
4525  IntegerSignedness signedness) {
4526  Label done, slow;
4527  Register reg = ToRegister(value);
4528  Register tmp = ToRegister(temp);
4529  XMMRegister xmm_scratch = double_scratch0();
4530 
4531  if (signedness == SIGNED_INT32) {
4532  // There was overflow, so bits 30 and 31 of the original integer
4533  // disagree. Try to allocate a heap number in new space and store
4534  // the value in there. If that fails, call the runtime system.
4535  __ SmiUntag(reg);
4536  __ xor_(reg, 0x80000000);
4537  __ Cvtsi2sd(xmm_scratch, Operand(reg));
4538  } else {
4539  __ LoadUint32(xmm_scratch, reg);
4540  }
4541 
4542  if (FLAG_inline_new) {
4543  __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4544  __ jmp(&done, Label::kNear);
4545  }
4546 
4547  // Slow case: Call the runtime system to do the number allocation.
4548  __ bind(&slow);
4549  {
4550  // TODO(3095996): Put a valid pointer value in the stack slot where the
4551  // result register is stored, as this register is in the pointer map, but
4552  // contains an integer value.
4553  __ Move(reg, Immediate(0));
4554 
4555  // Preserve the value of all registers.
4556  PushSafepointRegistersScope scope(this);
4557 
4558  // NumberTagI and NumberTagD use the context from the frame, rather than
4559  // the environment's HContext or HInlinedContext value.
4560  // They only call Runtime::kAllocateHeapNumber.
4561  // The corresponding HChange instructions are added in a phase that does
4562  // not have easy access to the local context.
4563  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4564  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4565  RecordSafepointWithRegisters(
4566  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4567  __ StoreToSafepointRegisterSlot(reg, eax);
4568  }
4569 
4570  // Done. Put the value in xmm_scratch into the value of the allocated heap
4571  // number.
4572  __ bind(&done);
4573  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
4574 }
4575 
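// A minimal sketch of the untag repair above (an editorial aside, not part
// of the original file; assumes ia32's arithmetic right shift of negatives).
// SmiTag (shl 1) only overflows when bits 30 and 31 of the value disagree,
// so after shifting back, only the top bit is wrong and one xor restores it:
static inline int SketchRecoverAfterSmiTagOverflow(int tagged) {
  return (int)((unsigned)(tagged >> 1) ^ 0x80000000u);
}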
4576 
4577 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4578  class DeferredNumberTagD FINAL : public LDeferredCode {
4579  public:
4580  DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4581  : LDeferredCode(codegen), instr_(instr) { }
4582  virtual void Generate() OVERRIDE {
4583  codegen()->DoDeferredNumberTagD(instr_);
4584  }
4585  virtual LInstruction* instr() OVERRIDE { return instr_; }
4586  private:
4587  LNumberTagD* instr_;
4588  };
4589 
4590  Register reg = ToRegister(instr->result());
4591 
4592  DeferredNumberTagD* deferred =
4593  new(zone()) DeferredNumberTagD(this, instr);
4594  if (FLAG_inline_new) {
4595  Register tmp = ToRegister(instr->temp());
4596  __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4597  } else {
4598  __ jmp(deferred->entry());
4599  }
4600  __ bind(deferred->exit());
4601  XMMRegister input_reg = ToDoubleRegister(instr->value());
4602  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4603 }
4604 
4605 
4606 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4607  // TODO(3095996): Get rid of this. For now, we need to make the
4608  // result register contain a valid pointer because it is already
4609  // contained in the register pointer map.
4610  Register reg = ToRegister(instr->result());
4611  __ Move(reg, Immediate(0));
4612 
4613  PushSafepointRegistersScope scope(this);
4614  // NumberTagI and NumberTagD use the context from the frame, rather than
4615  // the environment's HContext or HInlinedContext value.
4616  // They only call Runtime::kAllocateHeapNumber.
4617  // The corresponding HChange instructions are added in a phase that does
4618  // not have easy access to the local context.
4619  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4620  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4621  RecordSafepointWithRegisters(
4622  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4623  __ StoreToSafepointRegisterSlot(reg, eax);
4624 }
4625 
4626 
4627 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4628  HChange* hchange = instr->hydrogen();
4629  Register input = ToRegister(instr->value());
4630  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4631  hchange->value()->CheckFlag(HValue::kUint32)) {
4632  __ test(input, Immediate(0xc0000000));
4633  DeoptimizeIf(not_zero, instr, "overflow");
4634  }
4635  __ SmiTag(input);
4636  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4637  !hchange->value()->CheckFlag(HValue::kUint32)) {
4638  DeoptimizeIf(overflow, instr, "overflow");
4639  }
4640 }
4641 
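// A minimal sketch of the range test above (an editorial aside, not part of
// the original file): an ia32 smi is the value shifted left past a one-bit
// tag, so an unsigned value fits only when bits 30 and 31 are clear:
static inline int SketchUint32FitsSmi(unsigned value) {
  return (value & 0xc0000000u) == 0;  // equivalent to value < 2^30
}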
4642 
4643 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4644  LOperand* input = instr->value();
4645  Register result = ToRegister(input);
4646  DCHECK(input->IsRegister() && input->Equals(instr->result()));
4647  if (instr->needs_check()) {
4648  __ test(result, Immediate(kSmiTagMask));
4649  DeoptimizeIf(not_zero, instr, "not a Smi");
4650  } else {
4651  __ AssertSmi(result);
4652  }
4653  __ SmiUntag(result);
4654 }
4655 
4656 
4657 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4658  Register temp_reg, XMMRegister result_reg,
4659  NumberUntagDMode mode) {
4660  bool can_convert_undefined_to_nan =
4661  instr->hydrogen()->can_convert_undefined_to_nan();
4662  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4663 
4664  Label convert, load_smi, done;
4665 
4666  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4667  // Smi check.
4668  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4669 
4670  // Heap number map check.
4671  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4672  factory()->heap_number_map());
4673  if (can_convert_undefined_to_nan) {
4674  __ j(not_equal, &convert, Label::kNear);
4675  } else {
4676  DeoptimizeIf(not_equal, instr, "not a heap number");
4677  }
4678 
4679  // Heap number to XMM conversion.
4680  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4681 
4682  if (deoptimize_on_minus_zero) {
4683  XMMRegister xmm_scratch = double_scratch0();
4684  __ xorps(xmm_scratch, xmm_scratch);
4685  __ ucomisd(result_reg, xmm_scratch);
4686  __ j(not_zero, &done, Label::kNear);
4687  __ movmskpd(temp_reg, result_reg);
4688  __ test_b(temp_reg, 1);
4689  DeoptimizeIf(not_zero, instr, "minus zero");
4690  }
4691  __ jmp(&done, Label::kNear);
4692 
4693  if (can_convert_undefined_to_nan) {
4694  __ bind(&convert);
4695 
4696  // Convert undefined (and hole) to NaN.
4697  __ cmp(input_reg, factory()->undefined_value());
4698  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
4699 
4700  ExternalReference nan =
4701  ExternalReference::address_of_canonical_non_hole_nan();
4702  __ movsd(result_reg, Operand::StaticVariable(nan));
4703  __ jmp(&done, Label::kNear);
4704  }
4705  } else {
4706  DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4707  }
4708 
4709  __ bind(&load_smi);
4710  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
4711  // input register since we avoid dependencies.
4712  __ mov(temp_reg, input_reg);
4713  __ SmiUntag(temp_reg); // Untag smi before converting to float.
4714  __ Cvtsi2sd(result_reg, Operand(temp_reg));
4715  __ bind(&done);
4716 }
4717 
4718 
4719 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4720  Register input_reg = ToRegister(instr->value());
4721 
4722  // The input was optimistically untagged; revert it.
4723  STATIC_ASSERT(kSmiTagSize == 1);
4724  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
4725 
4726  if (instr->truncating()) {
4727  Label no_heap_number, check_bools, check_false;
4728 
4729  // Heap number map check.
4730  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4731  factory()->heap_number_map());
4732  __ j(not_equal, &no_heap_number, Label::kNear);
4733  __ TruncateHeapNumberToI(input_reg, input_reg);
4734  __ jmp(done);
4735 
4736  __ bind(&no_heap_number);
4737  // Check for Oddballs. Undefined/False is converted to zero and True to one
4738  // for truncating conversions.
4739  __ cmp(input_reg, factory()->undefined_value());
4740  __ j(not_equal, &check_bools, Label::kNear);
4741  __ Move(input_reg, Immediate(0));
4742  __ jmp(done);
4743 
4744  __ bind(&check_bools);
4745  __ cmp(input_reg, factory()->true_value());
4746  __ j(not_equal, &check_false, Label::kNear);
4747  __ Move(input_reg, Immediate(1));
4748  __ jmp(done);
4749 
4750  __ bind(&check_false);
4751  __ cmp(input_reg, factory()->false_value());
4752  DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
4753  __ Move(input_reg, Immediate(0));
4754  } else {
4755  XMMRegister scratch = ToDoubleRegister(instr->temp());
4756  DCHECK(!scratch.is(xmm0));
4757  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4758  isolate()->factory()->heap_number_map());
4759  DeoptimizeIf(not_equal, instr, "not a heap number");
4760  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4761  __ cvttsd2si(input_reg, Operand(xmm0));
4762  __ Cvtsi2sd(scratch, Operand(input_reg));
4763  __ ucomisd(xmm0, scratch);
4764  DeoptimizeIf(not_equal, instr, "lost precision");
4765  DeoptimizeIf(parity_even, instr, "NaN");
4766  if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
4767  __ test(input_reg, Operand(input_reg));
4768  __ j(not_zero, done);
4769  __ movmskpd(input_reg, xmm0);
4770  __ and_(input_reg, 1);
4771  DeoptimizeIf(not_zero, instr, "minus zero");
4772  }
4773  }
4774 }
4775 
4776 
4777 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4778  class DeferredTaggedToI FINAL : public LDeferredCode {
4779  public:
4780  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4781  : LDeferredCode(codegen), instr_(instr) { }
4782  virtual void Generate() OVERRIDE {
4783  codegen()->DoDeferredTaggedToI(instr_, done());
4784  }
4785  virtual LInstruction* instr() OVERRIDE { return instr_; }
4786  private:
4787  LTaggedToI* instr_;
4788  };
4789 
4790  LOperand* input = instr->value();
4791  DCHECK(input->IsRegister());
4792  Register input_reg = ToRegister(input);
4793  DCHECK(input_reg.is(ToRegister(instr->result())));
4794 
4795  if (instr->hydrogen()->value()->representation().IsSmi()) {
4796  __ SmiUntag(input_reg);
4797  } else {
4798  DeferredTaggedToI* deferred =
4799  new(zone()) DeferredTaggedToI(this, instr);
4800  // Optimistically untag the input.
4801  // If the input is a HeapObject, SmiUntag will set the carry flag.
4802  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
4803  __ SmiUntag(input_reg);
4804  // Branch to deferred code if the input was tagged.
4805  // The deferred code will take care of restoring the tag.
4806  __ j(carry, deferred->entry());
4807  __ bind(deferred->exit());
4808  }
4809 }
4810 
4811 
4812 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4813  LOperand* input = instr->value();
4814  DCHECK(input->IsRegister());
4815  LOperand* temp = instr->temp();
4816  DCHECK(temp->IsRegister());
4817  LOperand* result = instr->result();
4818  DCHECK(result->IsDoubleRegister());
4819 
4820  Register input_reg = ToRegister(input);
4821  Register temp_reg = ToRegister(temp);
4822 
4823  HValue* value = instr->hydrogen()->value();
4824  NumberUntagDMode mode = value->representation().IsSmi()
4825  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4826 
4827  XMMRegister result_reg = ToDoubleRegister(result);
4828  EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
4829 }
4830 
4831 
4832 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4833  LOperand* input = instr->value();
4834  DCHECK(input->IsDoubleRegister());
4835  LOperand* result = instr->result();
4836  DCHECK(result->IsRegister());
4837  Register result_reg = ToRegister(result);
4838 
4839  if (instr->truncating()) {
4840  XMMRegister input_reg = ToDoubleRegister(input);
4841  __ TruncateDoubleToI(result_reg, input_reg);
4842  } else {
4843  Label lost_precision, is_nan, minus_zero, done;
4844  XMMRegister input_reg = ToDoubleRegister(input);
4845  XMMRegister xmm_scratch = double_scratch0();
4846  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4847  __ DoubleToI(result_reg, input_reg, xmm_scratch,
4848  instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
4849  &is_nan, &minus_zero, dist);
4850  __ jmp(&done, dist);
4851  __ bind(&lost_precision);
4852  DeoptimizeIf(no_condition, instr, "lost precision");
4853  __ bind(&is_nan);
4854  DeoptimizeIf(no_condition, instr, "NaN");
4855  __ bind(&minus_zero);
4856  DeoptimizeIf(no_condition, instr, "minus zero");
4857  __ bind(&done);
4858  }
4859 }
4860 
4861 
4862 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4863  LOperand* input = instr->value();
4864  DCHECK(input->IsDoubleRegister());
4865  LOperand* result = instr->result();
4866  DCHECK(result->IsRegister());
4867  Register result_reg = ToRegister(result);
4868 
4869  Label lost_precision, is_nan, minus_zero, done;
4870  XMMRegister input_reg = ToDoubleRegister(input);
4871  XMMRegister xmm_scratch = double_scratch0();
4872  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4873  __ DoubleToI(result_reg, input_reg, xmm_scratch,
4874  instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
4875  &minus_zero, dist);
4876  __ jmp(&done, dist);
4877  __ bind(&lost_precision);
4878  DeoptimizeIf(no_condition, instr, "lost precision");
4879  __ bind(&is_nan);
4880  DeoptimizeIf(no_condition, instr, "NaN");
4881  __ bind(&minus_zero);
4882  DeoptimizeIf(no_condition, instr, "minus zero");
4883  __ bind(&done);
4884  __ SmiTag(result_reg);
4885  DeoptimizeIf(overflow, instr, "overflow");
4886 }
4887 
4888 
4889 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4890  LOperand* input = instr->value();
4891  __ test(ToOperand(input), Immediate(kSmiTagMask));
4892  DeoptimizeIf(not_zero, instr, "not a Smi");
4893 }
4894 
4895 
4896 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4897  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4898  LOperand* input = instr->value();
4899  __ test(ToOperand(input), Immediate(kSmiTagMask));
4900  DeoptimizeIf(zero, instr, "Smi");
4901  }
4902 }
4903 
4904 
4905 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4906  Register input = ToRegister(instr->value());
4907  Register temp = ToRegister(instr->temp());
4908 
4909  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
4910 
4911  if (instr->hydrogen()->is_interval_check()) {
4912  InstanceType first;
4913  InstanceType last;
4914  instr->hydrogen()->GetCheckInterval(&first, &last);
4915 
4916  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4917  static_cast<int8_t>(first));
4918 
4919  // If there is only one type in the interval check for equality.
4920  if (first == last) {
4921  DeoptimizeIf(not_equal, instr, "wrong instance type");
4922  } else {
4923  DeoptimizeIf(below, instr, "wrong instance type");
4924  // Omit check for the last type.
4925  if (last != LAST_TYPE) {
4926  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
4927  static_cast<int8_t>(last));
4928  DeoptimizeIf(above, instr, "wrong instance type");
4929  }
4930  }
4931  } else {
4932  uint8_t mask;
4933  uint8_t tag;
4934  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4935 
4936  if (base::bits::IsPowerOfTwo32(mask)) {
4937  DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4938  __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
4939  DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
4940  } else {
4941  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
4942  __ and_(temp, mask);
4943  __ cmp(temp, tag);
4944  DeoptimizeIf(not_equal, instr, "wrong instance type");
4945  }
4946  }
4947 }
4948 
4949 
4950 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4951  Handle<HeapObject> object = instr->hydrogen()->object().handle();
4952  if (instr->hydrogen()->object_in_new_space()) {
4953  Register reg = ToRegister(instr->value());
4954  Handle<Cell> cell = isolate()->factory()->NewCell(object);
4955  __ cmp(reg, Operand::ForCell(cell));
4956  } else {
4957  Operand operand = ToOperand(instr->value());
4958  __ cmp(operand, object);
4959  }
4960  DeoptimizeIf(not_equal, instr, "value mismatch");
4961 }
4962 
4963 
4964 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4965  {
4966  PushSafepointRegistersScope scope(this);
4967  __ push(object);
4968  __ xor_(esi, esi);
4969  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4970  RecordSafepointWithRegisters(
4971  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4972 
4973  __ test(eax, Immediate(kSmiTagMask));
4974  }
4975  DeoptimizeIf(zero, instr, "instance migration failed");
4976 }
4977 
4978 
4979 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4980  class DeferredCheckMaps FINAL : public LDeferredCode {
4981  public:
4982  DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4983  : LDeferredCode(codegen), instr_(instr), object_(object) {
4984  SetExit(check_maps());
4985  }
4986  virtual void Generate() OVERRIDE {
4987  codegen()->DoDeferredInstanceMigration(instr_, object_);
4988  }
4989  Label* check_maps() { return &check_maps_; }
4990  virtual LInstruction* instr() OVERRIDE { return instr_; }
4991  private:
4992  LCheckMaps* instr_;
4993  Label check_maps_;
4994  Register object_;
4995  };
4996 
4997  if (instr->hydrogen()->IsStabilityCheck()) {
4998  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4999  for (int i = 0; i < maps->size(); ++i) {
5000  AddStabilityDependency(maps->at(i).handle());
5001  }
5002  return;
5003  }
5004 
5005  LOperand* input = instr->value();
5006  DCHECK(input->IsRegister());
5007  Register reg = ToRegister(input);
5008 
5009  DeferredCheckMaps* deferred = NULL;
5010  if (instr->hydrogen()->HasMigrationTarget()) {
5011  deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5012  __ bind(deferred->check_maps());
5013  }
5014 
5015  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5016  Label success;
5017  for (int i = 0; i < maps->size() - 1; i++) {
5018  Handle<Map> map = maps->at(i).handle();
5019  __ CompareMap(reg, map);
5020  __ j(equal, &success, Label::kNear);
5021  }
5022 
5023  Handle<Map> map = maps->at(maps->size() - 1).handle();
5024  __ CompareMap(reg, map);
5025  if (instr->hydrogen()->HasMigrationTarget()) {
5026  __ j(not_equal, deferred->entry());
5027  } else {
5028  DeoptimizeIf(not_equal, instr, "wrong map");
5029  }
5030 
5031  __ bind(&success);
5032 }
5033 
5034 
5035 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5036  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5037  XMMRegister xmm_scratch = double_scratch0();
5038  Register result_reg = ToRegister(instr->result());
5039  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5040 }
5041 
5042 
5043 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5044  DCHECK(instr->unclamped()->Equals(instr->result()));
5045  Register value_reg = ToRegister(instr->result());
5046  __ ClampUint8(value_reg);
5047 }
5048 
5049 
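// Clamp an arbitrary tagged value to uint8 [0..255]: Smis are untagged and
// clamped as integers, heap numbers take the double clamping path, and
// undefined is mapped to zero. Any other value deoptimizes.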
5050 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5051  DCHECK(instr->unclamped()->Equals(instr->result()));
5052  Register input_reg = ToRegister(instr->unclamped());
5053  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5054  XMMRegister xmm_scratch = double_scratch0();
5055  Label is_smi, done, heap_number;
5056 
5057  __ JumpIfSmi(input_reg, &is_smi);
5058 
5059  // Check for heap number
5060  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5061  factory()->heap_number_map());
5062  __ j(equal, &heap_number, Label::kNear);
5063 
5064  // Check for undefined. Undefined is converted to zero for clamping
5065  // conversions.
5066  __ cmp(input_reg, factory()->undefined_value());
5067  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
5068  __ mov(input_reg, 0);
5069  __ jmp(&done, Label::kNear);
5070 
5071  // Heap number
5072  __ bind(&heap_number);
5073  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5074  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5075  __ jmp(&done, Label::kNear);
5076 
5077  // smi
5078  __ bind(&is_smi);
5079  __ SmiUntag(input_reg);
5080  __ ClampUint8(input_reg);
5081  __ bind(&done);
5082 }
5083 
5084 
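// Extract the high or low 32 bits of a double register. With SSE4.1 the
// high word can be read directly with pextrd; otherwise it is first
// shuffled into the low lane and moved out with movd.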
5085 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5086  XMMRegister value_reg = ToDoubleRegister(instr->value());
5087  Register result_reg = ToRegister(instr->result());
5088  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5089  if (CpuFeatures::IsSupported(SSE4_1)) {
5090  CpuFeatureScope scope2(masm(), SSE4_1);
5091  __ pextrd(result_reg, value_reg, 1);
5092  } else {
5093  XMMRegister xmm_scratch = double_scratch0();
5094  __ pshufd(xmm_scratch, value_reg, 1);
5095  __ movd(result_reg, xmm_scratch);
5096  }
5097  } else {
5098  __ movd(result_reg, value_reg);
5099  }
5100 }
5101 
5102 
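// Build a double from two untagged 32-bit halves. With SSE4.1 the high
// lane is written with pinsrd; the fallback shifts the high word into
// position and ors in the low word.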
5103 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5104  Register hi_reg = ToRegister(instr->hi());
5105  Register lo_reg = ToRegister(instr->lo());
5106  XMMRegister result_reg = ToDoubleRegister(instr->result());
5107 
5108  if (CpuFeatures::IsSupported(SSE4_1)) {
5109  CpuFeatureScope scope2(masm(), SSE4_1);
5110  __ movd(result_reg, lo_reg);
5111  __ pinsrd(result_reg, hi_reg, 1);
5112  } else {
5113  XMMRegister xmm_scratch = double_scratch0();
5114  __ movd(result_reg, hi_reg);
5115  __ psllq(result_reg, 32);
5116  __ movd(xmm_scratch, lo_reg);
5117  __ orps(result_reg, xmm_scratch);
5118  }
5119 }
5120 
5121 
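// Inline allocation fast path. Constant sizes up to the regular-object
// limit are bump-pointer allocated inline with the requested alignment and
// pretenuring flags; oversized constants jump straight to the deferred
// runtime path, and register sizes fall back to it only if the inline
// attempt fails. When requested, the new object is pre-filled with the
// one-pointer filler map so the heap stays iterable before the caller
// initializes the fields.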
5122 void LCodeGen::DoAllocate(LAllocate* instr) {
5123  class DeferredAllocate FINAL : public LDeferredCode {
5124  public:
5125  DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5126  : LDeferredCode(codegen), instr_(instr) { }
5127  virtual void Generate() OVERRIDE {
5128  codegen()->DoDeferredAllocate(instr_);
5129  }
5130  virtual LInstruction* instr() OVERRIDE { return instr_; }
5131  private:
5132  LAllocate* instr_;
5133  };
5134 
5135  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
5136 
5137  Register result = ToRegister(instr->result());
5138  Register temp = ToRegister(instr->temp());
5139 
5140  // Allocate memory for the object.
5141  AllocationFlags flags = TAG_OBJECT;
5142  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5143  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5144  }
5145  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5146  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5147  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5148  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5149  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5150  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5151  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5152  }
5153 
5154  if (instr->size()->IsConstantOperand()) {
5155  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5156  if (size <= Page::kMaxRegularHeapObjectSize) {
5157  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5158  } else {
5159  __ jmp(deferred->entry());
5160  }
5161  } else {
5162  Register size = ToRegister(instr->size());
5163  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5164  }
5165 
5166  __ bind(deferred->exit());
5167 
5168  if (instr->hydrogen()->MustPrefillWithFiller()) {
5169  if (instr->size()->IsConstantOperand()) {
5170  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5171  __ mov(temp, (size / kPointerSize) - 1);
5172  } else {
5173  temp = ToRegister(instr->size());
5174  __ shr(temp, kPointerSizeLog2);
5175  __ dec(temp);
5176  }
5177  Label loop;
5178  __ bind(&loop);
5179  __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5180  isolate()->factory()->one_pointer_filler_map());
5181  __ dec(temp);
5182  __ j(not_zero, &loop);
5183  }
5184 }
5185 
5186 
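// Runtime slow path for DoAllocate: pushes the size as a Smi together with
// the encoded target-space and alignment flags, then calls
// Runtime::kAllocateInTargetSpace and stores the result back into the
// safepoint register slot.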
5187 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5188  Register result = ToRegister(instr->result());
5189 
5190  // TODO(3095996): Get rid of this. For now, we need to make the
5191  // result register contain a valid pointer because it is already
5192  // contained in the register pointer map.
5193  __ Move(result, Immediate(Smi::FromInt(0)));
5194 
5195  PushSafepointRegistersScope scope(this);
5196  if (instr->size()->IsRegister()) {
5197  Register size = ToRegister(instr->size());
5198  DCHECK(!size.is(result));
5199  __ SmiTag(ToRegister(instr->size()));
5200  __ push(size);
5201  } else {
5202  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5203  if (size >= 0 && size <= Smi::kMaxValue) {
5204  __ push(Immediate(Smi::FromInt(size)));
5205  } else {
5206  // We should never get here at runtime => abort
5207  __ int3();
5208  return;
5209  }
5210  }
5211 
5212  int flags = AllocateDoubleAlignFlag::encode(
5213  instr->hydrogen()->MustAllocateDoubleAligned());
5214  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5215  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5216  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5217  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5218  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5219  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5220  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5221  } else {
5222  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5223  }
5224  __ push(Immediate(Smi::FromInt(flags)));
5225 
5226  CallRuntimeFromDeferred(
5227  Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5228  __ StoreToSafepointRegisterSlot(result, eax);
5229 }
5230 
5231 
5232 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5233  DCHECK(ToRegister(instr->value()).is(eax));
5234  __ push(eax);
5235  CallRuntime(Runtime::kToFastProperties, 1, instr);
5236 }
5237 
5238 
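// Materialize a regexp literal. The boilerplate object is cached in the
// function's literals array and created lazily by
// Runtime::kMaterializeRegExpLiteral; every evaluation then makes a
// shallow, fixed-size copy of it, allocating inline when possible.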
5239 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5240  DCHECK(ToRegister(instr->context()).is(esi));
5241  Label materialized;
5242  // Registers will be used as follows:
5243  // ecx = literals array.
5244  // ebx = regexp literal.
5245  // eax = regexp literal clone.
5246  // esi = context.
5247  int literal_offset =
5248  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5249  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5250  __ mov(ebx, FieldOperand(ecx, literal_offset));
5251  __ cmp(ebx, factory()->undefined_value());
5252  __ j(not_equal, &materialized, Label::kNear);
5253 
5254  // Create regexp literal using runtime function
5255  // Result will be in eax.
5256  __ push(ecx);
5257  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5258  __ push(Immediate(instr->hydrogen()->pattern()));
5259  __ push(Immediate(instr->hydrogen()->flags()));
5260  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5261  __ mov(ebx, eax);
5262 
5263  __ bind(&materialized);
5264  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5265  Label allocated, runtime_allocate;
5266  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5267  __ jmp(&allocated, Label::kNear);
5268 
5269  __ bind(&runtime_allocate);
5270  __ push(ebx);
5271  __ push(Immediate(Smi::FromInt(size)));
5272  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5273  __ pop(ebx);
5274 
5275  __ bind(&allocated);
5276  // Copy the content into the newly allocated memory.
5277  // (Unroll copy loop once for better throughput).
5278  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5279  __ mov(edx, FieldOperand(ebx, i));
5280  __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
5281  __ mov(FieldOperand(eax, i), edx);
5282  __ mov(FieldOperand(eax, i + kPointerSize), ecx);
5283  }
5284  if ((size % (2 * kPointerSize)) != 0) {
5285  __ mov(edx, FieldOperand(ebx, size - kPointerSize));
5286  __ mov(FieldOperand(eax, size - kPointerSize), edx);
5287  }
5288 }
5289 
5290 
5291 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5292  DCHECK(ToRegister(instr->context()).is(esi));
5293  // Use the fast case closure allocation code that allocates in new
5294  // space for nested functions that don't need literals cloning.
5295  bool pretenure = instr->hydrogen()->pretenure();
5296  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5297  FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5298  instr->hydrogen()->kind());
5299  __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
5300  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5301  } else {
5302  __ push(esi);
5303  __ push(Immediate(instr->hydrogen()->shared_info()));
5304  __ push(Immediate(pretenure ? factory()->true_value()
5305  : factory()->false_value()));
5306  CallRuntime(Runtime::kNewClosure, 3, instr);
5307  }
5308 }
5309 
5310 
5311 void LCodeGen::DoTypeof(LTypeof* instr) {
5312  DCHECK(ToRegister(instr->context()).is(esi));
5313  LOperand* input = instr->value();
5314  EmitPushTaggedOperand(input);
5315  CallRuntime(Runtime::kTypeof, 1, instr);
5316 }
5317 
5318 
5319 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5320  Register input = ToRegister(instr->value());
5321  Condition final_branch_condition = EmitTypeofIs(instr, input);
5322  if (final_branch_condition != no_condition) {
5323  EmitBranch(instr, final_branch_condition);
5324  }
5325 }
5326 
5327 
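// Emit the type test for "typeof x == 'literal'" and return the condition
// the caller should branch on. An unknown literal can never match, so the
// code jumps unconditionally to the false block and no_condition is
// returned.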
5328 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5329  Label* true_label = instr->TrueLabel(chunk_);
5330  Label* false_label = instr->FalseLabel(chunk_);
5331  Handle<String> type_name = instr->type_literal();
5332  int left_block = instr->TrueDestination(chunk_);
5333  int right_block = instr->FalseDestination(chunk_);
5334  int next_block = GetNextEmittedBlock();
5335 
5336  Label::Distance true_distance = left_block == next_block ? Label::kNear
5337  : Label::kFar;
5338  Label::Distance false_distance = right_block == next_block ? Label::kNear
5339  : Label::kFar;
5340  Condition final_branch_condition = no_condition;
5341  if (String::Equals(type_name, factory()->number_string())) {
5342  __ JumpIfSmi(input, true_label, true_distance);
5343  __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5344  factory()->heap_number_map());
5345  final_branch_condition = equal;
5346 
5347  } else if (String::Equals(type_name, factory()->string_string())) {
5348  __ JumpIfSmi(input, false_label, false_distance);
5349  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5350  __ j(above_equal, false_label, false_distance);
5351  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5352  1 << Map::kIsUndetectable);
5353  final_branch_condition = zero;
5354 
5355  } else if (String::Equals(type_name, factory()->symbol_string())) {
5356  __ JumpIfSmi(input, false_label, false_distance);
5357  __ CmpObjectType(input, SYMBOL_TYPE, input);
5358  final_branch_condition = equal;
5359 
5360  } else if (String::Equals(type_name, factory()->boolean_string())) {
5361  __ cmp(input, factory()->true_value());
5362  __ j(equal, true_label, true_distance);
5363  __ cmp(input, factory()->false_value());
5364  final_branch_condition = equal;
5365 
5366  } else if (String::Equals(type_name, factory()->undefined_string())) {
5367  __ cmp(input, factory()->undefined_value());
5368  __ j(equal, true_label, true_distance);
5369  __ JumpIfSmi(input, false_label, false_distance);
5370  // Check for undetectable objects => true.
5371  __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5372  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5373  1 << Map::kIsUndetectable);
5374  final_branch_condition = not_zero;
5375 
5376  } else if (String::Equals(type_name, factory()->function_string())) {
5377  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5378  __ JumpIfSmi(input, false_label, false_distance);
5379  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5380  __ j(equal, true_label, true_distance);
5381  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5382  final_branch_condition = equal;
5383 
5384  } else if (String::Equals(type_name, factory()->object_string())) {
5385  __ JumpIfSmi(input, false_label, false_distance);
5386  __ cmp(input, factory()->null_value());
5387  __ j(equal, true_label, true_distance);
5388  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5389  __ j(below, false_label, false_distance);
5390  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5391  __ j(above, false_label, false_distance);
5392  // Check for undetectable objects => false.
5393  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5394  1 << Map::kIsUndetectable);
5395  final_branch_condition = zero;
5396 
5397  } else {
5398  __ jmp(false_label, false_distance);
5399  }
5400  return final_branch_condition;
5401 }
5402 
5403 
5404 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5405  Register temp = ToRegister(instr->temp());
5406 
5407  EmitIsConstructCall(temp);
5408  EmitBranch(instr, equal);
5409 }
5410 
5411 
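// Set the condition flags to equal iff the current function was invoked
// with 'new': skip over an arguments adaptor frame if one was pushed, then
// compare the caller frame's marker against StackFrame::CONSTRUCT.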
5412 void LCodeGen::EmitIsConstructCall(Register temp) {
5413  // Get the frame pointer for the calling frame.
5414  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
5415 
5416  // Skip the arguments adaptor frame if it exists.
5417  Label check_frame_marker;
5418  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
5419  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5420  __ j(not_equal, &check_frame_marker, Label::kNear);
5421  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5422 
5423  // Check the marker in the calling frame.
5424  __ bind(&check_frame_marker);
5425  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5426  Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5427 }
5428 
5429 
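// Pad the instruction stream so that the call patched in by a lazy
// deoptimization cannot overwrite the patch site of the previous one.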
5430 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5431  if (!info()->IsStub()) {
5432  // Ensure that we have enough space after the previous lazy-bailout
5433  // instruction for patching the code here.
5434  int current_pc = masm()->pc_offset();
5435  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5436  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5437  __ Nop(padding_size);
5438  }
5439  }
5440  last_lazy_deopt_pc_ = masm()->pc_offset();
5441 }
5442 
5443 
5444 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5445  last_lazy_deopt_pc_ = masm()->pc_offset();
5446  DCHECK(instr->HasEnvironment());
5447  LEnvironment* env = instr->environment();
5448  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5449  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5450 }
5451 
5452 
5453 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5454  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5455  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5456  // needed return address), even though the implementation of LAZY and EAGER is
5457  // now identical. When LAZY is eventually completely folded into EAGER, remove
5458  // the special case below.
5459  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5460  type = Deoptimizer::LAZY;
5461  }
5462  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5463 }
5464 
5465 
5466 void LCodeGen::DoDummy(LDummy* instr) {
5467  // Nothing to see here, move on!
5468 }
5469 
5470 
5471 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5472  // Nothing to see here, move on!
5473 }
5474 
5475 
5476 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5477  PushSafepointRegistersScope scope(this);
5479  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5480  RecordSafepointWithLazyDeopt(
5481  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5482  DCHECK(instr->HasEnvironment());
5483  LEnvironment* env = instr->environment();
5484  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5485 }
5486 
5487 
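// Stack checks come in two flavors. At function entry the limit is checked
// against esp and the StackCheck builtin is called directly; on backwards
// branches the check jumps to deferred code that calls
// Runtime::kStackGuard with all registers saved.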
5488 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5489  class DeferredStackCheck FINAL : public LDeferredCode {
5490  public:
5491  DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5492  : LDeferredCode(codegen), instr_(instr) { }
5493  virtual void Generate() OVERRIDE {
5494  codegen()->DoDeferredStackCheck(instr_);
5495  }
5496  virtual LInstruction* instr() OVERRIDE { return instr_; }
5497  private:
5498  LStackCheck* instr_;
5499  };
5500 
5501  DCHECK(instr->HasEnvironment());
5502  LEnvironment* env = instr->environment();
5503  // There is no LLazyBailout instruction for stack-checks. We have to
5504  // prepare for lazy deoptimization explicitly here.
5505  if (instr->hydrogen()->is_function_entry()) {
5506  // Perform stack overflow check.
5507  Label done;
5508  ExternalReference stack_limit =
5509  ExternalReference::address_of_stack_limit(isolate());
5510  __ cmp(esp, Operand::StaticVariable(stack_limit));
5511  __ j(above_equal, &done, Label::kNear);
5512 
5513  DCHECK(instr->context()->IsRegister());
5514  DCHECK(ToRegister(instr->context()).is(esi));
5515  CallCode(isolate()->builtins()->StackCheck(),
5516  RelocInfo::CODE_TARGET,
5517  instr);
5518  __ bind(&done);
5519  } else {
5520  DCHECK(instr->hydrogen()->is_backwards_branch());
5521  // Perform stack overflow check if this goto needs it before jumping.
5522  DeferredStackCheck* deferred_stack_check =
5523  new(zone()) DeferredStackCheck(this, instr);
5524  ExternalReference stack_limit =
5525  ExternalReference::address_of_stack_limit(isolate());
5526  __ cmp(esp, Operand::StaticVariable(stack_limit));
5527  __ j(below, deferred_stack_check->entry());
5528  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5529  __ bind(instr->done_label());
5530  deferred_stack_check->SetExit(instr->done_label());
5531  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5532  // Don't record a deoptimization index for the safepoint here.
5533  // This will be done explicitly when emitting call and the safepoint in
5534  // the deferred code.
5535  }
5536 }
5537 
5538 
5539 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5540  // This is a pseudo-instruction that ensures that the environment here is
5541  // properly registered for deoptimization and records the assembler's PC
5542  // offset.
5543  LEnvironment* environment = instr->environment();
5544 
5545  // If the environment were already registered, we would have no way of
5546  // backpatching it with the spill slot operands.
5547  DCHECK(!environment->HasBeenRegistered());
5548  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5549 
5550  GenerateOsrPrologue();
5551 }
5552 
5553 
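// Prepare the for-in enumerable in eax: deoptimize for undefined, null,
// Smis and proxies, then require a usable enum cache. When the cache check
// fails, Runtime::kGetPropertyNamesFast must return a map (recognized by
// its meta map); any other result deoptimizes to the generic for-in path.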
5554 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5555  DCHECK(ToRegister(instr->context()).is(esi));
5556  __ cmp(eax, isolate()->factory()->undefined_value());
5557  DeoptimizeIf(equal, instr, "undefined");
5558 
5559  __ cmp(eax, isolate()->factory()->null_value());
5560  DeoptimizeIf(equal, instr, "null");
5561 
5562  __ test(eax, Immediate(kSmiTagMask));
5563  DeoptimizeIf(zero, instr, "Smi");
5564 
5565  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5566  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
5567  DeoptimizeIf(below_equal, instr, "wrong instance type");
5568 
5569  Label use_cache, call_runtime;
5570  __ CheckEnumCache(&call_runtime);
5571 
5572  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
5573  __ jmp(&use_cache, Label::kNear);
5574 
5575  // Get the set of properties to enumerate.
5576  __ bind(&call_runtime);
5577  __ push(eax);
5578  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5579 
5580  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
5581  isolate()->factory()->meta_map());
5582  DeoptimizeIf(not_equal, instr, "wrong map");
5583  __ bind(&use_cache);
5584 }
5585 
5586 
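// Fetch the for-in enum cache for the given map. An EnumLength of zero
// yields the empty fixed array; otherwise the cache is loaded from the
// instance descriptors, deoptimizing if no cache is present.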
5587 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5588  Register map = ToRegister(instr->map());
5589  Register result = ToRegister(instr->result());
5590  Label load_cache, done;
5591  __ EnumLength(result, map);
5592  __ cmp(result, Immediate(Smi::FromInt(0)));
5593  __ j(not_equal, &load_cache, Label::kNear);
5594  __ mov(result, isolate()->factory()->empty_fixed_array());
5595  __ jmp(&done, Label::kNear);
5596 
5597  __ bind(&load_cache);
5598  __ LoadInstanceDescriptors(map, result);
5599  __ mov(result,
5600  FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5601  __ mov(result,
5602  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5603  __ bind(&done);
5604  __ test(result, result);
5605  DeoptimizeIf(equal, instr, "no cache");
5606 }
5607 
5608 
5609 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5610  Register object = ToRegister(instr->value());
5611  __ cmp(ToRegister(instr->map()),
5612  FieldOperand(object, HeapObject::kMapOffset));
5613  DeoptimizeIf(not_equal, instr, "wrong map");
5614 }
5615 
5616 
5617 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5618  Register object,
5619  Register index) {
5620  PushSafepointRegistersScope scope(this);
5621  __ push(object);
5622  __ push(index);
5623  __ xor_(esi, esi);
5624  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5625  RecordSafepointWithRegisters(
5626  instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5627  __ StoreToSafepointRegisterSlot(object, eax);
5628 }
5629 
5630 
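// Load a field by its encoded index. The Smi index carries a flag bit for
// mutable heap-number fields, which are boxed afresh in deferred code; a
// non-negative index selects an in-object field, a negative one a slot in
// the out-of-object properties array.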
5631 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5632  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5633  public:
5634  DeferredLoadMutableDouble(LCodeGen* codegen,
5635  LLoadFieldByIndex* instr,
5636  Register object,
5637  Register index)
5638  : LDeferredCode(codegen),
5639  instr_(instr),
5640  object_(object),
5641  index_(index) {
5642  }
5643  virtual void Generate() OVERRIDE {
5644  codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5645  }
5646  virtual LInstruction* instr() OVERRIDE { return instr_; }
5647  private:
5648  LLoadFieldByIndex* instr_;
5649  Register object_;
5650  Register index_;
5651  };
5652 
5653  Register object = ToRegister(instr->object());
5654  Register index = ToRegister(instr->index());
5655 
5656  DeferredLoadMutableDouble* deferred;
5657  deferred = new(zone()) DeferredLoadMutableDouble(
5658  this, instr, object, index);
5659 
5660  Label out_of_object, done;
5661  __ test(index, Immediate(Smi::FromInt(1)));
5662  __ j(not_zero, deferred->entry());
5663 
5664  __ sar(index, 1);
5665 
5666  __ cmp(index, Immediate(0));
5667  __ j(less, &out_of_object, Label::kNear);
5668  __ mov(object, FieldOperand(object,
5669  index,
5670  times_half_pointer_size,
5671  JSObject::kHeaderSize));
5672  __ jmp(&done, Label::kNear);
5673 
5674  __ bind(&out_of_object);
5675  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
5676  __ neg(index);
5677  // Index is now equal to out of object property index plus 1.
5678  __ mov(object, FieldOperand(object,
5679  index,
5680  times_half_pointer_size,
5681  FixedArray::kHeaderSize - kPointerSize));
5682  __ bind(deferred->exit());
5683  __ bind(&done);
5684 }
5685 
5686 
5687 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5688  Register context = ToRegister(instr->context());
5689  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
5690 }
5691 
5692 
5693 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5694  Handle<ScopeInfo> scope_info = instr->scope_info();
5695  __ Push(scope_info);
5696  __ push(ToRegister(instr->function()));
5697  CallRuntime(Runtime::kPushBlockContext, 2, instr);
5698  RecordSafepoint(Safepoint::kNoLazyDeopt);
5699 }
5700 
5701 
5702 #undef __
5703 
5704 } } // namespace v8::internal
5705 
5706 #endif // V8_TARGET_ARCH_IA32