lithium-codegen-x87.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X87

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x87/lithium-codegen-x87.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const OVERRIDE {}

  virtual void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}

#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif

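// Illustrative note on the probe above: Windows commits stack memory lazily,
// one guard page at a time, so a large downward adjustment of esp must touch
// every intervening page in order, or a later access could fault beyond the
// guard page. For example (hypothetical numbers), with offset == 3 * 4KB the
// loop stores eax at esp + 8KB and then esp + 4KB, mapping each page in turn.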

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(ecx, Operand(esp, receiver_offset));

      __ cmp(ecx, isolate()->factory()->undefined_value());
      __ j(not_equal, &ok, Label::kNear);

      __ mov(ecx, GlobalObjectOperand());
      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));

      __ mov(Operand(esp, receiver_offset), ecx);

      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Move(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }
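
  // Worked example (illustrative) of the alignment loop above: with two
  // parameters and an esp that is only word-aligned (esp & 4 != 0), the
  // push(0) opens a slot one word down and the loop runs
  // scope()->num_parameters() + 2 == 4 times, moving the two arguments, the
  // receiver, and the return address each down one word. The vacated word is
  // zapped with kAlignmentZapValue, and edx records kAlignmentPaddingPushed
  // so the epilogue can pop the padding again.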

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in eax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Initialize FPU state.
  __ fninit();
  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Move(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
                          5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));

  // Initialize FPU state.
  __ fninit();
}
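
// Worked example (illustrative): if the optimized code needs
// GetStackSlotCount() == 10 slots and the unoptimized frame already supplies
// UnoptimizedFrameSlots() == 4, then slots == 6. One of those six words was
// just consumed by pushing the saved first local, so esp only needs to drop
// by (slots - 1) * kPointerSize == 20 more bytes to subsume the old frame.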


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
  FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  // When returning from a function call, the FPU must be reinitialized.
  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_result = instr->HasDoubleRegisterResult();
    if (double_result) {
      __ lea(esp, Operand(esp, -kDoubleSize));
      __ fstp_d(Operand(esp, 0));
    }
    __ fninit();
    if (double_result) {
      __ fld_d(Operand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
    }
  }
  if (instr->IsGoto()) {
    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
             !instr->IsGap() && !instr->IsReturn()) {
    if (instr->ClobbersDoubleRegisters(isolate())) {
      if (instr->HasDoubleRegisterResult()) {
        DCHECK_EQ(1, x87_stack_.depth());
      } else {
        DCHECK_EQ(0, x87_stack_.depth());
      }
    }
    __ VerifyX87StackDepth(x87_stack_.depth());
  }
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->reason);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        DCHECK(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt, it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


X87Register LCodeGen::ToX87Register(int index) const {
  return X87Register::FromAllocationIndex(index);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  DCHECK(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  DCHECK(x87_stack_.Contains(reg1));
  DCHECK(x87_stack_.Contains(reg2));
  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
    __ fld(x87_stack_.st(reg1));
    x87_stack_.push(reg1);
    x87_stack_.pop();
    x87_stack_.pop();
  } else {
    x87_stack_.Fxch(reg1, 1);
    x87_stack_.Fxch(reg2);
    x87_stack_.pop();
    x87_stack_.pop();
  }
}


int LCodeGen::X87Stack::GetLayout() {
  int layout = stack_depth_;
  for (int i = 0; i < stack_depth_; i++) {
    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
  }

  return layout;
}
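
// Illustrative example of the encoding above (hypothetical stack contents):
// the depth sits in the low 3 bits and each virtual slot's register code
// occupies the next 3-bit field, TOS first. With stack_ = { st2, st0 }
// (depth 2):
//   layout = 2 | (st0.code() << 3) | (st2.code() << 6)
//          = 2 | (0 << 3) | (2 << 6) = 130
// This packed int is what DeoptimizeIf() pushes, so the deoptimizer can tell
// which x87 slot holds which register.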


void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg) && stack_depth_ > other_slot);
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i] = reg;
    stack_[i] = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}
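
// Note (illustrative): fxch can only exchange ST(0) with ST(i), so swapping
// two slots when neither is top-of-stack takes the three exchanges emitted
// above. E.g. to swap ST(1) and ST(2) with ST(0) = a, ST(1) = b, ST(2) = c:
//   fxch st1   ; (b, a, c)
//   fxch st2   ; (c, a, b)
//   fxch st1   ; (a, c, b) -- ST(1) and ST(2) swapped, TOS restored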


int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Stack::Free(X87Register reg) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg));
  int i = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(i) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
    x87_stack_.pop();
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  } else {
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  }
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  DCHECK(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  DCHECK(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87FloatOperand:
      __ fst_s(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  DCHECK(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  DCHECK(is_mutable_);
  // Assert the reg is prepared to write, but not on the virtual stack yet.
  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  DCHECK(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from TOS down, since FreeX87() will mess with TOS.
    for (int i = stack_depth_ - 1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
      if (i < stack_depth_ - 1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
                                      LCodeGen* cgen) {
  // When going to a joined block, an explicit LClobberDoubles is inserted
  // before LGoto, because all used x87 registers are spilled to stack slots.
  // The ResolvePhis phase of the register allocator guarantees that the two
  // inputs' x87 stacks have the same layout, so don't check
  // stack_depth_ <= 1 here.
  int goto_block_id = goto_instr->block_id();
  if (current_block_id + 1 != goto_block_id) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    // Before discarding the stack state, we need to save it if the "goto
    // block" has an unreachable last predecessor and
    // FLAG_unreachable_code_elimination is on.
    if (FLAG_unreachable_code_elimination) {
      int length = goto_instr->block()->predecessors()->length();
      bool has_unreachable_last_predecessor = false;
      for (int i = 0; i < length; i++) {
        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
        if (block->IsUnreachable() &&
            (block->block_id() + 1) == goto_block_id) {
          has_unreachable_last_predecessor = true;
        }
      }
      if (has_unreachable_last_predecessor) {
        if (cgen->x87_stack_map_.find(goto_block_id) ==
            cgen->x87_stack_map_.end()) {
          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
        }
      }
    }

    // Discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support x87 registers. But as long as we
  // deopt from a stub it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  // DCHECK(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }

  // Flush X87 stack in the deoptimizer entry.
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}
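
// Worked example (illustrative): argument operands carry negative indices.
// With kPointerSize == 4 and kPCOnStackSize == 4, index -1 maps to
// -(-1 + 1) * 4 + 4 == 4, the word just above the return address in a
// frameless (esp-relative) access; index -2 maps to 8, and so on.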


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    X87Register reg = ToX87Register(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
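
// Illustrative example of the frame walk above (hypothetical nesting): for an
// environment chain [inlined JS function] -> [arguments adaptor] -> [outer JS
// function], the loop yields frame_count == 3 but jsframe_count == 2, because
// only JS_FUNCTION frames contribute to jsframe_count.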


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            const char* detail,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    // Put the x87 stack layout in TOS.
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
    __ push(Immediate(x87_stack_.GetLayout()));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
  // the correct location.
  {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();

    int x87_stack_layout = x87_stack_.GetLayout();
    __ push(Immediate(x87_stack_layout));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(reason);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            const char* detail) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, detail, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  if (label->block()->predecessors()->length() > 1) {
    // A join block's x87 stack is that of its last visited predecessor.
    // If the last visited predecessor block is unreachable, the stack state
    // will be wrong. In such a case, use the x87 stack of a reachable
    // predecessor.
    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
    // Restore x87 stack.
    if (it != x87_stack_map_.end()) {
      x87_stack_ = *(it->second);
    }
  }
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, "minus zero");
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}
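
// Worked example (illustrative): for divisor == 8 (or -8) the mask is 7. A
// negative dividend such as -13 takes the branch: neg -> 13, and 7 -> 5,
// neg -> -5, matching the truncating semantics of -13 % 8 == -5. A positive
// dividend such as 13 skips straight to the single 'and': 13 & 7 == 5.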


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, "minus zero");
    __ bind(&remainder_not_zero);
  }
}
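
// Worked example (illustrative): the sequence above computes
// n % d == n - trunc(n / |d|) * |d|, with TruncatingDiv leaving the
// magic-number quotient in edx. For dividend -7 and divisor 3: the quotient
// -2 is scaled to -6 by the imul, and eax = -7 - (-6) == -1, the truncating
// remainder (its sign follows the dividend, so |d| suffices).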


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, "minus zero");
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, "minus zero");
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, "overflow");
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, "lost precision");
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}
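
// Worked example (illustrative) of the shift sequence for shift == 3
// (divisor 8): for dividend -9, 'sar result, 31' yields -1, 'shr result, 29'
// turns that into the bias 7, the 'add' gives -2, and 'sar result, 3' gives
// -1 == trunc(-9 / 8). For a positive dividend the bias is 0, leaving the
// plain arithmetic shift.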


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, "lost precision");
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, "minus zero");
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, "overflow");
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, "lost precision");
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sar(dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ neg(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, "minus zero");
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, "overflow");
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sar(dividend, shift);
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, "division by zero");
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, "minus zero");
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, "division by zero");
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, "minus zero");
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, "overflow");
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  Label done;
  __ test(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xor_(remainder, divisor);
  __ sar(remainder, 31);
  __ add(result, remainder);
  __ bind(&done);
}
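
// Worked example (illustrative) of the flooring fix-up above: for -7 / 2,
// idiv leaves quotient -3 and remainder -1. remainder ^ divisor is negative
// (the signs differ), so 'sar remainder, 31' produces -1 and the quotient is
// adjusted to -4 == floor(-7 / 2). For 7 / 2 the xor is positive, the
// adjustment is 0, and the truncating quotient 3 is already the floor.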


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, "overflow");
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, "minus zero");
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr, "minus zero");
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr, "minus zero");
    }
    __ bind(&done);
  }
}
1851 
1852 
1853 void LCodeGen::DoBitI(LBitI* instr) {
1854  LOperand* left = instr->left();
1855  LOperand* right = instr->right();
1856  DCHECK(left->Equals(instr->result()));
1857  DCHECK(left->IsRegister());
1858 
1859  if (right->IsConstantOperand()) {
1860  int32_t right_operand =
1861  ToRepresentation(LConstantOperand::cast(right),
1862  instr->hydrogen()->representation());
1863  switch (instr->op()) {
1864  case Token::BIT_AND:
1865  __ and_(ToRegister(left), right_operand);
1866  break;
1867  case Token::BIT_OR:
1868  __ or_(ToRegister(left), right_operand);
1869  break;
1870  case Token::BIT_XOR:
1871  if (right_operand == int32_t(~0)) {
1872  __ not_(ToRegister(left));
1873  } else {
1874  __ xor_(ToRegister(left), right_operand);
1875  }
1876  break;
1877  default:
1878  UNREACHABLE();
1879  break;
1880  }
1881  } else {
1882  switch (instr->op()) {
1883  case Token::BIT_AND:
1884  __ and_(ToRegister(left), ToOperand(right));
1885  break;
1886  case Token::BIT_OR:
1887  __ or_(ToRegister(left), ToOperand(right));
1888  break;
1889  case Token::BIT_XOR:
1890  __ xor_(ToRegister(left), ToOperand(right));
1891  break;
1892  default:
1893  UNREACHABLE();
1894  break;
1895  }
1896  }
1897 }
1898 
1899 
1900 void LCodeGen::DoShiftI(LShiftI* instr) {
1901  LOperand* left = instr->left();
1902  LOperand* right = instr->right();
1903  DCHECK(left->Equals(instr->result()));
1904  DCHECK(left->IsRegister());
1905  if (right->IsRegister()) {
1906  DCHECK(ToRegister(right).is(ecx));
1907 
1908  switch (instr->op()) {
1909  case Token::ROR:
1910  __ ror_cl(ToRegister(left));
1911  break;
1912  case Token::SAR:
1913  __ sar_cl(ToRegister(left));
1914  break;
1915  case Token::SHR:
1916  __ shr_cl(ToRegister(left));
1917  if (instr->can_deopt()) {
1918  __ test(ToRegister(left), ToRegister(left));
1919  DeoptimizeIf(sign, instr, "negative value");
1920  }
1921  break;
1922  case Token::SHL:
1923  __ shl_cl(ToRegister(left));
1924  break;
1925  default:
1926  UNREACHABLE();
1927  break;
1928  }
1929  } else {
1930  int value = ToInteger32(LConstantOperand::cast(right));
1931  uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1932  switch (instr->op()) {
1933  case Token::ROR:
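        // Rotating by zero leaves the value unchanged, but the uint32 result
        // must still be representable as an int32, hence the sign check.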
1934  if (shift_count == 0 && instr->can_deopt()) {
1935  __ test(ToRegister(left), ToRegister(left));
1936  DeoptimizeIf(sign, instr, "negative value");
1937  } else {
1938  __ ror(ToRegister(left), shift_count);
1939  }
1940  break;
1941  case Token::SAR:
1942  if (shift_count != 0) {
1943  __ sar(ToRegister(left), shift_count);
1944  }
1945  break;
1946  case Token::SHR:
1947  if (shift_count != 0) {
1948  __ shr(ToRegister(left), shift_count);
1949  } else if (instr->can_deopt()) {
1950  __ test(ToRegister(left), ToRegister(left));
1951  DeoptimizeIf(sign, instr, "negative value");
1952  }
1953  break;
1954  case Token::SHL:
1955  if (shift_count != 0) {
1956  if (instr->hydrogen_value()->representation().IsSmi() &&
1957  instr->can_deopt()) {
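          // Emit all but the last bit of the shift, then let SmiTag (an
          // add reg, reg) perform the final doubling so the overflow flag
          // can be checked for the smi result.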
1958  if (shift_count != 1) {
1959  __ shl(ToRegister(left), shift_count - 1);
1960  }
1961  __ SmiTag(ToRegister(left));
1962  DeoptimizeIf(overflow, instr, "overflow");
1963  } else {
1964  __ shl(ToRegister(left), shift_count);
1965  }
1966  }
1967  break;
1968  default:
1969  UNREACHABLE();
1970  break;
1971  }
1972  }
1973 }
1974 
1975 
1976 void LCodeGen::DoSubI(LSubI* instr) {
1977  LOperand* left = instr->left();
1978  LOperand* right = instr->right();
1979  DCHECK(left->Equals(instr->result()));
1980 
1981  if (right->IsConstantOperand()) {
1982  __ sub(ToOperand(left),
1983  ToImmediate(right, instr->hydrogen()->representation()));
1984  } else {
1985  __ sub(ToRegister(left), ToOperand(right));
1986  }
1987  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1988  DeoptimizeIf(overflow, instr, "overflow");
1989  }
1990 }
1991 
1992 
1993 void LCodeGen::DoConstantI(LConstantI* instr) {
1994  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
1995 }
1996 
1997 
1998 void LCodeGen::DoConstantS(LConstantS* instr) {
1999  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
2000 }
2001 
2002 
2003 void LCodeGen::DoConstantD(LConstantD* instr) {
2004  double v = instr->value();
2005  uint64_t int_val = bit_cast<uint64_t, double>(v);
2006  int32_t lower = static_cast<int32_t>(int_val);
2007  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
2008  DCHECK(instr->result()->IsDoubleRegister());
2009 
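      // x87 has no 64-bit immediate move: build the double on the stack from
      // its two 32-bit halves and load it through memory.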
2010  __ push(Immediate(upper));
2011  __ push(Immediate(lower));
2012  X87Register reg = ToX87Register(instr->result());
2013  X87Mov(reg, Operand(esp, 0));
2014  __ add(Operand(esp), Immediate(kDoubleSize));
2015 }
2016 
2017 
2018 void LCodeGen::DoConstantE(LConstantE* instr) {
2019  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
2020 }
2021 
2022 
2023 void LCodeGen::DoConstantT(LConstantT* instr) {
2024  Register reg = ToRegister(instr->result());
2025  Handle<Object> object = instr->value(isolate());
2026  AllowDeferredHandleDereference smi_check;
2027  __ LoadObject(reg, object);
2028 }
2029 
2030 
2031 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
2032  Register result = ToRegister(instr->result());
2033  Register map = ToRegister(instr->value());
2034  __ EnumLength(result, map);
2035 }
2036 
2037 
2038 void LCodeGen::DoDateField(LDateField* instr) {
2039  Register object = ToRegister(instr->date());
2040  Register result = ToRegister(instr->result());
2041  Register scratch = ToRegister(instr->temp());
2042  Smi* index = instr->index();
2043  Label runtime, done;
2044  DCHECK(object.is(result));
2045  DCHECK(object.is(eax));
2046 
2047  __ test(object, Immediate(kSmiTagMask));
2048  DeoptimizeIf(zero, instr, "Smi");
2049  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
2050  DeoptimizeIf(not_equal, instr, "not a date object");
2051 
2052  if (index->value() == 0) {
2053  __ mov(result, FieldOperand(object, JSDate::kValueOffset));
2054  } else {
2055  if (index->value() < JSDate::kFirstUncachedField) {
2056  ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2057  __ mov(scratch, Operand::StaticVariable(stamp));
2058  __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
2059  __ j(not_equal, &runtime, Label::kNear);
2060  __ mov(result, FieldOperand(object, JSDate::kValueOffset +
2061  kPointerSize * index->value()));
2062  __ jmp(&done, Label::kNear);
2063  }
2064  __ bind(&runtime);
2065  __ PrepareCallCFunction(2, scratch);
2066  __ mov(Operand(esp, 0), object);
2067  __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
2068  __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2069  __ bind(&done);
2070  }
2071 }
2072 
2073 
2074 Operand LCodeGen::BuildSeqStringOperand(Register string,
2075  LOperand* index,
2076  String::Encoding encoding) {
2077  if (index->IsConstantOperand()) {
2078  int offset = ToRepresentation(LConstantOperand::cast(index),
2079  Representation::Integer32());
2080  if (encoding == String::TWO_BYTE_ENCODING) {
2081  offset *= kUC16Size;
2082  }
2083  STATIC_ASSERT(kCharSize == 1);
2084  return FieldOperand(string, SeqString::kHeaderSize + offset);
2085  }
2086  return FieldOperand(
2087  string, ToRegister(index),
2088  encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
2089  SeqString::kHeaderSize);
2090 }
2091 
2092 
2093 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2094  String::Encoding encoding = instr->hydrogen()->encoding();
2095  Register result = ToRegister(instr->result());
2096  Register string = ToRegister(instr->string());
2097 
2098  if (FLAG_debug_code) {
2099  __ push(string);
2100  __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2101  __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2102 
2103  __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2104  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2105  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2106  __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2107  ? one_byte_seq_type : two_byte_seq_type));
2108  __ Check(equal, kUnexpectedStringType);
2109  __ pop(string);
2110  }
2111 
2112  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2113  if (encoding == String::ONE_BYTE_ENCODING) {
2114  __ movzx_b(result, operand);
2115  } else {
2116  __ movzx_w(result, operand);
2117  }
2118 }
2119 
2120 
2121 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2122  String::Encoding encoding = instr->hydrogen()->encoding();
2123  Register string = ToRegister(instr->string());
2124 
2125  if (FLAG_debug_code) {
2126  Register value = ToRegister(instr->value());
2127  Register index = ToRegister(instr->index());
2128  static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2129  static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2130  int encoding_mask =
2131  instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2132  ? one_byte_seq_type : two_byte_seq_type;
2133  __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2134  }
2135 
2136  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2137  if (instr->value()->IsConstantOperand()) {
2138  int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2139  Representation::Integer32());
2140  DCHECK_LE(0, value);
2141  if (encoding == String::ONE_BYTE_ENCODING) {
2142  DCHECK_LE(value, String::kMaxOneByteCharCode);
2143  __ mov_b(operand, static_cast<int8_t>(value));
2144  } else {
2145  DCHECK_LE(value, String::kMaxUtf16CodeUnit);
2146  __ mov_w(operand, static_cast<int16_t>(value));
2147  }
2148  } else {
2149  Register value = ToRegister(instr->value());
2150  if (encoding == String::ONE_BYTE_ENCODING) {
2151  __ mov_b(operand, value);
2152  } else {
2153  __ mov_w(operand, value);
2154  }
2155  }
2156 }
2157 
2158 
2159 void LCodeGen::DoAddI(LAddI* instr) {
2160  LOperand* left = instr->left();
2161  LOperand* right = instr->right();
2162 
2163  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2164  if (right->IsConstantOperand()) {
2165  int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2166  instr->hydrogen()->representation());
2167  __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2168  } else {
2169  Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2170  __ lea(ToRegister(instr->result()), address);
2171  }
2172  } else {
2173  if (right->IsConstantOperand()) {
2174  __ add(ToOperand(left),
2175  ToImmediate(right, instr->hydrogen()->representation()));
2176  } else {
2177  __ add(ToRegister(left), ToOperand(right));
2178  }
2179  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2180  DeoptimizeIf(overflow, instr, "overflow");
2181  }
2182  }
2183 }
2184 
2185 
2186 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2187  LOperand* left = instr->left();
2188  LOperand* right = instr->right();
2189  DCHECK(left->Equals(instr->result()));
2190  HMathMinMax::Operation operation = instr->hydrogen()->operation();
2191  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2192  Label return_left;
2193  Condition condition = (operation == HMathMinMax::kMathMin)
2194  ? less_equal
2195  : greater_equal;
2196  if (right->IsConstantOperand()) {
2197  Operand left_op = ToOperand(left);
2198  Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2199  instr->hydrogen()->representation());
2200  __ cmp(left_op, immediate);
2201  __ j(condition, &return_left, Label::kNear);
2202  __ mov(left_op, immediate);
2203  } else {
2204  Register left_reg = ToRegister(left);
2205  Operand right_op = ToOperand(right);
2206  __ cmp(left_reg, right_op);
2207  __ j(condition, &return_left, Label::kNear);
2208  __ mov(left_reg, right_op);
2209  }
2210  __ bind(&return_left);
2211  } else {
2212  DCHECK(instr->hydrogen()->representation().IsDouble());
2213  Label check_nan_left, check_zero, return_left, return_right;
2214  Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2215  X87Register left_reg = ToX87Register(left);
2216  X87Register right_reg = ToX87Register(right);
2217 
2218  X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
2219  __ fld(1);
2220  __ fld(1);
2221  __ FCmp();
2222  __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
2223  __ j(equal, &check_zero, Label::kNear); // left == right.
2224  __ j(condition, &return_left, Label::kNear);
2225  __ jmp(&return_right, Label::kNear);
2226 
2227  __ bind(&check_zero);
2228  __ fld(0);
2229  __ fldz();
2230  __ FCmp();
2231  __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
2232  // At this point, both left and right are either 0 or -0.
2233  if (operation == HMathMinMax::kMathMin) {
2234  // Push st0 and st1 to stack, then pop them to temp registers and OR them,
2235  // load it to left.
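      // -0 is the only zero with the sign bit set, so ORing the two
      // single-precision bit patterns yields -0 exactly when at least one
      // operand is -0, which is the result Math.min requires.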
2236  Register scratch_reg = ToRegister(instr->temp());
2237  __ fld(1);
2238  __ fld(1);
2239  __ sub(esp, Immediate(2 * kPointerSize));
2240  __ fstp_s(MemOperand(esp, 0));
2241  __ fstp_s(MemOperand(esp, kPointerSize));
2242  __ pop(scratch_reg);
2243  __ or_(MemOperand(esp, 0), scratch_reg);
2244  X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
2245  __ pop(scratch_reg); // restore esp
2246  } else {
2247  // Since both operands are +0 and/or -0, adding them has the same effect as ANDing their sign bits.
2248  X87Fxch(left_reg);
2249  __ fadd(1);
2250  }
2251  __ jmp(&return_left, Label::kNear);
2252 
2253  __ bind(&check_nan_left);
2254  __ fld(0);
2255  __ fld(0);
2256  __ FCmp(); // NaN check.
2257  __ j(parity_even, &return_left, Label::kNear); // left == NaN.
2258 
2259  __ bind(&return_right);
2260  X87Fxch(left_reg);
2261  X87Mov(left_reg, right_reg);
2262 
2263  __ bind(&return_left);
2264  }
2265 }
2266 
2267 
2268 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2269  X87Register left = ToX87Register(instr->left());
2270  X87Register right = ToX87Register(instr->right());
2271  X87Register result = ToX87Register(instr->result());
2272  if (instr->op() != Token::MOD) {
2273  X87PrepareBinaryOp(left, right, result);
2274  }
2275  switch (instr->op()) {
2276  case Token::ADD:
2277  __ fadd_i(1);
2278  break;
2279  case Token::SUB:
2280  __ fsub_i(1);
2281  break;
2282  case Token::MUL:
2283  __ fmul_i(1);
2284  break;
2285  case Token::DIV:
2286  __ fdiv_i(1);
2287  break;
2288  case Token::MOD: {
2289  // Pass two doubles as arguments on the stack.
2290  __ PrepareCallCFunction(4, eax);
2291  X87Mov(Operand(esp, 1 * kDoubleSize), right);
2292  X87Mov(Operand(esp, 0), left);
2293  X87Free(right);
2294  DCHECK(left.is(result));
2295  X87PrepareToWrite(result);
2296  __ CallCFunction(
2297  ExternalReference::mod_two_doubles_operation(isolate()),
2298  4);
2299 
2300  // Return value is in st(0) on ia32.
2301  X87CommitWrite(result);
2302  break;
2303  }
2304  default:
2305  UNREACHABLE();
2306  break;
2307  }
2308 
2309  // Always store the result to memory and reload it, forcing the value to be
2310  // rounded from the x87's extended precision down to double precision.
2311  __ lea(esp, Operand(esp, -kDoubleSize));
2312  __ fstp_d(Operand(esp, 0));
2313  __ fld_d(Operand(esp, 0));
2314  __ lea(esp, Operand(esp, kDoubleSize));
2315 }
2316 
2317 
2318 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2319  DCHECK(ToRegister(instr->context()).is(esi));
2320  DCHECK(ToRegister(instr->left()).is(edx));
2321  DCHECK(ToRegister(instr->right()).is(eax));
2322  DCHECK(ToRegister(instr->result()).is(eax));
2323 
2324  Handle<Code> code =
2325  CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2326  CallCode(code, RelocInfo::CODE_TARGET, instr);
2327 }
2328 
2329 
2330 template<class InstrType>
2331 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2332  int left_block = instr->TrueDestination(chunk_);
2333  int right_block = instr->FalseDestination(chunk_);
2334 
2335  int next_block = GetNextEmittedBlock();
2336 
2337  if (right_block == left_block || cc == no_condition) {
2338  EmitGoto(left_block);
2339  } else if (left_block == next_block) {
2340  __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2341  } else if (right_block == next_block) {
2342  __ j(cc, chunk_->GetAssemblyLabel(left_block));
2343  } else {
2344  __ j(cc, chunk_->GetAssemblyLabel(left_block));
2345  __ jmp(chunk_->GetAssemblyLabel(right_block));
2346  }
2347 }
2348 
2349 
2350 template<class InstrType>
2351 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2352  int false_block = instr->FalseDestination(chunk_);
2353  if (cc == no_condition) {
2354  __ jmp(chunk_->GetAssemblyLabel(false_block));
2355  } else {
2356  __ j(cc, chunk_->GetAssemblyLabel(false_block));
2357  }
2358 }
2359 
2360 
2361 void LCodeGen::DoBranch(LBranch* instr) {
2362  Representation r = instr->hydrogen()->value()->representation();
2363  if (r.IsSmiOrInteger32()) {
2364  Register reg = ToRegister(instr->value());
2365  __ test(reg, Operand(reg));
2366  EmitBranch(instr, not_zero);
2367  } else if (r.IsDouble()) {
2368  X87Register reg = ToX87Register(instr->value());
2369  X87LoadForUsage(reg);
2370  __ fldz();
2371  __ FCmp();
2372  EmitBranch(instr, not_zero);
2373  } else {
2374  DCHECK(r.IsTagged());
2375  Register reg = ToRegister(instr->value());
2376  HType type = instr->hydrogen()->value()->type();
2377  if (type.IsBoolean()) {
2378  DCHECK(!info()->IsStub());
2379  __ cmp(reg, factory()->true_value());
2380  EmitBranch(instr, equal);
2381  } else if (type.IsSmi()) {
2382  DCHECK(!info()->IsStub());
2383  __ test(reg, Operand(reg));
2384  EmitBranch(instr, not_equal);
2385  } else if (type.IsJSArray()) {
2386  DCHECK(!info()->IsStub());
2387  EmitBranch(instr, no_condition);
2388  } else if (type.IsHeapNumber()) {
2389  UNREACHABLE();
2390  } else if (type.IsString()) {
2391  DCHECK(!info()->IsStub());
2392  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2393  EmitBranch(instr, not_equal);
2394  } else {
2395  ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2396  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2397 
2398  if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2399  // undefined -> false.
2400  __ cmp(reg, factory()->undefined_value());
2401  __ j(equal, instr->FalseLabel(chunk_));
2402  }
2403  if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2404  // true -> true.
2405  __ cmp(reg, factory()->true_value());
2406  __ j(equal, instr->TrueLabel(chunk_));
2407  // false -> false.
2408  __ cmp(reg, factory()->false_value());
2409  __ j(equal, instr->FalseLabel(chunk_));
2410  }
2411  if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2412  // 'null' -> false.
2413  __ cmp(reg, factory()->null_value());
2414  __ j(equal, instr->FalseLabel(chunk_));
2415  }
2416 
2417  if (expected.Contains(ToBooleanStub::SMI)) {
2418  // Smis: 0 -> false, all other -> true.
2419  __ test(reg, Operand(reg));
2420  __ j(equal, instr->FalseLabel(chunk_));
2421  __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2422  } else if (expected.NeedsMap()) {
2423  // If we need a map later and have a Smi -> deopt.
2424  __ test(reg, Immediate(kSmiTagMask));
2425  DeoptimizeIf(zero, instr, "Smi");
2426  }
2427 
2428  Register map = no_reg; // Keep the compiler happy.
2429  if (expected.NeedsMap()) {
2430  map = ToRegister(instr->temp());
2431  DCHECK(!map.is(reg));
2432  __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2433 
2434  if (expected.CanBeUndetectable()) {
2435  // Undetectable -> false.
2436  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2437  1 << Map::kIsUndetectable);
2438  __ j(not_zero, instr->FalseLabel(chunk_));
2439  }
2440  }
2441 
2442  if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2443  // spec object -> true.
2444  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2445  __ j(above_equal, instr->TrueLabel(chunk_));
2446  }
2447 
2448  if (expected.Contains(ToBooleanStub::STRING)) {
2449  // String value -> false iff empty.
2450  Label not_string;
2451  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2452  __ j(above_equal, &not_string, Label::kNear);
2453  __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2454  __ j(not_zero, instr->TrueLabel(chunk_));
2455  __ jmp(instr->FalseLabel(chunk_));
2456  __ bind(&not_string);
2457  }
2458 
2459  if (expected.Contains(ToBooleanStub::SYMBOL)) {
2460  // Symbol value -> true.
2461  __ CmpInstanceType(map, SYMBOL_TYPE);
2462  __ j(equal, instr->TrueLabel(chunk_));
2463  }
2464 
2465  if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2466  // heap number -> false iff +0, -0, or NaN.
2467  Label not_heap_number;
2468  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2469  factory()->heap_number_map());
2470  __ j(not_equal, &not_heap_number, Label::kNear);
2471  __ fldz();
2472  __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2473  __ FCmp();
2474  __ j(zero, instr->FalseLabel(chunk_));
2475  __ jmp(instr->TrueLabel(chunk_));
2476  __ bind(&not_heap_number);
2477  }
2478 
2479  if (!expected.IsGeneric()) {
2480  // We've seen something for the first time -> deopt.
2481  // This can only happen if we are not generic already.
2482  DeoptimizeIf(no_condition, instr, "unexpected object");
2483  }
2484  }
2485  }
2486 }
2487 
2488 
2489 void LCodeGen::EmitGoto(int block) {
2490  if (!IsNextEmittedBlock(block)) {
2491  __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2492  }
2493 }
2494 
2495 
2496 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2497 }
2498 
2499 
2500 void LCodeGen::DoGoto(LGoto* instr) {
2501  EmitGoto(instr->block_id());
2502 }
2503 
2504 
2505 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2506  Condition cond = no_condition;
2507  switch (op) {
2508  case Token::EQ:
2509  case Token::EQ_STRICT:
2510  cond = equal;
2511  break;
2512  case Token::NE:
2513  case Token::NE_STRICT:
2514  cond = not_equal;
2515  break;
2516  case Token::LT:
2517  cond = is_unsigned ? below : less;
2518  break;
2519  case Token::GT:
2520  cond = is_unsigned ? above : greater;
2521  break;
2522  case Token::LTE:
2523  cond = is_unsigned ? below_equal : less_equal;
2524  break;
2525  case Token::GTE:
2526  cond = is_unsigned ? above_equal : greater_equal;
2527  break;
2528  case Token::IN:
2529  case Token::INSTANCEOF:
2530  default:
2531  UNREACHABLE();
2532  }
2533  return cond;
2534 }
2535 
2536 
2537 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2538  LOperand* left = instr->left();
2539  LOperand* right = instr->right();
2540  bool is_unsigned =
2541  instr->is_double() ||
2542  instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2543  instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2544  Condition cc = TokenToCondition(instr->op(), is_unsigned);
2545 
2546  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2547  // We can statically evaluate the comparison.
2548  double left_val = ToDouble(LConstantOperand::cast(left));
2549  double right_val = ToDouble(LConstantOperand::cast(right));
2550  int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2551  instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2552  EmitGoto(next_block);
2553  } else {
2554  if (instr->is_double()) {
2555  X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2556  __ FCmp();
2557  // Don't base result on EFLAGS when a NaN is involved. Instead
2558  // jump to the false block.
2559  __ j(parity_even, instr->FalseLabel(chunk_));
2560  } else {
2561  if (right->IsConstantOperand()) {
2562  __ cmp(ToOperand(left),
2563  ToImmediate(right, instr->hydrogen()->representation()));
2564  } else if (left->IsConstantOperand()) {
2565  __ cmp(ToOperand(right),
2566  ToImmediate(left, instr->hydrogen()->representation()));
2567  // We commuted the operands, so commute the condition.
2568  cc = CommuteCondition(cc);
2569  } else {
2570  __ cmp(ToRegister(left), ToOperand(right));
2571  }
2572  }
2573  EmitBranch(instr, cc);
2574  }
2575 }
2576 
2577 
2578 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2579  Register left = ToRegister(instr->left());
2580 
2581  if (instr->right()->IsConstantOperand()) {
2582  Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2583  __ CmpObject(left, right);
2584  } else {
2585  Operand right = ToOperand(instr->right());
2586  __ cmp(left, right);
2587  }
2588  EmitBranch(instr, equal);
2589 }
2590 
2591 
2592 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2593  if (instr->hydrogen()->representation().IsTagged()) {
2594  Register input_reg = ToRegister(instr->object());
2595  __ cmp(input_reg, factory()->the_hole_value());
2596  EmitBranch(instr, equal);
2597  return;
2598  }
2599 
2600  // Put the value on top of the x87 stack.
2601  X87Register src = ToX87Register(instr->object());
2602  X87LoadForUsage(src);
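  // The hole is encoded as a NaN, so a value that compares equal to itself
  // (parity flag clear) cannot be the hole.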
2603  __ fld(0);
2604  __ fld(0);
2605  __ FCmp();
2606  Label ok;
2607  __ j(parity_even, &ok, Label::kNear);
2608  __ fstp(0);
2609  EmitFalseBranch(instr, no_condition);
2610  __ bind(&ok);
2611 
2612 
2613  __ sub(esp, Immediate(kDoubleSize));
2614  __ fstp_d(MemOperand(esp, 0));
2615 
2616  __ add(esp, Immediate(kDoubleSize));
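  // The spilled double sits just below esp and nothing has clobbered it yet,
  // so its upper word can still be compared against the hole NaN pattern.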
2617  int offset = sizeof(kHoleNanUpper32);
2618  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2619  EmitBranch(instr, equal);
2620 }
2621 
2622 
2623 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2624  Representation rep = instr->hydrogen()->value()->representation();
2625  DCHECK(!rep.IsInteger32());
2626 
2627  if (rep.IsDouble()) {
2628  X87Register input = ToX87Register(instr->value());
2629  X87LoadForUsage(input);
2630  __ FXamMinusZero();
2631  EmitBranch(instr, equal);
2632  } else {
2633  Register value = ToRegister(instr->value());
2634  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2635  __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2636  __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2637  Immediate(0x1));
2638  EmitFalseBranch(instr, no_overflow);
2639  __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2640  Immediate(0x00000000));
2641  EmitBranch(instr, equal);
2642  }
2643 }
2644 
2645 
2646 Condition LCodeGen::EmitIsObject(Register input,
2647  Register temp1,
2648  Label* is_not_object,
2649  Label* is_object) {
2650  __ JumpIfSmi(input, is_not_object);
2651 
2652  __ cmp(input, isolate()->factory()->null_value());
2653  __ j(equal, is_object);
2654 
2655  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2656  // Undetectable objects behave like undefined.
2657  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2658  1 << Map::kIsUndetectable);
2659  __ j(not_zero, is_not_object);
2660 
2661  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2662  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2663  __ j(below, is_not_object);
2664  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2665  return below_equal;
2666 }
2667 
2668 
2669 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2670  Register reg = ToRegister(instr->value());
2671  Register temp = ToRegister(instr->temp());
2672 
2673  Condition true_cond = EmitIsObject(
2674  reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2675 
2676  EmitBranch(instr, true_cond);
2677 }
2678 
2679 
2680 Condition LCodeGen::EmitIsString(Register input,
2681  Register temp1,
2682  Label* is_not_string,
2683  SmiCheck check_needed = INLINE_SMI_CHECK) {
2684  if (check_needed == INLINE_SMI_CHECK) {
2685  __ JumpIfSmi(input, is_not_string);
2686  }
2687 
2688  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2689 
2690  return cond;
2691 }
2692 
2693 
2694 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2695  Register reg = ToRegister(instr->value());
2696  Register temp = ToRegister(instr->temp());
2697 
2698  SmiCheck check_needed =
2699  instr->hydrogen()->value()->type().IsHeapObject()
2700  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2701 
2702  Condition true_cond = EmitIsString(
2703  reg, temp, instr->FalseLabel(chunk_), check_needed);
2704 
2705  EmitBranch(instr, true_cond);
2706 }
2707 
2708 
2709 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2710  Operand input = ToOperand(instr->value());
2711 
2712  __ test(input, Immediate(kSmiTagMask));
2713  EmitBranch(instr, zero);
2714 }
2715 
2716 
2717 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2718  Register input = ToRegister(instr->value());
2719  Register temp = ToRegister(instr->temp());
2720 
2721  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2722  STATIC_ASSERT(kSmiTag == 0);
2723  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2724  }
2725  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2726  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2727  1 << Map::kIsUndetectable);
2728  EmitBranch(instr, not_zero);
2729 }
2730 
2731 
2732 Condition LCodeGen::ComputeCompareCondition(Token::Value op) {
2733  switch (op) {
2734  case Token::EQ_STRICT:
2735  case Token::EQ:
2736  return equal;
2737  case Token::LT:
2738  return less;
2739  case Token::GT:
2740  return greater;
2741  case Token::LTE:
2742  return less_equal;
2743  case Token::GTE:
2744  return greater_equal;
2745  default:
2746  UNREACHABLE();
2747  return no_condition;
2748  }
2749 }
2750 
2751 
2752 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2753  Token::Value op = instr->op();
2754 
2755  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2756  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2757 
2758  Condition condition = ComputeCompareCondition(op);
2759  __ test(eax, Operand(eax));
2760 
2761  EmitBranch(instr, condition);
2762 }
2763 
2764 
2765 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2766  InstanceType from = instr->from();
2767  InstanceType to = instr->to();
2768  if (from == FIRST_TYPE) return to;
2769  DCHECK(from == to || to == LAST_TYPE);
2770  return from;
2771 }
2772 
2773 
2774 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2775  InstanceType from = instr->from();
2776  InstanceType to = instr->to();
2777  if (from == to) return equal;
2778  if (to == LAST_TYPE) return above_equal;
2779  if (from == FIRST_TYPE) return below_equal;
2780  UNREACHABLE();
2781  return equal;
2782 }
2783 
2784 
2785 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2786  Register input = ToRegister(instr->value());
2787  Register temp = ToRegister(instr->temp());
2788 
2789  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2790  __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2791  }
2792 
2793  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2794  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2795 }
2796 
2797 
2798 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2799  Register input = ToRegister(instr->value());
2800  Register result = ToRegister(instr->result());
2801 
2802  __ AssertString(input);
2803 
2804  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2805  __ IndexFromHash(result, result);
2806 }
2807 
2808 
2809 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2810  LHasCachedArrayIndexAndBranch* instr) {
2811  Register input = ToRegister(instr->value());
2812 
2813  __ test(FieldOperand(input, String::kHashFieldOffset),
2814  Immediate(String::kContainsCachedArrayIndexMask));
2815  EmitBranch(instr, equal);
2816 }
2817 
2818 
2819 // Branches to a label or falls through with the answer in the z flag. Trashes
2820 // the temp registers, but not the input.
2821 void LCodeGen::EmitClassOfTest(Label* is_true,
2822  Label* is_false,
2823  Handle<String> class_name,
2824  Register input,
2825  Register temp,
2826  Register temp2) {
2827  DCHECK(!input.is(temp));
2828  DCHECK(!input.is(temp2));
2829  DCHECK(!temp.is(temp2));
2830  __ JumpIfSmi(input, is_false);
2831 
2832  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2833  // Assuming the following assertions, we can use the same compares to test
2834  // for both being a function type and being in the object type range.
2835  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2836  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2837  FIRST_SPEC_OBJECT_TYPE + 1);
2838  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2839  LAST_SPEC_OBJECT_TYPE - 1);
2840  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2841  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2842  __ j(below, is_false);
2843  __ j(equal, is_true);
2844  __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2845  __ j(equal, is_true);
2846  } else {
2847  // Faster code path to avoid two compares: subtract lower bound from the
2848  // actual type and do a signed compare with the width of the type range.
2849  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2850  __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2851  __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2852  __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2853  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2854  __ j(above, is_false);
2855  }
2856 
2857  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2858  // Check if the constructor in the map is a function.
2859  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2860  // Objects with a non-function constructor have class 'Object'.
2861  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2862  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2863  __ j(not_equal, is_true);
2864  } else {
2865  __ j(not_equal, is_false);
2866  }
2867 
2868  // temp now contains the constructor function. Grab the
2869  // instance class name from there.
2870  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2871  __ mov(temp, FieldOperand(temp,
2872  SharedFunctionInfo::kInstanceClassNameOffset));
2873  // The class name we are testing against is internalized since it's a literal.
2874  // The name in the constructor is internalized because of the way the context
2875  // is booted. This routine isn't expected to work for random API-created
2876  // classes and it doesn't have to because you can't access it with natives
2877  // syntax. Since both sides are internalized it is sufficient to use an
2878  // identity comparison.
2879  __ cmp(temp, class_name);
2880  // End with the answer in the z flag.
2881 }
2882 
2883 
2884 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2885  Register input = ToRegister(instr->value());
2886  Register temp = ToRegister(instr->temp());
2887  Register temp2 = ToRegister(instr->temp2());
2888 
2889  Handle<String> class_name = instr->hydrogen()->class_name();
2890 
2891  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2892  class_name, input, temp, temp2);
2893 
2894  EmitBranch(instr, equal);
2895 }
2896 
2897 
2898 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2899  Register reg = ToRegister(instr->value());
2900  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2901  EmitBranch(instr, equal);
2902 }
2903 
2904 
2905 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2906  // Object and function are in fixed registers defined by the stub.
2907  DCHECK(ToRegister(instr->context()).is(esi));
2908  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2909  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2910 
2911  Label true_value, done;
2912  __ test(eax, Operand(eax));
2913  __ j(zero, &true_value, Label::kNear);
2914  __ mov(ToRegister(instr->result()), factory()->false_value());
2915  __ jmp(&done, Label::kNear);
2916  __ bind(&true_value);
2917  __ mov(ToRegister(instr->result()), factory()->true_value());
2918  __ bind(&done);
2919 }
2920 
2921 
2922 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2923  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2924  public:
2925  DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2926  LInstanceOfKnownGlobal* instr,
2927  const X87Stack& x87_stack)
2928  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2929  virtual void Generate() OVERRIDE {
2930  codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2931  }
2932  virtual LInstruction* instr() OVERRIDE { return instr_; }
2933  Label* map_check() { return &map_check_; }
2934  private:
2935  LInstanceOfKnownGlobal* instr_;
2936  Label map_check_;
2937  };
2938 
2939  DeferredInstanceOfKnownGlobal* deferred;
2940  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2941 
2942  Label done, false_result;
2943  Register object = ToRegister(instr->value());
2944  Register temp = ToRegister(instr->temp());
2945 
2946  // A Smi is not an instance of anything.
2947  __ JumpIfSmi(object, &false_result, Label::kNear);
2948 
2949  // This is the inlined call site instanceof cache. The two occurrences of the
2950  // hole value will be patched to the last map/result pair generated by the
2951  // instanceof stub.
2952  Label cache_miss;
2953  Register map = ToRegister(instr->temp());
2954  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2955  __ bind(deferred->map_check()); // Label for calculating code patching.
2956  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2957  __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
2958  __ j(not_equal, &cache_miss, Label::kNear);
2959  __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
2960  __ jmp(&done, Label::kNear);
2961 
2962  // The inlined call site cache did not match. Check for null and string
2963  // before calling the deferred code.
2964  __ bind(&cache_miss);
2965  // Null is not an instance of anything.
2966  __ cmp(object, factory()->null_value());
2967  __ j(equal, &false_result, Label::kNear);
2968 
2969  // String values are not instances of anything.
2970  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2971  __ j(is_string, &false_result, Label::kNear);
2972 
2973  // Go to the deferred code.
2974  __ jmp(deferred->entry());
2975 
2976  __ bind(&false_result);
2977  __ mov(ToRegister(instr->result()), factory()->false_value());
2978 
2979  // Here result has either true or false. Deferred code also produces true or
2980  // false object.
2981  __ bind(deferred->exit());
2982  __ bind(&done);
2983 }
2984 
2985 
2986 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2987  Label* map_check) {
2988  PushSafepointRegistersScope scope(this);
2989 
2990  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2991  flags = static_cast<InstanceofStub::Flags>(
2992  flags | InstanceofStub::kArgsInRegisters);
2993  flags = static_cast<InstanceofStub::Flags>(
2994  flags | InstanceofStub::kCallSiteInlineCheck);
2995  flags = static_cast<InstanceofStub::Flags>(
2996  flags | InstanceofStub::kReturnTrueFalseObject);
2997  InstanceofStub stub(isolate(), flags);
2998 
2999  // Get the temp register reserved by the instruction. This needs to be a
3000  // register which is pushed last by PushSafepointRegisters as top of the
3001  // stack is used to pass the offset to the location of the map check to
3002  // the stub.
3003  Register temp = ToRegister(instr->temp());
3004  DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
3005  __ LoadHeapObject(InstanceofStub::right(), instr->function());
3006  static const int kAdditionalDelta = 13;
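  // The stub receives this delta (via the safepoint slot written below) and
  // uses it to locate the map-check site relative to the call's return
  // address; kAdditionalDelta accounts for the code emitted after this point.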
3007  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
3008  __ mov(temp, Immediate(delta));
3009  __ StoreToSafepointRegisterSlot(temp, temp);
3010  CallCodeGeneric(stub.GetCode(),
3011  RelocInfo::CODE_TARGET,
3012  instr,
3013  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3014  // Get the deoptimization index of the LLazyBailout-environment that
3015  // corresponds to this instruction.
3016  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3017  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3018 
3019  // Put the result value into the eax slot and restore all registers.
3020  __ StoreToSafepointRegisterSlot(eax, eax);
3021 }
3022 
3023 
3024 void LCodeGen::DoCmpT(LCmpT* instr) {
3025  Token::Value op = instr->op();
3026 
3027  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
3028  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3029 
3030  Condition condition = ComputeCompareCondition(op);
3031  Label true_value, done;
3032  __ test(eax, Operand(eax));
3033  __ j(condition, &true_value, Label::kNear);
3034  __ mov(ToRegister(instr->result()), factory()->false_value());
3035  __ jmp(&done, Label::kNear);
3036  __ bind(&true_value);
3037  __ mov(ToRegister(instr->result()), factory()->true_value());
3038  __ bind(&done);
3039 }
3040 
3041 
3042 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
3043  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
3044 
3045  if (instr->has_constant_parameter_count()) {
3046  int parameter_count = ToInteger32(instr->constant_parameter_count());
3047  if (dynamic_frame_alignment && FLAG_debug_code) {
3048  __ cmp(Operand(esp,
3049  (parameter_count + extra_value_count) * kPointerSize),
3050  Immediate(kAlignmentZapValue));
3051  __ Assert(equal, kExpectedAlignmentMarker);
3052  }
3053  __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
3054  } else {
3055  Register reg = ToRegister(instr->parameter_count());
3056  // The argument count parameter is a smi.
3057  __ SmiUntag(reg);
3058  Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
3059  if (dynamic_frame_alignment && FLAG_debug_code) {
3060  DCHECK(extra_value_count == 2);
3061  __ cmp(Operand(esp, reg, times_pointer_size,
3062  extra_value_count * kPointerSize),
3063  Immediate(kAlignmentZapValue));
3064  __ Assert(equal, kExpectedAlignmentMarker);
3065  }
3066 
3067  // emit code to restore stack based on instr->parameter_count()
3068  __ pop(return_addr_reg); // save return address
3069  if (dynamic_frame_alignment) {
3070  __ inc(reg); // 1 more for alignment
3071  }
3072  __ shl(reg, kPointerSizeLog2);
3073  __ add(esp, reg);
3074  __ jmp(return_addr_reg);
3075  }
3076 }
3077 
3078 
3079 void LCodeGen::DoReturn(LReturn* instr) {
3080  if (FLAG_trace && info()->IsOptimizing()) {
3081  // Preserve the return value on the stack and rely on the runtime call
3082  // to return the value in the same register. We're leaving the code
3083  // managed by the register allocator and tearing down the frame, it's
3084  // safe to write to the context register.
3085  __ push(eax);
3086  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3087  __ CallRuntime(Runtime::kTraceExit, 1);
3088  }
3089  if (dynamic_frame_alignment_) {
3090  // Fetch the state of the dynamic frame alignment.
3091  __ mov(edx, Operand(ebp,
3092  JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3093  }
3094  int no_frame_start = -1;
3095  if (NeedsEagerFrame()) {
3096  __ mov(esp, ebp);
3097  __ pop(ebp);
3098  no_frame_start = masm_->pc_offset();
3099  }
3100  if (dynamic_frame_alignment_) {
3101  Label no_padding;
3102  __ cmp(edx, Immediate(kNoAlignmentPadding));
3103  __ j(equal, &no_padding, Label::kNear);
3104 
3105  EmitReturn(instr, true);
3106  __ bind(&no_padding);
3107  }
3108 
3109  EmitReturn(instr, false);
3110  if (no_frame_start != -1) {
3111  info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3112  }
3113 }
3114 
3115 
3116 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3117  Register result = ToRegister(instr->result());
3118  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
3119  if (instr->hydrogen()->RequiresHoleCheck()) {
3120  __ cmp(result, factory()->the_hole_value());
3121  DeoptimizeIf(equal, instr, "hole");
3122  }
3123 }
3124 
3125 
3126 template <class T>
3127 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3128  DCHECK(FLAG_vector_ics);
3129  Register vector = ToRegister(instr->temp_vector());
3130  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
3131  __ mov(vector, instr->hydrogen()->feedback_vector());
3132  // No need to allocate this register.
3133  DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
3134  __ mov(VectorLoadICDescriptor::SlotRegister(),
3135  Immediate(Smi::FromInt(instr->hydrogen()->slot())));
3136 }
3137 
3138 
3139 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3140  DCHECK(ToRegister(instr->context()).is(esi));
3141  DCHECK(ToRegister(instr->global_object())
3142  .is(LoadDescriptor::ReceiverRegister()));
3143  DCHECK(ToRegister(instr->result()).is(eax));
3144 
3145  __ mov(LoadDescriptor::NameRegister(), instr->name());
3146  if (FLAG_vector_ics) {
3147  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3148  }
3149  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3150  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
3151  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3152 }
3153 
3154 
3155 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3156  Register value = ToRegister(instr->value());
3157  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
3158 
3159  // If the cell we are storing to contains the hole it could have
3160  // been deleted from the property dictionary. In that case, we need
3161  // to update the property details in the property dictionary to mark
3162  // it as no longer deleted. We deoptimize in that case.
3163  if (instr->hydrogen()->RequiresHoleCheck()) {
3164  __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
3165  DeoptimizeIf(equal, instr, "hole");
3166  }
3167 
3168  // Store the value.
3169  __ mov(Operand::ForCell(cell_handle), value);
3170  // Cells are always rescanned, so no write barrier here.
3171 }
3172 
3173 
3174 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3175  Register context = ToRegister(instr->context());
3176  Register result = ToRegister(instr->result());
3177  __ mov(result, ContextOperand(context, instr->slot_index()));
3178 
3179  if (instr->hydrogen()->RequiresHoleCheck()) {
3180  __ cmp(result, factory()->the_hole_value());
3181  if (instr->hydrogen()->DeoptimizesOnHole()) {
3182  DeoptimizeIf(equal, instr, "hole");
3183  } else {
3184  Label is_not_hole;
3185  __ j(not_equal, &is_not_hole, Label::kNear);
3186  __ mov(result, factory()->undefined_value());
3187  __ bind(&is_not_hole);
3188  }
3189  }
3190 }
3191 
3192 
3193 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3194  Register context = ToRegister(instr->context());
3195  Register value = ToRegister(instr->value());
3196 
3197  Label skip_assignment;
3198 
3199  Operand target = ContextOperand(context, instr->slot_index());
3200  if (instr->hydrogen()->RequiresHoleCheck()) {
3201  __ cmp(target, factory()->the_hole_value());
3202  if (instr->hydrogen()->DeoptimizesOnHole()) {
3203  DeoptimizeIf(equal, instr, "hole");
3204  } else {
3205  __ j(not_equal, &skip_assignment, Label::kNear);
3206  }
3207  }
3208 
3209  __ mov(target, value);
3210  if (instr->hydrogen()->NeedsWriteBarrier()) {
3211  SmiCheck check_needed =
3212  instr->hydrogen()->value()->type().IsHeapObject()
3213  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3214  Register temp = ToRegister(instr->temp());
3215  int offset = Context::SlotOffset(instr->slot_index());
3216  __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
3217  EMIT_REMEMBERED_SET, check_needed);
3218  }
3219 
3220  __ bind(&skip_assignment);
3221 }
3222 
3223 
3224 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3225  HObjectAccess access = instr->hydrogen()->access();
3226  int offset = access.offset();
3227 
3228  if (access.IsExternalMemory()) {
3229  Register result = ToRegister(instr->result());
3230  MemOperand operand = instr->object()->IsConstantOperand()
3231  ? MemOperand::StaticVariable(ToExternalReference(
3232  LConstantOperand::cast(instr->object())))
3233  : MemOperand(ToRegister(instr->object()), offset);
3234  __ Load(result, operand, access.representation());
3235  return;
3236  }
3237 
3238  Register object = ToRegister(instr->object());
3239  if (instr->hydrogen()->representation().IsDouble()) {
3240  X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3241  return;
3242  }
3243 
3244  Register result = ToRegister(instr->result());
3245  if (!access.IsInobject()) {
3246  __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3247  object = result;
3248  }
3249  __ Load(result, FieldOperand(object, offset), access.representation());
3250 }
3251 
3252 
3253 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3254  DCHECK(!operand->IsDoubleRegister());
3255  if (operand->IsConstantOperand()) {
3256  Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3257  AllowDeferredHandleDereference smi_check;
3258  if (object->IsSmi()) {
3259  __ Push(Handle<Smi>::cast(object));
3260  } else {
3261  __ PushHeapObject(Handle<HeapObject>::cast(object));
3262  }
3263  } else if (operand->IsRegister()) {
3264  __ push(ToRegister(operand));
3265  } else {
3266  __ push(ToOperand(operand));
3267  }
3268 }
3269 
3270 
3271 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3272  DCHECK(ToRegister(instr->context()).is(esi));
3273  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3274  DCHECK(ToRegister(instr->result()).is(eax));
3275 
3276  __ mov(LoadDescriptor::NameRegister(), instr->name());
3277  if (FLAG_vector_ics) {
3278  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3279  }
3280  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3281  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3282 }
3283 
3284 
3285 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3286  Register function = ToRegister(instr->function());
3287  Register temp = ToRegister(instr->temp());
3288  Register result = ToRegister(instr->result());
3289 
3290  // Get the prototype or initial map from the function.
3291  __ mov(result,
3292  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3293 
3294  // Check that the function has a prototype or an initial map.
3295  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3296  DeoptimizeIf(equal, instr, "hole");
3297 
3298  // If the function does not have an initial map, we're done.
3299  Label done;
3300  __ CmpObjectType(result, MAP_TYPE, temp);
3301  __ j(not_equal, &done, Label::kNear);
3302 
3303  // Get the prototype from the initial map.
3304  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3305 
3306  // All done.
3307  __ bind(&done);
3308 }
3309 
3310 
3311 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3312  Register result = ToRegister(instr->result());
3313  __ LoadRoot(result, instr->index());
3314 }
3315 
3316 
3317 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3318  Register arguments = ToRegister(instr->arguments());
3319  Register result = ToRegister(instr->result());
3320  if (instr->length()->IsConstantOperand() &&
3321  instr->index()->IsConstantOperand()) {
3322  int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3323  int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3324  int index = (const_length - const_index) + 1;
3325  __ mov(result, Operand(arguments, index * kPointerSize));
3326  } else {
3327  Register length = ToRegister(instr->length());
3328  Operand index = ToOperand(instr->index());
3329  // There are two words between the frame pointer and the last argument.
3330  // Subtracting index from length accounts for one of them; add one more.
3331  __ sub(length, index);
3332  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3333  }
3334 }
3335 
3336 
3337 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3338  ElementsKind elements_kind = instr->elements_kind();
3339  LOperand* key = instr->key();
3340  if (!key->IsConstantOperand() &&
3341  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3342  elements_kind)) {
3343  __ SmiUntag(ToRegister(key));
3344  }
3345  Operand operand(BuildFastArrayOperand(
3346  instr->elements(),
3347  key,
3348  instr->hydrogen()->key()->representation(),
3349  elements_kind,
3350  instr->base_offset()));
3351  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3352  elements_kind == FLOAT32_ELEMENTS) {
3353  X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3354  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3355  elements_kind == FLOAT64_ELEMENTS) {
3356  X87Mov(ToX87Register(instr->result()), operand);
3357  } else {
3358  Register result(ToRegister(instr->result()));
3359  switch (elements_kind) {
3360  case EXTERNAL_INT8_ELEMENTS:
3361  case INT8_ELEMENTS:
3362  __ movsx_b(result, operand);
3363  break;
3364  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3365  case EXTERNAL_UINT8_ELEMENTS:
3366  case UINT8_ELEMENTS:
3367  case UINT8_CLAMPED_ELEMENTS:
3368  __ movzx_b(result, operand);
3369  break;
3370  case EXTERNAL_INT16_ELEMENTS:
3371  case INT16_ELEMENTS:
3372  __ movsx_w(result, operand);
3373  break;
3374  case EXTERNAL_UINT16_ELEMENTS:
3375  case UINT16_ELEMENTS:
3376  __ movzx_w(result, operand);
3377  break;
3378  case EXTERNAL_INT32_ELEMENTS:
3379  case INT32_ELEMENTS:
3380  __ mov(result, operand);
3381  break;
3382  case EXTERNAL_UINT32_ELEMENTS:
3383  case UINT32_ELEMENTS:
3384  __ mov(result, operand);
3385  if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3386  __ test(result, Operand(result));
3387  DeoptimizeIf(negative, instr, "negative value");
3388  }
3389  break;
3390  case EXTERNAL_FLOAT32_ELEMENTS:
3391  case EXTERNAL_FLOAT64_ELEMENTS:
3392  case FLOAT32_ELEMENTS:
3393  case FLOAT64_ELEMENTS:
3394  case FAST_SMI_ELEMENTS:
3395  case FAST_ELEMENTS:
3396  case FAST_DOUBLE_ELEMENTS:
3397  case FAST_HOLEY_SMI_ELEMENTS:
3398  case FAST_HOLEY_ELEMENTS:
3399  case FAST_HOLEY_DOUBLE_ELEMENTS:
3400  case DICTIONARY_ELEMENTS:
3401  case SLOPPY_ARGUMENTS_ELEMENTS:
3402  UNREACHABLE();
3403  break;
3404  }
3405  }
3406 }
3407 
3408 
3409 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3410  if (instr->hydrogen()->RequiresHoleCheck()) {
3411  Operand hole_check_operand = BuildFastArrayOperand(
3412  instr->elements(), instr->key(),
3413  instr->hydrogen()->key()->representation(),
3414  FAST_DOUBLE_ELEMENTS,
3415  instr->base_offset() + sizeof(kHoleNanLower32));
3416  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3417  DeoptimizeIf(equal, instr, "hole");
3418  }
3419 
3420  Operand double_load_operand = BuildFastArrayOperand(
3421  instr->elements(),
3422  instr->key(),
3423  instr->hydrogen()->key()->representation(),
3424  FAST_DOUBLE_ELEMENTS,
3425  instr->base_offset());
3426  X87Mov(ToX87Register(instr->result()), double_load_operand);
3427 }
3428 
3429 
3430 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3431  Register result = ToRegister(instr->result());
3432 
3433  // Load the result.
3434  __ mov(result,
3435  BuildFastArrayOperand(instr->elements(), instr->key(),
3436  instr->hydrogen()->key()->representation(),
3437  FAST_ELEMENTS, instr->base_offset()));
3438 
3439  // Check for the hole value.
3440  if (instr->hydrogen()->RequiresHoleCheck()) {
3441  if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3442  __ test(result, Immediate(kSmiTagMask));
3443  DeoptimizeIf(not_equal, instr, "not a Smi");
3444  } else {
3445  __ cmp(result, factory()->the_hole_value());
3446  DeoptimizeIf(equal, instr, "hole");
3447  }
3448  }
3449 }
3450 
3451 
3452 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3453  if (instr->is_typed_elements()) {
3454  DoLoadKeyedExternalArray(instr);
3455  } else if (instr->hydrogen()->representation().IsDouble()) {
3456  DoLoadKeyedFixedDoubleArray(instr);
3457  } else {
3458  DoLoadKeyedFixedArray(instr);
3459  }
3460 }
3461 
3462 
3463 Operand LCodeGen::BuildFastArrayOperand(
3464  LOperand* elements_pointer,
3465  LOperand* key,
3466  Representation key_representation,
3467  ElementsKind elements_kind,
3468  uint32_t base_offset) {
3469  Register elements_pointer_reg = ToRegister(elements_pointer);
3470  int element_shift_size = ElementsKindToShiftSize(elements_kind);
3471  int shift_size = element_shift_size;
3472  if (key->IsConstantOperand()) {
3473  int constant_value = ToInteger32(LConstantOperand::cast(key));
3474  if (constant_value & 0xF0000000) {
3475  Abort(kArrayIndexConstantValueTooBig);
3476  }
3477  return Operand(elements_pointer_reg,
3478  ((constant_value) << shift_size)
3479  + base_offset);
3480  } else {
3481  // Take the tag bit into account while computing the shift size.
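    // For example, a FAST_ELEMENTS access shifts by 2, but a smi key already
    // carries a factor of two (value << 1), so times_2 scaling suffices.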
3482  if (key_representation.IsSmi() && (shift_size >= 1)) {
3483  shift_size -= kSmiTagSize;
3484  }
3485  ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3486  return Operand(elements_pointer_reg,
3487  ToRegister(key),
3488  scale_factor,
3489  base_offset);
3490  }
3491 }
3492 
3493 
3494 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3495  DCHECK(ToRegister(instr->context()).is(esi));
3496  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3497  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3498 
3499  if (FLAG_vector_ics) {
3500  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3501  }
3502 
3503  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3504  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3505 }
3506 
3507 
3508 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3509  Register result = ToRegister(instr->result());
3510 
3511  if (instr->hydrogen()->from_inlined()) {
3512  __ lea(result, Operand(esp, -2 * kPointerSize));
3513  } else {
3514  // Check for arguments adapter frame.
3515  Label done, adapted;
3516  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3517  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3518  __ cmp(Operand(result),
3519  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3520  __ j(equal, &adapted, Label::kNear);
3521 
3522  // No arguments adaptor frame.
3523  __ mov(result, Operand(ebp));
3524  __ jmp(&done, Label::kNear);
3525 
3526  // Arguments adaptor frame present.
3527  __ bind(&adapted);
3528  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3529 
3530  // Result is the frame pointer of this frame if not adapted, or of the
3531  // real frame below the adaptor frame if adapted.
3532  __ bind(&done);
3533  }
3534 }
3535 
3536 
3537 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3538  Operand elem = ToOperand(instr->elements());
3539  Register result = ToRegister(instr->result());
3540 
3541  Label done;
3542 
3543  // If no arguments adaptor frame the number of arguments is fixed.
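  // DoArgumentsElements yields ebp itself when no adaptor frame exists, so
  // comparing the elements operand against ebp distinguishes the two cases.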
3544  __ cmp(ebp, elem);
3545  __ mov(result, Immediate(scope()->num_parameters()));
3546  __ j(equal, &done, Label::kNear);
3547 
3548  // Arguments adaptor frame present. Get argument length from there.
3549  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3550  __ mov(result, Operand(result,
3551  ArgumentsAdaptorFrameConstants::kLengthOffset));
3552  __ SmiUntag(result);
3553 
3554  // Argument length is in result register.
3555  __ bind(&done);
3556 }
3557 
3558 
3559 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3560  Register receiver = ToRegister(instr->receiver());
3561  Register function = ToRegister(instr->function());
3562 
3563  // If the receiver is null or undefined, we have to pass the global
3564  // object as a receiver to normal functions. Values have to be
3565  // passed unchanged to builtins and strict-mode functions.
3566  Label receiver_ok, global_object;
3567  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3568  Register scratch = ToRegister(instr->temp());
3569 
3570  if (!instr->hydrogen()->known_function()) {
3571  // Do not transform the receiver to object for strict mode
3572  // functions.
3573  __ mov(scratch,
3574  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3575  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3576  1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3577  __ j(not_equal, &receiver_ok, dist);
3578 
3579  // Do not transform the receiver to object for builtins.
3580  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3581  1 << SharedFunctionInfo::kNativeBitWithinByte);
3582  __ j(not_equal, &receiver_ok, dist);
3583  }
3584 
3585  // Normal function. Replace undefined or null with global receiver.
3586  __ cmp(receiver, factory()->null_value());
3587  __ j(equal, &global_object, Label::kNear);
3588  __ cmp(receiver, factory()->undefined_value());
3589  __ j(equal, &global_object, Label::kNear);
3590 
3591  // The receiver should be a JS object.
3592  __ test(receiver, Immediate(kSmiTagMask));
3593  DeoptimizeIf(equal, instr, "Smi");
3594  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3595  DeoptimizeIf(below, instr, "not a JavaScript object");
3596 
3597  __ jmp(&receiver_ok, Label::kNear);
3598  __ bind(&global_object);
3599  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
3600  const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
3601  __ mov(receiver, Operand(receiver, global_offset));
3602  const int proxy_offset = GlobalObject::kGlobalProxyOffset;
3603  __ mov(receiver, FieldOperand(receiver, proxy_offset));
3604  __ bind(&receiver_ok);
3605 }
3606 
3607 
3608 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3609  Register receiver = ToRegister(instr->receiver());
3610  Register function = ToRegister(instr->function());
3611  Register length = ToRegister(instr->length());
3612  Register elements = ToRegister(instr->elements());
3613  DCHECK(receiver.is(eax)); // Used for parameter count.
3614  DCHECK(function.is(edi)); // Required by InvokeFunction.
3615  DCHECK(ToRegister(instr->result()).is(eax));
3616 
3617  // Copy the arguments to this function possibly from the
3618  // adaptor frame below it.
3619  const uint32_t kArgumentsLimit = 1 * KB;
3620  __ cmp(length, kArgumentsLimit);
3621  DeoptimizeIf(above, instr, "too many arguments");
3622 
3623  __ push(receiver);
3624  __ mov(receiver, length);
3625 
3626  // Loop through the arguments pushing them onto the execution
3627  // stack.
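  // The loop walks from elements[length] down to elements[1], so the
  // arguments end up on the stack in their original order.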
3628  Label invoke, loop;
3629  // length is a small non-negative integer, due to the test above.
3630  __ test(length, Operand(length));
3631  __ j(zero, &invoke, Label::kNear);
3632  __ bind(&loop);
3633  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3634  __ dec(length);
3635  __ j(not_zero, &loop);
3636 
3637  // Invoke the function.
3638  __ bind(&invoke);
3639  DCHECK(instr->HasPointerMap());
3640  LPointerMap* pointers = instr->pointer_map();
3641  SafepointGenerator safepoint_generator(
3642  this, pointers, Safepoint::kLazyDeopt);
3643  ParameterCount actual(eax);
3644  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3645 }
3646 
3647 
3648 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3649  __ int3();
3650 }
3651 
3652 
3653 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3654  LOperand* argument = instr->value();
3655  EmitPushTaggedOperand(argument);
3656 }
3657 
3658 
3659 void LCodeGen::DoDrop(LDrop* instr) {
3660  __ Drop(instr->count());
3661 }
3662 
3663 
3664 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3665  Register result = ToRegister(instr->result());
3666  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3667 }
3668 
3669 
3670 void LCodeGen::DoContext(LContext* instr) {
3671  Register result = ToRegister(instr->result());
3672  if (info()->IsOptimizing()) {
3673  __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3674  } else {
3675  // If there is no frame, the context must be in esi.
3676  DCHECK(result.is(esi));
3677  }
3678 }
3679 
3680 
3681 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3682  DCHECK(ToRegister(instr->context()).is(esi));
3683  __ push(esi); // The context is the first argument.
3684  __ push(Immediate(instr->hydrogen()->pairs()));
3685  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3686  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3687 }
3688 
3689 
3690 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3691  int formal_parameter_count,
3692  int arity,
3693  LInstruction* instr,
3694  EDIState edi_state) {
3695  bool dont_adapt_arguments =
3696  formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3697  bool can_invoke_directly =
3698  dont_adapt_arguments || formal_parameter_count == arity;
3699 
3700  if (can_invoke_directly) {
3701  if (edi_state == EDI_UNINITIALIZED) {
3702  __ LoadHeapObject(edi, function);
3703  }
3704 
3705  // Change context.
3706  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3707 
3708  // Set eax to the arguments count if adaptation is not needed. Assumes eax
3709  // is available to write to at this point.
3710  if (dont_adapt_arguments) {
3711  __ mov(eax, arity);
3712  }
3713 
3714  // Invoke function directly.
3715  if (function.is_identical_to(info()->closure())) {
3716  __ CallSelf();
3717  } else {
3718  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3719  }
3720  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3721  } else {
3722  // We need to adapt arguments.
3723  LPointerMap* pointers = instr->pointer_map();
3724  SafepointGenerator generator(
3725  this, pointers, Safepoint::kLazyDeopt);
3726  ParameterCount count(arity);
3727  ParameterCount expected(formal_parameter_count);
3728  __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3729  }
3730 }
3731 
3732 
3733 void LCodeGen::DoTailCallThroughMegamorphicCache(
3734  LTailCallThroughMegamorphicCache* instr) {
3735  Register receiver = ToRegister(instr->receiver());
3736  Register name = ToRegister(instr->name());
3737  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3738  DCHECK(name.is(LoadDescriptor::NameRegister()));
3739 
3740  Register scratch = ebx;
3741  Register extra = eax;
3742  DCHECK(!scratch.is(receiver) && !scratch.is(name));
3743  DCHECK(!extra.is(receiver) && !extra.is(name));
3744 
3745  // Important for the tail-call.
3746  bool must_teardown_frame = NeedsEagerFrame();
3747 
3748  // The probe will tail call to a handler if found.
3749  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3750  must_teardown_frame, receiver, name,
3751  scratch, extra);
3752 
3753  // Tail call to miss if we ended up here.
3754  if (must_teardown_frame) __ leave();
3755  LoadIC::GenerateMiss(masm());
3756 }
3757 
3758 
3759 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3760  DCHECK(ToRegister(instr->result()).is(eax));
3761 
3762  LPointerMap* pointers = instr->pointer_map();
3763  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3764 
3765  if (instr->target()->IsConstantOperand()) {
3766  LConstantOperand* target = LConstantOperand::cast(instr->target());
3767  Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3768  generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3769  __ call(code, RelocInfo::CODE_TARGET);
3770  } else {
3771  DCHECK(instr->target()->IsRegister());
3772  Register target = ToRegister(instr->target());
3773  generator.BeforeCall(__ CallSize(Operand(target)));
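  // The target register holds a tagged Code object; adding the header size
  // minus the heap-object tag converts it into the raw entry address.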
3774  __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3775  __ call(target);
3776  }
3777  generator.AfterCall();
3778 }
3779 
3780 
3781 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3782  DCHECK(ToRegister(instr->function()).is(edi));
3783  DCHECK(ToRegister(instr->result()).is(eax));
3784 
3785  if (instr->hydrogen()->pass_argument_count()) {
3786  __ mov(eax, instr->arity());
3787  }
3788 
3789  // Change context.
3790  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3791 
3792  bool is_self_call = false;
3793  if (instr->hydrogen()->function()->IsConstant()) {
3794  HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
3795  Handle<JSFunction> jsfun =
3796  Handle<JSFunction>::cast(fun_const->handle(isolate()));
3797  is_self_call = jsfun.is_identical_to(info()->closure());
3798  }
3799 
3800  if (is_self_call) {
3801  __ CallSelf();
3802  } else {
3803  __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3804  }
3805 
3806  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3807 }
3808 
3809 
3810 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3811  Register input_reg = ToRegister(instr->value());
3812  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3813  factory()->heap_number_map());
3814  DeoptimizeIf(not_equal, instr, "not a heap number");
3815 
3816  Label slow, allocated, done;
3817  Register tmp = input_reg.is(eax) ? ecx : eax;
3818  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3819 
3820  // Preserve the value of all registers.
3821  PushSafepointRegistersScope scope(this);
3822 
3823  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3824  // Check the sign of the argument. If the argument is positive, just
3825  // return it. We do not need to patch the stack since |input| and
3826  // |result| are the same register and |input| will be restored
3827  // unchanged by popping safepoint registers.
3828  __ test(tmp, Immediate(HeapNumber::kSignMask));
3829  __ j(zero, &done, Label::kNear);
3830 
3831  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3832  __ jmp(&allocated, Label::kNear);
3833 
3834  // Slow case: Call the runtime system to do the number allocation.
3835  __ bind(&slow);
3836  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3837  instr, instr->context());
3838  // Set the pointer to the new heap number in tmp.
3839  if (!tmp.is(eax)) __ mov(tmp, eax);
3840  // Restore input_reg after call to runtime.
3841  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3842 
3843  __ bind(&allocated);
3844  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3845  __ and_(tmp2, ~HeapNumber::kSignMask);
3846  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3847  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3848  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3849  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3850 
3851  __ bind(&done);
3852 }
3853 
3854 
3855 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3856  Register input_reg = ToRegister(instr->value());
3857  __ test(input_reg, Operand(input_reg));
3858  Label is_positive;
3859  __ j(not_sign, &is_positive, Label::kNear);
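  // neg leaves kMinInt unchanged with the sign flag set; that is the one
  // int32 whose absolute value is unrepresentable, hence the deopt below.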
3860  __ neg(input_reg); // Sets flags.
3861  DeoptimizeIf(negative, instr, "overflow");
3862  __ bind(&is_positive);
3863 }
3864 
3865 
3866 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3867  // Class for deferred case.
3868  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3869  public:
3870  DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3871  LMathAbs* instr,
3872  const X87Stack& x87_stack)
3873  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3874  virtual void Generate() OVERRIDE {
3875  codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3876  }
3877  virtual LInstruction* instr() OVERRIDE { return instr_; }
3878  private:
3879  LMathAbs* instr_;
3880  };
3881 
3882  DCHECK(instr->value()->Equals(instr->result()));
3883  Representation r = instr->hydrogen()->value()->representation();
3884 
3885  if (r.IsDouble()) {
3886  X87Register value = ToX87Register(instr->value());
3887  X87Fxch(value);
3888  __ fabs();
3889  } else if (r.IsSmiOrInteger32()) {
3890  EmitIntegerMathAbs(instr);
3891  } else { // Tagged case.
3892  DeferredMathAbsTaggedHeapNumber* deferred =
3893  new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3894  Register input_reg = ToRegister(instr->value());
3895  // Smi check.
3896  __ JumpIfNotSmi(input_reg, deferred->entry());
3897  EmitIntegerMathAbs(instr);
3898  __ bind(deferred->exit());
3899  }
3900 }
3901 
3902 
3903 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3904  Register output_reg = ToRegister(instr->result());
3905  X87Register input_reg = ToX87Register(instr->value());
3906  X87Fxch(input_reg);
3907 
3908  Label not_minus_zero, done;
3909  // Deoptimize on unordered.
3910  __ fldz();
3911  __ fld(1);
3912  __ FCmp();
3913  DeoptimizeIf(parity_even, instr, "NaN");
3914  __ j(below, &not_minus_zero, Label::kNear);
3915 
3916  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3917  // Check for negative zero.
3918  __ j(not_equal, &not_minus_zero, Label::kNear);
3919  // +- 0.0.
3920  __ fld(0);
3921  __ FXamSign();
3922  DeoptimizeIf(not_zero, instr, "minus zero");
3923  __ Move(output_reg, Immediate(0));
3924  __ jmp(&done, Label::kFar);
3925  }
3926 
3927  // Positive input.
3928  // rc=01B, round down.
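  // Bits 10-11 of the x87 control word select rounding: 00B nearest-even,
  // 01B toward -infinity, 10B toward +infinity, 11B toward zero.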
3929  __ bind(&not_minus_zero);
3930  __ fnclex();
3931  __ X87SetRC(0x0400);
3932  __ sub(esp, Immediate(kPointerSize));
3933  __ fist_s(Operand(esp, 0));
3934  __ pop(output_reg);
3935  __ X87CheckIA();
3936  DeoptimizeIf(equal, instr, "overflow");
3937  __ fnclex();
3938  __ X87SetRC(0x0000);
3939  __ bind(&done);
3940 }
3941 
3942 
3943 void LCodeGen::DoMathRound(LMathRound* instr) {
3944  X87Register input_reg = ToX87Register(instr->value());
3945  Register result = ToRegister(instr->result());
3946  X87Fxch(input_reg);
3947  Label below_one_half, below_minus_one_half, done;
3948 
3949  ExternalReference one_half = ExternalReference::address_of_one_half();
3950  ExternalReference minus_one_half =
3951  ExternalReference::address_of_minus_one_half();
3952 
3953  __ fld_d(Operand::StaticVariable(one_half));
3954  __ fld(1);
3955  __ FCmp();
3956  __ j(carry, &below_one_half);
3957 
3958  // Rounding toward zero: since 0.5 <= x, trunc(x + 0.5) equals floor(x + 0.5).
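  // Worked example: x = 2.5 gives 2.5 + 0.5 = 3.0, truncated to 3, which
  // matches Math.round's round-half-up semantics.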
3959  __ fld(0);
3960  __ fadd_d(Operand::StaticVariable(one_half));
3961  // rc=11B, round toward zero.
3962  __ X87SetRC(0x0c00);
3963  __ sub(esp, Immediate(kPointerSize));
3964  // Clear exception bits.
3965  __ fnclex();
3966  __ fistp_s(MemOperand(esp, 0));
3967  // Check overflow.
3968  __ X87CheckIA();
3969  __ pop(result);
3970  DeoptimizeIf(equal, instr, "conversion overflow");
3971  __ fnclex();
3972  // Restore round mode.
3973  __ X87SetRC(0x0000);
3974  __ jmp(&done);
3975 
3976  __ bind(&below_one_half);
3977  __ fld_d(Operand::StaticVariable(minus_one_half));
3978  __ fld(1);
3979  __ FCmp();
3980  __ j(carry, &below_minus_one_half);
3981  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3982  // we can ignore the difference between a result of -0 and +0.
3983  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3984  // If the sign is positive, we return +0.
3985  __ fld(0);
3986  __ FXamSign();
3987  DeoptimizeIf(not_zero, instr, "minus zero");
3988  }
3989  __ Move(result, Immediate(0));
3990  __ jmp(&done);
3991 
3992  __ bind(&below_minus_one_half);
3993  __ fld(0);
3994  __ fadd_d(Operand::StaticVariable(one_half));
3995  // rc=01B, round down.
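  // Worked example: x = -1.5 gives -1.5 + 0.5 = -1.0, rounded down to -1;
  // x = -1.7 gives -1.2, rounded down to -2, as Math.round requires.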
3996  __ X87SetRC(0x0400);
3997  __ sub(esp, Immediate(kPointerSize));
3998  // Clear exception bits.
3999  __ fnclex();
4000  __ fistp_s(MemOperand(esp, 0));
4001  // Check overflow.
4002  __ X87CheckIA();
4003  __ pop(result);
4004  DeoptimizeIf(equal, instr, "conversion overflow");
4005  __ fnclex();
4006  // Restore round mode.
4007  __ X87SetRC(0x0000);
4008 
4009  __ bind(&done);
4010 }
4011 
4012 
4013 void LCodeGen::DoMathFround(LMathFround* instr) {
4014  X87Register input_reg = ToX87Register(instr->value());
4015  X87Fxch(input_reg);
4016  __ sub(esp, Immediate(kPointerSize));
4017  __ fstp_s(MemOperand(esp, 0));
4018  __ fld_s(MemOperand(esp, 0));
4019  __ add(esp, Immediate(kPointerSize));
4020 }
4021 
4022 
4023 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4024  X87Register input = ToX87Register(instr->value());
4025  X87Register result_reg = ToX87Register(instr->result());
4026  Register temp_result = ToRegister(instr->temp1());
4027  Register temp = ToRegister(instr->temp2());
4028  Label slow, done, smi, finish;
4029  DCHECK(result_reg.is(input));
4030 
4031  // Store the input in a heap number and call the runtime function kMathSqrtRT.
4032  if (FLAG_inline_new) {
4033  __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
4034  __ jmp(&done, Label::kNear);
4035  }
4036 
4037  // Slow case: Call the runtime system to do the number allocation.
4038  __ bind(&slow);
4039  {
4040  // TODO(3095996): Put a valid pointer value in the stack slot where the
4041  // result register is stored, as this register is in the pointer map, but
4042  // contains an integer value.
4043  __ Move(temp_result, Immediate(0));
4044 
4045  // Preserve the value of all registers.
4046  PushSafepointRegistersScope scope(this);
4047 
4048  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4049  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4050  RecordSafepointWithRegisters(
4051  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4052  __ StoreToSafepointRegisterSlot(temp_result, eax);
4053  }
4054  __ bind(&done);
4055  X87LoadForUsage(input);
4056  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
4057 
4058  {
4059  // Preserve the value of all registers.
4060  PushSafepointRegistersScope scope(this);
4061 
4062  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4063  __ push(temp_result);
4064  __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
4065  RecordSafepointWithRegisters(
4066  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4067  __ StoreToSafepointRegisterSlot(temp_result, eax);
4068  }
4069  X87PrepareToWrite(result_reg);
4070  // The return value of kMathSqrtRT is a Smi or a heap number.
4071  __ JumpIfSmi(temp_result, &smi);
4072  // Heap number(double)
4073  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
4074  __ jmp(&finish);
4075  // SMI
4076  __ bind(&smi);
4077  __ SmiUntag(temp_result);
4078  __ push(temp_result);
4079  __ fild_s(MemOperand(esp, 0));
4080  __ pop(temp_result);
4081  __ bind(&finish);
4082  X87CommitWrite(result_reg);
4083 }
4084 
4085 
4086 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4087  X87Register input_reg = ToX87Register(instr->value());
4088  DCHECK(ToX87Register(instr->result()).is(input_reg));
4089  X87Fxch(input_reg);
4090  // Note that according to ECMA-262 15.8.2.13:
4091  // Math.pow(-Infinity, 0.5) == Infinity
4092  // Math.sqrt(-Infinity) == NaN
4093  Label done, sqrt;
4094  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
4095  __ fxam();
4096  __ push(eax);
4097  __ fnstsw_ax();
4098  __ and_(eax, Immediate(0x4700));
4099  __ cmp(eax, Immediate(0x0700));
4100  __ j(not_equal, &sqrt, Label::kNear);
4101  // If input is -Infinity, return Infinity.
4102  __ fchs();
4103  __ jmp(&done, Label::kNear);
4104 
4105  // Square root.
4106  __ bind(&sqrt);
4107  __ fldz();
4108  __ faddp(); // Convert -0 to +0.
4109  __ fsqrt();
4110  __ bind(&done);
4111  __ pop(eax);
4112 }
4113 
4114 
4115 void LCodeGen::DoPower(LPower* instr) {
4116  Representation exponent_type = instr->hydrogen()->right()->representation();
4117  X87Register result = ToX87Register(instr->result());
4118  // Having marked this as a call, we can use any registers.
4119  X87Register base = ToX87Register(instr->left());
4120  ExternalReference one_half = ExternalReference::address_of_one_half();
4121 
4122  if (exponent_type.IsSmi()) {
4123  Register exponent = ToRegister(instr->right());
4124  X87LoadForUsage(base);
4125  __ SmiUntag(exponent);
4126  __ push(exponent);
4127  __ fild_s(MemOperand(esp, 0));
4128  __ pop(exponent);
4129  } else if (exponent_type.IsTagged()) {
4130  Register exponent = ToRegister(instr->right());
4131  Register temp = exponent.is(ecx) ? eax : ecx;
4132  Label no_deopt, done;
4133  X87LoadForUsage(base);
4134  __ JumpIfSmi(exponent, &no_deopt);
4135  __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
4136  DeoptimizeIf(not_equal, instr, "not a heap number");
4137  // Heap number(double)
4138  __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
4139  __ jmp(&done);
4140  // SMI
4141  __ bind(&no_deopt);
4142  __ SmiUntag(exponent);
4143  __ push(exponent);
4144  __ fild_s(MemOperand(esp, 0));
4145  __ pop(exponent);
4146  __ bind(&done);
4147  } else if (exponent_type.IsInteger32()) {
4148  Register exponent = ToRegister(instr->right());
4149  X87LoadForUsage(base);
4150  __ push(exponent);
4151  __ fild_s(MemOperand(esp, 0));
4152  __ pop(exponent);
4153  } else {
4154  DCHECK(exponent_type.IsDouble());
4155  X87Register exponent_double = ToX87Register(instr->right());
4156  X87LoadForUsage(base, exponent_double);
4157  }
4158 
4159  // FP data stack {base, exponent(TOS)}.
4160  // Handle (exponent==+-0.5 && base == -0).
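  // Adding +0.0 to the base maps -0 to +0 and leaves every other value
  // unchanged, so Math.pow(-0, 0.5) yields +0 rather than sqrt's -0.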
4161  Label not_plus_0;
4162  __ fld(0);
4163  __ fabs();
4164  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
4165  __ FCmp();
4166  __ j(parity_even, &not_plus_0, Label::kNear); // NaN.
4167  __ j(not_equal, &not_plus_0, Label::kNear);
4168  __ fldz();
4169  // FP data stack {base, exponent(TOS), zero}.
4170  __ faddp(2);
4171  __ bind(&not_plus_0);
4172 
4173  {
4174  __ PrepareCallCFunction(4, eax);
4175  __ fstp_d(MemOperand(esp, kDoubleSize)); // Exponent value.
4176  __ fstp_d(MemOperand(esp, 0)); // Base value.
4177  X87PrepareToWrite(result);
4178  __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
4179  4);
4180  // Return value is in st(0) on ia32.
4181  X87CommitWrite(result);
4182  }
4183 }
4184 
4185 
4186 void LCodeGen::DoMathLog(LMathLog* instr) {
4187  DCHECK(instr->value()->Equals(instr->result()));
4188  X87Register input_reg = ToX87Register(instr->value());
4189  X87Fxch(input_reg);
4190 
4191  Label positive, done, zero, nan_result;
4192  __ fldz();
4193  __ fld(1);
4194  __ FCmp();
4195  __ j(below, &nan_result, Label::kNear);
4196  __ j(equal, &zero, Label::kNear);
4197  // Positive input.
4198  // {input, ln2}.
4199  __ fldln2();
4200  // {ln2, input}.
4201  __ fxch();
4202  // {result}.
4203  __ fyl2x();
4204  __ jmp(&done, Label::kNear);
4205 
4206  __ bind(&nan_result);
4207  ExternalReference nan =
4208  ExternalReference::address_of_canonical_non_hole_nan();
4209  X87PrepareToWrite(input_reg);
4210  __ fld_d(Operand::StaticVariable(nan));
4211  X87CommitWrite(input_reg);
4212  __ jmp(&done, Label::kNear);
4213 
4214  __ bind(&zero);
4215  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
4216  X87PrepareToWrite(input_reg);
4217  __ fld_d(Operand::StaticVariable(ninf));
4218  X87CommitWrite(input_reg);
4219 
4220  __ bind(&done);
4221 }
4222 
4223 
4224 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4225  Register input = ToRegister(instr->value());
4226  Register result = ToRegister(instr->result());
4227  Label not_zero_input;
4228  __ bsr(result, input);
4229 
4230  __ j(not_zero, &not_zero_input);
4231  __ Move(result, Immediate(63)); // 63^31 == 32
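  // Worked examples: input 1 -> bsr writes 0 and 0 ^ 31 == 31 == clz(1);
  // input 0x80000000 -> bsr writes 31 and 31 ^ 31 == 0; input 0 skips the
  // jump, loads 63, and 63 ^ 31 == 32.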
4232 
4233  __ bind(&not_zero_input);
4234  __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
4235 }
4236 
4237 
4238 void LCodeGen::DoMathExp(LMathExp* instr) {
4239  X87Register input = ToX87Register(instr->value());
4240  X87Register result_reg = ToX87Register(instr->result());
4241  Register temp_result = ToRegister(instr->temp1());
4242  Register temp = ToRegister(instr->temp2());
4243  Label slow, done, smi, finish;
4244  DCHECK(result_reg.is(input));
4245 
4246  // Store the input in a heap number and call the runtime function kMathExpRT.
4247  if (FLAG_inline_new) {
4248  __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
4249  __ jmp(&done, Label::kNear);
4250  }
4251 
4252  // Slow case: Call the runtime system to do the number allocation.
4253  __ bind(&slow);
4254  {
4255  // TODO(3095996): Put a valid pointer value in the stack slot where the
4256  // result register is stored, as this register is in the pointer map, but
4257  // contains an integer value.
4258  __ Move(temp_result, Immediate(0));
4259 
4260  // Preserve the value of all registers.
4261  PushSafepointRegistersScope scope(this);
4262 
4263  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4264  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4265  RecordSafepointWithRegisters(instr->pointer_map(), 0,
4266  Safepoint::kNoLazyDeopt);
4267  __ StoreToSafepointRegisterSlot(temp_result, eax);
4268  }
4269  __ bind(&done);
4270  X87LoadForUsage(input);
4271  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
4272 
4273  {
4274  // Preserve the value of all registers.
4275  PushSafepointRegistersScope scope(this);
4276 
4277  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4278  __ push(temp_result);
4279  __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
4280  RecordSafepointWithRegisters(instr->pointer_map(), 0,
4281  Safepoint::kNoLazyDeopt);
4282  __ StoreToSafepointRegisterSlot(temp_result, eax);
4283  }
4284  X87PrepareToWrite(result_reg);
4285  // The return value of kMathExpRT is a Smi or a heap number.
4286  __ JumpIfSmi(temp_result, &smi);
4287  // Heap number(double)
4288  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
4289  __ jmp(&finish);
4290  // SMI
4291  __ bind(&smi);
4292  __ SmiUntag(temp_result);
4293  __ push(temp_result);
4294  __ fild_s(MemOperand(esp, 0));
4295  __ pop(temp_result);
4296  __ bind(&finish);
4297  X87CommitWrite(result_reg);
4298 }
4299 
4300 
4301 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4302  DCHECK(ToRegister(instr->context()).is(esi));
4303  DCHECK(ToRegister(instr->function()).is(edi));
4304  DCHECK(instr->HasPointerMap());
4305 
4306  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4307  if (known_function.is_null()) {
4308  LPointerMap* pointers = instr->pointer_map();
4309  SafepointGenerator generator(
4310  this, pointers, Safepoint::kLazyDeopt);
4311  ParameterCount count(instr->arity());
4312  __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
4313  } else {
4314  CallKnownFunction(known_function,
4315  instr->hydrogen()->formal_parameter_count(),
4316  instr->arity(),
4317  instr,
4318  EDI_CONTAINS_TARGET);
4319  }
4320 }
4321 
4322 
4323 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4324  DCHECK(ToRegister(instr->context()).is(esi));
4325  DCHECK(ToRegister(instr->function()).is(edi));
4326  DCHECK(ToRegister(instr->result()).is(eax));
4327 
4328  int arity = instr->arity();
4329  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4330  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4331 }
4332 
4333 
4334 void LCodeGen::DoCallNew(LCallNew* instr) {
4335  DCHECK(ToRegister(instr->context()).is(esi));
4336  DCHECK(ToRegister(instr->constructor()).is(edi));
4337  DCHECK(ToRegister(instr->result()).is(eax));
4338 
4339  // No cell in ebx for construct type feedback in optimized code
4340  __ mov(ebx, isolate()->factory()->undefined_value());
4341  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4342  __ Move(eax, Immediate(instr->arity()));
4343  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4344 }
4345 
4346 
4347 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4348  DCHECK(ToRegister(instr->context()).is(esi));
4349  DCHECK(ToRegister(instr->constructor()).is(edi));
4350  DCHECK(ToRegister(instr->result()).is(eax));
4351 
4352  __ Move(eax, Immediate(instr->arity()));
4353  __ mov(ebx, isolate()->factory()->undefined_value());
4354  ElementsKind kind = instr->hydrogen()->elements_kind();
4355  AllocationSiteOverrideMode override_mode =
4356  (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4357  ? DISABLE_ALLOCATION_SITES
4358  : DONT_OVERRIDE;
4359 
4360  if (instr->arity() == 0) {
4361  ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4362  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4363  } else if (instr->arity() == 1) {
4364  Label done;
4365  if (IsFastPackedElementsKind(kind)) {
4366  Label packed_case;
4367  // A packed elements kind may need to become holey: inspect the single
4368  // length argument; a non-zero length means the array starts with holes.
4369  __ mov(ecx, Operand(esp, 0));
4370  __ test(ecx, ecx);
4371  __ j(zero, &packed_case, Label::kNear);
4372 
4373  ElementsKind holey_kind = GetHoleyElementsKind(kind);
4374  ArraySingleArgumentConstructorStub stub(isolate(),
4375  holey_kind,
4376  override_mode);
4377  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4378  __ jmp(&done, Label::kNear);
4379  __ bind(&packed_case);
4380  }
4381 
4382  ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4383  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4384  __ bind(&done);
4385  } else {
4386  ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4387  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4388  }
4389 }
4390 
4391 
4392 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4393  DCHECK(ToRegister(instr->context()).is(esi));
4394  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4395 }
4396 
4397 
4398 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4399  Register function = ToRegister(instr->function());
4400  Register code_object = ToRegister(instr->code_object());
4401  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
4402  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4403 }
4404 
4405 
4406 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4407  Register result = ToRegister(instr->result());
4408  Register base = ToRegister(instr->base_object());
4409  if (instr->offset()->IsConstantOperand()) {
4410  LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4411  __ lea(result, Operand(base, ToInteger32(offset)));
4412  } else {
4413  Register offset = ToRegister(instr->offset());
4414  __ lea(result, Operand(base, offset, times_1, 0));
4415  }
4416 }
4417 
4418 
4419 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4420  Representation representation = instr->hydrogen()->field_representation();
4421 
4422  HObjectAccess access = instr->hydrogen()->access();
4423  int offset = access.offset();
4424 
4425  if (access.IsExternalMemory()) {
4426  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4427  MemOperand operand = instr->object()->IsConstantOperand()
4428  ? MemOperand::StaticVariable(
4429  ToExternalReference(LConstantOperand::cast(instr->object())))
4430  : MemOperand(ToRegister(instr->object()), offset);
4431  if (instr->value()->IsConstantOperand()) {
4432  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4433  __ mov(operand, Immediate(ToInteger32(operand_value)));
4434  } else {
4435  Register value = ToRegister(instr->value());
4436  __ Store(value, operand, representation);
4437  }
4438  return;
4439  }
4440 
4441  Register object = ToRegister(instr->object());
4442  __ AssertNotSmi(object);
4443  DCHECK(!representation.IsSmi() ||
4444  !instr->value()->IsConstantOperand() ||
4445  IsSmi(LConstantOperand::cast(instr->value())));
4446  if (representation.IsDouble()) {
4447  DCHECK(access.IsInobject());
4448  DCHECK(!instr->hydrogen()->has_transition());
4449  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4450  X87Register value = ToX87Register(instr->value());
4451  X87Mov(FieldOperand(object, offset), value);
4452  return;
4453  }
4454 
4455  if (instr->hydrogen()->has_transition()) {
4456  Handle<Map> transition = instr->hydrogen()->transition_map();
4457  AddDeprecationDependency(transition);
4458  __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4459  if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4460  Register temp = ToRegister(instr->temp());
4461  Register temp_map = ToRegister(instr->temp_map());
4462  __ mov(temp_map, transition);
4463  __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4464  // Update the write barrier for the map field.
4465  __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
4466  }
4467  }
4468 
4469  // Do the store.
4470  Register write_register = object;
4471  if (!access.IsInobject()) {
4472  write_register = ToRegister(instr->temp());
4473  __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4474  }
4475 
4476  MemOperand operand = FieldOperand(write_register, offset);
4477  if (instr->value()->IsConstantOperand()) {
4478  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4479  if (operand_value->IsRegister()) {
4480  Register value = ToRegister(operand_value);
4481  __ Store(value, operand, representation);
4482  } else if (representation.IsInteger32()) {
4483  Immediate immediate = ToImmediate(operand_value, representation);
4484  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4485  __ mov(operand, immediate);
4486  } else {
4487  Handle<Object> handle_value = ToHandle(operand_value);
4488  DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4489  __ mov(operand, handle_value);
4490  }
4491  } else {
4492  Register value = ToRegister(instr->value());
4493  __ Store(value, operand, representation);
4494  }
4495 
4496  if (instr->hydrogen()->NeedsWriteBarrier()) {
4497  Register value = ToRegister(instr->value());
4498  Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4499  // Update the write barrier for the object for in-object properties.
4500  __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
4501  EMIT_REMEMBERED_SET,
4502  instr->hydrogen()->SmiCheckForWriteBarrier(),
4503  instr->hydrogen()->PointersToHereCheckForValue());
4504  }
4505 }
4506 
4507 
4508 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4509  DCHECK(ToRegister(instr->context()).is(esi));
4510  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4511  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4512 
4513  __ mov(StoreDescriptor::NameRegister(), instr->name());
4514  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4515  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4516 }
4517 
4518 
4519 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4520  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
4521  if (instr->index()->IsConstantOperand()) {
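  // The constant must be the immediate (right) operand, so the comparison
  // is emitted swapped and the condition commuted below: cmp(length, index)
  // with below_equal tests the same predicate as cmp(index, length) with
  // above_equal.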
4522  __ cmp(ToOperand(instr->length()),
4523  ToImmediate(LConstantOperand::cast(instr->index()),
4524  instr->hydrogen()->length()->representation()));
4525  cc = CommuteCondition(cc);
4526  } else if (instr->length()->IsConstantOperand()) {
4527  __ cmp(ToOperand(instr->index()),
4528  ToImmediate(LConstantOperand::cast(instr->length()),
4529  instr->hydrogen()->index()->representation()));
4530  } else {
4531  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4532  }
4533  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4534  Label done;
4535  __ j(NegateCondition(cc), &done, Label::kNear);
4536  __ int3();
4537  __ bind(&done);
4538  } else {
4539  DeoptimizeIf(cc, instr, "out of bounds");
4540  }
4541 }
4542 
4543 
4544 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4545  ElementsKind elements_kind = instr->elements_kind();
4546  LOperand* key = instr->key();
4547  if (!key->IsConstantOperand() &&
4548  ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4549  elements_kind)) {
4550  __ SmiUntag(ToRegister(key));
4551  }
4552  Operand operand(BuildFastArrayOperand(
4553  instr->elements(),
4554  key,
4555  instr->hydrogen()->key()->representation(),
4556  elements_kind,
4557  instr->base_offset()));
4558  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4559  elements_kind == FLOAT32_ELEMENTS) {
4560  X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
4561  } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4562  elements_kind == FLOAT64_ELEMENTS) {
4563  X87Mov(operand, ToX87Register(instr->value()));
4564  } else {
4565  Register value = ToRegister(instr->value());
4566  switch (elements_kind) {
4567  case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4568  case EXTERNAL_UINT8_ELEMENTS:
4569  case EXTERNAL_INT8_ELEMENTS:
4570  case UINT8_ELEMENTS:
4571  case INT8_ELEMENTS:
4572  case UINT8_CLAMPED_ELEMENTS:
4573  __ mov_b(operand, value);
4574  break;
4575  case EXTERNAL_INT16_ELEMENTS:
4576  case EXTERNAL_UINT16_ELEMENTS:
4577  case UINT16_ELEMENTS:
4578  case INT16_ELEMENTS:
4579  __ mov_w(operand, value);
4580  break;
4581  case EXTERNAL_INT32_ELEMENTS:
4582  case EXTERNAL_UINT32_ELEMENTS:
4583  case UINT32_ELEMENTS:
4584  case INT32_ELEMENTS:
4585  __ mov(operand, value);
4586  break;
4587  case EXTERNAL_FLOAT32_ELEMENTS:
4588  case EXTERNAL_FLOAT64_ELEMENTS:
4589  case FLOAT32_ELEMENTS:
4590  case FLOAT64_ELEMENTS:
4591  case FAST_SMI_ELEMENTS:
4592  case FAST_ELEMENTS:
4593  case FAST_DOUBLE_ELEMENTS:
4594  case FAST_HOLEY_SMI_ELEMENTS:
4595  case FAST_HOLEY_ELEMENTS:
4596  case FAST_HOLEY_DOUBLE_ELEMENTS:
4597  case DICTIONARY_ELEMENTS:
4598  case SLOPPY_ARGUMENTS_ELEMENTS:
4599  UNREACHABLE();
4600  break;
4601  }
4602  }
4603 }
4604 
4605 
4606 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4607  ExternalReference canonical_nan_reference =
4608  ExternalReference::address_of_canonical_non_hole_nan();
4609  Operand double_store_operand = BuildFastArrayOperand(
4610  instr->elements(),
4611  instr->key(),
4612  instr->hydrogen()->key()->representation(),
4613  FAST_DOUBLE_ELEMENTS,
4614  instr->base_offset());
4615 
4617  if (instr->hydrogen()->IsConstantHoleStore()) {
4618  // This means we should store the (double) hole. No floating point
4619  // registers required.
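  // The hole is a fixed NaN bit pattern (upper word kHoleNanUpper32), so
  // it can be written with two 32-bit integer moves, low word first.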
4620  double nan_double = FixedDoubleArray::hole_nan_as_double();
4621  uint64_t int_val = bit_cast<uint64_t, double>(nan_double);
4622  int32_t lower = static_cast<int32_t>(int_val);
4623  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4624 
4625  __ mov(double_store_operand, Immediate(lower));
4626  Operand double_store_operand2 = BuildFastArrayOperand(
4627  instr->elements(),
4628  instr->key(),
4629  instr->hydrogen()->key()->representation(),
4630  FAST_DOUBLE_ELEMENTS,
4631  instr->base_offset() + kPointerSize);
4632  __ mov(double_store_operand2, Immediate(upper));
4633  } else {
4634  Label no_special_nan_handling;
4635  X87Register value = ToX87Register(instr->value());
4636  X87Fxch(value);
4637 
4638  if (instr->NeedsCanonicalization()) {
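  // A self-compare is unordered only for NaN. Any NaN other than the hole
  // pattern is replaced with the canonical NaN so that an arbitrary NaN
  // can never alias the hole sentinel.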
4639  __ fld(0);
4640  __ fld(0);
4641  __ FCmp();
4642 
4643  __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4644  __ sub(esp, Immediate(kDoubleSize));
4645  __ fst_d(MemOperand(esp, 0));
4646  __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4647  Immediate(kHoleNanUpper32));
4648  __ add(esp, Immediate(kDoubleSize));
4649  Label canonicalize;
4650  __ j(not_equal, &canonicalize, Label::kNear);
4651  __ jmp(&no_special_nan_handling, Label::kNear);
4652  __ bind(&canonicalize);
4653  __ fstp(0);
4654  __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4655  }
4656 
4657  __ bind(&no_special_nan_handling);
4658  __ fst_d(double_store_operand);
4659  }
4660 }
4661 
4662 
4663 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4664  Register elements = ToRegister(instr->elements());
4665  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4666 
4667  Operand operand = BuildFastArrayOperand(
4668  instr->elements(),
4669  instr->key(),
4670  instr->hydrogen()->key()->representation(),
4671  FAST_ELEMENTS,
4672  instr->base_offset());
4673  if (instr->value()->IsRegister()) {
4674  __ mov(operand, ToRegister(instr->value()));
4675  } else {
4676  LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4677  if (IsSmi(operand_value)) {
4678  Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4679  __ mov(operand, immediate);
4680  } else {
4681  DCHECK(!IsInteger32(operand_value));
4682  Handle<Object> handle_value = ToHandle(operand_value);
4683  __ mov(operand, handle_value);
4684  }
4685  }
4686 
4687  if (instr->hydrogen()->NeedsWriteBarrier()) {
4688  DCHECK(instr->value()->IsRegister());
4689  Register value = ToRegister(instr->value());
4690  DCHECK(!instr->key()->IsConstantOperand());
4691  SmiCheck check_needed =
4692  instr->hydrogen()->value()->type().IsHeapObject()
4693  ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4694  // Compute address of modified element and store it into key register.
4695  __ lea(key, operand);
4696  __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
4697  check_needed,
4698  instr->hydrogen()->PointersToHereCheckForValue());
4699  }
4700 }
4701 
4702 
4703 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4704  // Dispatch on elements kind: typed/external, fast double, or fast.
4705  if (instr->is_typed_elements()) {
4706  DoStoreKeyedExternalArray(instr);
4707  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4708  DoStoreKeyedFixedDoubleArray(instr);
4709  } else {
4710  DoStoreKeyedFixedArray(instr);
4711  }
4712 }
4713 
4714 
4715 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4716  DCHECK(ToRegister(instr->context()).is(esi));
4717  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4718  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4719  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4720 
4721  Handle<Code> ic =
4722  CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4723  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4724 }
4725 
4726 
4727 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4728  Register object = ToRegister(instr->object());
4729  Register temp = ToRegister(instr->temp());
4730  Label no_memento_found;
4731  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4732  DeoptimizeIf(equal, instr, "memento found");
4733  __ bind(&no_memento_found);
4734 }
4735 
4736 
4737 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4738  Register object_reg = ToRegister(instr->object());
4739 
4740  Handle<Map> from_map = instr->original_map();
4741  Handle<Map> to_map = instr->transitioned_map();
4742  ElementsKind from_kind = instr->from_kind();
4743  ElementsKind to_kind = instr->to_kind();
4744 
4745  Label not_applicable;
4746  bool is_simple_map_transition =
4747  IsSimpleMapChangeTransition(from_kind, to_kind);
4748  Label::Distance branch_distance =
4749  is_simple_map_transition ? Label::kNear : Label::kFar;
4750  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4751  __ j(not_equal, &not_applicable, branch_distance);
4752  if (is_simple_map_transition) {
4753  Register new_map_reg = ToRegister(instr->new_map_temp());
4754  __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4755  Immediate(to_map));
4756  // Write barrier.
4757  DCHECK_NE(instr->temp(), NULL);
4758  __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4759  ToRegister(instr->temp()), kDontSaveFPRegs);
4760  } else {
4761  DCHECK(ToRegister(instr->context()).is(esi));
4762  DCHECK(object_reg.is(eax));
4763  PushSafepointRegistersScope scope(this);
4764  __ mov(ebx, to_map);
4765  bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4766  TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4767  __ CallStub(&stub);
4768  RecordSafepointWithRegisters(
4769  instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4770  }
4771  __ bind(&not_applicable);
4772 }
4773 
4774 
4775 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4776  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4777  public:
4778  DeferredStringCharCodeAt(LCodeGen* codegen,
4779  LStringCharCodeAt* instr,
4780  const X87Stack& x87_stack)
4781  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4782  virtual void Generate() OVERRIDE {
4783  codegen()->DoDeferredStringCharCodeAt(instr_);
4784  }
4785  virtual LInstruction* instr() OVERRIDE { return instr_; }
4786  private:
4787  LStringCharCodeAt* instr_;
4788  };
4789 
4790  DeferredStringCharCodeAt* deferred =
4791  new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4792 
4793  StringCharLoadGenerator::Generate(masm(),
4794  factory(),
4795  ToRegister(instr->string()),
4796  ToRegister(instr->index()),
4797  ToRegister(instr->result()),
4798  deferred->entry());
4799  __ bind(deferred->exit());
4800 }
4801 
4802 
4803 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4804  Register string = ToRegister(instr->string());
4805  Register result = ToRegister(instr->result());
4806 
4807  // TODO(3095996): Get rid of this. For now, we need to make the
4808  // result register contain a valid pointer because it is already
4809  // contained in the register pointer map.
4810  __ Move(result, Immediate(0));
4811 
4812  PushSafepointRegistersScope scope(this);
4813  __ push(string);
4814  // Push the index as a smi. This is safe because of the checks in
4815  // DoStringCharCodeAt above.
4816  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4817  if (instr->index()->IsConstantOperand()) {
4818  Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4819  Representation::Smi());
4820  __ push(immediate);
4821  } else {
4822  Register index = ToRegister(instr->index());
4823  __ SmiTag(index);
4824  __ push(index);
4825  }
4826  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
4827  instr, instr->context());
4828  __ AssertSmi(eax);
4829  __ SmiUntag(eax);
4830  __ StoreToSafepointRegisterSlot(result, eax);
4831 }
4832 
4833 
4834 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4835  class DeferredStringCharFromCode FINAL : public LDeferredCode {
4836  public:
4837  DeferredStringCharFromCode(LCodeGen* codegen,
4838  LStringCharFromCode* instr,
4839  const X87Stack& x87_stack)
4840  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4841  virtual void Generate() OVERRIDE {
4842  codegen()->DoDeferredStringCharFromCode(instr_);
4843  }
4844  virtual LInstruction* instr() OVERRIDE { return instr_; }
4845  private:
4846  LStringCharFromCode* instr_;
4847  };
4848 
4849  DeferredStringCharFromCode* deferred =
4850  new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4851 
4852  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4853  Register char_code = ToRegister(instr->char_code());
4854  Register result = ToRegister(instr->result());
4855  DCHECK(!char_code.is(result));
4856 
4857  __ cmp(char_code, String::kMaxOneByteCharCode);
4858  __ j(above, deferred->entry());
4859  __ Move(result, Immediate(factory()->single_character_string_cache()));
4860  __ mov(result, FieldOperand(result,
4861  char_code, times_pointer_size,
4862  FixedArray::kHeaderSize));
4863  __ cmp(result, factory()->undefined_value());
4864  __ j(equal, deferred->entry());
4865  __ bind(deferred->exit());
4866 }
4867 
4868 
4869 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4870  Register char_code = ToRegister(instr->char_code());
4871  Register result = ToRegister(instr->result());
4872 
4873  // TODO(3095996): Get rid of this. For now, we need to make the
4874  // result register contain a valid pointer because it is already
4875  // contained in the register pointer map.
4876  __ Move(result, Immediate(0));
4877 
4878  PushSafepointRegistersScope scope(this);
4879  __ SmiTag(char_code);
4880  __ push(char_code);
4881  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4882  __ StoreToSafepointRegisterSlot(result, eax);
4883 }
4884 
4885 
4886 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4887  DCHECK(ToRegister(instr->context()).is(esi));
4888  DCHECK(ToRegister(instr->left()).is(edx));
4889  DCHECK(ToRegister(instr->right()).is(eax));
4890  StringAddStub stub(isolate(),
4891  instr->hydrogen()->flags(),
4892  instr->hydrogen()->pretenure_flag());
4893  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4894 }
4895 
4896 
4897 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4898  LOperand* input = instr->value();
4899  LOperand* output = instr->result();
4900  DCHECK(input->IsRegister() || input->IsStackSlot());
4901  DCHECK(output->IsDoubleRegister());
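  // fild only accepts memory operands, so a register input is spilled to
  // the stack just long enough for the x87 integer load.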
4902  if (input->IsRegister()) {
4903  Register input_reg = ToRegister(input);
4904  __ push(input_reg);
4905  X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4906  __ pop(input_reg);
4907  } else {
4908  X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4909  }
4910 }
4911 
4912 
4913 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4914  LOperand* input = instr->value();
4915  LOperand* output = instr->result();
4916  X87Register res = ToX87Register(output);
4917  X87PrepareToWrite(res);
4918  __ LoadUint32NoSSE2(ToRegister(input));
4919  X87CommitWrite(res);
4920 }
4921 
4922 
4923 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4924  class DeferredNumberTagI FINAL : public LDeferredCode {
4925  public:
4926  DeferredNumberTagI(LCodeGen* codegen,
4927  LNumberTagI* instr,
4928  const X87Stack& x87_stack)
4929  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4930  virtual void Generate() OVERRIDE {
4931  codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4932  SIGNED_INT32);
4933  }
4934  virtual LInstruction* instr() OVERRIDE { return instr_; }
4935  private:
4936  LNumberTagI* instr_;
4937  };
4938 
4939  LOperand* input = instr->value();
4940  DCHECK(input->IsRegister() && input->Equals(instr->result()));
4941  Register reg = ToRegister(input);
4942 
4943  DeferredNumberTagI* deferred =
4944  new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
4945  __ SmiTag(reg);
4946  __ j(overflow, deferred->entry());
4947  __ bind(deferred->exit());
4948 }
4949 
4950 
4951 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4952  class DeferredNumberTagU FINAL : public LDeferredCode {
4953  public:
4954  DeferredNumberTagU(LCodeGen* codegen,
4955  LNumberTagU* instr,
4956  const X87Stack& x87_stack)
4957  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4958  virtual void Generate() OVERRIDE {
4959  codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
4960  UNSIGNED_INT32);
4961  }
4962  virtual LInstruction* instr() OVERRIDE { return instr_; }
4963  private:
4964  LNumberTagU* instr_;
4965  };
4966 
4967  LOperand* input = instr->value();
4968  DCHECK(input->IsRegister() && input->Equals(instr->result()));
4969  Register reg = ToRegister(input);
4970 
4971  DeferredNumberTagU* deferred =
4972  new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
4973  __ cmp(reg, Immediate(Smi::kMaxValue));
4974  __ j(above, deferred->entry());
4975  __ SmiTag(reg);
4976  __ bind(deferred->exit());
4977 }
4978 
4979 
4980 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4981  LOperand* value,
4982  LOperand* temp,
4983  IntegerSignedness signedness) {
4984  Label done, slow;
4985  Register reg = ToRegister(value);
4986  Register tmp = ToRegister(temp);
4987 
4988  if (signedness == SIGNED_INT32) {
4989  // There was overflow, so bits 30 and 31 of the original integer
4990  // disagree. Try to allocate a heap number in new space and store
4991  // the value in there. If that fails, call the runtime system.
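  // Note: the overflowed SmiTag left the value doubled modulo 2^32;
  // SmiUntag halves it back, leaving the original with bit 31 flipped,
  // and the xor below restores it.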
4992  __ SmiUntag(reg);
4993  __ xor_(reg, 0x80000000);
4994  __ push(reg);
4995  __ fild_s(Operand(esp, 0));
4996  __ pop(reg);
4997  } else {
4998  // There's no fild variant for unsigned values, so zero-extend to a 64-bit
4999  // int manually.
5000  __ push(Immediate(0));
5001  __ push(reg);
5002  __ fild_d(Operand(esp, 0));
5003  __ pop(reg);
5004  __ pop(reg);
5005  }
5006 
5007  if (FLAG_inline_new) {
5008  __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
5009  __ jmp(&done, Label::kNear);
5010  }
5011 
5012  // Slow case: Call the runtime system to do the number allocation.
5013  __ bind(&slow);
5014  {
5015  // TODO(3095996): Put a valid pointer value in the stack slot where the
5016  // result register is stored, as this register is in the pointer map, but
5017  // contains an integer value.
5018  __ Move(reg, Immediate(0));
5019 
5020  // Preserve the value of all registers.
5021  PushSafepointRegistersScope scope(this);
5022 
5023  // NumberTagI and NumberTagD use the context from the frame, rather than
5024  // the environment's HContext or HInlinedContext value.
5025  // They only call Runtime::kAllocateHeapNumber.
5026  // The corresponding HChange instructions are added in a phase that does
5027  // not have easy access to the local context.
5028  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5029  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5030  RecordSafepointWithRegisters(
5031  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5032  __ StoreToSafepointRegisterSlot(reg, eax);
5033  }
5034 
5035  __ bind(&done);
5036  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5037 }
5038 
5039 
5040 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5041  class DeferredNumberTagD FINAL : public LDeferredCode {
5042  public:
5043  DeferredNumberTagD(LCodeGen* codegen,
5044  LNumberTagD* instr,
5045  const X87Stack& x87_stack)
5046  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5047  virtual void Generate() OVERRIDE {
5048  codegen()->DoDeferredNumberTagD(instr_);
5049  }
5050  virtual LInstruction* instr() OVERRIDE { return instr_; }
5051  private:
5052  LNumberTagD* instr_;
5053  };
5054 
5055  Register reg = ToRegister(instr->result());
5056 
5057  // Put the value on top of the FPU stack.
5058  X87Register src = ToX87Register(instr->value());
5059  // Don't use X87LoadForUsage here, which is only used by Instruction which
5060  // clobbers fp registers.
5061  x87_stack_.Fxch(src);
5062 
5063  DeferredNumberTagD* deferred =
5064  new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
5065  if (FLAG_inline_new) {
5066  Register tmp = ToRegister(instr->temp());
5067  __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
5068  } else {
5069  __ jmp(deferred->entry());
5070  }
5071  __ bind(deferred->exit());
5072  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
5073 }
5074 
5075 
5076 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5077  // TODO(3095996): Get rid of this. For now, we need to make the
5078  // result register contain a valid pointer because it is already
5079  // contained in the register pointer map.
5080  Register reg = ToRegister(instr->result());
5081  __ Move(reg, Immediate(0));
5082 
5083  PushSafepointRegistersScope scope(this);
5084  // NumberTagI and NumberTagD use the context from the frame, rather than
5085  // the environment's HContext or HInlinedContext value.
5086  // They only call Runtime::kAllocateHeapNumber.
5087  // The corresponding HChange instructions are added in a phase that does
5088  // not have easy access to the local context.
5089  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5090  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5091  RecordSafepointWithRegisters(
5092  instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5093  __ StoreToSafepointRegisterSlot(reg, eax);
5094 }
5095 
5096 
5097 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5098  HChange* hchange = instr->hydrogen();
5099  Register input = ToRegister(instr->value());
5100  if (hchange->CheckFlag(HValue::kCanOverflow) &&
5101  hchange->value()->CheckFlag(HValue::kUint32)) {
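  // A Smi payload is 31 bits, so a uint32 fits only if bits 30 and 31 are
  // both clear; the 0xc0000000 mask tests exactly those two bits.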
5102  __ test(input, Immediate(0xc0000000));
5103  DeoptimizeIf(not_zero, instr, "overflow");
5104  }
5105  __ SmiTag(input);
5106  if (hchange->CheckFlag(HValue::kCanOverflow) &&
5107  !hchange->value()->CheckFlag(HValue::kUint32)) {
5108  DeoptimizeIf(overflow, instr, "overflow");
5109  }
5110 }
5111 
5112 
5113 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5114  LOperand* input = instr->value();
5115  Register result = ToRegister(input);
5116  DCHECK(input->IsRegister() && input->Equals(instr->result()));
5117  if (instr->needs_check()) {
5118  __ test(result, Immediate(kSmiTagMask));
5119  DeoptimizeIf(not_zero, instr, "not a Smi");
5120  } else {
5121  __ AssertSmi(result);
5122  }
5123  __ SmiUntag(result);
5124 }
5125 
5126 
5127 void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
5128  Register temp_reg, X87Register res_reg,
5129  NumberUntagDMode mode) {
5130  bool can_convert_undefined_to_nan =
5131  instr->hydrogen()->can_convert_undefined_to_nan();
5132  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
5133 
5134  Label load_smi, done;
5135 
5136  X87PrepareToWrite(res_reg);
5137  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5138  // Smi check.
5139  __ JumpIfSmi(input_reg, &load_smi);
5140 
5141  // Heap number map check.
5142  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5143  factory()->heap_number_map());
5144  if (!can_convert_undefined_to_nan) {
5145  DeoptimizeIf(not_equal, instr, "not a heap number");
5146  } else {
5147  Label heap_number, convert;
5148  __ j(equal, &heap_number);
5149 
5150  // Convert undefined (or hole) to NaN.
5151  __ cmp(input_reg, factory()->undefined_value());
5152  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
5153 
5154  __ bind(&convert);
5155  ExternalReference nan =
5156  ExternalReference::address_of_canonical_non_hole_nan();
5157  __ fld_d(Operand::StaticVariable(nan));
5158  __ jmp(&done, Label::kNear);
5159 
5160  __ bind(&heap_number);
5161  }
5162  // Heap number to x87 conversion.
5163  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5164  if (deoptimize_on_minus_zero) {
5165  __ fldz();
5166  __ FCmp();
5167  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5168  __ j(not_zero, &done, Label::kNear);
5169 
5170  // Use general purpose registers to check if we have -0.0
5171  __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5172  __ test(temp_reg, Immediate(HeapNumber::kSignMask));
5173  __ j(zero, &done, Label::kNear);
5174 
5175  // Pop FPU stack before deoptimizing.
5176  __ fstp(0);
5177  DeoptimizeIf(not_zero, instr, "minus zero");
5178  }
5179  __ jmp(&done, Label::kNear);
5180  } else {
5181  DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
5182  }
5183 
5184  __ bind(&load_smi);
5185  // Clobbering a temp is faster than re-tagging the
5186  // input register since we avoid dependencies.
5187  __ mov(temp_reg, input_reg);
5188  __ SmiUntag(temp_reg); // Untag smi before converting to float.
5189  __ push(temp_reg);
5190  __ fild_s(Operand(esp, 0));
5191  __ add(esp, Immediate(kPointerSize));
5192  __ bind(&done);
5193  X87CommitWrite(res_reg);
5194 }
5195 
5196 
5197 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5198  Register input_reg = ToRegister(instr->value());
5199 
5200  // The input was optimistically untagged; revert it.
5201  STATIC_ASSERT(kSmiTagSize == 1);
5202  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
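 // The lea computes value * 2 + kHeapObjectTag in one instruction, exactly
 // undoing the earlier SmiUntag and restoring the tagged pointer.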
5203 
5204  if (instr->truncating()) {
5205  Label no_heap_number, check_bools, check_false;
5206 
5207  // Heap number map check.
5208  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5209  factory()->heap_number_map());
5210  __ j(not_equal, &no_heap_number, Label::kNear);
5211  __ TruncateHeapNumberToI(input_reg, input_reg);
5212  __ jmp(done);
5213 
5214  __ bind(&no_heap_number);
5215  // Check for Oddballs. Undefined/False is converted to zero and True to one
5216  // for truncating conversions.
5217  __ cmp(input_reg, factory()->undefined_value());
5218  __ j(not_equal, &check_bools, Label::kNear);
5219  __ Move(input_reg, Immediate(0));
5220  __ jmp(done);
5221 
5222  __ bind(&check_bools);
5223  __ cmp(input_reg, factory()->true_value());
5224  __ j(not_equal, &check_false, Label::kNear);
5225  __ Move(input_reg, Immediate(1));
5226  __ jmp(done);
5227 
5228  __ bind(&check_false);
5229  __ cmp(input_reg, factory()->false_value());
5230  DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
5231  __ Move(input_reg, Immediate(0));
5232  } else {
5233  // TODO(olivf) Converting a number on the fpu is actually quite slow. We
5234  // should first try a fast conversion and then bail out to this slow case.
5235  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5236  isolate()->factory()->heap_number_map());
5237  DeoptimizeIf(not_equal, instr, "not a heap number");
5238 
5239  __ sub(esp, Immediate(kPointerSize));
5240  __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5241 
5242  if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
5243  Label no_precision_lost, not_nan, zero_check;
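 // Round-trip the value through a 32-bit integer (fist_s/fild_s) and compare
 // it with the original: any difference means the double was not an exact
 // int32. After FCmp, the parity flag signals an unordered (NaN) compare.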
5244  __ fld(0);
5245 
5246  __ fist_s(MemOperand(esp, 0));
5247  __ fild_s(MemOperand(esp, 0));
5248  __ FCmp();
5249  __ pop(input_reg);
5250 
5251  __ j(equal, &no_precision_lost, Label::kNear);
5252  __ fstp(0);
5253  DeoptimizeIf(no_condition, instr, "lost precision");
5254  __ bind(&no_precision_lost);
5255 
5256  __ j(parity_odd, &not_nan);
5257  __ fstp(0);
5258  DeoptimizeIf(no_condition, instr, "NaN");
5259  __ bind(&not_nan);
5260 
5261  __ test(input_reg, Operand(input_reg));
5262  __ j(zero, &zero_check, Label::kNear);
5263  __ fstp(0);
5264  __ jmp(done);
5265 
5266  __ bind(&zero_check);
5267  // To check for minus zero we store the value as a 32-bit float and inspect
5268  // its bits: +0.0 is all zeros, while -0.0 has only the sign bit set.
5269  __ sub(esp, Immediate(kPointerSize));
5270  __ fstp_s(Operand(esp, 0));
5271  __ pop(input_reg);
5272  __ test(input_reg, Operand(input_reg));
5273  DeoptimizeIf(not_zero, instr, "minus zero");
5274  } else {
5275  __ fist_s(MemOperand(esp, 0));
5276  __ fild_s(MemOperand(esp, 0));
5277  __ FCmp();
5278  __ pop(input_reg);
5279  DeoptimizeIf(not_equal, instr, "lost precision");
5280  DeoptimizeIf(parity_even, instr, "NaN");
5281  }
5282  }
5283 }
5284 
5285 
5286 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5287  class DeferredTaggedToI FINAL : public LDeferredCode {
5288  public:
5289  DeferredTaggedToI(LCodeGen* codegen,
5290  LTaggedToI* instr,
5291  const X87Stack& x87_stack)
5292  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5293  virtual void Generate() OVERRIDE {
5294  codegen()->DoDeferredTaggedToI(instr_, done());
5295  }
5296  virtual LInstruction* instr() OVERRIDE { return instr_; }
5297  private:
5298  LTaggedToI* instr_;
5299  };
5300 
5301  LOperand* input = instr->value();
5302  DCHECK(input->IsRegister());
5303  Register input_reg = ToRegister(input);
5304  DCHECK(input_reg.is(ToRegister(instr->result())));
5305 
5306  if (instr->hydrogen()->value()->representation().IsSmi()) {
5307  __ SmiUntag(input_reg);
5308  } else {
5309  DeferredTaggedToI* deferred =
5310  new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5311  // Optimistically untag the input.
5312  // If the input is a HeapObject, SmiUntag will set the carry flag.
5313  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
5314  __ SmiUntag(input_reg);
5315  // Branch to deferred code if the input was tagged.
5316  // The deferred code will take care of restoring the tag.
5317  __ j(carry, deferred->entry());
5318  __ bind(deferred->exit());
5319  }
5320 }
5321 
5322 
5323 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5324  LOperand* input = instr->value();
5325  DCHECK(input->IsRegister());
5326  LOperand* temp = instr->temp();
5327  DCHECK(temp->IsRegister());
5328  LOperand* result = instr->result();
5329  DCHECK(result->IsDoubleRegister());
5330 
5331  Register input_reg = ToRegister(input);
5332  Register temp_reg = ToRegister(temp);
5333 
5334  HValue* value = instr->hydrogen()->value();
5335  NumberUntagDMode mode = value->representation().IsSmi()
5336  ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5337 
5338  EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
5339  mode);
5340 }
5341 
5342 
5343 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5344  LOperand* input = instr->value();
5345  DCHECK(input->IsDoubleRegister());
5346  LOperand* result = instr->result();
5347  DCHECK(result->IsRegister());
5348  Register result_reg = ToRegister(result);
5349 
5350  if (instr->truncating()) {
5351  X87Register input_reg = ToX87Register(input);
5352  X87Fxch(input_reg);
5353  __ TruncateX87TOSToI(result_reg);
5354  } else {
5355  Label lost_precision, is_nan, minus_zero, done;
5356  X87Register input_reg = ToX87Register(input);
5357  X87Fxch(input_reg);
5358  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5359  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5360  &lost_precision, &is_nan, &minus_zero, dist);
5361  __ jmp(&done);
5362  __ bind(&lost_precision);
5363  DeoptimizeIf(no_condition, instr, "lost precision");
5364  __ bind(&is_nan);
5365  DeoptimizeIf(no_condition, instr, "NaN");
5366  __ bind(&minus_zero);
5367  DeoptimizeIf(no_condition, instr, "minus zero");
5368  __ bind(&done);
5369  }
5370 }
5371 
5372 
5373 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5374  LOperand* input = instr->value();
5375  DCHECK(input->IsDoubleRegister());
5376  LOperand* result = instr->result();
5377  DCHECK(result->IsRegister());
5378  Register result_reg = ToRegister(result);
5379 
5380  Label lost_precision, is_nan, minus_zero, done;
5381  X87Register input_reg = ToX87Register(input);
5382  X87Fxch(input_reg);
5383  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
5384  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5385  &lost_precision, &is_nan, &minus_zero, dist);
5386  __ jmp(&done);
5387  __ bind(&lost_precision);
5388  DeoptimizeIf(no_condition, instr, "lost precision");
5389  __ bind(&is_nan);
5390  DeoptimizeIf(no_condition, instr, "NaN");
5391  __ bind(&minus_zero);
5392  DeoptimizeIf(no_condition, instr, "minus zero");
5393  __ bind(&done);
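 // SmiTag shifts left by one; a value outside the 31-bit smi range sets the
 // overflow flag and triggers the deopt below.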
5394  __ SmiTag(result_reg);
5395  DeoptimizeIf(overflow, instr, "overflow");
5396 }
5397 
5398 
5399 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5400  LOperand* input = instr->value();
5401  __ test(ToOperand(input), Immediate(kSmiTagMask));
5402  DeoptimizeIf(not_zero, instr, "not a Smi");
5403 }
5404 
5405 
5406 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5407  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5408  LOperand* input = instr->value();
5409  __ test(ToOperand(input), Immediate(kSmiTagMask));
5410  DeoptimizeIf(zero, instr, "Smi");
5411  }
5412 }
5413 
5414 
5415 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5416  Register input = ToRegister(instr->value());
5417  Register temp = ToRegister(instr->temp());
5418 
5419  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
5420 
5421  if (instr->hydrogen()->is_interval_check()) {
5422  InstanceType first;
5423  InstanceType last;
5424  instr->hydrogen()->GetCheckInterval(&first, &last);
5425 
5426  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5427  static_cast<int8_t>(first));
5428 
5429  // If there is only one type in the interval check for equality.
5430  if (first == last) {
5431  DeoptimizeIf(not_equal, instr, "wrong instance type");
5432  } else {
5433  DeoptimizeIf(below, instr, "wrong instance type");
5434  // Omit check for the last type.
5435  if (last != LAST_TYPE) {
5436  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5437  static_cast<int8_t>(last));
5438  DeoptimizeIf(above, instr, "wrong instance type");
5439  }
5440  }
5441  } else {
5442  uint8_t mask;
5443  uint8_t tag;
5444  instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5445 
5446  if (base::bits::IsPowerOfTwo32(mask)) {
5447  DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
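 // With a power-of-two mask a single test_b decides: for tag == 0 the bit
 // must be clear, otherwise it must be set.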
5448  __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
5449  DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
5450  } else {
5451  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5452  __ and_(temp, mask);
5453  __ cmp(temp, tag);
5454  DeoptimizeIf(not_equal, instr, "wrong instance type");
5455  }
5456  }
5457 }
5458 
5459 
5460 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5461  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5462  if (instr->hydrogen()->object_in_new_space()) {
5463  Register reg = ToRegister(instr->value());
5464  Handle<Cell> cell = isolate()->factory()->NewCell(object);
5465  __ cmp(reg, Operand::ForCell(cell));
5466  } else {
5467  Operand operand = ToOperand(instr->value());
5468  __ cmp(operand, object);
5469  }
5470  DeoptimizeIf(not_equal, instr, "value mismatch");
5471 }
5472 
5473 
5474 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5475  {
5476  PushSafepointRegistersScope scope(this);
5477  __ push(object);
5478  __ xor_(esi, esi);
5479  __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5480  RecordSafepointWithRegisters(
5481  instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5482 
5483  __ test(eax, Immediate(kSmiTagMask));
5484  }
5485  DeoptimizeIf(zero, instr, "instance migration failed");
5486 }
5487 
5488 
5489 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5490  class DeferredCheckMaps FINAL : public LDeferredCode {
5491  public:
5492  DeferredCheckMaps(LCodeGen* codegen,
5493  LCheckMaps* instr,
5494  Register object,
5495  const X87Stack& x87_stack)
5496  : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5497  SetExit(check_maps());
5498  }
5499  virtual void Generate() OVERRIDE {
5500  codegen()->DoDeferredInstanceMigration(instr_, object_);
5501  }
5502  Label* check_maps() { return &check_maps_; }
5503  virtual LInstruction* instr() OVERRIDE { return instr_; }
5504  private:
5505  LCheckMaps* instr_;
5506  Label check_maps_;
5507  Register object_;
5508  };
5509 
5510  if (instr->hydrogen()->IsStabilityCheck()) {
5511  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5512  for (int i = 0; i < maps->size(); ++i) {
5513  AddStabilityDependency(maps->at(i).handle());
5514  }
5515  return;
5516  }
5517 
5518  LOperand* input = instr->value();
5519  DCHECK(input->IsRegister());
5520  Register reg = ToRegister(input);
5521 
5522  DeferredCheckMaps* deferred = NULL;
5523  if (instr->hydrogen()->HasMigrationTarget()) {
5524  deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5525  __ bind(deferred->check_maps());
5526  }
5527 
5528  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5529  Label success;
5530  for (int i = 0; i < maps->size() - 1; i++) {
5531  Handle<Map> map = maps->at(i).handle();
5532  __ CompareMap(reg, map);
5533  __ j(equal, &success, Label::kNear);
5534  }
5535 
5536  Handle<Map> map = maps->at(maps->size() - 1).handle();
5537  __ CompareMap(reg, map);
5538  if (instr->hydrogen()->HasMigrationTarget()) {
5539  __ j(not_equal, deferred->entry());
5540  } else {
5541  DeoptimizeIf(not_equal, instr, "wrong map");
5542  }
5543 
5544  __ bind(&success);
5545 }
5546 
5547 
5548 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5549  X87Register value_reg = ToX87Register(instr->unclamped());
5550  Register result_reg = ToRegister(instr->result());
5551  X87Fxch(value_reg);
5552  __ ClampTOSToUint8(result_reg);
5553 }
5554 
5555 
5556 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5557  DCHECK(instr->unclamped()->Equals(instr->result()));
5558  Register value_reg = ToRegister(instr->result());
5559  __ ClampUint8(value_reg);
5560 }
5561 
5562 
5563 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5564  Register input_reg = ToRegister(instr->unclamped());
5565  Register result_reg = ToRegister(instr->result());
5566  Register scratch = ToRegister(instr->scratch());
5567  Register scratch2 = ToRegister(instr->scratch2());
5568  Register scratch3 = ToRegister(instr->scratch3());
5569  Label is_smi, done, heap_number, valid_exponent,
5570  largest_value, zero_result, maybe_nan_or_infinity;
5571 
5572  __ JumpIfSmi(input_reg, &is_smi);
5573 
5574  // Check for heap number
5575  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5576  factory()->heap_number_map());
5577  __ j(equal, &heap_number, Label::kNear);
5578 
5579  // Check for undefined. Undefined is converted to zero for clamping
5580  // conversions.
5581  __ cmp(input_reg, factory()->undefined_value());
5582  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
5583  __ jmp(&zero_result, Label::kNear);
5584 
5585  // Heap number
5586  __ bind(&heap_number);
5587 
5588  // Surprisingly, all of the hand-crafted bit manipulations below are much
5589  // faster than the x86 FPU's built-in instruction, especially since "banker's
5590  // rounding" would add considerable extra cost there.
5591 
5592  // Get exponent word.
5593  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5594  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5595 
5596  // Test for negative values --> clamp to zero
5597  __ test(scratch, scratch);
5598  __ j(negative, &zero_result, Label::kNear);
5599 
5600  // Get exponent alone in scratch2.
5601  __ mov(scratch2, scratch);
5602  __ and_(scratch2, HeapNumber::kExponentMask);
5603  __ shr(scratch2, HeapNumber::kExponentShift);
5604  __ j(zero, &zero_result, Label::kNear);
5605  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5606  __ j(negative, &zero_result, Label::kNear);
5607 
5608  const uint32_t non_int8_exponent = 7;
5609  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5610  // If the exponent is too big, check for special values.
5611  __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5612 
5613  __ bind(&valid_exponent);
5614  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5615  // < 7. The shift bias is the number of bits to shift the mantissa such that
5616  // with an exponent of 7 the top-most one ends up in bit 30, allowing
5617  // detection of the rounding overflow from 255.5 to 256 (bit 31 goes from 0
5618  // to 1).
5619  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5620  __ lea(result_reg, MemOperand(scratch2, shift_bias));
5621  // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5622  // top bits of the mantissa.
5623  __ and_(scratch, HeapNumber::kMantissaMask);
5624  // Put back the implicit 1 of the mantissa
5625  __ or_(scratch, 1 << HeapNumber::kExponentShift);
5626  // Shift up to round
5627  __ shl_cl(scratch);
5628  // Use "banker's rounding" as the spec requires: if the fractional part of the
5629  // number is exactly 0.5, add the bit in the "ones" place to the "halves"
5630  // place, which has the effect of rounding to even.
5631  __ mov(scratch2, scratch);
5632  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5633  const uint32_t one_bit_shift = one_half_bit_shift + 1;
5634  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5635  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5636  Label no_round;
5637  __ j(less, &no_round, Label::kNear);
5638  Label round_up;
5639  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5640  __ j(greater, &round_up, Label::kNear);
5641  __ test(scratch3, scratch3);
5642  __ j(not_zero, &round_up, Label::kNear);
5643  __ mov(scratch2, scratch);
5644  __ and_(scratch2, Immediate(1 << one_bit_shift));
5645  __ shr(scratch2, 1);
5646  __ bind(&round_up);
5647  __ add(scratch, scratch2);
5648  __ j(overflow, &largest_value, Label::kNear);
5649  __ bind(&no_round);
5650  __ shr(scratch, 23);
5651  __ mov(result_reg, scratch);
5652  __ jmp(&done, Label::kNear);
5653 
5654  __ bind(&maybe_nan_or_infinity);
5655  // Check for NaN/Infinity; all other values this large map to 255.
5656  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5657  __ j(not_equal, &largest_value, Label::kNear);
5658 
5659  // Check for NaN, which differs from Infinity in that at least one mantissa
5660  // bit is set.
5661  __ and_(scratch, HeapNumber::kMantissaMask);
5662  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5663  __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN
5664  // Infinity -> Fall through to map to 255.
5665 
5666  __ bind(&largest_value);
5667  __ mov(result_reg, Immediate(255));
5668  __ jmp(&done, Label::kNear);
5669 
5670  __ bind(&zero_result);
5671  __ xor_(result_reg, result_reg);
5672  __ jmp(&done, Label::kNear);
5673 
5674  // smi
5675  __ bind(&is_smi);
5676  if (!input_reg.is(result_reg)) {
5677  __ mov(result_reg, input_reg);
5678  }
5679  __ SmiUntag(result_reg);
5680  __ ClampUint8(result_reg);
5681  __ bind(&done);
5682 }
5683 
5684 
5685 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5686  X87Register value_reg = ToX87Register(instr->value());
5687  Register result_reg = ToRegister(instr->result());
5688  X87Fxch(value_reg);
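 // Spill the double to the stack and read back the requested 32-bit half;
 // on this little-endian target the high word lives at esp + kPointerSize.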
5689  __ sub(esp, Immediate(kDoubleSize));
5690  __ fst_d(Operand(esp, 0));
5691  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5692  __ mov(result_reg, Operand(esp, kPointerSize));
5693  } else {
5694  __ mov(result_reg, Operand(esp, 0));
5695  }
5696  __ add(esp, Immediate(kDoubleSize));
5697 }
5698 
5699 
5700 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5701  Register hi_reg = ToRegister(instr->hi());
5702  Register lo_reg = ToRegister(instr->lo());
5703  X87Register result_reg = ToX87Register(instr->result());
5704  // Follow the pattern below to write an x87 fp register.
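 // The two 32-bit halves are written little-endian (lo at esp, hi at
 // esp + kPointerSize) and reloaded as a single double via fld_d.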
5705  X87PrepareToWrite(result_reg);
5706  __ sub(esp, Immediate(kDoubleSize));
5707  __ mov(Operand(esp, 0), lo_reg);
5708  __ mov(Operand(esp, kPointerSize), hi_reg);
5709  __ fld_d(Operand(esp, 0));
5710  __ add(esp, Immediate(kDoubleSize));
5711  X87CommitWrite(result_reg);
5712 }
5713 
5714 
5715 void LCodeGen::DoAllocate(LAllocate* instr) {
5716  class DeferredAllocate FINAL : public LDeferredCode {
5717  public:
5718  DeferredAllocate(LCodeGen* codegen,
5719  LAllocate* instr,
5720  const X87Stack& x87_stack)
5721  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5722  virtual void Generate() OVERRIDE {
5723  codegen()->DoDeferredAllocate(instr_);
5724  }
5725  virtual LInstruction* instr() OVERRIDE { return instr_; }
5726  private:
5727  LAllocate* instr_;
5728  };
5729 
5730  DeferredAllocate* deferred =
5731  new(zone()) DeferredAllocate(this, instr, x87_stack_);
5732 
5733  Register result = ToRegister(instr->result());
5734  Register temp = ToRegister(instr->temp());
5735 
5736  // Allocate memory for the object.
5737  AllocationFlags flags = TAG_OBJECT;
5738  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5739  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5740  }
5741  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5742  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5743  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5744  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5745  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5746  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5747  flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5748  }
5749 
5750  if (instr->size()->IsConstantOperand()) {
5751  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5752  if (size <= Page::kMaxRegularHeapObjectSize) {
5753  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5754  } else {
5755  __ jmp(deferred->entry());
5756  }
5757  } else {
5758  Register size = ToRegister(instr->size());
5759  __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5760  }
5761 
5762  __ bind(deferred->exit());
5763 
5764  if (instr->hydrogen()->MustPrefillWithFiller()) {
5765  if (instr->size()->IsConstantOperand()) {
5766  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5767  __ mov(temp, (size / kPointerSize) - 1);
5768  } else {
5769  temp = ToRegister(instr->size());
5770  __ shr(temp, kPointerSizeLog2);
5771  __ dec(temp);
5772  }
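 // Walk temp from the last word of the allocation downwards, storing the
 // one-pointer filler map so the GC never sees uninitialized memory.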
5773  Label loop;
5774  __ bind(&loop);
5775  __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5776  isolate()->factory()->one_pointer_filler_map());
5777  __ dec(temp);
5778  __ j(not_zero, &loop);
5779  }
5780 }
5781 
5782 
5783 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5784  Register result = ToRegister(instr->result());
5785 
5786  // TODO(3095996): Get rid of this. For now, we need to make the
5787  // result register contain a valid pointer because it is already
5788  // contained in the register pointer map.
5789  __ Move(result, Immediate(Smi::FromInt(0)));
5790 
5791  PushSafepointRegistersScope scope(this);
5792  if (instr->size()->IsRegister()) {
5793  Register size = ToRegister(instr->size());
5794  DCHECK(!size.is(result));
5795  __ SmiTag(ToRegister(instr->size()));
5796  __ push(size);
5797  } else {
5798  int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5799  if (size >= 0 && size <= Smi::kMaxValue) {
5800  __ push(Immediate(Smi::FromInt(size)));
5801  } else {
5802  // We should never get here at runtime => abort
5803  __ int3();
5804  return;
5805  }
5806  }
5807 
5808  int flags = AllocateDoubleAlignFlag::encode(
5809  instr->hydrogen()->MustAllocateDoubleAligned());
5810  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5811  DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5812  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5813  flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5814  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5815  DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5816  flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5817  } else {
5818  flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5819  }
5820  __ push(Immediate(Smi::FromInt(flags)));
5821 
5822  CallRuntimeFromDeferred(
5823  Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5824  __ StoreToSafepointRegisterSlot(result, eax);
5825 }
5826 
5827 
5828 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5829  DCHECK(ToRegister(instr->value()).is(eax));
5830  __ push(eax);
5831  CallRuntime(Runtime::kToFastProperties, 1, instr);
5832 }
5833 
5834 
5835 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5836  DCHECK(ToRegister(instr->context()).is(esi));
5837  Label materialized;
5838  // Registers will be used as follows:
5839  // ecx = literals array.
5840  // ebx = regexp literal.
5841  // eax = regexp literal clone.
5842  // esi = context.
5843  int literal_offset =
5844  FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5845  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
5846  __ mov(ebx, FieldOperand(ecx, literal_offset));
5847  __ cmp(ebx, factory()->undefined_value());
5848  __ j(not_equal, &materialized, Label::kNear);
5849 
5850  // Create regexp literal using runtime function
5851  // Result will be in eax.
5852  __ push(ecx);
5853  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
5854  __ push(Immediate(instr->hydrogen()->pattern()));
5855  __ push(Immediate(instr->hydrogen()->flags()));
5856  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5857  __ mov(ebx, eax);
5858 
5859  __ bind(&materialized);
5860  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5861  Label allocated, runtime_allocate;
5862  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
5863  __ jmp(&allocated, Label::kNear);
5864 
5865  __ bind(&runtime_allocate);
5866  __ push(ebx);
5867  __ push(Immediate(Smi::FromInt(size)));
5868  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5869  __ pop(ebx);
5870 
5871  __ bind(&allocated);
5872  // Copy the content into the newly allocated memory.
5873  // (Unroll copy loop once for better throughput).
5874  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5875  __ mov(edx, FieldOperand(ebx, i));
5876  __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
5877  __ mov(FieldOperand(eax, i), edx);
5878  __ mov(FieldOperand(eax, i + kPointerSize), ecx);
5879  }
5880  if ((size % (2 * kPointerSize)) != 0) {
5881  __ mov(edx, FieldOperand(ebx, size - kPointerSize));
5882  __ mov(FieldOperand(eax, size - kPointerSize), edx);
5883  }
5884 }
5885 
5886 
5887 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5888  DCHECK(ToRegister(instr->context()).is(esi));
5889  // Use the fast case closure allocation code that allocates in new
5890  // space for nested functions that don't need literal cloning.
5891  bool pretenure = instr->hydrogen()->pretenure();
5892  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5893  FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5894  instr->hydrogen()->kind());
5895  __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
5896  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5897  } else {
5898  __ push(esi);
5899  __ push(Immediate(instr->hydrogen()->shared_info()));
5900  __ push(Immediate(pretenure ? factory()->true_value()
5901  : factory()->false_value()));
5902  CallRuntime(Runtime::kNewClosure, 3, instr);
5903  }
5904 }
5905 
5906 
5907 void LCodeGen::DoTypeof(LTypeof* instr) {
5908  DCHECK(ToRegister(instr->context()).is(esi));
5909  LOperand* input = instr->value();
5910  EmitPushTaggedOperand(input);
5911  CallRuntime(Runtime::kTypeof, 1, instr);
5912 }
5913 
5914 
5915 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5916  Register input = ToRegister(instr->value());
5917  Condition final_branch_condition = EmitTypeofIs(instr, input);
5918  if (final_branch_condition != no_condition) {
5919  EmitBranch(instr, final_branch_condition);
5920  }
5921 }
5922 
5923 
5924 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5925  Label* true_label = instr->TrueLabel(chunk_);
5926  Label* false_label = instr->FalseLabel(chunk_);
5927  Handle<String> type_name = instr->type_literal();
5928  int left_block = instr->TrueDestination(chunk_);
5929  int right_block = instr->FalseDestination(chunk_);
5930  int next_block = GetNextEmittedBlock();
5931 
5932  Label::Distance true_distance = left_block == next_block ? Label::kNear
5933  : Label::kFar;
5934  Label::Distance false_distance = right_block == next_block ? Label::kNear
5935  : Label::kFar;
5936  Condition final_branch_condition = no_condition;
5937  if (String::Equals(type_name, factory()->number_string())) {
5938  __ JumpIfSmi(input, true_label, true_distance);
5939  __ cmp(FieldOperand(input, HeapObject::kMapOffset),
5940  factory()->heap_number_map());
5941  final_branch_condition = equal;
5942 
5943  } else if (String::Equals(type_name, factory()->string_string())) {
5944  __ JumpIfSmi(input, false_label, false_distance);
5945  __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5946  __ j(above_equal, false_label, false_distance);
5947  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5948  1 << Map::kIsUndetectable);
5949  final_branch_condition = zero;
5950 
5951  } else if (String::Equals(type_name, factory()->symbol_string())) {
5952  __ JumpIfSmi(input, false_label, false_distance);
5953  __ CmpObjectType(input, SYMBOL_TYPE, input);
5954  final_branch_condition = equal;
5955 
5956  } else if (String::Equals(type_name, factory()->boolean_string())) {
5957  __ cmp(input, factory()->true_value());
5958  __ j(equal, true_label, true_distance);
5959  __ cmp(input, factory()->false_value());
5960  final_branch_condition = equal;
5961 
5962  } else if (String::Equals(type_name, factory()->undefined_string())) {
5963  __ cmp(input, factory()->undefined_value());
5964  __ j(equal, true_label, true_distance);
5965  __ JumpIfSmi(input, false_label, false_distance);
5966  // Check for undetectable objects => true.
5967  __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
5968  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5969  1 << Map::kIsUndetectable);
5970  final_branch_condition = not_zero;
5971 
5972  } else if (String::Equals(type_name, factory()->function_string())) {
5973  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5974  __ JumpIfSmi(input, false_label, false_distance);
5975  __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
5976  __ j(equal, true_label, true_distance);
5977  __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
5978  final_branch_condition = equal;
5979 
5980  } else if (String::Equals(type_name, factory()->object_string())) {
5981  __ JumpIfSmi(input, false_label, false_distance);
5982  __ cmp(input, factory()->null_value());
5983  __ j(equal, true_label, true_distance);
5984  __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
5985  __ j(below, false_label, false_distance);
5986  __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5987  __ j(above, false_label, false_distance);
5988  // Check for undetectable objects => false.
5989  __ test_b(FieldOperand(input, Map::kBitFieldOffset),
5990  1 << Map::kIsUndetectable);
5991  final_branch_condition = zero;
5992 
5993  } else {
5994  __ jmp(false_label, false_distance);
5995  }
5996  return final_branch_condition;
5997 }
5998 
5999 
6000 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
6001  Register temp = ToRegister(instr->temp());
6002 
6003  EmitIsConstructCall(temp);
6004  EmitBranch(instr, equal);
6005 }
6006 
6007 
6008 void LCodeGen::EmitIsConstructCall(Register temp) {
6009  // Get the frame pointer for the calling frame.
6010  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
6011 
6012  // Skip the arguments adaptor frame if it exists.
6013  Label check_frame_marker;
6014  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
6015  Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6016  __ j(not_equal, &check_frame_marker, Label::kNear);
6017  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
6018 
6019  // Check the marker in the calling frame.
6020  __ bind(&check_frame_marker);
6021  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
6022  Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
6023 }
6024 
6025 
6026 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
6027  if (!info()->IsStub()) {
6028  // Ensure that we have enough space after the previous lazy-bailout
6029  // instruction for patching the code here.
6030  int current_pc = masm()->pc_offset();
6031  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
6032  int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
6033  __ Nop(padding_size);
6034  }
6035  }
6036  last_lazy_deopt_pc_ = masm()->pc_offset();
6037 }
6038 
6039 
6040 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
6041  last_lazy_deopt_pc_ = masm()->pc_offset();
6042  DCHECK(instr->HasEnvironment());
6043  LEnvironment* env = instr->environment();
6044  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6045  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6046 }
6047 
6048 
6049 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
6050  Deoptimizer::BailoutType type = instr->hydrogen()->type();
6051  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
6052  // needed return address), even though the implementation of LAZY and EAGER is
6053  // now identical. When LAZY is eventually completely folded into EAGER, remove
6054  // the special case below.
6055  if (info()->IsStub() && type == Deoptimizer::EAGER) {
6056  type = Deoptimizer::LAZY;
6057  }
6058  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
6059 }
6060 
6061 
6062 void LCodeGen::DoDummy(LDummy* instr) {
6063  // Nothing to see here, move on!
6064 }
6065 
6066 
6067 void LCodeGen::DoDummyUse(LDummyUse* instr) {
6068  // Nothing to see here, move on!
6069 }
6070 
6071 
6072 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
6073  PushSafepointRegistersScope scope(this);
6075  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
6076  RecordSafepointWithLazyDeopt(
6077  instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
6078  DCHECK(instr->HasEnvironment());
6079  LEnvironment* env = instr->environment();
6080  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6081 }
6082 
6083 
6084 void LCodeGen::DoStackCheck(LStackCheck* instr) {
6085  class DeferredStackCheck FINAL : public LDeferredCode {
6086  public:
6087  DeferredStackCheck(LCodeGen* codegen,
6088  LStackCheck* instr,
6089  const X87Stack& x87_stack)
6090  : LDeferredCode(codegen, x87_stack), instr_(instr) { }
6091  virtual void Generate() OVERRIDE {
6092  codegen()->DoDeferredStackCheck(instr_);
6093  }
6094  virtual LInstruction* instr() OVERRIDE { return instr_; }
6095  private:
6096  LStackCheck* instr_;
6097  };
6098 
6099  DCHECK(instr->HasEnvironment());
6100  LEnvironment* env = instr->environment();
6101  // There is no LLazyBailout instruction for stack-checks. We have to
6102  // prepare for lazy deoptimization explicitly here.
6103  if (instr->hydrogen()->is_function_entry()) {
6104  // Perform stack overflow check.
6105  Label done;
6106  ExternalReference stack_limit =
6107  ExternalReference::address_of_stack_limit(isolate());
6108  __ cmp(esp, Operand::StaticVariable(stack_limit));
6109  __ j(above_equal, &done, Label::kNear);
6110 
6111  DCHECK(instr->context()->IsRegister());
6112  DCHECK(ToRegister(instr->context()).is(esi));
6113  CallCode(isolate()->builtins()->StackCheck(),
6114  RelocInfo::CODE_TARGET,
6115  instr);
6116  __ bind(&done);
6117  } else {
6118  DCHECK(instr->hydrogen()->is_backwards_branch());
6119  // Perform stack overflow check if this goto needs it before jumping.
6120  DeferredStackCheck* deferred_stack_check =
6121  new(zone()) DeferredStackCheck(this, instr, x87_stack_);
6122  ExternalReference stack_limit =
6123  ExternalReference::address_of_stack_limit(isolate());
6124  __ cmp(esp, Operand::StaticVariable(stack_limit));
6125  __ j(below, deferred_stack_check->entry());
6126  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6127  __ bind(instr->done_label());
6128  deferred_stack_check->SetExit(instr->done_label());
6129  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6130  // Don't record a deoptimization index for the safepoint here.
6131  // This will be done explicitly when emitting the call and the safepoint in
6132  // the deferred code.
6133  }
6134 }
6135 
6136 
6137 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
6138  // This is a pseudo-instruction that ensures that the environment here is
6139  // properly registered for deoptimization and records the assembler's PC
6140  // offset.
6141  LEnvironment* environment = instr->environment();
6142 
6143  // If the environment were already registered, we would have no way of
6144  // backpatching it with the spill slot operands.
6145  DCHECK(!environment->HasBeenRegistered());
6146  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
6147 
6148  GenerateOsrPrologue();
6149 }
6150 
6151 
6152 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
6153  DCHECK(ToRegister(instr->context()).is(esi));
6154  __ cmp(eax, isolate()->factory()->undefined_value());
6155  DeoptimizeIf(equal, instr, "undefined");
6156 
6157  __ cmp(eax, isolate()->factory()->null_value());
6158  DeoptimizeIf(equal, instr, "null");
6159 
6160  __ test(eax, Immediate(kSmiTagMask));
6161  DeoptimizeIf(zero, instr, "Smi");
6162 
6163  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
6164  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
6165  DeoptimizeIf(below_equal, instr, "wrong instance type");
6166 
6167  Label use_cache, call_runtime;
6168  __ CheckEnumCache(&call_runtime);
6169 
6170  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
6171  __ jmp(&use_cache, Label::kNear);
6172 
6173  // Get the set of properties to enumerate.
6174  __ bind(&call_runtime);
6175  __ push(eax);
6176  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
6177 
6178  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
6179  isolate()->factory()->meta_map());
6180  DeoptimizeIf(not_equal, instr, "wrong map");
6181  __ bind(&use_cache);
6182 }
6183 
6184 
6185 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
6186  Register map = ToRegister(instr->map());
6187  Register result = ToRegister(instr->result());
6188  Label load_cache, done;
6189  __ EnumLength(result, map);
6190  __ cmp(result, Immediate(Smi::FromInt(0)));
6191  __ j(not_equal, &load_cache, Label::kNear);
6192  __ mov(result, isolate()->factory()->empty_fixed_array());
6193  __ jmp(&done, Label::kNear);
6194 
6195  __ bind(&load_cache);
6196  __ LoadInstanceDescriptors(map, result);
6197  __ mov(result,
6198  FieldOperand(result, DescriptorArray::kEnumCacheOffset));
6199  __ mov(result,
6200  FieldOperand(result, FixedArray::SizeFor(instr->idx())));
6201  __ bind(&done);
6202  __ test(result, result);
6203  DeoptimizeIf(equal, instr, "no cache");
6204 }
6205 
6206 
6207 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
6208  Register object = ToRegister(instr->value());
6209  __ cmp(ToRegister(instr->map()),
6210  FieldOperand(object, HeapObject::kMapOffset));
6211  DeoptimizeIf(not_equal, instr, "wrong map");
6212 }
6213 
6214 
6215 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
6216  Register object,
6217  Register index) {
6218  PushSafepointRegistersScope scope(this);
6219  __ push(object);
6220  __ push(index);
6221  __ xor_(esi, esi);
6222  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
6223  RecordSafepointWithRegisters(
6224  instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
6225  __ StoreToSafepointRegisterSlot(object, eax);
6226 }
6227 
6228 
6229 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6230  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
6231  public:
6232  DeferredLoadMutableDouble(LCodeGen* codegen,
6233  LLoadFieldByIndex* instr,
6234  Register object,
6235  Register index,
6236  const X87Stack& x87_stack)
6237  : LDeferredCode(codegen, x87_stack),
6238  instr_(instr),
6239  object_(object),
6240  index_(index) {
6241  }
6242  virtual void Generate() OVERRIDE {
6243  codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
6244  }
6245  virtual LInstruction* instr() OVERRIDE { return instr_; }
6246  private:
6247  LLoadFieldByIndex* instr_;
6248  Register object_;
6249  Register index_;
6250  };
6251 
6252  Register object = ToRegister(instr->object());
6253  Register index = ToRegister(instr->index());
6254 
6255  DeferredLoadMutableDouble* deferred;
6256  deferred = new(zone()) DeferredLoadMutableDouble(
6257  this, instr, object, index, x87_stack_);
6258 
6259  Label out_of_object, done;
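 // The index is a packed smi: the low bit of its payload marks a mutable
 // heap-number field (boxed in the deferred code), and the remaining bits
 // hold the field index, negative when the property is stored out of object.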
6260  __ test(index, Immediate(Smi::FromInt(1)));
6261  __ j(not_zero, deferred->entry());
6262 
6263  __ sar(index, 1);
6264 
6265  __ cmp(index, Immediate(0));
6266  __ j(less, &out_of_object, Label::kNear);
6267  __ mov(object, FieldOperand(object,
6268  index,
6269  times_half_pointer_size,
6270  JSObject::kHeaderSize));
6271  __ jmp(&done, Label::kNear);
6272 
6273  __ bind(&out_of_object);
6274  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
6275  __ neg(index);
6276  // Index is now equal to the out-of-object property index plus 1.
6277  __ mov(object, FieldOperand(object,
6278  index,
6279  times_half_pointer_size,
6280  FixedArray::kHeaderSize - kPointerSize));
6281  __ bind(deferred->exit());
6282  __ bind(&done);
6283 }
6284 
6285 
6286 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
6287  Register context = ToRegister(instr->context());
6288  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
6289 }
6290 
6291 
6292 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
6293  Handle<ScopeInfo> scope_info = instr->scope_info();
6294  __ Push(scope_info);
6295  __ push(ToRegister(instr->function()));
6296  CallRuntime(Runtime::kPushBlockContext, 2, instr);
6297  RecordSafepoint(Safepoint::kNoLazyDeopt);
6298 }
6299 
6300 
6301 #undef __
6302 
6303 } } // namespace v8::internal
6304 
6305 #endif // V8_TARGET_ARCH_X87
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
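A minimal sketch of the usual single-bit test this predicate performs. The name and signature come from the entry above; the body is the standard bit trick and may differ in detail from src/base/bits.h:

#include <cstdint>

// A power of two has exactly one bit set, so clearing the lowest set bit
// (value & (value - 1)) must leave zero; zero itself is excluded.
bool IsPowerOfTwo32(uint32_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}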
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
Vector< const char > CStrVector(const char *data)
Definition: vector.h:158
const int kPointerSize
Definition: globals.h:129
const Register edx
const uint32_t kStringEncodingMask
Definition: objects.h:555
const Register edi
MemOperand ContextOperand(Register context, int index)
const int kAlignmentPaddingPushed
Definition: frames-ia32.h:32
static bool ExternalArrayOpRequiresTemp(Representation key_representation, ElementsKind elements_kind)
@ DO_SMI_CHECK
Definition: globals.h:641
const int KB
Definition: globals.h:106
Condition CommuteCondition(Condition cond)
Definition: constants-arm.h:93
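Commuting a comparison swaps its operands, so the condition that held for (a, b) must be replaced by its mirror for (b, a). A sketch with a simplified, hypothetical Condition enum; the real enum in constants-arm.h encodes processor condition codes:

// Simplified stand-in for the assembler's Condition enum (illustrative only).
enum Condition { kEqual, kNotEqual, kLessThan, kGreaterThan,
                 kLessThanEqual, kGreaterThanEqual };

// a < b holds exactly when b > a, while (in)equality is symmetric.
Condition CommuteCondition(Condition cond) {
  switch (cond) {
    case kLessThan:         return kGreaterThan;
    case kGreaterThan:      return kLessThan;
    case kLessThanEqual:    return kGreaterThanEqual;
    case kGreaterThanEqual: return kLessThanEqual;
    default:                return cond;  // eq/ne are symmetric.
  }
}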
const int kBitsPerInt
Definition: globals.h:165
bool EvalComparison(Token::Value op, double op1, double op2)
Definition: assembler.cc:1488
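When both comparison operands are compile-time double constants, the comparison itself can be folded. A sketch under the assumption that EvalComparison performs exactly this folding; the Token stand-in below is illustrative, the real enum is Token::Value in src/token.h:

// Illustrative stand-in for Token::Value.
enum class Token { EQ, NE, LT, GT, LTE, GTE };

// Folds a comparison whose operands are both known doubles (sketch only).
bool EvalComparison(Token op, double op1, double op2) {
  switch (op) {
    case Token::EQ:  return op1 == op2;
    case Token::NE:  return op1 != op2;
    case Token::LT:  return op1 < op2;
    case Token::GT:  return op1 > op2;
    case Token::LTE: return op1 <= op2;
    case Token::GTE: return op1 >= op2;
  }
  return false;  // Unreachable for the tokens above.
}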
@ TRACK_ALLOCATION_SITE
Definition: objects.h:8085
@ kSeqStringTag
Definition: objects.h:563
@ ARGUMENTS_ADAPTOR
Definition: hydrogen.h:546
const Register esp
const int kPCOnStackSize
Definition: globals.h:135
const uint32_t kTwoByteStringTag
Definition: objects.h:556
const int kSmiTagSize
Definition: v8.h:5743
const int kDoubleSize
Definition: globals.h:127
Operand FieldOperand(Register object, int offset)
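Heap object pointers are tagged (kHeapObjectTag is 1 on this target), so addressing a field at a raw offset must compensate for the tag. A sketch with stand-in Register/Operand types; the idea is that the untagging folds into the addressing mode for free:

const int kHeapObjectTag = 1;  // Low bit set on tagged heap object pointers.

struct Register { int code; };
struct Operand {  // Minimal stand-in for the ia32 assembler's memory operand.
  Register base;
  int disp;
  Operand(Register b, int d) : base(b), disp(d) {}
};

// Subtracting kHeapObjectTag folds pointer untagging into the
// displacement, so no separate untag instruction is needed.
Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}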
const int kPointerSizeLog2
Definition: globals.h:147
@ LAST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:785
@ NUM_OF_CALLABLE_SPEC_OBJECT_TYPES
Definition: objects.h:788
@ JS_DATE_TYPE
Definition: objects.h:730
@ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:784
@ FIRST_JS_PROXY_TYPE
Definition: objects.h:778
@ JS_ARRAY_TYPE
Definition: objects.h:738
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
@ FIRST_SPEC_OBJECT_TYPE
Definition: objects.h:781
@ LAST_SPEC_OBJECT_TYPE
Definition: objects.h:782
@ HEAP_NUMBER_TYPE
Definition: objects.h:669
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ JS_FUNCTION_PROXY_TYPE
Definition: objects.h:726
@ LAST_JS_PROXY_TYPE
Definition: objects.h:779
@ EXTERNAL_UINT16_ELEMENTS
Definition: elements-kind.h:36
@ UINT8_CLAMPED_ELEMENTS
Definition: elements-kind.h:52
@ EXTERNAL_INT16_ELEMENTS
Definition: elements-kind.h:35
@ EXTERNAL_UINT8_ELEMENTS
Definition: elements-kind.h:34
@ EXTERNAL_INT32_ELEMENTS
Definition: elements-kind.h:37
@ FAST_HOLEY_DOUBLE_ELEMENTS
Definition: elements-kind.h:27
@ SLOPPY_ARGUMENTS_ELEMENTS
Definition: elements-kind.h:31
@ EXTERNAL_INT8_ELEMENTS
Definition: elements-kind.h:33
@ EXTERNAL_FLOAT32_ELEMENTS
Definition: elements-kind.h:39
@ EXTERNAL_FLOAT64_ELEMENTS
Definition: elements-kind.h:40
@ FAST_HOLEY_SMI_ELEMENTS
Definition: elements-kind.h:17
@ EXTERNAL_UINT32_ELEMENTS
Definition: elements-kind.h:38
@ EXTERNAL_UINT8_CLAMPED_ELEMENTS
Definition: elements-kind.h:41
bool IsSimpleMapChangeTransition(ElementsKind from_kind, ElementsKind to_kind)
const uint32_t kOneByteStringTag
Definition: objects.h:557
@ NO_OVERWRITE
Definition: ic-state.h:58
int ElementsKindToShiftSize(ElementsKind elements_kind)
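External-array accesses scale the element index by the element width; this helper returns that width as a shift count (log2 of the bytes per element, so byte_offset = index << shift). A sketch over a hypothetical subset of the kinds listed above:

// Illustrative subset of V8's ElementsKind enum (see elements-kind.h).
enum ElementsKind {
  EXTERNAL_INT8_ELEMENTS, EXTERNAL_INT16_ELEMENTS,
  EXTERNAL_INT32_ELEMENTS, EXTERNAL_FLOAT64_ELEMENTS,
};

// Shift size is log2 of the element width in bytes (sketch, subset only).
int ElementsKindToShiftSize(ElementsKind kind) {
  switch (kind) {
    case EXTERNAL_INT8_ELEMENTS:    return 0;  // 1-byte elements.
    case EXTERNAL_INT16_ELEMENTS:   return 1;  // 2-byte elements.
    case EXTERNAL_INT32_ELEMENTS:   return 2;  // 4-byte elements.
    case EXTERNAL_FLOAT64_ELEMENTS: return 3;  // 8-byte elements.
  }
  return 0;
}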
int32_t WhichPowerOf2Abs(int32_t x)
Definition: utils.h:168
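Multiplication by a power-of-two constant is strength-reduced to a shift; this helper supplies the shift amount for |x|. A sketch assuming |x| is a power of two, with kMinInt special-cased because its absolute value (2^31) does not fit in int32_t:

#include <cstdint>
#include <limits>

// Returns n such that |x| == 2^n, assuming |x| is a power of two (sketch).
int32_t WhichPowerOf2Abs(int32_t x) {
  if (x == std::numeric_limits<int32_t>::min()) return 31;  // |kMinInt| == 2^31.
  uint32_t abs = x < 0 ? -static_cast<uint32_t>(x) : static_cast<uint32_t>(x);
  int32_t n = 0;
  while (abs > 1) { abs >>= 1; ++n; }
  return n;
}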
int StackSlotOffset(int index)
Definition: lithium.cc:254
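Non-negative indices denote spill slots below the frame pointer, past the fixed part of the frame; negative indices denote incoming parameters above the saved fp and return address. A sketch under assumed ia32 frame constants; the real values live in globals.h and frames-ia32.h:

// Assumed ia32 constants, for illustration only.
const int kPointerSize = 4;
const int kFPOnStackSize = kPointerSize;             // Saved frame pointer.
const int kPCOnStackSize = kPointerSize;             // Return address.
const int kFixedFrameSizeFromFp = 2 * kPointerSize;  // Context + function.

// Frame-pointer-relative offset of a Lithium stack slot (sketch only).
int StackSlotOffset(int index) {
  if (index >= 0) {
    // Spill slot: below fp, past the fixed frame slots.
    return -(index + 1) * kPointerSize - kFixedFrameSizeFromFp;
  }
  // Incoming parameter: above fp, past the saved fp and return address.
  return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
}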
const Register esi
@ FAIL_ON_MINUS_ZERO
Definition: globals.h:768
const Register eax
const int kUC16Size
Definition: globals.h:187
bool IsFastPackedElementsKind(ElementsKind kind)
const Register ebx
const bool FLAG_enable_slow_asserts
Definition: checks.h:31
@ NUMBER_CANDIDATE_IS_SMI
Definition: lithium.h:756
@ NUMBER_CANDIDATE_IS_ANY_TAGGED
Definition: lithium.h:757
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind)
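A packed kind's holey counterpart additionally admits holes (missing elements), so element transitions that introduce a hole use this mapping. A sketch over the three fast packed kinds, with an illustrative subset of the enum:

// Illustrative subset of ElementsKind (the real enum is in elements-kind.h).
enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
};

// Maps each packed fast kind to the kind that also admits holes (sketch).
ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
  switch (packed_kind) {
    case FAST_SMI_ELEMENTS:    return FAST_HOLEY_SMI_ELEMENTS;
    case FAST_ELEMENTS:        return FAST_HOLEY_ELEMENTS;
    case FAST_DOUBLE_ELEMENTS: return FAST_HOLEY_DOUBLE_ELEMENTS;
    default:                   return packed_kind;  // Already holey.
  }
}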
AllocationSiteOverrideMode
Definition: code-stubs.h:716
@ DISABLE_ALLOCATION_SITES
Definition: code-stubs.h:718
Condition NegateCondition(Condition cond)
Definition: constants-arm.h:86
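Reversing a branch requires the condition that is true exactly when the original is false. A sketch as an explicit table over a simplified, hypothetical enum; on the real targets the condition-code encoding typically reduces this to flipping a single bit:

// Simplified stand-in for the assembler's Condition enum (illustrative only).
enum Condition { kEqual, kNotEqual, kLessThan, kGreaterThanEqual,
                 kGreaterThan, kLessThanEqual };

// eq<->ne, lt<->ge, gt<->le: each pair partitions all outcomes.
Condition NegateCondition(Condition cond) {
  switch (cond) {
    case kEqual:            return kNotEqual;
    case kNotEqual:         return kEqual;
    case kLessThan:         return kGreaterThanEqual;
    case kGreaterThanEqual: return kLessThan;
    case kGreaterThan:      return kLessThanEqual;
    case kLessThanEqual:    return kGreaterThan;
  }
  return cond;
}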
static InstanceType TestType(HHasInstanceTypeAndBranch *instr)
const int kMinInt
Definition: globals.h:110
T Abs(T a)
Definition: utils.h:153
const uint32_t kStringRepresentationMask
Definition: objects.h:561
byte * Address
Definition: globals.h:101
static Condition BranchCondition(HHasInstanceTypeAndBranch *instr)
@ NOT_CONTEXTUAL
Definition: objects.h:174
OStream & dec(OStream &os)
Definition: ostreams.cc:122
@ OLD_DATA_SPACE
Definition: globals.h:361
@ OLD_POINTER_SPACE
Definition: globals.h:360
const int kHeapObjectTag
Definition: v8.h:5737
const Register no_reg
static int ArgumentsOffsetWithoutFrame(int index)
static Condition ComputeCompareCondition(Token::Value op)
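This file-local helper translates a JavaScript comparison token into the processor condition checked after the compare instruction. A sketch with illustrative stand-in enums; the real types are Token::Value (src/token.h) and the ia32 condition codes:

// Illustrative stand-ins for the real Token::Value and Condition types.
enum class Token { EQ, EQ_STRICT, LT, GT, LTE, GTE };
enum Condition { kEqual, kLess, kGreater, kLessEqual, kGreaterEqual,
                 kNoCondition };

// Maps a comparison token onto the condition set by the compare (sketch).
Condition ComputeCompareCondition(Token op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:  return kEqual;
    case Token::LT:  return kLess;
    case Token::GT:  return kGreater;
    case Token::LTE: return kLessEqual;
    case Token::GTE: return kGreaterEqual;
  }
  return kNoCondition;
}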
static const char * LabelType(LLabel *label)
const int kAlignmentZapValue
Definition: frames-ia32.h:33
MemOperand GlobalObjectOperand()
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const intptr_t kSmiTagMask
Definition: v8.h:5744
const Register ebp
@ NO_CALL_CONSTRUCTOR_FLAGS
Definition: globals.h:478
const int kNoAlignmentPadding
Definition: frames-ia32.h:31
const int kSmiTag
Definition: v8.h:5742
bool IsFastSmiElementsKind(ElementsKind kind)
const uint32_t kHoleNanLower32
Definition: globals.h:657
const uint32_t kSlotsZapValue
Definition: globals.h:273
const int kCharSize
Definition: globals.h:122
const uint32_t kHoleNanUpper32
Definition: globals.h:656
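kHoleNanLower32/kHoleNanUpper32 split the "hole" NaN bit pattern used in holey-double backing stores into two words, so 32-bit code can test for it without floating-point compares. A hedged sketch; the IsHoleNan helper name and the constant's value below are illustrative assumptions, the real constant is in globals.h:

#include <cstdint>
#include <cstring>

// Assumed value, for illustration only.
const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;

// No ordinary arithmetic result carries this upper word, so comparing
// the upper 32 bits alone suffices to detect the hole.
bool IsHoleNan(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}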
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:130
static intptr_t Free(PagedSpace *space, FreeList *free_list, Address start, int size)
const Register ecx
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
#define IN
static Register FromAllocationIndex(int index)
bool is(Register reg) const
static const int kMaxNumAllocatableRegisters
static X87Register FromAllocationIndex(int index)
bool is(X87Register reg) const
#define T(name, string, precedence)
Definition: token.cc:25