macro-assembler-x87.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #if V8_TARGET_ARCH_X87
8 
9 #include "src/base/bits.h"
11 #include "src/bootstrapper.h"
12 #include "src/codegen.h"
13 #include "src/cpu-profiler.h"
14 #include "src/debug.h"
15 #include "src/isolate-inl.h"
16 #include "src/runtime/runtime.h"
17 #include "src/serialize.h"
18 
19 namespace v8 {
20 namespace internal {
21 
22 // -------------------------------------------------------------------------
23 // MacroAssembler implementation.
24 
25 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
26  : Assembler(arg_isolate, buffer, size),
27  generating_stub_(false),
28  has_frame_(false) {
29  if (isolate() != NULL) {
30  // TODO(titzer): should we just use a null handle here instead?
31  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
32  isolate());
33  }
34 }
35 
36 
37 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
38  DCHECK(!r.IsDouble());
39  if (r.IsInteger8()) {
40  movsx_b(dst, src);
41  } else if (r.IsUInteger8()) {
42  movzx_b(dst, src);
43  } else if (r.IsInteger16()) {
44  movsx_w(dst, src);
45  } else if (r.IsUInteger16()) {
46  movzx_w(dst, src);
47  } else {
48  mov(dst, src);
49  }
50 }
51 
52 
53 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
54  DCHECK(!r.IsDouble());
55  if (r.IsInteger8() || r.IsUInteger8()) {
56  mov_b(dst, src);
57  } else if (r.IsInteger16() || r.IsUInteger16()) {
58  mov_w(dst, src);
59  } else {
60  if (r.IsHeapObject()) {
61  AssertNotSmi(src);
62  } else if (r.IsSmi()) {
63  AssertSmi(src);
64  }
65  mov(dst, src);
66  }
67 }
68 
69 
70 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
71  if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
72  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
73  mov(destination, value);
74  return;
75  }
76  ExternalReference roots_array_start =
77  ExternalReference::roots_array_start(isolate());
78  mov(destination, Immediate(index));
79  mov(destination, Operand::StaticArray(destination,
80  times_pointer_size,
81  roots_array_start));
82 }
83 
84 
85 void MacroAssembler::StoreRoot(Register source,
86  Register scratch,
87  Heap::RootListIndex index) {
88  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
89  ExternalReference roots_array_start =
90  ExternalReference::roots_array_start(isolate());
91  mov(scratch, Immediate(index));
92  mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
93  source);
94 }
95 
96 
97 void MacroAssembler::CompareRoot(Register with,
98  Register scratch,
99  Heap::RootListIndex index) {
100  ExternalReference roots_array_start =
101  ExternalReference::roots_array_start(isolate());
102  mov(scratch, Immediate(index));
103  cmp(with, Operand::StaticArray(scratch,
104  times_pointer_size,
105  roots_array_start));
106 }
107 
108 
109 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
110  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
111  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
112  cmp(with, value);
113 }
114 
115 
116 void MacroAssembler::CompareRoot(const Operand& with,
117  Heap::RootListIndex index) {
118  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
119  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
120  cmp(with, value);
121 }
122 
123 
124 void MacroAssembler::InNewSpace(
125  Register object,
126  Register scratch,
127  Condition cc,
128  Label* condition_met,
129  Label::Distance condition_met_distance) {
130  DCHECK(cc == equal || cc == not_equal);
131  if (scratch.is(object)) {
132  and_(scratch, Immediate(~Page::kPageAlignmentMask));
133  } else {
134  mov(scratch, Immediate(~Page::kPageAlignmentMask));
135  and_(scratch, object);
136  }
137  // Check that we can use a test_b.
138  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
139  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
140  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
141  | (1 << MemoryChunk::IN_TO_SPACE);
142  // If non-zero, the page belongs to new-space.
143  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
144  static_cast<uint8_t>(mask));
145  j(cc, condition_met, condition_met_distance);
146 }
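// For reference, the masking-and-flag test above is roughly equivalent to the
// following C++ predicate (a sketch only, assuming the MemoryChunk header sits
// at the page-aligned base address and that flags fit in a word):
//
//   bool InNewSpaceSketch(uintptr_t obj) {
//     uintptr_t page = obj & ~Page::kPageAlignmentMask;
//     uintptr_t flags =
//         *reinterpret_cast<uintptr_t*>(page + MemoryChunk::kFlagsOffset);
//     return (flags & ((1 << MemoryChunk::IN_FROM_SPACE) |
//                      (1 << MemoryChunk::IN_TO_SPACE))) != 0;
//   }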
147 
148 
149 void MacroAssembler::RememberedSetHelper(
150  Register object, // Only used for debug checks.
151  Register addr, Register scratch, SaveFPRegsMode save_fp,
152  MacroAssembler::RememberedSetFinalAction and_then) {
153  Label done;
154  if (emit_debug_code()) {
155  Label ok;
156  JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
157  int3();
158  bind(&ok);
159  }
160  // Load store buffer top.
161  ExternalReference store_buffer =
162  ExternalReference::store_buffer_top(isolate());
163  mov(scratch, Operand::StaticVariable(store_buffer));
164  // Store pointer to buffer.
165  mov(Operand(scratch, 0), addr);
166  // Increment buffer top.
167  add(scratch, Immediate(kPointerSize));
168  // Write back new top of buffer.
169  mov(Operand::StaticVariable(store_buffer), scratch);
170  // Call stub on end of buffer.
171  // Check for end of buffer.
172  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
173  if (and_then == kReturnAtEnd) {
174  Label buffer_overflowed;
175  j(not_equal, &buffer_overflowed, Label::kNear);
176  ret(0);
177  bind(&buffer_overflowed);
178  } else {
179  DCHECK(and_then == kFallThroughAtEnd);
180  j(equal, &done, Label::kNear);
181  }
182  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
183  CallStub(&store_buffer_overflow);
184  if (and_then == kReturnAtEnd) {
185  ret(0);
186  } else {
187  DCHECK(and_then == kFallThroughAtEnd);
188  bind(&done);
189  }
190 }
191 
192 
193 void MacroAssembler::ClampTOSToUint8(Register result_reg) {
194  Label done, conv_failure;
195  sub(esp, Immediate(kPointerSize));
196  fnclex();
197  fist_s(Operand(esp, 0));
198  pop(result_reg);
199  X87CheckIA();
200  j(equal, &conv_failure, Label::kNear);
201  test(result_reg, Immediate(0xFFFFFF00));
202  j(zero, &done, Label::kNear);
203  setcc(sign, result_reg);
204  sub(result_reg, Immediate(1));
205  and_(result_reg, Immediate(255));
206  jmp(&done, Label::kNear);
207  bind(&conv_failure);
208  fnclex();
209  fldz();
210  fld(1);
211  FCmp();
212  setcc(below, result_reg); // 1 if negative, 0 if positive.
213  dec_b(result_reg); // 0 if negative, 255 if positive.
214  bind(&done);
215 }
216 
217 
218 void MacroAssembler::ClampUint8(Register reg) {
219  Label done;
220  test(reg, Immediate(0xFFFFFF00));
221  j(zero, &done, Label::kNear);
222  setcc(negative, reg); // 1 if negative, 0 if positive.
223  dec_b(reg); // 0 if negative, 255 if positive.
224  bind(&done);
225 }
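// The setcc/dec_b tail used by both clamp helpers above is a branch-free
// saturation to the range 0..255. A plain C++ sketch of the same mapping
// (illustrative only; the name and signature are not part of this file):
static inline uint8_t ClampToUint8Sketch(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);  // already 0..255
  uint8_t is_negative = value < 0 ? 1 : 0;        // setcc(negative): 1 if negative
  return static_cast<uint8_t>(is_negative - 1);   // dec_b: 1 -> 0, 0 -> 255 (byte wrap)
}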
226 
227 
228 void MacroAssembler::SlowTruncateToI(Register result_reg,
229  Register input_reg,
230  int offset) {
231  DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
232  call(stub.GetCode(), RelocInfo::CODE_TARGET);
233 }
234 
235 
236 void MacroAssembler::TruncateX87TOSToI(Register result_reg) {
237  sub(esp, Immediate(kDoubleSize));
238  fst_d(MemOperand(esp, 0));
239  SlowTruncateToI(result_reg, esp, 0);
240  add(esp, Immediate(kDoubleSize));
241 }
242 
243 
244 void MacroAssembler::X87TOSToI(Register result_reg,
245  MinusZeroMode minus_zero_mode,
246  Label* lost_precision, Label* is_nan,
247  Label* minus_zero, Label::Distance dst) {
248  Label done;
249  sub(esp, Immediate(kPointerSize));
250  fld(0);
251  fist_s(MemOperand(esp, 0));
252  fild_s(MemOperand(esp, 0));
253  pop(result_reg);
254  FCmp();
255  j(not_equal, lost_precision, dst);
256  j(parity_even, is_nan, dst);
257  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
258  test(result_reg, Operand(result_reg));
259  j(not_zero, &done, Label::kNear);
260  // To check for minus zero, we load the value again as float, and check
261  // if that is still 0.
262  sub(esp, Immediate(kPointerSize));
263  fst_s(MemOperand(esp, 0));
264  pop(result_reg);
265  test(result_reg, Operand(result_reg));
266  j(not_zero, minus_zero, dst);
267  }
268  bind(&done);
269 }
270 
271 
272 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
273  Register input_reg) {
274  Label done, slow_case;
275 
276  SlowTruncateToI(result_reg, input_reg);
277  bind(&done);
278 }
279 
280 
281 void MacroAssembler::LoadUint32NoSSE2(Register src) {
282  Label done;
283  push(src);
284  fild_s(Operand(esp, 0));
285  cmp(src, Immediate(0));
286  j(not_sign, &done, Label::kNear);
287  ExternalReference uint32_bias =
288  ExternalReference::address_of_uint32_bias();
289  fld_d(Operand::StaticVariable(uint32_bias));
290  faddp(1);
291  bind(&done);
292  add(esp, Immediate(kPointerSize));
293 }
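// The bias trick above: fild_s interprets the 32-bit value as a signed integer,
// so when the top bit is set the loaded value is off by exactly 2^32. Adding
// the uint32 bias (presumably 4294967296.0, which address_of_uint32_bias is
// assumed to point at) recovers the unsigned value; e.g. 0xFFFFFFFF loads as
// -1.0 and becomes 4294967295.0 after the faddp.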
294 
295 
296 void MacroAssembler::RecordWriteArray(
297  Register object, Register value, Register index, SaveFPRegsMode save_fp,
298  RememberedSetAction remembered_set_action, SmiCheck smi_check,
299  PointersToHereCheck pointers_to_here_check_for_value) {
300  // First, check if a write barrier is even needed. The tests below
301  // catch stores of Smis.
302  Label done;
303 
304  // Skip barrier if writing a smi.
305  if (smi_check == INLINE_SMI_CHECK) {
306  DCHECK_EQ(0, kSmiTag);
307  test(value, Immediate(kSmiTagMask));
308  j(zero, &done);
309  }
310 
311  // Array access: calculate the destination address in the same manner as
312  // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
313  // into an array of words.
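// For example, a smi index of 5 is encoded as 10 (5 << 1); scaling it by
// times_half_pointer_size (x2) gives a byte offset of 20 = 5 * kPointerSize,
// i.e. exactly the word offset into the FixedArray backing store.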
314  Register dst = index;
315  lea(dst, Operand(object, index, times_half_pointer_size,
316  FixedArray::kHeaderSize - kHeapObjectTag));
317 
318  RecordWrite(object, dst, value, save_fp, remembered_set_action,
319  OMIT_SMI_CHECK, pointers_to_here_check_for_value);
320 
321  bind(&done);
322 
323  // Clobber clobbered input registers when running with the debug-code flag
324  // turned on to provoke errors.
325  if (emit_debug_code()) {
326  mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
327  mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
328  }
329 }
330 
331 
332 void MacroAssembler::RecordWriteField(
333  Register object, int offset, Register value, Register dst,
334  SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action,
335  SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) {
336  // First, check if a write barrier is even needed. The tests below
337  // catch stores of Smis.
338  Label done;
339 
340  // Skip barrier if writing a smi.
341  if (smi_check == INLINE_SMI_CHECK) {
342  JumpIfSmi(value, &done, Label::kNear);
343  }
344 
345  // Although the object register is tagged, the offset is relative to the start
346  // of the object, so the offset must be a multiple of kPointerSize.
347  DCHECK(IsAligned(offset, kPointerSize));
348 
349  lea(dst, FieldOperand(object, offset));
350  if (emit_debug_code()) {
351  Label ok;
352  test_b(dst, (1 << kPointerSizeLog2) - 1);
353  j(zero, &ok, Label::kNear);
354  int3();
355  bind(&ok);
356  }
357 
358  RecordWrite(object, dst, value, save_fp, remembered_set_action,
359  OMIT_SMI_CHECK, pointers_to_here_check_for_value);
360 
361  bind(&done);
362 
363  // Clobber clobbered input registers when running with the debug-code flag
364  // turned on to provoke errors.
365  if (emit_debug_code()) {
366  mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
367  mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
368  }
369 }
370 
371 
372 void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
373  Register scratch1, Register scratch2,
374  SaveFPRegsMode save_fp) {
375  Label done;
376 
377  Register address = scratch1;
378  Register value = scratch2;
379  if (emit_debug_code()) {
380  Label ok;
381  lea(address, FieldOperand(object, HeapObject::kMapOffset));
382  test_b(address, (1 << kPointerSizeLog2) - 1);
383  j(zero, &ok, Label::kNear);
384  int3();
385  bind(&ok);
386  }
387 
388  DCHECK(!object.is(value));
389  DCHECK(!object.is(address));
390  DCHECK(!value.is(address));
391  AssertNotSmi(object);
392 
393  if (!FLAG_incremental_marking) {
394  return;
395  }
396 
397  // Compute the address.
398  lea(address, FieldOperand(object, HeapObject::kMapOffset));
399 
400  // A single check of the map's page's interesting flag suffices, since it is
401  // only set during incremental collection, and then it's also guaranteed that
402  // the from object's page's interesting flag is also set. This optimization
403  // relies on the fact that maps can never be in new space.
404  DCHECK(!isolate()->heap()->InNewSpace(*map));
405  CheckPageFlagForMap(map,
406  MemoryChunk::kPointersToHereAreInterestingMask,
407  zero,
408  &done,
409  Label::kNear);
410 
411  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
412  save_fp);
413  CallStub(&stub);
414 
415  bind(&done);
416 
417  // Count number of write barriers in generated code.
418  isolate()->counters()->write_barriers_static()->Increment();
419  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
420 
421  // Clobber clobbered input registers when running with the debug-code flag
422  // turned on to provoke errors.
423  if (emit_debug_code()) {
424  mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
425  mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
426  mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
427  }
428 }
429 
430 
431 void MacroAssembler::RecordWrite(
432  Register object, Register address, Register value, SaveFPRegsMode fp_mode,
433  RememberedSetAction remembered_set_action, SmiCheck smi_check,
434  PointersToHereCheck pointers_to_here_check_for_value) {
435  DCHECK(!object.is(value));
436  DCHECK(!object.is(address));
437  DCHECK(!value.is(address));
438  AssertNotSmi(object);
439 
440  if (remembered_set_action == OMIT_REMEMBERED_SET &&
441  !FLAG_incremental_marking) {
442  return;
443  }
444 
445  if (emit_debug_code()) {
446  Label ok;
447  cmp(value, Operand(address, 0));
448  j(equal, &ok, Label::kNear);
449  int3();
450  bind(&ok);
451  }
452 
453  // First, check if a write barrier is even needed. The tests below
454  // catch stores of Smis and stores into young gen.
455  Label done;
456 
457  if (smi_check == INLINE_SMI_CHECK) {
458  // Skip barrier if writing a smi.
459  JumpIfSmi(value, &done, Label::kNear);
460  }
461 
462  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
463  CheckPageFlag(value,
464  value, // Used as scratch.
465  MemoryChunk::kPointersToHereAreInterestingMask,
466  zero,
467  &done,
468  Label::kNear);
469  }
470  CheckPageFlag(object,
471  value, // Used as scratch.
472  MemoryChunk::kPointersFromHereAreInterestingMask,
473  zero,
474  &done,
475  Label::kNear);
476 
477  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
478  fp_mode);
479  CallStub(&stub);
480 
481  bind(&done);
482 
483  // Count number of write barriers in generated code.
484  isolate()->counters()->write_barriers_static()->Increment();
485  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
486 
487  // Clobber clobbered registers when running with the debug-code flag
488  // turned on to provoke errors.
489  if (emit_debug_code()) {
490  mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
491  mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
492  }
493 }
494 
495 
496 void MacroAssembler::DebugBreak() {
497  Move(eax, Immediate(0));
498  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
499  CEntryStub ces(isolate(), 1);
500  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
501 }
502 
503 
504 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
505  static const int kMaxImmediateBits = 17;
506  if (!RelocInfo::IsNone(x.rmode_)) return false;
507  return !is_intn(x.x_, kMaxImmediateBits);
508 }
509 
510 
511 void MacroAssembler::SafeMove(Register dst, const Immediate& x) {
512  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
513  Move(dst, Immediate(x.x_ ^ jit_cookie()));
514  xor_(dst, jit_cookie());
515  } else {
516  Move(dst, x);
517  }
518 }
519 
520 
521 void MacroAssembler::SafePush(const Immediate& x) {
522  if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
523  push(Immediate(x.x_ ^ jit_cookie()));
524  xor_(Operand(esp, 0), Immediate(jit_cookie()));
525  } else {
526  push(x);
527  }
528 }
529 
530 
531 void MacroAssembler::CmpObjectType(Register heap_object,
532  InstanceType type,
533  Register map) {
534  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
535  CmpInstanceType(map, type);
536 }
537 
538 
539 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
540  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
541  static_cast<int8_t>(type));
542 }
543 
544 
545 void MacroAssembler::CheckFastElements(Register map,
546  Label* fail,
547  Label::Distance distance) {
552  cmpb(FieldOperand(map, Map::kBitField2Offset),
553  Map::kMaximumBitField2FastHoleyElementValue);
554  j(above, fail, distance);
555 }
556 
557 
558 void MacroAssembler::CheckFastObjectElements(Register map,
559  Label* fail,
560  Label::Distance distance) {
565  cmpb(FieldOperand(map, Map::kBitField2Offset),
566  Map::kMaximumBitField2FastHoleySmiElementValue);
567  j(below_equal, fail, distance);
568  cmpb(FieldOperand(map, Map::kBitField2Offset),
569  Map::kMaximumBitField2FastHoleyElementValue);
570  j(above, fail, distance);
571 }
572 
573 
574 void MacroAssembler::CheckFastSmiElements(Register map,
575  Label* fail,
576  Label::Distance distance) {
579  cmpb(FieldOperand(map, Map::kBitField2Offset),
580  Map::kMaximumBitField2FastHoleySmiElementValue);
581  j(above, fail, distance);
582 }
583 
584 
585 void MacroAssembler::StoreNumberToDoubleElements(
586  Register maybe_number,
587  Register elements,
588  Register key,
589  Register scratch,
590  Label* fail,
591  int elements_offset) {
592  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
593  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
594 
595  CheckMap(maybe_number,
596  isolate()->factory()->heap_number_map(),
597  fail,
598  DONT_DO_SMI_CHECK);
599 
600  // Double value, canonicalize NaN.
601  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
602  cmp(FieldOperand(maybe_number, offset),
603  Immediate(kNaNOrInfinityLowerBoundUpper32));
604  j(greater_equal, &maybe_nan, Label::kNear);
605 
606  bind(&not_nan);
607  ExternalReference canonical_nan_reference =
608  ExternalReference::address_of_canonical_non_hole_nan();
609  fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
610  bind(&have_double_value);
611  fstp_d(FieldOperand(elements, key, times_4,
612  FixedDoubleArray::kHeaderSize - elements_offset));
613  jmp(&done);
614 
615  bind(&maybe_nan);
616  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
617  // it's an Infinity, and the non-NaN code path applies.
618  j(greater, &is_nan, Label::kNear);
619  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
620  j(zero, &not_nan);
621  bind(&is_nan);
622  fld_d(Operand::StaticVariable(canonical_nan_reference));
623  jmp(&have_double_value, Label::kNear);
624 
625  bind(&smi_value);
626  // Value is a smi. Convert to a double and store.
627  // Preserve original value.
628  mov(scratch, maybe_number);
629  SmiUntag(scratch);
630  push(scratch);
631  fild_s(Operand(esp, 0));
632  pop(scratch);
633  fstp_d(FieldOperand(elements, key, times_4,
634  FixedDoubleArray::kHeaderSize - elements_offset));
635  bind(&done);
636 }
637 
638 
639 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
640  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
641 }
642 
643 
644 void MacroAssembler::CheckMap(Register obj,
645  Handle<Map> map,
646  Label* fail,
647  SmiCheckType smi_check_type) {
648  if (smi_check_type == DO_SMI_CHECK) {
649  JumpIfSmi(obj, fail);
650  }
651 
652  CompareMap(obj, map);
653  j(not_equal, fail);
654 }
655 
656 
657 void MacroAssembler::DispatchMap(Register obj,
658  Register unused,
659  Handle<Map> map,
660  Handle<Code> success,
661  SmiCheckType smi_check_type) {
662  Label fail;
663  if (smi_check_type == DO_SMI_CHECK) {
664  JumpIfSmi(obj, &fail);
665  }
666  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
667  j(equal, success);
668 
669  bind(&fail);
670 }
671 
672 
673 Condition MacroAssembler::IsObjectStringType(Register heap_object,
674  Register map,
675  Register instance_type) {
676  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
677  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
679  test(instance_type, Immediate(kIsNotStringMask));
680  return zero;
681 }
682 
683 
684 Condition MacroAssembler::IsObjectNameType(Register heap_object,
685  Register map,
686  Register instance_type) {
687  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
688  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
689  cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE));
690  return below_equal;
691 }
692 
693 
694 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
695  Register map,
696  Register scratch,
697  Label* fail) {
698  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
699  IsInstanceJSObjectType(map, scratch, fail);
700 }
701 
702 
703 void MacroAssembler::IsInstanceJSObjectType(Register map,
704  Register scratch,
705  Label* fail) {
706  movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
707  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
708  cmp(scratch,
709  LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
710  j(above, fail);
711 }
712 
713 
714 void MacroAssembler::FCmp() {
715  fucompp();
716  push(eax);
717  fnstsw_ax();
718  sahf();
719  pop(eax);
720 }
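// fucompp compares st(0) with st(1) and pops both; fnstsw_ax copies the FPU
// status word into ax and sahf transfers C0/C2/C3 into CF/PF/ZF. An unordered
// compare (either operand NaN) sets C2, so callers detect NaN with
// j(parity_even, ...) after FCmp(), as X87TOSToI does above.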
721 
722 
723 void MacroAssembler::FXamMinusZero() {
724  fxam();
725  push(eax);
726  fnstsw_ax();
727  and_(eax, Immediate(0x4700));
728  // For minus zero, C3 == 1 && C1 == 1.
729  cmp(eax, Immediate(0x4200));
730  pop(eax);
731  fstp(0);
732 }
733 
734 
735 void MacroAssembler::FXamSign() {
736  fxam();
737  push(eax);
738  fnstsw_ax();
739  // For negative value (including -0.0), C1 == 1.
740  and_(eax, Immediate(0x0200));
741  pop(eax);
742  fstp(0);
743 }
744 
745 
746 void MacroAssembler::X87CheckIA() {
747  push(eax);
748  fnstsw_ax();
749  // For #IA, IE == 1 && SF == 0.
750  and_(eax, Immediate(0x0041));
751  cmp(eax, Immediate(0x0001));
752  pop(eax);
753 }
754 
755 
756 // rc=00B, round to nearest.
757 // rc=01B, round down.
758 // rc=10B, round up.
759 // rc=11B, round toward zero.
760 void MacroAssembler::X87SetRC(int rc) {
761  sub(esp, Immediate(kPointerSize));
762  fnstcw(MemOperand(esp, 0));
763  and_(MemOperand(esp, 0), Immediate(0xF3FF));
764  or_(MemOperand(esp, 0), Immediate(rc));
765  fldcw(MemOperand(esp, 0));
766  add(esp, Immediate(kPointerSize));
767 }
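// Note that the two RC bits occupy bits 11:10 of the x87 control word (hence
// the 0xF3FF mask), so the rc argument is expected to arrive already shifted
// into place, e.g. 0x0C00 for round-toward-zero rather than the raw two-bit
// value 11B listed in the comment above.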
768 
769 
770 void MacroAssembler::AssertNumber(Register object) {
771  if (emit_debug_code()) {
772  Label ok;
773  JumpIfSmi(object, &ok);
774  cmp(FieldOperand(object, HeapObject::kMapOffset),
775  isolate()->factory()->heap_number_map());
776  Check(equal, kOperandNotANumber);
777  bind(&ok);
778  }
779 }
780 
781 
782 void MacroAssembler::AssertSmi(Register object) {
783  if (emit_debug_code()) {
784  test(object, Immediate(kSmiTagMask));
785  Check(equal, kOperandIsNotASmi);
786  }
787 }
788 
789 
790 void MacroAssembler::AssertString(Register object) {
791  if (emit_debug_code()) {
792  test(object, Immediate(kSmiTagMask));
793  Check(not_equal, kOperandIsASmiAndNotAString);
794  push(object);
795  mov(object, FieldOperand(object, HeapObject::kMapOffset));
796  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
797  pop(object);
798  Check(below, kOperandIsNotAString);
799  }
800 }
801 
802 
803 void MacroAssembler::AssertName(Register object) {
804  if (emit_debug_code()) {
805  test(object, Immediate(kSmiTagMask));
806  Check(not_equal, kOperandIsASmiAndNotAName);
807  push(object);
808  mov(object, FieldOperand(object, HeapObject::kMapOffset));
809  CmpInstanceType(object, LAST_NAME_TYPE);
810  pop(object);
811  Check(below_equal, kOperandIsNotAName);
812  }
813 }
814 
815 
816 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
817  if (emit_debug_code()) {
818  Label done_checking;
819  AssertNotSmi(object);
820  cmp(object, isolate()->factory()->undefined_value());
821  j(equal, &done_checking);
822  cmp(FieldOperand(object, 0),
823  Immediate(isolate()->factory()->allocation_site_map()));
824  Assert(equal, kExpectedUndefinedOrCell);
825  bind(&done_checking);
826  }
827 }
828 
829 
830 void MacroAssembler::AssertNotSmi(Register object) {
831  if (emit_debug_code()) {
832  test(object, Immediate(kSmiTagMask));
833  Check(not_equal, kOperandIsASmi);
834  }
835 }
836 
837 
838 void MacroAssembler::StubPrologue() {
839  push(ebp); // Caller's frame pointer.
840  mov(ebp, esp);
841  push(esi); // Callee's context.
842  push(Immediate(Smi::FromInt(StackFrame::STUB)));
843 }
844 
845 
846 void MacroAssembler::Prologue(bool code_pre_aging) {
847  PredictableCodeSizeScope predictible_code_size_scope(this,
848  kNoCodeAgeSequenceLength);
849  if (code_pre_aging) {
850  // Pre-age the code.
851  call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
852  RelocInfo::CODE_AGE_SEQUENCE);
853  Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
854  } else {
855  push(ebp); // Caller's frame pointer.
856  mov(ebp, esp);
857  push(esi); // Callee's context.
858  push(edi); // Callee's JS function.
859  }
860 }
861 
862 
863 void MacroAssembler::EnterFrame(StackFrame::Type type) {
864  push(ebp);
865  mov(ebp, esp);
866  push(esi);
867  push(Immediate(Smi::FromInt(type)));
868  push(Immediate(CodeObject()));
869  if (emit_debug_code()) {
870  cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
871  Check(not_equal, kCodeObjectNotProperlyPatched);
872  }
873 }
874 
875 
876 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
877  if (emit_debug_code()) {
878  cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
879  Immediate(Smi::FromInt(type)));
880  Check(equal, kStackFrameTypesMustMatch);
881  }
882  leave();
883 }
884 
885 
886 void MacroAssembler::EnterExitFramePrologue() {
887  // Set up the frame structure on the stack.
888  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
889  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
890  DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
891  push(ebp);
892  mov(ebp, esp);
893 
894  // Reserve room for entry stack pointer and push the code object.
895  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
896  push(Immediate(0)); // Saved entry sp, patched before call.
897  push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
898 
899  // Save the frame pointer and the context in top.
900  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress, isolate());
901  ExternalReference context_address(Isolate::kContextAddress, isolate());
902  mov(Operand::StaticVariable(c_entry_fp_address), ebp);
903  mov(Operand::StaticVariable(context_address), esi);
904 }
905 
906 
907 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
908  // Optionally save FPU state.
909  if (save_doubles) {
910  // Store FPU state to m108byte.
911  int space = 108 + argc * kPointerSize;
912  sub(esp, Immediate(space));
913  const int offset = -2 * kPointerSize; // entry fp + code object.
914  fnsave(MemOperand(ebp, offset - 108));
915  } else {
916  sub(esp, Immediate(argc * kPointerSize));
917  }
918 
919  // Get the required frame alignment for the OS.
920  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
921  if (kFrameAlignment > 0) {
922  DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
923  and_(esp, -kFrameAlignment);
924  }
925 
926  // Patch the saved entry sp.
927  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
928 }
929 
930 
931 void MacroAssembler::EnterExitFrame(bool save_doubles) {
932  EnterExitFramePrologue();
933 
934  // Set up argc and argv in callee-saved registers.
935  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
936  mov(edi, eax);
937  lea(esi, Operand(ebp, eax, times_4, offset));
938 
939  // Reserve space for argc, argv and isolate.
940  EnterExitFrameEpilogue(3, save_doubles);
941 }
942 
943 
944 void MacroAssembler::EnterApiExitFrame(int argc) {
945  EnterExitFramePrologue();
946  EnterExitFrameEpilogue(argc, false);
947 }
948 
949 
950 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
951  // Optionally restore FPU state.
952  if (save_doubles) {
953  const int offset = -2 * kPointerSize;
954  frstor(MemOperand(ebp, offset - 108));
955  }
956 
957  // Get the return address from the stack and restore the frame pointer.
958  mov(ecx, Operand(ebp, 1 * kPointerSize));
959  mov(ebp, Operand(ebp, 0 * kPointerSize));
960 
961  // Pop the arguments and the receiver from the caller stack.
962  lea(esp, Operand(esi, 1 * kPointerSize));
963 
964  // Push the return address to get ready to return.
965  push(ecx);
966 
967  LeaveExitFrameEpilogue(true);
968 }
969 
970 
971 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
972  // Restore current context from top and clear it in debug mode.
973  ExternalReference context_address(Isolate::kContextAddress, isolate());
974  if (restore_context) {
975  mov(esi, Operand::StaticVariable(context_address));
976  }
977 #ifdef DEBUG
978  mov(Operand::StaticVariable(context_address), Immediate(0));
979 #endif
980 
981  // Clear the top frame.
982  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
983  isolate());
984  mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
985 }
986 
987 
988 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
989  mov(esp, ebp);
990  pop(ebp);
991 
992  LeaveExitFrameEpilogue(restore_context);
993 }
994 
995 
996 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
997  int handler_index) {
998  // Adjust this code if not the case.
999  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1000  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1001  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1002  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1003  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1004  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1005 
1006  // We will build up the handler from the bottom by pushing on the stack.
1007  // First push the frame pointer and context.
1008  if (kind == StackHandler::JS_ENTRY) {
1009  // The frame pointer does not point to a JS frame so we save NULL for
1010  // ebp. We expect the code throwing an exception to check ebp before
1011  // dereferencing it to restore the context.
1012  push(Immediate(0)); // NULL frame pointer.
1013  push(Immediate(Smi::FromInt(0))); // No context.
1014  } else {
1015  push(ebp);
1016  push(esi);
1017  }
1018  // Push the state and the code object.
1019  unsigned state =
1020  StackHandler::IndexField::encode(handler_index) |
1021  StackHandler::KindField::encode(kind);
1022  push(Immediate(state));
1023  Push(CodeObject());
1024 
1025  // Link the current handler as the next handler.
1026  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1027  push(Operand::StaticVariable(handler_address));
1028  // Set this new handler as the current one.
1029  mov(Operand::StaticVariable(handler_address), esp);
1030 }
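// After these pushes the handler occupies five words, matching the
// StackHandlerConstants asserted above (offsets relative to esp):
//   +0x00 next handler   (kNextOffset)
//   +0x04 code object    (kCodeOffset)
//   +0x08 state          (kStateOffset)
//   +0x0c context        (kContextOffset)
//   +0x10 frame pointer  (kFPOffset)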
1031 
1032 
1033 void MacroAssembler::PopTryHandler() {
1034  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1035  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1036  pop(Operand::StaticVariable(handler_address));
1037  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1038 }
1039 
1040 
1041 void MacroAssembler::JumpToHandlerEntry() {
1042  // Compute the handler entry address and jump to it. The handler table is
1043  // a fixed array of (smi-tagged) code offsets.
1044  // eax = exception, edi = code object, edx = state.
1045  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
1046  shr(edx, StackHandler::kKindWidth);
1047  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
1048  SmiUntag(edx);
1049  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
1050  jmp(edi);
1051 }
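// The state word pushed by PushTryHandler is
// IndexField::encode(handler_index) | KindField::encode(kind), so shifting it
// right by StackHandler::kKindWidth leaves just the handler index, which then
// selects the smi-tagged code offset from the handler table.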
1052 
1053 
1054 void MacroAssembler::Throw(Register value) {
1055  // Adjust this code if not the case.
1056  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1057  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1058  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1059  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1060  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1061  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1062 
1063  // The exception is expected in eax.
1064  if (!value.is(eax)) {
1065  mov(eax, value);
1066  }
1067  // Drop the stack pointer to the top of the top handler.
1068  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1069  mov(esp, Operand::StaticVariable(handler_address));
1070  // Restore the next handler.
1071  pop(Operand::StaticVariable(handler_address));
1072 
1073  // Remove the code object and state, compute the handler address in edi.
1074  pop(edi); // Code object.
1075  pop(edx); // Index and state.
1076 
1077  // Restore the context and frame pointer.
1078  pop(esi); // Context.
1079  pop(ebp); // Frame pointer.
1080 
1081  // If the handler is a JS frame, restore the context to the frame.
1082  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
1083  // ebp or esi.
1084  Label skip;
1085  test(esi, esi);
1086  j(zero, &skip, Label::kNear);
1087  mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
1088  bind(&skip);
1089 
1090  JumpToHandlerEntry();
1091 }
1092 
1093 
1094 void MacroAssembler::ThrowUncatchable(Register value) {
1095  // Adjust this code if not the case.
1096  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1097  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1098  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1099  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1100  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1101  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1102 
1103  // The exception is expected in eax.
1104  if (!value.is(eax)) {
1105  mov(eax, value);
1106  }
1107  // Drop the stack pointer to the top of the top stack handler.
1108  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
1109  mov(esp, Operand::StaticVariable(handler_address));
1110 
1111  // Unwind the handlers until the top ENTRY handler is found.
1112  Label fetch_next, check_kind;
1113  jmp(&check_kind, Label::kNear);
1114  bind(&fetch_next);
1115  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
1116 
1117  bind(&check_kind);
1118  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1119  test(Operand(esp, StackHandlerConstants::kStateOffset),
1120  Immediate(StackHandler::KindField::kMask));
1121  j(not_zero, &fetch_next);
1122 
1123  // Set the top handler address to next handler past the top ENTRY handler.
1124  pop(Operand::StaticVariable(handler_address));
1125 
1126  // Remove the code object and state, compute the handler address in edi.
1127  pop(edi); // Code object.
1128  pop(edx); // Index and state.
1129 
1130  // Clear the context pointer and frame pointer (0 was saved in the handler).
1131  pop(esi);
1132  pop(ebp);
1133 
1134  JumpToHandlerEntry();
1135 }
1136 
1137 
1138 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1139  Register scratch1,
1140  Register scratch2,
1141  Label* miss) {
1142  Label same_contexts;
1143 
1144  DCHECK(!holder_reg.is(scratch1));
1145  DCHECK(!holder_reg.is(scratch2));
1146  DCHECK(!scratch1.is(scratch2));
1147 
1148  // Load current lexical context from the stack frame.
1149  mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
1150 
1151  // When generating debug code, make sure the lexical context is set.
1152  if (emit_debug_code()) {
1153  cmp(scratch1, Immediate(0));
1154  Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
1155  }
1156  // Load the native context of the current context.
1157  int offset =
1158  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1159  mov(scratch1, FieldOperand(scratch1, offset));
1160  mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
1161 
1162  // Check the context is a native context.
1163  if (emit_debug_code()) {
1164  // Read the first word and compare to native_context_map.
1165  cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
1166  isolate()->factory()->native_context_map());
1167  Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
1168  }
1169 
1170  // Check if both contexts are the same.
1171  cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1172  j(equal, &same_contexts);
1173 
1174  // Compare security tokens, save holder_reg on the stack so we can use it
1175  // as a temporary register.
1176  //
1177  // Check that the security token in the calling global object is
1178  // compatible with the security token in the receiving global
1179  // object.
1180  mov(scratch2,
1181  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1182 
1183  // Check the context is a native context.
1184  if (emit_debug_code()) {
1185  cmp(scratch2, isolate()->factory()->null_value());
1186  Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
1187 
1188  // Read the first word and compare to native_context_map(),
1189  cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
1190  isolate()->factory()->native_context_map());
1191  Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
1192  }
1193 
1194  int token_offset = Context::kHeaderSize +
1195  Context::SECURITY_TOKEN_INDEX * kPointerSize;
1196  mov(scratch1, FieldOperand(scratch1, token_offset));
1197  cmp(scratch1, FieldOperand(scratch2, token_offset));
1198  j(not_equal, miss);
1199 
1200  bind(&same_contexts);
1201 }
1202 
1203 
1204 // Compute the hash code from the untagged key. This must be kept in sync with
1205 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1206 // code-stubs-hydrogen.cc
1207 //
1208 // Note: r0 will contain hash code
1209 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
1210  // Xor original key with a seed.
1211  if (serializer_enabled()) {
1212  ExternalReference roots_array_start =
1213  ExternalReference::roots_array_start(isolate());
1214  mov(scratch, Immediate(Heap::kHashSeedRootIndex));
1215  mov(scratch,
1216  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
1217  SmiUntag(scratch);
1218  xor_(r0, scratch);
1219  } else {
1220  int32_t seed = isolate()->heap()->HashSeed();
1221  xor_(r0, Immediate(seed));
1222  }
1223 
1224  // hash = ~hash + (hash << 15);
1225  mov(scratch, r0);
1226  not_(r0);
1227  shl(scratch, 15);
1228  add(r0, scratch);
1229  // hash = hash ^ (hash >> 12);
1230  mov(scratch, r0);
1231  shr(scratch, 12);
1232  xor_(r0, scratch);
1233  // hash = hash + (hash << 2);
1234  lea(r0, Operand(r0, r0, times_4, 0));
1235  // hash = hash ^ (hash >> 4);
1236  mov(scratch, r0);
1237  shr(scratch, 4);
1238  xor_(r0, scratch);
1239  // hash = hash * 2057;
1240  imul(r0, r0, 2057);
1241  // hash = hash ^ (hash >> 16);
1242  mov(scratch, r0);
1243  shr(scratch, 16);
1244  xor_(r0, scratch);
1245 }
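// The shift-and-xor sequence above mirrors ComputeIntegerHash in utils.h,
// which the comment before this function says must stay in sync. As a plain
// C++ sketch (illustrative only; the name is not part of this file):
static inline uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // mov/not/shl/add above
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // lea(r0, Operand(r0, r0, times_4, 0))
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // imul
  hash = hash ^ (hash >> 16);
  return hash;
}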
1246 
1247 
1248 
1249 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1250  Register elements,
1251  Register key,
1252  Register r0,
1253  Register r1,
1254  Register r2,
1255  Register result) {
1256  // Register use:
1257  //
1258  // elements - holds the slow-case elements of the receiver and is unchanged.
1259  //
1260  // key - holds the smi key on entry and is unchanged.
1261  //
1262  // Scratch registers:
1263  //
1264  // r0 - holds the untagged key on entry and holds the hash once computed.
1265  //
1266  // r1 - used to hold the capacity mask of the dictionary
1267  //
1268  // r2 - used for the index into the dictionary.
1269  //
1270  // result - holds the result on exit if the load succeeds and we fall through.
1271 
1272  Label done;
1273 
1274  GetNumberHash(r0, r1);
1275 
1276  // Compute capacity mask.
1277  mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
1278  shr(r1, kSmiTagSize); // convert smi to int
1279  dec(r1);
1280 
1281  // Generate an unrolled loop that performs a few probes before giving up.
1282  for (int i = 0; i < kNumberDictionaryProbes; i++) {
1283  // Use r2 for index calculations and keep the hash intact in r0.
1284  mov(r2, r0);
1285  // Compute the masked index: (hash + i + i * i) & mask.
1286  if (i > 0) {
1287  add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
1288  }
1289  and_(r2, r1);
1290 
1291  // Scale the index by multiplying by the entry size.
1292  DCHECK(SeededNumberDictionary::kEntrySize == 3);
1293  lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
1294 
1295  // Check if the key matches.
1296  cmp(key, FieldOperand(elements,
1297  r2,
1298  times_pointer_size,
1299  SeededNumberDictionary::kElementsStartOffset));
1300  if (i != (kNumberDictionaryProbes - 1)) {
1301  j(equal, &done);
1302  } else {
1303  j(not_equal, miss);
1304  }
1305  }
1306 
1307  bind(&done);
1308  // Check that the value is a normal property.
1309  const int kDetailsOffset =
1310  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1311  DCHECK_EQ(NORMAL, 0);
1312  test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
1313  Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
1314  j(not_zero, miss);
1315 
1316  // Get the value at the masked, scaled index.
1317  const int kValueOffset =
1318  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1319  mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
1320 }
1321 
1322 
1323 void MacroAssembler::LoadAllocationTopHelper(Register result,
1324  Register scratch,
1325  AllocationFlags flags) {
1326  ExternalReference allocation_top =
1327  AllocationUtils::GetAllocationTopReference(isolate(), flags);
1328 
1329  // Just return if allocation top is already known.
1330  if ((flags & RESULT_CONTAINS_TOP) != 0) {
1331  // No use of scratch if allocation top is provided.
1332  DCHECK(scratch.is(no_reg));
1333 #ifdef DEBUG
1334  // Assert that result actually contains top on entry.
1335  cmp(result, Operand::StaticVariable(allocation_top));
1336  Check(equal, kUnexpectedAllocationTop);
1337 #endif
1338  return;
1339  }
1340 
1341  // Move address of new object to result. Use scratch register if available.
1342  if (scratch.is(no_reg)) {
1343  mov(result, Operand::StaticVariable(allocation_top));
1344  } else {
1345  mov(scratch, Immediate(allocation_top));
1346  mov(result, Operand(scratch, 0));
1347  }
1348 }
1349 
1350 
1351 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
1352  Register scratch,
1353  AllocationFlags flags) {
1354  if (emit_debug_code()) {
1355  test(result_end, Immediate(kObjectAlignmentMask));
1356  Check(zero, kUnalignedAllocationInNewSpace);
1357  }
1358 
1359  ExternalReference allocation_top =
1360  AllocationUtils::GetAllocationTopReference(isolate(), flags);
1361 
1362  // Update new top. Use scratch if available.
1363  if (scratch.is(no_reg)) {
1364  mov(Operand::StaticVariable(allocation_top), result_end);
1365  } else {
1366  mov(Operand(scratch, 0), result_end);
1367  }
1368 }
1369 
1370 
1371 void MacroAssembler::Allocate(int object_size,
1372  Register result,
1373  Register result_end,
1374  Register scratch,
1375  Label* gc_required,
1376  AllocationFlags flags) {
1377  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1378  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1379  if (!FLAG_inline_new) {
1380  if (emit_debug_code()) {
1381  // Trash the registers to simulate an allocation failure.
1382  mov(result, Immediate(0x7091));
1383  if (result_end.is_valid()) {
1384  mov(result_end, Immediate(0x7191));
1385  }
1386  if (scratch.is_valid()) {
1387  mov(scratch, Immediate(0x7291));
1388  }
1389  }
1390  jmp(gc_required);
1391  return;
1392  }
1393  DCHECK(!result.is(result_end));
1394 
1395  // Load address of new object into result.
1396  LoadAllocationTopHelper(result, scratch, flags);
1397 
1398  ExternalReference allocation_limit =
1399  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1400 
1401  // Align the next allocation. Storing the filler map without checking top is
1402  // safe in new-space because the limit of the heap is aligned there.
1403  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1406  Label aligned;
1407  test(result, Immediate(kDoubleAlignmentMask));
1408  j(zero, &aligned, Label::kNear);
1409  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1410  cmp(result, Operand::StaticVariable(allocation_limit));
1411  j(above_equal, gc_required);
1412  }
1413  mov(Operand(result, 0),
1414  Immediate(isolate()->factory()->one_pointer_filler_map()));
1415  add(result, Immediate(kDoubleSize / 2));
1416  bind(&aligned);
1417  }
1418 
1419  // Calculate new top and bail out if space is exhausted.
1420  Register top_reg = result_end.is_valid() ? result_end : result;
1421  if (!top_reg.is(result)) {
1422  mov(top_reg, result);
1423  }
1424  add(top_reg, Immediate(object_size));
1425  j(carry, gc_required);
1426  cmp(top_reg, Operand::StaticVariable(allocation_limit));
1427  j(above, gc_required);
1428 
1429  // Update allocation top.
1430  UpdateAllocationTopHelper(top_reg, scratch, flags);
1431 
1432  // Tag result if requested.
1433  bool tag_result = (flags & TAG_OBJECT) != 0;
1434  if (top_reg.is(result)) {
1435  if (tag_result) {
1436  sub(result, Immediate(object_size - kHeapObjectTag));
1437  } else {
1438  sub(result, Immediate(object_size));
1439  }
1440  } else if (tag_result) {
1441  DCHECK(kHeapObjectTag == 1);
1442  inc(result);
1443  }
1444 }
1445 
1446 
1447 void MacroAssembler::Allocate(int header_size,
1448  ScaleFactor element_size,
1449  Register element_count,
1450  RegisterValueType element_count_type,
1451  Register result,
1452  Register result_end,
1453  Register scratch,
1454  Label* gc_required,
1455  AllocationFlags flags) {
1456  DCHECK((flags & SIZE_IN_WORDS) == 0);
1457  if (!FLAG_inline_new) {
1458  if (emit_debug_code()) {
1459  // Trash the registers to simulate an allocation failure.
1460  mov(result, Immediate(0x7091));
1461  mov(result_end, Immediate(0x7191));
1462  if (scratch.is_valid()) {
1463  mov(scratch, Immediate(0x7291));
1464  }
1465  // Register element_count is not modified by the function.
1466  }
1467  jmp(gc_required);
1468  return;
1469  }
1470  DCHECK(!result.is(result_end));
1471 
1472  // Load address of new object into result.
1473  LoadAllocationTopHelper(result, scratch, flags);
1474 
1475  ExternalReference allocation_limit =
1476  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1477 
1478  // Align the next allocation. Storing the filler map without checking top is
1479  // safe in new-space because the limit of the heap is aligned there.
1480  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1483  Label aligned;
1484  test(result, Immediate(kDoubleAlignmentMask));
1485  j(zero, &aligned, Label::kNear);
1486  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1487  cmp(result, Operand::StaticVariable(allocation_limit));
1488  j(above_equal, gc_required);
1489  }
1490  mov(Operand(result, 0),
1491  Immediate(isolate()->factory()->one_pointer_filler_map()));
1492  add(result, Immediate(kDoubleSize / 2));
1493  bind(&aligned);
1494  }
1495 
1496  // Calculate new top and bail out if space is exhausted.
1497  // We assume that element_count*element_size + header_size does not
1498  // overflow.
1499  if (element_count_type == REGISTER_VALUE_IS_SMI) {
1500  STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
1501  STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
1502  STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
1503  DCHECK(element_size >= times_2);
1504  DCHECK(kSmiTagSize == 1);
1505  element_size = static_cast<ScaleFactor>(element_size - 1);
1506  } else {
1507  DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
1508  }
1509  lea(result_end, Operand(element_count, element_size, header_size));
1510  add(result_end, result);
1511  j(carry, gc_required);
1512  cmp(result_end, Operand::StaticVariable(allocation_limit));
1513  j(above, gc_required);
1514 
1515  if ((flags & TAG_OBJECT) != 0) {
1516  DCHECK(kHeapObjectTag == 1);
1517  inc(result);
1518  }
1519 
1520  // Update allocation top.
1521  UpdateAllocationTopHelper(result_end, scratch, flags);
1522 }
1523 
1524 
1525 void MacroAssembler::Allocate(Register object_size,
1526  Register result,
1527  Register result_end,
1528  Register scratch,
1529  Label* gc_required,
1530  AllocationFlags flags) {
1531  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
1532  if (!FLAG_inline_new) {
1533  if (emit_debug_code()) {
1534  // Trash the registers to simulate an allocation failure.
1535  mov(result, Immediate(0x7091));
1536  mov(result_end, Immediate(0x7191));
1537  if (scratch.is_valid()) {
1538  mov(scratch, Immediate(0x7291));
1539  }
1540  // object_size is left unchanged by this function.
1541  }
1542  jmp(gc_required);
1543  return;
1544  }
1545  DCHECK(!result.is(result_end));
1546 
1547  // Load address of new object into result.
1548  LoadAllocationTopHelper(result, scratch, flags);
1549 
1550  ExternalReference allocation_limit =
1551  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1552 
1553  // Align the next allocation. Storing the filler map without checking top is
1554  // safe in new-space because the limit of the heap is aligned there.
1555  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1558  Label aligned;
1559  test(result, Immediate(kDoubleAlignmentMask));
1560  j(zero, &aligned, Label::kNear);
1561  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1562  cmp(result, Operand::StaticVariable(allocation_limit));
1563  j(above_equal, gc_required);
1564  }
1565  mov(Operand(result, 0),
1566  Immediate(isolate()->factory()->one_pointer_filler_map()));
1567  add(result, Immediate(kDoubleSize / 2));
1568  bind(&aligned);
1569  }
1570 
1571  // Calculate new top and bail out if space is exhausted.
1572  if (!object_size.is(result_end)) {
1573  mov(result_end, object_size);
1574  }
1575  add(result_end, result);
1576  j(carry, gc_required);
1577  cmp(result_end, Operand::StaticVariable(allocation_limit));
1578  j(above, gc_required);
1579 
1580  // Tag result if requested.
1581  if ((flags & TAG_OBJECT) != 0) {
1582  DCHECK(kHeapObjectTag == 1);
1583  inc(result);
1584  }
1585 
1586  // Update allocation top.
1587  UpdateAllocationTopHelper(result_end, scratch, flags);
1588 }
1589 
1590 
1591 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
1592  ExternalReference new_space_allocation_top =
1593  ExternalReference::new_space_allocation_top_address(isolate());
1594 
1595  // Make sure the object has no tag before resetting top.
1596  and_(object, Immediate(~kHeapObjectTagMask));
1597 #ifdef DEBUG
1598  cmp(object, Operand::StaticVariable(new_space_allocation_top));
1599  Check(below, kUndoAllocationOfNonAllocatedMemory);
1600 #endif
1601  mov(Operand::StaticVariable(new_space_allocation_top), object);
1602 }
1603 
1604 
1605 void MacroAssembler::AllocateHeapNumber(Register result,
1606  Register scratch1,
1607  Register scratch2,
1608  Label* gc_required,
1609  MutableMode mode) {
1610  // Allocate heap number in new space.
1611  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
1612  TAG_OBJECT);
1613 
1614  Handle<Map> map = mode == MUTABLE
1615  ? isolate()->factory()->mutable_heap_number_map()
1616  : isolate()->factory()->heap_number_map();
1617 
1618  // Set the map.
1619  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
1620 }
1621 
1622 
1623 void MacroAssembler::AllocateTwoByteString(Register result,
1624  Register length,
1625  Register scratch1,
1626  Register scratch2,
1627  Register scratch3,
1628  Label* gc_required) {
1629  // Calculate the number of bytes needed for the characters in the string while
1630  // observing object alignment.
1631  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1632  DCHECK(kShortSize == 2);
1633  // scratch1 = length * 2 + kObjectAlignmentMask.
1634  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
1635  and_(scratch1, Immediate(~kObjectAlignmentMask));
1636 
1637  // Allocate two byte string in new space.
1638  Allocate(SeqTwoByteString::kHeaderSize,
1639  times_1,
1640  scratch1,
1641  REGISTER_VALUE_IS_INT32,
1642  result,
1643  scratch2,
1644  scratch3,
1645  gc_required,
1646  TAG_OBJECT);
1647 
1648  // Set the map, length and hash field.
1649  mov(FieldOperand(result, HeapObject::kMapOffset),
1650  Immediate(isolate()->factory()->string_map()));
1651  mov(scratch1, length);
1652  SmiTag(scratch1);
1653  mov(FieldOperand(result, String::kLengthOffset), scratch1);
1654  mov(FieldOperand(result, String::kHashFieldOffset),
1655  Immediate(String::kEmptyHashField));
1656 }
1657 
1658 
1659 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1660  Register scratch1, Register scratch2,
1661  Register scratch3,
1662  Label* gc_required) {
1663  // Calculate the number of bytes needed for the characters in the string while
1664  // observing object alignment.
1665  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1666  mov(scratch1, length);
1667  DCHECK(kCharSize == 1);
1668  add(scratch1, Immediate(kObjectAlignmentMask));
1669  and_(scratch1, Immediate(~kObjectAlignmentMask));
1670 
1671  // Allocate one-byte string in new space.
1672  Allocate(SeqOneByteString::kHeaderSize,
1673  times_1,
1674  scratch1,
1675  REGISTER_VALUE_IS_INT32,
1676  result,
1677  scratch2,
1678  scratch3,
1679  gc_required,
1680  TAG_OBJECT);
1681 
1682  // Set the map, length and hash field.
1683  mov(FieldOperand(result, HeapObject::kMapOffset),
1684  Immediate(isolate()->factory()->one_byte_string_map()));
1685  mov(scratch1, length);
1686  SmiTag(scratch1);
1687  mov(FieldOperand(result, String::kLengthOffset), scratch1);
1688  mov(FieldOperand(result, String::kHashFieldOffset),
1689  Immediate(String::kEmptyHashField));
1690 }
1691 
1692 
1693 void MacroAssembler::AllocateOneByteString(Register result, int length,
1694  Register scratch1, Register scratch2,
1695  Label* gc_required) {
1696  DCHECK(length > 0);
1697 
1698  // Allocate one-byte string in new space.
1699  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
1700  gc_required, TAG_OBJECT);
1701 
1702  // Set the map, length and hash field.
1703  mov(FieldOperand(result, HeapObject::kMapOffset),
1704  Immediate(isolate()->factory()->one_byte_string_map()));
1705  mov(FieldOperand(result, String::kLengthOffset),
1706  Immediate(Smi::FromInt(length)));
1707  mov(FieldOperand(result, String::kHashFieldOffset),
1708  Immediate(String::kEmptyHashField));
1709 }
1710 
1711 
1712 void MacroAssembler::AllocateTwoByteConsString(Register result,
1713  Register scratch1,
1714  Register scratch2,
1715  Label* gc_required) {
1716  // Allocate heap number in new space.
1717  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1718  TAG_OBJECT);
1719 
1720  // Set the map. The other fields are left uninitialized.
1721  mov(FieldOperand(result, HeapObject::kMapOffset),
1722  Immediate(isolate()->factory()->cons_string_map()));
1723 }
1724 
1725 
1726 void MacroAssembler::AllocateOneByteConsString(Register result,
1727  Register scratch1,
1728  Register scratch2,
1729  Label* gc_required) {
1730  Allocate(ConsString::kSize,
1731  result,
1732  scratch1,
1733  scratch2,
1734  gc_required,
1735  TAG_OBJECT);
1736 
1737  // Set the map. The other fields are left uninitialized.
1738  mov(FieldOperand(result, HeapObject::kMapOffset),
1739  Immediate(isolate()->factory()->cons_one_byte_string_map()));
1740 }
1741 
1742 
1743 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1744  Register scratch1,
1745  Register scratch2,
1746  Label* gc_required) {
1747  // Allocate heap number in new space.
1748  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1749  TAG_OBJECT);
1750 
1751  // Set the map. The other fields are left uninitialized.
1752  mov(FieldOperand(result, HeapObject::kMapOffset),
1753  Immediate(isolate()->factory()->sliced_string_map()));
1754 }
1755 
1756 
1757 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1758  Register scratch1,
1759  Register scratch2,
1760  Label* gc_required) {
1761  // Allocate the sliced string object in new space.
1762  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1763  TAG_OBJECT);
1764 
1765  // Set the map. The other fields are left uninitialized.
1766  mov(FieldOperand(result, HeapObject::kMapOffset),
1767  Immediate(isolate()->factory()->sliced_one_byte_string_map()));
1768 }
1769 
1770 
1771 // Copy memory, byte-by-byte, from source to destination. Not optimized for
1772 // long or aligned copies. The contents of scratch and length are destroyed.
1773 // Source and destination are incremented by length.
1774 // Many variants of movsb, loop unrolling, word moves, and indexed operands
1775 // have been tried here already, and this is fastest.
1776 // A simpler loop is faster on small copies, but 30% slower on large ones.
1777  // The cld() instruction must have been emitted, to set the direction flag,
1778 // before calling this function.
1779 void MacroAssembler::CopyBytes(Register source,
1780  Register destination,
1781  Register length,
1782  Register scratch) {
1783  Label short_loop, len4, len8, len12, done, short_string;
1784  DCHECK(source.is(esi));
1785  DCHECK(destination.is(edi));
1786  DCHECK(length.is(ecx));
1787  cmp(length, Immediate(4));
1788  j(below, &short_string, Label::kNear);
1789 
1790  // Because source is 4-byte aligned in our uses of this function,
1791  // we keep source aligned for the rep_movs call by copying the odd bytes
1792  // at the end of the ranges.
1793  mov(scratch, Operand(source, length, times_1, -4));
1794  mov(Operand(destination, length, times_1, -4), scratch);
1795 
1796  cmp(length, Immediate(8));
1797  j(below_equal, &len4, Label::kNear);
1798  cmp(length, Immediate(12));
1799  j(below_equal, &len8, Label::kNear);
1800  cmp(length, Immediate(16));
1801  j(below_equal, &len12, Label::kNear);
1802 
1803  mov(scratch, ecx);
1804  shr(ecx, 2);
1805  rep_movs();
1806  and_(scratch, Immediate(0x3));
1807  add(destination, scratch);
1808  jmp(&done, Label::kNear);
1809 
1810  bind(&len12);
1811  mov(scratch, Operand(source, 8));
1812  mov(Operand(destination, 8), scratch);
1813  bind(&len8);
1814  mov(scratch, Operand(source, 4));
1815  mov(Operand(destination, 4), scratch);
1816  bind(&len4);
1817  mov(scratch, Operand(source, 0));
1818  mov(Operand(destination, 0), scratch);
1819  add(destination, length);
1820  jmp(&done, Label::kNear);
1821 
1822  bind(&short_string);
1823  test(length, length);
1824  j(zero, &done, Label::kNear);
1825 
1826  bind(&short_loop);
1827  mov_b(scratch, Operand(source, 0));
1828  mov_b(Operand(destination, 0), scratch);
1829  inc(source);
1830  inc(destination);
1831  dec(length);
1832  j(not_zero, &short_loop);
1833 
1834  bind(&done);
1835 }
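// Reference sketch (illustrative, not part of the original file): ignoring the
// alignment-preserving tail trick and the rep_movs fast path, the routine
// behaves like a plain byte copy that also advances both pointers:
//
//   void CopyBytesRef(const uint8_t*& src, uint8_t*& dst, size_t len) {
//     while (len--) *dst++ = *src++;
//   }
//
// The register constraints (esi/edi/ecx) come from the DCHECKs above and match
// what rep_movs requires.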
1836 
1837 
1838 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
1839  Register end_offset,
1840  Register filler) {
1841  Label loop, entry;
1842  jmp(&entry);
1843  bind(&loop);
1844  mov(Operand(start_offset, 0), filler);
1845  add(start_offset, Immediate(kPointerSize));
1846  bind(&entry);
1847  cmp(start_offset, end_offset);
1848  j(less, &loop);
1849 }
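// Equivalent C-style sketch (illustrative only): fill the half-open range
// [start_offset, end_offset) with `filler`, one pointer-sized word at a time:
//
//   for (intptr_t* p = start; p < end; ++p) *p = filler;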
1850 
1851 
1852 void MacroAssembler::BooleanBitTest(Register object,
1853  int field_offset,
1854  int bit_index) {
1855  bit_index += kSmiTagSize + kSmiShiftSize;
1857  int byte_index = bit_index / kBitsPerByte;
1858  int byte_bit_index = bit_index & (kBitsPerByte - 1);
1859  test_b(FieldOperand(object, field_offset + byte_index),
1860  static_cast<byte>(1 << byte_bit_index));
1861 }
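// Worked example (illustrative, assuming the ia32 values kSmiTagSize == 1 and
// kSmiShiftSize == 0): a request for bit_index 3 of the smi-encoded field
// becomes raw bit 4, i.e. byte_index = 4 / 8 = 0 and byte_bit_index = 4 % 8 = 4,
// so a single byte at field_offset is tested against the mask 1 << 4.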
1862 
1863 
1864 
1865 void MacroAssembler::NegativeZeroTest(Register result,
1866  Register op,
1867  Label* then_label) {
1868  Label ok;
1869  test(result, result);
1870  j(not_zero, &ok);
1871  test(op, op);
1872  j(sign, then_label);
1873  bind(&ok);
1874 }
1875 
1876 
1877 void MacroAssembler::NegativeZeroTest(Register result,
1878  Register op1,
1879  Register op2,
1880  Register scratch,
1881  Label* then_label) {
1882  Label ok;
1883  test(result, result);
1884  j(not_zero, &ok);
1885  mov(scratch, op1);
1886  or_(scratch, op2);
1887  j(sign, then_label);
1888  bind(&ok);
1889 }
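// Why this works (illustrative): an integer result of zero can stand for -0
// only when the true mathematical result was negative, e.g. -3 * 0. The
// single-operand variant therefore jumps to then_label when result == 0 and op
// has its sign bit set; the two-operand variant or-s op1 and op2 so that a
// sign bit in either input triggers the slow path.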
1890 
1891 
1892 void MacroAssembler::TryGetFunctionPrototype(Register function,
1893  Register result,
1894  Register scratch,
1895  Label* miss,
1896  bool miss_on_bound_function) {
1897  Label non_instance;
1898  if (miss_on_bound_function) {
1899  // Check that the receiver isn't a smi.
1900  JumpIfSmi(function, miss);
1901 
1902  // Check that the function really is a function.
1903  CmpObjectType(function, JS_FUNCTION_TYPE, result);
1904  j(not_equal, miss);
1905 
1906  // If a bound function, go to miss label.
1907  mov(scratch,
1908  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
1909  BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
1910  SharedFunctionInfo::kBoundFunction);
1911  j(not_zero, miss);
1912 
1913  // Make sure that the function has an instance prototype.
1914  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
1915  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
1916  j(not_zero, &non_instance);
1917  }
1918 
1919  // Get the prototype or initial map from the function.
1920  mov(result,
1921  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1922 
1923  // If the prototype or initial map is the hole, don't return it and
1924  // simply miss the cache instead. This will allow us to allocate a
1925  // prototype object on-demand in the runtime system.
1926  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
1927  j(equal, miss);
1928 
1929  // If the function does not have an initial map, we're done.
1930  Label done;
1931  CmpObjectType(result, MAP_TYPE, scratch);
1932  j(not_equal, &done);
1933 
1934  // Get the prototype from the initial map.
1935  mov(result, FieldOperand(result, Map::kPrototypeOffset));
1936 
1937  if (miss_on_bound_function) {
1938  jmp(&done);
1939 
1940  // Non-instance prototype: Fetch prototype from constructor field
1941  // in initial map.
1942  bind(&non_instance);
1943  mov(result, FieldOperand(result, Map::kConstructorOffset));
1944  }
1945 
1946  // All done.
1947  bind(&done);
1948 }
1949 
1950 
1951 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1952  DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
1953  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1954 }
1955 
1956 
1957 void MacroAssembler::TailCallStub(CodeStub* stub) {
1958  jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
1959 }
1960 
1961 
1962 void MacroAssembler::StubReturn(int argc) {
1963  DCHECK(argc >= 1 && generating_stub());
1964  ret((argc - 1) * kPointerSize);
1965 }
1966 
1967 
1968 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
1969  return has_frame_ || !stub->SometimesSetsUpAFrame();
1970 }
1971 
1972 
1973 void MacroAssembler::IndexFromHash(Register hash, Register index) {
1974  // The assert checks that the constants for the maximum number of digits
1975  // for an array index cached in the hash field and the number of bits
1976  // reserved for it do not conflict.
1977  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
1978  (1 << String::kArrayIndexValueBits));
1979  if (!index.is(hash)) {
1980  mov(index, hash);
1981  }
1982  DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
1983 }
1984 
1985 
1986 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1987  SaveFPRegsMode save_doubles) {
1988  // If the expected number of arguments of the runtime function is
1989  // constant, we check that the actual number of arguments matches the
1990  // expectation.
1991  CHECK(f->nargs < 0 || f->nargs == num_arguments);
1992 
1993  // TODO(1236192): Most runtime routines don't need the number of
1994  // arguments passed in because it is constant. At some point we
1995  // should remove this need and make the runtime routine entry code
1996  // smarter.
1997  Move(eax, Immediate(num_arguments));
1998  mov(ebx, Immediate(ExternalReference(f, isolate())));
1999  CEntryStub ces(isolate(), 1, save_doubles);
2000  CallStub(&ces);
2001 }
2002 
2003 
2004 void MacroAssembler::CallExternalReference(ExternalReference ref,
2005  int num_arguments) {
2006  mov(eax, Immediate(num_arguments));
2007  mov(ebx, Immediate(ref));
2008 
2009  CEntryStub stub(isolate(), 1);
2010  CallStub(&stub);
2011 }
2012 
2013 
2014 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2015  int num_arguments,
2016  int result_size) {
2017  // TODO(1236192): Most runtime routines don't need the number of
2018  // arguments passed in because it is constant. At some point we
2019  // should remove this need and make the runtime routine entry code
2020  // smarter.
2021  Move(eax, Immediate(num_arguments));
2022  JumpToExternalReference(ext);
2023 }
2024 
2025 
2026 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2027  int num_arguments,
2028  int result_size) {
2029  TailCallExternalReference(ExternalReference(fid, isolate()),
2030  num_arguments,
2031  result_size);
2032 }
2033 
2034 
2035 Operand ApiParameterOperand(int index) {
2036  return Operand(esp, index * kPointerSize);
2037 }
2038 
2039 
2040 void MacroAssembler::PrepareCallApiFunction(int argc) {
2041  EnterApiExitFrame(argc);
2042  if (emit_debug_code()) {
2043  mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
2044  }
2045 }
2046 
2047 
2048 void MacroAssembler::CallApiFunctionAndReturn(
2049  Register function_address,
2050  ExternalReference thunk_ref,
2051  Operand thunk_last_arg,
2052  int stack_space,
2053  Operand return_value_operand,
2054  Operand* context_restore_operand) {
2055  ExternalReference next_address =
2056  ExternalReference::handle_scope_next_address(isolate());
2057  ExternalReference limit_address =
2058  ExternalReference::handle_scope_limit_address(isolate());
2059  ExternalReference level_address =
2060  ExternalReference::handle_scope_level_address(isolate());
2061 
2062  DCHECK(edx.is(function_address));
2063  // Allocate HandleScope in callee-save registers.
2064  mov(ebx, Operand::StaticVariable(next_address));
2065  mov(edi, Operand::StaticVariable(limit_address));
2066  add(Operand::StaticVariable(level_address), Immediate(1));
2067 
2068  if (FLAG_log_timer_events) {
2069  FrameScope frame(this, StackFrame::MANUAL);
2070  PushSafepointRegisters();
2071  PrepareCallCFunction(1, eax);
2072  mov(Operand(esp, 0),
2073  Immediate(ExternalReference::isolate_address(isolate())));
2074  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2075  PopSafepointRegisters();
2076  }
2077 
2078 
2079  Label profiler_disabled;
2080  Label end_profiler_check;
2081  mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
2082  cmpb(Operand(eax, 0), 0);
2083  j(zero, &profiler_disabled);
2084 
2085  // Additional parameter is the address of the actual getter function.
2086  mov(thunk_last_arg, function_address);
2087  // Call the api function.
2088  mov(eax, Immediate(thunk_ref));
2089  call(eax);
2090  jmp(&end_profiler_check);
2091 
2092  bind(&profiler_disabled);
2093  // Call the api function.
2094  call(function_address);
2095  bind(&end_profiler_check);
2096 
2097  if (FLAG_log_timer_events) {
2098  FrameScope frame(this, StackFrame::MANUAL);
2099  PushSafepointRegisters();
2100  PrepareCallCFunction(1, eax);
2101  mov(Operand(esp, 0),
2102  Immediate(ExternalReference::isolate_address(isolate())));
2103  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2104  PopSafepointRegisters();
2105  }
2106 
2107  Label prologue;
2108  // Load the value from ReturnValue
2109  mov(eax, return_value_operand);
2110 
2111  Label promote_scheduled_exception;
2112  Label exception_handled;
2113  Label delete_allocated_handles;
2114  Label leave_exit_frame;
2115 
2116  bind(&prologue);
2117  // No more valid handles (the result handle was the last one). Restore
2118  // previous handle scope.
2119  mov(Operand::StaticVariable(next_address), ebx);
2120  sub(Operand::StaticVariable(level_address), Immediate(1));
2121  Assert(above_equal, kInvalidHandleScopeLevel);
2122  cmp(edi, Operand::StaticVariable(limit_address));
2123  j(not_equal, &delete_allocated_handles);
2124  bind(&leave_exit_frame);
2125 
2126  // Check if the function scheduled an exception.
2127  ExternalReference scheduled_exception_address =
2128  ExternalReference::scheduled_exception_address(isolate());
2129  cmp(Operand::StaticVariable(scheduled_exception_address),
2130  Immediate(isolate()->factory()->the_hole_value()));
2131  j(not_equal, &promote_scheduled_exception);
2132  bind(&exception_handled);
2133 
2134 #if ENABLE_EXTRA_CHECKS
2135  // Check if the function returned a valid JavaScript value.
2136  Label ok;
2137  Register return_value = eax;
2138  Register map = ecx;
2139 
2140  JumpIfSmi(return_value, &ok, Label::kNear);
2141  mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
2142 
2143  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2144  j(below, &ok, Label::kNear);
2145 
2146  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2147  j(above_equal, &ok, Label::kNear);
2148 
2149  cmp(map, isolate()->factory()->heap_number_map());
2150  j(equal, &ok, Label::kNear);
2151 
2152  cmp(return_value, isolate()->factory()->undefined_value());
2153  j(equal, &ok, Label::kNear);
2154 
2155  cmp(return_value, isolate()->factory()->true_value());
2156  j(equal, &ok, Label::kNear);
2157 
2158  cmp(return_value, isolate()->factory()->false_value());
2159  j(equal, &ok, Label::kNear);
2160 
2161  cmp(return_value, isolate()->factory()->null_value());
2162  j(equal, &ok, Label::kNear);
2163 
2164  Abort(kAPICallReturnedInvalidObject);
2165 
2166  bind(&ok);
2167 #endif
2168 
2169  bool restore_context = context_restore_operand != NULL;
2170  if (restore_context) {
2171  mov(esi, *context_restore_operand);
2172  }
2173  LeaveApiExitFrame(!restore_context);
2174  ret(stack_space * kPointerSize);
2175 
2176  bind(&promote_scheduled_exception);
2177  {
2178  FrameScope frame(this, StackFrame::INTERNAL);
2179  CallRuntime(Runtime::kPromoteScheduledException, 0);
2180  }
2181  jmp(&exception_handled);
2182 
2183  // HandleScope limit has changed. Delete allocated extensions.
2184  ExternalReference delete_extensions =
2185  ExternalReference::delete_handle_scope_extensions(isolate());
2186  bind(&delete_allocated_handles);
2187  mov(Operand::StaticVariable(limit_address), edi);
2188  mov(edi, eax);
2189  mov(Operand(esp, 0),
2190  Immediate(ExternalReference::isolate_address(isolate())));
2191  mov(eax, Immediate(delete_extensions));
2192  call(eax);
2193  mov(eax, edi);
2194  jmp(&leave_exit_frame);
2195 }
2196 
2197 
2198 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
2199  // Set the entry point and jump to the C entry runtime stub.
2200  mov(ebx, Immediate(ext));
2201  CEntryStub ces(isolate(), 1);
2202  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
2203 }
2204 
2205 
2206 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2207  const ParameterCount& actual,
2208  Handle<Code> code_constant,
2209  const Operand& code_operand,
2210  Label* done,
2211  bool* definitely_mismatches,
2212  InvokeFlag flag,
2213  Label::Distance done_near,
2214  const CallWrapper& call_wrapper) {
2215  bool definitely_matches = false;
2216  *definitely_mismatches = false;
2217  Label invoke;
2218  if (expected.is_immediate()) {
2219  DCHECK(actual.is_immediate());
2220  if (expected.immediate() == actual.immediate()) {
2221  definitely_matches = true;
2222  } else {
2223  mov(eax, actual.immediate());
2224  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2225  if (expected.immediate() == sentinel) {
2226  // Don't worry about adapting arguments for builtins that
2227  // don't want that done. Skip adaptation code by making it look
2228  // like we have a match between expected and actual number of
2229  // arguments.
2230  definitely_matches = true;
2231  } else {
2232  *definitely_mismatches = true;
2233  mov(ebx, expected.immediate());
2234  }
2235  }
2236  } else {
2237  if (actual.is_immediate()) {
2238  // Expected is in register, actual is immediate. This is the
2239  // case when we invoke function values without going through the
2240  // IC mechanism.
2241  cmp(expected.reg(), actual.immediate());
2242  j(equal, &invoke);
2243  DCHECK(expected.reg().is(ebx));
2244  mov(eax, actual.immediate());
2245  } else if (!expected.reg().is(actual.reg())) {
2246  // Both expected and actual are in (different) registers. This
2247  // is the case when we invoke functions using call and apply.
2248  cmp(expected.reg(), actual.reg());
2249  j(equal, &invoke);
2250  DCHECK(actual.reg().is(eax));
2251  DCHECK(expected.reg().is(ebx));
2252  }
2253  }
2254 
2255  if (!definitely_matches) {
2256  Handle<Code> adaptor =
2257  isolate()->builtins()->ArgumentsAdaptorTrampoline();
2258  if (!code_constant.is_null()) {
2259  mov(edx, Immediate(code_constant));
2260  add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2261  } else if (!code_operand.is_reg(edx)) {
2262  mov(edx, code_operand);
2263  }
2264 
2265  if (flag == CALL_FUNCTION) {
2266  call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
2267  call(adaptor, RelocInfo::CODE_TARGET);
2268  call_wrapper.AfterCall();
2269  if (!*definitely_mismatches) {
2270  jmp(done, done_near);
2271  }
2272  } else {
2273  jmp(adaptor, RelocInfo::CODE_TARGET);
2274  }
2275  bind(&invoke);
2276  }
2277 }
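// Example of the decision above (illustrative): invoking a function whose
// SharedFunctionInfo expects 2 formal parameters with 3 actual arguments
// leaves expected != actual, so eax = 3, ebx = 2 and control is routed through
// the ArgumentsAdaptorTrampoline; if the counts match, or expected is
// kDontAdaptArgumentsSentinel, the adaptor is skipped entirely.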
2278 
2279 
2280 void MacroAssembler::InvokeCode(const Operand& code,
2281  const ParameterCount& expected,
2282  const ParameterCount& actual,
2283  InvokeFlag flag,
2284  const CallWrapper& call_wrapper) {
2285  // You can't call a function without a valid frame.
2286  DCHECK(flag == JUMP_FUNCTION || has_frame());
2287 
2288  Label done;
2289  bool definitely_mismatches = false;
2290  InvokePrologue(expected, actual, Handle<Code>::null(), code,
2291  &done, &definitely_mismatches, flag, Label::kNear,
2292  call_wrapper);
2293  if (!definitely_mismatches) {
2294  if (flag == CALL_FUNCTION) {
2295  call_wrapper.BeforeCall(CallSize(code));
2296  call(code);
2297  call_wrapper.AfterCall();
2298  } else {
2300  jmp(code);
2301  }
2302  bind(&done);
2303  }
2304 }
2305 
2306 
2307 void MacroAssembler::InvokeFunction(Register fun,
2308  const ParameterCount& actual,
2309  InvokeFlag flag,
2310  const CallWrapper& call_wrapper) {
2311  // You can't call a function without a valid frame.
2312  DCHECK(flag == JUMP_FUNCTION || has_frame());
2313 
2314  DCHECK(fun.is(edi));
2315  mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2316  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2317  mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
2318  SmiUntag(ebx);
2319 
2320  ParameterCount expected(ebx);
2321  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2322  expected, actual, flag, call_wrapper);
2323 }
2324 
2325 
2326 void MacroAssembler::InvokeFunction(Register fun,
2327  const ParameterCount& expected,
2328  const ParameterCount& actual,
2329  InvokeFlag flag,
2330  const CallWrapper& call_wrapper) {
2331  // You can't call a function without a valid frame.
2332  DCHECK(flag == JUMP_FUNCTION || has_frame());
2333 
2334  DCHECK(fun.is(edi));
2335  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2336 
2337  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2338  expected, actual, flag, call_wrapper);
2339 }
2340 
2341 
2342 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2343  const ParameterCount& expected,
2344  const ParameterCount& actual,
2345  InvokeFlag flag,
2346  const CallWrapper& call_wrapper) {
2347  LoadHeapObject(edi, function);
2348  InvokeFunction(edi, expected, actual, flag, call_wrapper);
2349 }
2350 
2351 
2352 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2353  InvokeFlag flag,
2354  const CallWrapper& call_wrapper) {
2355  // You can't call a builtin without a valid frame.
2356  DCHECK(flag == JUMP_FUNCTION || has_frame());
2357 
2358  // Rely on the assertion to check that the number of provided
2359  // arguments matches the expected number of arguments. Fake a
2360  // parameter count to avoid emitting code to do the check.
2361  ParameterCount expected(0);
2362  GetBuiltinFunction(edi, id);
2363  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
2364  expected, expected, flag, call_wrapper);
2365 }
2366 
2367 
2368 void MacroAssembler::GetBuiltinFunction(Register target,
2369  Builtins::JavaScript id) {
2370  // Load the JavaScript builtin function from the builtins object.
2371  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2372  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
2373  mov(target, FieldOperand(target,
2374  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2375 }
2376 
2377 
2378 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2379  DCHECK(!target.is(edi));
2380  // Load the JavaScript builtin function from the builtins object.
2381  GetBuiltinFunction(edi, id);
2382  // Load the code entry point from the function into the target register.
2383  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
2384 }
2385 
2386 
2387 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2388  if (context_chain_length > 0) {
2389  // Move up the chain of contexts to the context containing the slot.
2390  mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2391  for (int i = 1; i < context_chain_length; i++) {
2392  mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2393  }
2394  } else {
2395  // Slot is in the current function context. Move it into the
2396  // destination register in case we store into it (the write barrier
2397  // cannot be allowed to destroy the context in esi).
2398  mov(dst, esi);
2399  }
2400 
2401  // We should not have found a with context by walking the context chain
2402  // (i.e., the static scope chain and runtime context chain do not agree).
2403  // A variable occurring in such a scope should have slot type LOOKUP and
2404  // not CONTEXT.
2405  if (emit_debug_code()) {
2406  cmp(FieldOperand(dst, HeapObject::kMapOffset),
2407  isolate()->factory()->with_context_map());
2408  Check(not_equal, kVariableResolvedToWithContext);
2409  }
2410 }
2411 
2412 
2413 void MacroAssembler::LoadTransitionedArrayMapConditional(
2414  ElementsKind expected_kind,
2415  ElementsKind transitioned_kind,
2416  Register map_in_out,
2417  Register scratch,
2418  Label* no_map_match) {
2419  // Load the global or builtins object from the current context.
2420  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2421  mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
2422 
2423  // Check that the function's map is the same as the expected cached map.
2424  mov(scratch, Operand(scratch,
2425  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2426 
2427  size_t offset = expected_kind * kPointerSize +
2428  FixedArrayBase::kHeaderSize;
2429  cmp(map_in_out, FieldOperand(scratch, offset));
2430  j(not_equal, no_map_match);
2431 
2432  // Use the transitioned cached map.
2433  offset = transitioned_kind * kPointerSize +
2434  FixedArrayBase::kHeaderSize;
2435  mov(map_in_out, FieldOperand(scratch, offset));
2436 }
2437 
2438 
2439 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2440  // Load the global or builtins object from the current context.
2441  mov(function,
2442  Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2443  // Load the native context from the global or builtins object.
2444  mov(function,
2445  FieldOperand(function, GlobalObject::kNativeContextOffset));
2446  // Load the function from the native context.
2447  mov(function, Operand(function, Context::SlotOffset(index)));
2448 }
2449 
2450 
2451 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2452  Register map) {
2453  // Load the initial map. The global functions all have initial maps.
2454  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2455  if (emit_debug_code()) {
2456  Label ok, fail;
2457  CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
2458  jmp(&ok);
2459  bind(&fail);
2460  Abort(kGlobalFunctionsMustHaveInitialMap);
2461  bind(&ok);
2462  }
2463 }
2464 
2465 
2466 // Store the value in register src in the safepoint register stack
2467 // slot for register dst.
2468 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2469  mov(SafepointRegisterSlot(dst), src);
2470 }
2471 
2472 
2473 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
2474  mov(SafepointRegisterSlot(dst), src);
2475 }
2476 
2477 
2478 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2479  mov(dst, SafepointRegisterSlot(src));
2480 }
2481 
2482 
2483 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2484  return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2485 }
2486 
2487 
2488 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
2489  // The registers are pushed starting with the lowest encoding,
2490  // which means that lowest encodings are furthest away from
2491  // the stack pointer.
2492  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
2493  return kNumSafepointRegisters - reg_code - 1;
2494 }
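// Worked example (illustrative, assuming kNumSafepointRegisters == 8 on ia32):
// the register with encoding 0 (eax) is pushed first and therefore lies
// deepest in the safepoint area, at slot index 8 - 0 - 1 = 7; encoding 7 maps
// to slot 0, right next to esp.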
2495 
2496 
2497 void MacroAssembler::LoadHeapObject(Register result,
2498  Handle<HeapObject> object) {
2499  AllowDeferredHandleDereference embedding_raw_address;
2500  if (isolate()->heap()->InNewSpace(*object)) {
2501  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2502  mov(result, Operand::ForCell(cell));
2503  } else {
2504  mov(result, object);
2505  }
2506 }
2507 
2508 
2509 void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
2510  AllowDeferredHandleDereference using_raw_address;
2511  if (isolate()->heap()->InNewSpace(*object)) {
2512  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2513  cmp(reg, Operand::ForCell(cell));
2514  } else {
2515  cmp(reg, object);
2516  }
2517 }
2518 
2519 
2520 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2521  AllowDeferredHandleDereference using_raw_address;
2522  if (isolate()->heap()->InNewSpace(*object)) {
2523  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2524  push(Operand::ForCell(cell));
2525  } else {
2526  Push(object);
2527  }
2528 }
2529 
2530 
2531 void MacroAssembler::Ret() {
2532  ret(0);
2533 }
2534 
2535 
2536 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2537  if (is_uint16(bytes_dropped)) {
2538  ret(bytes_dropped);
2539  } else {
2540  pop(scratch);
2541  add(esp, Immediate(bytes_dropped));
2542  push(scratch);
2543  ret(0);
2544  }
2545 }
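// Note (illustrative): ret imm16 can only encode a 16-bit byte count, so for a
// larger drop the code above pops the return address into scratch, bumps esp
// by bytes_dropped, pushes the return address back and finishes with a plain
// ret 0.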
2546 
2547 
2548 void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
2549  // Turn off the stack depth check when the serializer is enabled to reduce the
2550  // code size.
2551  if (serializer_enabled()) return;
2552  // Make sure the floating point stack is either empty or has depth items.
2553  DCHECK(depth <= 7);
2554  // This is very expensive.
2555  DCHECK(FLAG_debug_code && FLAG_enable_slow_asserts);
2556 
2557  // The top-of-stack (tos) is 7 if there is one item pushed.
2558  int tos = (8 - depth) % 8;
2559  const int kTopMask = 0x3800;
2560  push(eax);
2561  fwait();
2562  fnstsw_ax();
2563  and_(eax, kTopMask);
2564  shr(eax, 11);
2565  cmp(eax, Immediate(tos));
2566  Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
2567  fnclex();
2568  pop(eax);
2569 }
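// Worked example (illustrative): the x87 TOP field occupies bits 11..13 of the
// status word, hence the 0x3800 mask and the shift by 11. With one value
// pushed TOP is 7, matching tos = (8 - 1) % 8 = 7; with an empty stack,
// depth == 0 and tos = (8 - 0) % 8 = 0.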
2570 
2571 
2572 void MacroAssembler::Drop(int stack_elements) {
2573  if (stack_elements > 0) {
2574  add(esp, Immediate(stack_elements * kPointerSize));
2575  }
2576 }
2577 
2578 
2579 void MacroAssembler::Move(Register dst, Register src) {
2580  if (!dst.is(src)) {
2581  mov(dst, src);
2582  }
2583 }
2584 
2585 
2586 void MacroAssembler::Move(Register dst, const Immediate& x) {
2587  if (x.is_zero()) {
2588  xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
2589  } else {
2590  mov(dst, x);
2591  }
2592 }
2593 
2594 
2595 void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
2596  mov(dst, x);
2597 }
2598 
2599 
2600 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2601  if (FLAG_native_code_counters && counter->Enabled()) {
2602  mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
2603  }
2604 }
2605 
2606 
2607 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2608  DCHECK(value > 0);
2609  if (FLAG_native_code_counters && counter->Enabled()) {
2610  Operand operand = Operand::StaticVariable(ExternalReference(counter));
2611  if (value == 1) {
2612  inc(operand);
2613  } else {
2614  add(operand, Immediate(value));
2615  }
2616  }
2617 }
2618 
2619 
2620 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2621  DCHECK(value > 0);
2622  if (FLAG_native_code_counters && counter->Enabled()) {
2623  Operand operand = Operand::StaticVariable(ExternalReference(counter));
2624  if (value == 1) {
2625  dec(operand);
2626  } else {
2627  sub(operand, Immediate(value));
2628  }
2629  }
2630 }
2631 
2632 
2633 void MacroAssembler::IncrementCounter(Condition cc,
2634  StatsCounter* counter,
2635  int value) {
2636  DCHECK(value > 0);
2637  if (FLAG_native_code_counters && counter->Enabled()) {
2638  Label skip;
2639  j(NegateCondition(cc), &skip);
2640  pushfd();
2641  IncrementCounter(counter, value);
2642  popfd();
2643  bind(&skip);
2644  }
2645 }
2646 
2647 
2648 void MacroAssembler::DecrementCounter(Condition cc,
2649  StatsCounter* counter,
2650  int value) {
2651  DCHECK(value > 0);
2652  if (FLAG_native_code_counters && counter->Enabled()) {
2653  Label skip;
2654  j(NegateCondition(cc), &skip);
2655  pushfd();
2656  DecrementCounter(counter, value);
2657  popfd();
2658  bind(&skip);
2659  }
2660 }
2661 
2662 
2663 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
2664  if (emit_debug_code()) Check(cc, reason);
2665 }
2666 
2667 
2668 void MacroAssembler::AssertFastElements(Register elements) {
2669  if (emit_debug_code()) {
2670  Factory* factory = isolate()->factory();
2671  Label ok;
2672  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2673  Immediate(factory->fixed_array_map()));
2674  j(equal, &ok);
2675  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2676  Immediate(factory->fixed_double_array_map()));
2677  j(equal, &ok);
2678  cmp(FieldOperand(elements, HeapObject::kMapOffset),
2679  Immediate(factory->fixed_cow_array_map()));
2680  j(equal, &ok);
2681  Abort(kJSObjectWithFastElementsMapHasSlowElements);
2682  bind(&ok);
2683  }
2684 }
2685 
2686 
2687 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
2688  Label L;
2689  j(cc, &L);
2690  Abort(reason);
2691  // will not return here
2692  bind(&L);
2693 }
2694 
2695 
2696 void MacroAssembler::CheckStackAlignment() {
2697  int frame_alignment = base::OS::ActivationFrameAlignment();
2698  int frame_alignment_mask = frame_alignment - 1;
2699  if (frame_alignment > kPointerSize) {
2700  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2701  Label alignment_as_expected;
2702  test(esp, Immediate(frame_alignment_mask));
2703  j(zero, &alignment_as_expected);
2704  // Abort if stack is not aligned.
2705  int3();
2706  bind(&alignment_as_expected);
2707  }
2708 }
2709 
2710 
2711 void MacroAssembler::Abort(BailoutReason reason) {
2712 #ifdef DEBUG
2713  const char* msg = GetBailoutReason(reason);
2714  if (msg != NULL) {
2715  RecordComment("Abort message: ");
2716  RecordComment(msg);
2717  }
2718 
2719  if (FLAG_trap_on_abort) {
2720  int3();
2721  return;
2722  }
2723 #endif
2724 
2725  push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
2726  // Disable stub call restrictions to always allow calls to abort.
2727  if (!has_frame_) {
2728  // We don't actually want to generate a pile of code for this, so just
2729  // claim there is a stack frame, without generating one.
2730  FrameScope scope(this, StackFrame::NONE);
2731  CallRuntime(Runtime::kAbort, 1);
2732  } else {
2733  CallRuntime(Runtime::kAbort, 1);
2734  }
2735  // will not return here
2736  int3();
2737 }
2738 
2739 
2740 void MacroAssembler::LoadInstanceDescriptors(Register map,
2741  Register descriptors) {
2742  mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
2743 }
2744 
2745 
2746 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2747  mov(dst, FieldOperand(map, Map::kBitField3Offset));
2748  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2749 }
2750 
2751 
2752 void MacroAssembler::LookupNumberStringCache(Register object,
2753  Register result,
2754  Register scratch1,
2755  Register scratch2,
2756  Label* not_found) {
2757  // Use of registers. Register result is used as a temporary.
2758  Register number_string_cache = result;
2759  Register mask = scratch1;
2760  Register scratch = scratch2;
2761 
2762  // Load the number string cache.
2763  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2764  // Make the hash mask from the length of the number string cache. It
2765  // contains two elements (number and string) for each cache entry.
2766  mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2767  shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
2768  sub(mask, Immediate(1)); // Make mask.
2769 
2770  // Calculate the entry in the number string cache. The hash value in the
2771  // number string cache for smis is just the smi value, and the hash for
2772  // doubles is the xor of the upper and lower words. See
2773  // Heap::GetNumberStringCache.
2774  Label smi_hash_calculated;
2775  Label load_result_from_cache;
2776  Label not_smi;
2777  STATIC_ASSERT(kSmiTag == 0);
2778  JumpIfNotSmi(object, &not_smi, Label::kNear);
2779  mov(scratch, object);
2780  SmiUntag(scratch);
2781  jmp(&smi_hash_calculated, Label::kNear);
2782  bind(&not_smi);
2783  cmp(FieldOperand(object, HeapObject::kMapOffset),
2784  isolate()->factory()->heap_number_map());
2785  j(not_equal, not_found);
2786  STATIC_ASSERT(8 == kDoubleSize);
2787  mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2788  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2789  // Object is heap number and hash is now in scratch. Calculate cache index.
2790  and_(scratch, mask);
2791  Register index = scratch;
2792  Register probe = mask;
2793  mov(probe,
2794  FieldOperand(number_string_cache,
2795  index,
2796  times_twice_pointer_size,
2797  FixedArray::kHeaderSize));
2798  JumpIfSmi(probe, not_found);
2799  fld_d(FieldOperand(object, HeapNumber::kValueOffset));
2800  fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
2801  FCmp();
2802  j(parity_even, not_found); // Bail out if NaN is involved.
2803  j(not_equal, not_found); // The cache did not contain this value.
2804  jmp(&load_result_from_cache, Label::kNear);
2805 
2806  bind(&smi_hash_calculated);
2807  // Object is smi and hash is now in scratch. Calculate cache index.
2808  and_(scratch, mask);
2809  // Check if the entry is the smi we are looking for.
2810  cmp(object,
2811  FieldOperand(number_string_cache,
2812  index,
2813  times_twice_pointer_size,
2814  FixedArray::kHeaderSize));
2815  j(not_equal, not_found);
2816 
2817  // Get the result from the cache.
2818  bind(&load_result_from_cache);
2819  mov(result,
2820  FieldOperand(number_string_cache,
2821  index,
2822  times_twice_pointer_size,
2823  FixedArray::kHeaderSize + kPointerSize));
2824  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2825 }
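// Hash sketch (illustrative): for a smi the cache index is just the untagged
// value masked with `mask`; for a heap number it is the xor of the low and
// high 32-bit halves of the double, masked the same way. Each entry occupies
// two consecutive slots, so the key lives at FixedArray::kHeaderSize plus
// index * 2 words and the cached string one pointer after it.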
2826 
2827 
2828 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
2829  Register instance_type, Register scratch, Label* failure) {
2830  if (!scratch.is(instance_type)) {
2831  mov(scratch, instance_type);
2832  }
2833  and_(scratch,
2834  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
2835  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
2836  j(not_equal, failure);
2837 }
2838 
2839 
2840 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
2841  Register object2,
2842  Register scratch1,
2843  Register scratch2,
2844  Label* failure) {
2845  // Check that both objects are not smis.
2846  STATIC_ASSERT(kSmiTag == 0);
2847  mov(scratch1, object1);
2848  and_(scratch1, object2);
2849  JumpIfSmi(scratch1, failure);
2850 
2851  // Load instance type for both strings.
2852  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
2853  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
2854  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2855  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2856 
2857  // Check that both are flat one-byte strings.
2858  const int kFlatOneByteStringMask =
2859  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2860  const int kFlatOneByteStringTag =
2861  kStringTag | kSeqStringTag | kOneByteStringTag;
2862  // Interleave bits from both instance types and compare them in one check.
2863  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
2864  and_(scratch1, kFlatOneByteStringMask);
2865  and_(scratch2, kFlatOneByteStringMask);
2866  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2867  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
2868  j(not_equal, failure);
2869 }
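// About the interleave trick above (illustrative): the DCHECK guarantees that
// the masked type bits do not overlap themselves shifted left by 3, so
// lea(scratch1, scratch1 + scratch2 * 8) packs both masked instance types into
// one register, and a single cmp against
// kFlatOneByteStringTag | (kFlatOneByteStringTag << 3) checks both strings at
// once.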
2870 
2871 
2872 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
2873  Label* not_unique_name,
2874  Label::Distance distance) {
2876  Label succeed;
2877  test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2878  j(zero, &succeed);
2879  cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
2880  j(not_equal, not_unique_name, distance);
2881 
2882  bind(&succeed);
2883 }
2884 
2885 
2886 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
2887  Register index,
2888  Register value,
2889  uint32_t encoding_mask) {
2890  Label is_object;
2891  JumpIfNotSmi(string, &is_object, Label::kNear);
2892  Abort(kNonObject);
2893  bind(&is_object);
2894 
2895  push(value);
2896  mov(value, FieldOperand(string, HeapObject::kMapOffset));
2897  movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
2898 
2899  and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
2900  cmp(value, Immediate(encoding_mask));
2901  pop(value);
2902  Check(equal, kUnexpectedStringType);
2903 
2904  // The index is assumed to be untagged coming in; tag it to compare with the
2905  // string length without using a temp register. It is restored at the end of
2906  // this function.
2907  SmiTag(index);
2908  Check(no_overflow, kIndexIsTooLarge);
2909 
2910  cmp(index, FieldOperand(string, String::kLengthOffset));
2911  Check(less, kIndexIsTooLarge);
2912 
2913  cmp(index, Immediate(Smi::FromInt(0)));
2914  Check(greater_equal, kIndexIsNegative);
2915 
2916  // Restore the index
2917  SmiUntag(index);
2918 }
2919 
2920 
2921 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
2922  int frame_alignment = base::OS::ActivationFrameAlignment();
2923  if (frame_alignment != 0) {
2924  // Make stack end at alignment and make room for num_arguments words
2925  // and the original value of esp.
2926  mov(scratch, esp);
2927  sub(esp, Immediate((num_arguments + 1) * kPointerSize));
2928  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2929  and_(esp, -frame_alignment);
2930  mov(Operand(esp, num_arguments * kPointerSize), scratch);
2931  } else {
2932  sub(esp, Immediate(num_arguments * kPointerSize));
2933  }
2934 }
2935 
2936 
2937 void MacroAssembler::CallCFunction(ExternalReference function,
2938  int num_arguments) {
2939  // Trashing eax is ok as it will be the return value.
2940  mov(eax, Immediate(function));
2941  CallCFunction(eax, num_arguments);
2942 }
2943 
2944 
2945 void MacroAssembler::CallCFunction(Register function,
2946  int num_arguments) {
2947  DCHECK(has_frame());
2948  // Check stack alignment.
2949  if (emit_debug_code()) {
2950  CheckStackAlignment();
2951  }
2952 
2953  call(function);
2954  if (base::OS::ActivationFrameAlignment() != 0) {
2955  mov(esp, Operand(esp, num_arguments * kPointerSize));
2956  } else {
2957  add(esp, Immediate(num_arguments * kPointerSize));
2958  }
2959 }
2960 
2961 
2962 #ifdef DEBUG
2963 bool AreAliased(Register reg1,
2964  Register reg2,
2965  Register reg3,
2966  Register reg4,
2967  Register reg5,
2968  Register reg6,
2969  Register reg7,
2970  Register reg8) {
2971  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
2972  reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
2973  reg7.is_valid() + reg8.is_valid();
2974 
2975  RegList regs = 0;
2976  if (reg1.is_valid()) regs |= reg1.bit();
2977  if (reg2.is_valid()) regs |= reg2.bit();
2978  if (reg3.is_valid()) regs |= reg3.bit();
2979  if (reg4.is_valid()) regs |= reg4.bit();
2980  if (reg5.is_valid()) regs |= reg5.bit();
2981  if (reg6.is_valid()) regs |= reg6.bit();
2982  if (reg7.is_valid()) regs |= reg7.bit();
2983  if (reg8.is_valid()) regs |= reg8.bit();
2984  int n_of_non_aliasing_regs = NumRegs(regs);
2985 
2986  return n_of_valid_regs != n_of_non_aliasing_regs;
2987 }
2988 #endif
2989 
2990 
2991 CodePatcher::CodePatcher(byte* address, int size)
2992  : address_(address),
2993  size_(size),
2994  masm_(NULL, address, size + Assembler::kGap) {
2995  // Create a new macro assembler pointing to the address of the code to patch.
2996  // The size is adjusted with kGap in order for the assembler to generate size
2997  // bytes of instructions without failing with buffer size constraints.
2998  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
2999 }
3000 
3001 
3002 CodePatcher::~CodePatcher() {
3003  // Indicate that code has changed.
3004  CpuFeatures::FlushICache(address_, size_);
3005 
3006  // Check that the code was patched as expected.
3007  DCHECK(masm_.pc_ == address_ + size_);
3008  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3009 }
3010 
3011 
3012 void MacroAssembler::CheckPageFlag(
3013  Register object,
3014  Register scratch,
3015  int mask,
3016  Condition cc,
3017  Label* condition_met,
3018  Label::Distance condition_met_distance) {
3019  DCHECK(cc == zero || cc == not_zero);
3020  if (scratch.is(object)) {
3021  and_(scratch, Immediate(~Page::kPageAlignmentMask));
3022  } else {
3023  mov(scratch, Immediate(~Page::kPageAlignmentMask));
3024  and_(scratch, object);
3025  }
3026  if (mask < (1 << kBitsPerByte)) {
3027  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
3028  static_cast<uint8_t>(mask));
3029  } else {
3030  test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
3031  }
3032  j(cc, condition_met, condition_met_distance);
3033 }
3034 
3035 
3036 void MacroAssembler::CheckPageFlagForMap(
3037  Handle<Map> map,
3038  int mask,
3039  Condition cc,
3040  Label* condition_met,
3041  Label::Distance condition_met_distance) {
3042  DCHECK(cc == zero || cc == not_zero);
3043  Page* page = Page::FromAddress(map->address());
3044  DCHECK(!serializer_enabled()); // Serializer cannot match page_flags.
3045  ExternalReference reference(ExternalReference::page_flags(page));
3046  // The inlined static address check of the page's flags relies
3047  // on maps never being compacted.
3048  DCHECK(!isolate()->heap()->mark_compact_collector()->
3049  IsOnEvacuationCandidate(*map));
3050  if (mask < (1 << kBitsPerByte)) {
3051  test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
3052  } else {
3053  test(Operand::StaticVariable(reference), Immediate(mask));
3054  }
3055  j(cc, condition_met, condition_met_distance);
3056 }
3057 
3058 
3059 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
3060  Register scratch,
3061  Label* if_deprecated) {
3062  if (map->CanBeDeprecated()) {
3063  mov(scratch, map);
3064  mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
3065  and_(scratch, Immediate(Map::Deprecated::kMask));
3066  j(not_zero, if_deprecated);
3067  }
3068 }
3069 
3070 
3071 void MacroAssembler::JumpIfBlack(Register object,
3072  Register scratch0,
3073  Register scratch1,
3074  Label* on_black,
3075  Label::Distance on_black_near) {
3076  HasColor(object, scratch0, scratch1,
3077  on_black, on_black_near,
3078  1, 0); // kBlackBitPattern.
3079  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3080 }
3081 
3082 
3083 void MacroAssembler::HasColor(Register object,
3084  Register bitmap_scratch,
3085  Register mask_scratch,
3086  Label* has_color,
3087  Label::Distance has_color_distance,
3088  int first_bit,
3089  int second_bit) {
3090  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
3091 
3092  GetMarkBits(object, bitmap_scratch, mask_scratch);
3093 
3094  Label other_color, word_boundary;
3095  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3096  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
3097  add(mask_scratch, mask_scratch); // Shift left 1 by adding.
3098  j(zero, &word_boundary, Label::kNear);
3099  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3100  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
3101  jmp(&other_color, Label::kNear);
3102 
3103  bind(&word_boundary);
3104  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
3105 
3106  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
3107  bind(&other_color);
3108 }
3109 
3110 
3111 void MacroAssembler::GetMarkBits(Register addr_reg,
3112  Register bitmap_reg,
3113  Register mask_reg) {
3114  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
3115  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
3116  and_(bitmap_reg, addr_reg);
3117  mov(ecx, addr_reg);
3118  int shift =
3119  Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
3120  shr(ecx, shift);
3121  and_(ecx,
3122  (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
3123 
3124  add(bitmap_reg, ecx);
3125  mov(ecx, addr_reg);
3126  shr(ecx, kPointerSizeLog2);
3127  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
3128  mov(mask_reg, Immediate(1));
3129  shl_cl(mask_reg);
3130 }
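// Decomposition sketch (illustrative): bitmap_reg is first masked down to the
// page start. The object's word index within the page (address >>
// kPointerSizeLog2) is then split: its high part, scaled to bytes per bitmap
// cell, is added to bitmap_reg to locate the cell, while its low
// kBitsPerCellLog2 bits select the bit, materialised as
// mask_reg = 1 << (word_index % kBitsPerCell).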
3131 
3132 
3133 void MacroAssembler::EnsureNotWhite(
3134  Register value,
3135  Register bitmap_scratch,
3136  Register mask_scratch,
3137  Label* value_is_white_and_not_data,
3138  Label::Distance distance) {
3139  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
3140  GetMarkBits(value, bitmap_scratch, mask_scratch);
3141 
3142  // If the value is black or grey we don't need to do anything.
3143  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3144  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3145  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3146  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3147 
3148  Label done;
3149 
3150  // Since both black and grey have a 1 in the first position and white does
3151  // not have a 1 there, we only need to check one bit.
3152  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3153  j(not_zero, &done, Label::kNear);
3154 
3155  if (emit_debug_code()) {
3156  // Check for impossible bit pattern.
3157  Label ok;
3158  push(mask_scratch);
3159  // Shift left by one (via add); may overflow, making the check conservative.
3160  add(mask_scratch, mask_scratch);
3161  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
3162  j(zero, &ok, Label::kNear);
3163  int3();
3164  bind(&ok);
3165  pop(mask_scratch);
3166  }
3167 
3168  // Value is white. We check whether it is data that doesn't need scanning.
3169  // Currently only checks for HeapNumber and non-cons strings.
3170  Register map = ecx; // Holds map while checking type.
3171  Register length = ecx; // Holds length of object after checking type.
3172  Label not_heap_number;
3173  Label is_data_object;
3174 
3175  // Check for heap-number
3176  mov(map, FieldOperand(value, HeapObject::kMapOffset));
3177  cmp(map, isolate()->factory()->heap_number_map());
3178  j(not_equal, &not_heap_number, Label::kNear);
3179  mov(length, Immediate(HeapNumber::kSize));
3180  jmp(&is_data_object, Label::kNear);
3181 
3182  bind(&not_heap_number);
3183  // Check for strings.
3185  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3186  // If it's a string and it's not a cons string then it's an object containing
3187  // no GC pointers.
3188  Register instance_type = ecx;
3189  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3190  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
3191  j(not_zero, value_is_white_and_not_data);
3192  // It's a non-indirect (non-cons and non-slice) string.
3193  // If it's external, the length is just ExternalString::kSize.
3194  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3195  Label not_external;
3196  // External strings are the only ones with the kExternalStringTag bit
3197  // set.
3200  test_b(instance_type, kExternalStringTag);
3201  j(zero, &not_external, Label::kNear);
3202  mov(length, Immediate(ExternalString::kSize));
3203  jmp(&is_data_object, Label::kNear);
3204 
3205  bind(&not_external);
3206  // Sequential string, either Latin1 or UC16.
3207  DCHECK(kOneByteStringTag == 0x04);
3208  and_(length, Immediate(kStringEncodingMask));
3209  xor_(length, Immediate(kStringEncodingMask));
3210  add(length, Immediate(0x04));
3211  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
3212  // by 2. If we multiply the string length as smi by this, it still
3213  // won't overflow a 32-bit value.
3214  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
3215  DCHECK(SeqOneByteString::kMaxSize <=
3216  static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
3217  imul(length, FieldOperand(value, String::kLengthOffset));
3218  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
3219  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
3220  and_(length, Immediate(~kObjectAlignmentMask));
3221 
3222  bind(&is_data_object);
3223  // Value is a data object, and it is white. Mark it black. Since we know
3224  // that the object is white we can make it black by flipping one bit.
3225  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
3226 
3227  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
3228  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
3229  length);
3230  if (emit_debug_code()) {
3231  mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3232  cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
3233  Check(less_equal, kLiveBytesCountOverflowChunkSize);
3234  }
3235 
3236  bind(&done);
3237 }
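// Size computation example (illustrative): for a sequential string the
// encoding bit is turned into 4 (Latin1) or 8 (UC16), i.e. the character size
// already shifted left by 2. Multiplying by the smi-tagged length and shifting
// right by 2 + kSmiTagSize + kSmiShiftSize undoes both scalings, so a Latin1
// string of length 10 yields 10 payload bytes, to which SeqString::kHeaderSize
// is added before rounding up to the object alignment.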
3238 
3239 
3240 void MacroAssembler::EnumLength(Register dst, Register map) {
3241  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3242  mov(dst, FieldOperand(map, Map::kBitField3Offset));
3243  and_(dst, Immediate(Map::EnumLengthBits::kMask));
3244  SmiTag(dst);
3245 }
3246 
3247 
3248 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
3249  Label next, start;
3250  mov(ecx, eax);
3251 
3252  // Check if the enum length field is properly initialized, indicating that
3253  // there is an enum cache.
3254  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3255 
3256  EnumLength(edx, ebx);
3257  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
3258  j(equal, call_runtime);
3259 
3260  jmp(&start);
3261 
3262  bind(&next);
3263  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
3264 
3265  // For all objects but the receiver, check that the cache is empty.
3266  EnumLength(edx, ebx);
3267  cmp(edx, Immediate(Smi::FromInt(0)));
3268  j(not_equal, call_runtime);
3269 
3270  bind(&start);
3271 
3272  // Check that there are no elements. Register ecx contains the current JS
3273  // object we've reached through the prototype chain.
3274  Label no_elements;
3275  mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
3276  cmp(ecx, isolate()->factory()->empty_fixed_array());
3277  j(equal, &no_elements);
3278 
3279  // Second chance, the object may be using the empty slow element dictionary.
3280  cmp(ecx, isolate()->factory()->empty_slow_element_dictionary());
3281  j(not_equal, call_runtime);
3282 
3283  bind(&no_elements);
3284  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
3285  cmp(ecx, isolate()->factory()->null_value());
3286  j(not_equal, &next);
3287 }
3288 
3289 
3290 void MacroAssembler::TestJSArrayForAllocationMemento(
3291  Register receiver_reg,
3292  Register scratch_reg,
3293  Label* no_memento_found) {
3294  ExternalReference new_space_start =
3295  ExternalReference::new_space_start(isolate());
3296  ExternalReference new_space_allocation_top =
3297  ExternalReference::new_space_allocation_top_address(isolate());
3298 
3299  lea(scratch_reg, Operand(receiver_reg,
3300  JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3301  cmp(scratch_reg, Immediate(new_space_start));
3302  j(less, no_memento_found);
3303  cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
3304  j(greater, no_memento_found);
3305  cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
3306  Immediate(isolate()->factory()->allocation_memento_map()));
3307 }
3308 
3309 
3310 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3311  Register object,
3312  Register scratch0,
3313  Register scratch1,
3314  Label* found) {
3315  DCHECK(!scratch1.is(scratch0));
3316  Factory* factory = isolate()->factory();
3317  Register current = scratch0;
3318  Label loop_again;
3319 
3320  // Start from the object itself; current walks up the prototype chain below.
3321  mov(current, object);
3322 
3323  // Loop based on the map going up the prototype chain.
3324  bind(&loop_again);
3325  mov(current, FieldOperand(current, HeapObject::kMapOffset));
3326  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
3327  DecodeField<Map::ElementsKindBits>(scratch1);
3328  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
3329  j(equal, found);
3330  mov(current, FieldOperand(current, Map::kPrototypeOffset));
3331  cmp(current, Immediate(factory->null_value()));
3332  j(not_equal, &loop_again);
3333 }
3334 
3335 
3336 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
3337  DCHECK(!dividend.is(eax));
3338  DCHECK(!dividend.is(edx));
3339  base::MagicNumbersForDivision<uint32_t> mag =
3340  base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
3341  mov(eax, Immediate(mag.multiplier));
3342  imul(dividend);
3343  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
3344  if (divisor > 0 && neg) add(edx, dividend);
3345  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
3346  if (mag.shift > 0) sar(edx, mag.shift);
3347  mov(eax, dividend);
3348  shr(eax, 31);
3349  add(edx, eax);
3350 }
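// Worked example (illustrative): for divisor == 3 the magic values work out to
// multiplier 0x55555556 and shift 0. With dividend == 7 the imul leaves 2 in
// edx (the high half of the 64-bit product), neither correction term applies,
// and adding dividend >> 31 (0 for non-negative inputs) leaves the truncated
// quotient 7 / 3 == 2 in edx.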
3351 
3352 
3353 } } // namespace v8::internal
3354 
3355 #endif // V8_TARGET_ARCH_X87
#define CHECK(condition)
Definition: logging.h:36
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
InvokeFlag
@ JUMP_FUNCTION
@ CALL_FUNCTION
AllocationFlags
@ RESULT_CONTAINS_TOP
@ DOUBLE_ALIGNMENT
@ SIZE_IN_WORDS
@ PRETENURE_OLD_POINTER_SPACE
@ TAG_OBJECT
@ PRETENURE_OLD_DATA_SPACE
#define STATIC_ASSERT(test)
Definition: macros.h:311
int int32_t
Definition: unicode.cc:24
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
MagicNumbersForDivision< T > SignedDivisionByConstant(T d)
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
const intptr_t kHeapObjectTagMask
Definition: v8.h:5739
const int kPointerSize
Definition: globals.h:129
const Register edx
const uint32_t kStringEncodingMask
Definition: objects.h:555
const Register edi
const Register r2
@ DONT_DO_SMI_CHECK
Definition: globals.h:640
@ DO_SMI_CHECK
Definition: globals.h:641
@ kSeqStringTag
Definition: objects.h:563
@ kConsStringTag
Definition: objects.h:564
@ kExternalStringTag
Definition: objects.h:565
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register esp
TypeImpl< ZoneTypeConfig > Type
const Register r0
const int kSmiTagSize
Definition: v8.h:5743
const int kNumSafepointRegisters
Definition: frames-arm.h:67
const int kDoubleSize
Definition: globals.h:127
const uint32_t kNotStringTag
Definition: objects.h:545
Operand FieldOperand(Register object, int offset)
const Address kZapValue
Definition: globals.h:269
const int kPointerSizeLog2
Definition: globals.h:147
const uint32_t kStringTag
Definition: objects.h:544
@ LAST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:785
@ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE
Definition: objects.h:784
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
@ LAST_NAME_TYPE
Definition: objects.h:755
@ FIRST_SPEC_OBJECT_TYPE
Definition: objects.h:781
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ FAST_HOLEY_SMI_ELEMENTS
Definition: elements-kind.h:17
const uint32_t kOneByteStringTag
Definition: objects.h:557
const intptr_t kObjectAlignmentMask
Definition: globals.h:227
const Register esi
@ FAIL_ON_MINUS_ZERO
Definition: globals.h:768
const Register eax
const int kShortSize
Definition: globals.h:123
const Register ebx
const bool FLAG_enable_slow_asserts
Definition: checks.h:31
int NumRegs(RegList reglist)
Definition: frames.cc:1582
static const int kInvalidEnumCacheSentinel
const char * GetBailoutReason(BailoutReason reason)
Condition NegateCondition(Condition cond)
Definition: constants-arm.h:86
const uint32_t kStringRepresentationMask
Definition: objects.h:561
uint32_t RegList
Definition: frames.h:18
const Register r1
OStream & dec(OStream &os)
Definition: ostreams.cc:122
const int kHeapObjectTag
Definition: v8.h:5737
const int kSmiShiftSize
Definition: v8.h:5805
const Register no_reg
const uint32_t kIsIndirectStringTag
Definition: objects.h:569
int TenToThe(int exponent)
Definition: utils.h:733
kFeedbackVectorOffset flag
Definition: objects-inl.h:5418
const uint32_t kInternalizedTag
Definition: objects.h:551
static const int kNumberDictionaryProbes
Definition: codegen.h:149
const int kBitsPerByte
Definition: globals.h:162
const intptr_t kSmiTagMask
Definition: v8.h:5744
const uint32_t kIsNotInternalizedMask
Definition: objects.h:549
const Register ebp
Operand ApiParameterOperand(int index)
const uint32_t kNaNOrInfinityLowerBoundUpper32
Definition: globals.h:658
bool is_intn(int64_t x, unsigned n)
Definition: utils.h:898
const int kSmiTag
Definition: v8.h:5742
static const int kNoCodeAgeSequenceLength
const uint32_t kHoleNanLower32
Definition: globals.h:657
const uint32_t kIsNotStringMask
Definition: objects.h:543
bool IsAligned(T value, U alignment)
Definition: utils.h:123
const int kCharSize
Definition: globals.h:122
const intptr_t kDoubleAlignment
Definition: globals.h:234
@ kPointersToHereAreAlwaysInteresting
const intptr_t kPointerAlignment
Definition: globals.h:230
void CopyBytes(uint8_t *target, uint8_t *source)
const intptr_t kDoubleAlignmentMask
Definition: globals.h:235
const uint32_t kIsIndirectStringMask
Definition: objects.h:568
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:130
const Register ecx
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
static Handle< Value > Throw(Isolate *isolate, const char *message)
Definition: d8.cc:72
@ NONE
bool is(Register reg) const