V8 Project
codegen-x64.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #if V8_TARGET_ARCH_X64
8 
9 #include "src/codegen.h"
10 #include "src/macro-assembler.h"
11 
12 namespace v8 {
13 namespace internal {
14 
15 // -------------------------------------------------------------------------
16 // Platform-specific RuntimeCallHelper functions.
17 
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  // Build an INTERNAL stack frame for the duration of the runtime call.
  masm->EnterFrame(StackFrame::INTERNAL);
  // The assembler must not already think it has a frame; record that one
  // now exists so frame-requiring code generation is permitted.
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}
23 
24 
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  // Tear down the INTERNAL frame created by BeforeCall.
  masm->LeaveFrame(StackFrame::INTERNAL);
  // BeforeCall must have set the flag; clear it again so the assembler
  // state matches the machine state.
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
30 
31 
32 #define __ masm.
33 
34 
36  if (!FLAG_fast_math) return &std::exp;
37  size_t actual_size;
38  byte* buffer =
39  static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
40  if (buffer == NULL) return &std::exp;
41  ExternalReference::InitializeMathExpData();
42 
43  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
44  // xmm0: raw double input.
45  XMMRegister input = xmm0;
46  XMMRegister result = xmm1;
47  __ pushq(rax);
48  __ pushq(rbx);
49 
50  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
51 
52  __ popq(rbx);
53  __ popq(rax);
54  __ movsd(xmm0, result);
55  __ Ret();
56 
57  CodeDesc desc;
58  masm.GetCode(&desc);
59  DCHECK(!RelocInfo::RequiresRelocation(desc));
60 
61  CpuFeatures::FlushICache(buffer, actual_size);
62  base::OS::ProtectCode(buffer, actual_size);
63  return FUNCTION_CAST<UnaryMathFunction>(buffer);
64 }
65 
66 
68  size_t actual_size;
69  // Allocate buffer in executable space.
70  byte* buffer =
71  static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
72  if (buffer == NULL) return &std::sqrt;
73 
74  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
75  // xmm0: raw double input.
76  // Move double input into registers.
77  __ sqrtsd(xmm0, xmm0);
78  __ Ret();
79 
80  CodeDesc desc;
81  masm.GetCode(&desc);
82  DCHECK(!RelocInfo::RequiresRelocation(desc));
83 
84  CpuFeatures::FlushICache(buffer, actual_size);
85  base::OS::ProtectCode(buffer, actual_size);
86  return FUNCTION_CAST<UnaryMathFunction>(buffer);
87 }
88 
89 
#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
// NOTE(review): the Allocate(...) continuation line was lost in extraction;
// restored using Assembler::kMinimalBufferSize per the symbol index.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(
      base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable, buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
  // returns result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.

  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
  __ movsd(Operand(rsp, kRegisterSize), xmm0);
  __ fld_d(Operand(rsp, kRegisterSize * 2));
  __ fld_d(Operand(rsp, kRegisterSize));

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Illegal Operand or Zero Division exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1)
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, computation only has partial result. Loop to
    // continue computation.
    __ j(not_zero, &partial_remainder_loop);
  }

  Label valid_result;
  Label return_result;
  // If Invalid Operand or Zero Division exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue);
  __ movq(Operand(rsp, kRegisterSize), rcx);
  __ movsd(xmm0, Operand(rsp, kRegisterSize));
  __ jmp(&return_result);

  // If result is valid, return that.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kRegisterSize));
  __ movsd(xmm0, Operand(rsp, kRegisterSize));

  // Clean up FPU stack and exceptions and return xmm0
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception*/));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  base::OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}

#endif
178 
179 #undef __
180 
181 // -------------------------------------------------------------------------
182 // Code generators
183 
184 #define __ ACCESS_MASM(masm)
185 
187  MacroAssembler* masm,
188  Register receiver,
189  Register key,
190  Register value,
191  Register target_map,
193  Label* allocation_memento_found) {
194  // Return address is on the stack.
195  Register scratch = rdi;
196  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
197 
198  if (mode == TRACK_ALLOCATION_SITE) {
199  DCHECK(allocation_memento_found != NULL);
200  __ JumpIfJSArrayHasAllocationMemento(
201  receiver, scratch, allocation_memento_found);
202  }
203 
204  // Set transitioned map.
205  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
206  __ RecordWriteField(receiver,
208  target_map,
209  scratch,
213 }
214 
215 
217  MacroAssembler* masm,
218  Register receiver,
219  Register key,
220  Register value,
221  Register target_map,
223  Label* fail) {
224  // Return address is on the stack.
225  DCHECK(receiver.is(rdx));
226  DCHECK(key.is(rcx));
227  DCHECK(value.is(rax));
228  DCHECK(target_map.is(rbx));
229 
230  // The fail label is not actually used since we do not allocate.
231  Label allocated, new_backing_store, only_change_map, done;
232 
233  if (mode == TRACK_ALLOCATION_SITE) {
234  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
235  }
236 
237  // Check for empty arrays, which only require a map transition and no changes
238  // to the backing store.
240  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
241  __ j(equal, &only_change_map);
242 
244  if (kPointerSize == kDoubleSize) {
245  // Check backing store for COW-ness. For COW arrays we have to
246  // allocate a new backing store.
248  Heap::kFixedCOWArrayMapRootIndex);
249  __ j(equal, &new_backing_store);
250  } else {
251  // For x32 port we have to allocate a new backing store as SMI size is
252  // not equal with double size.
254  __ jmp(&new_backing_store);
255  }
256 
257  // Check if the backing store is in new-space. If not, we need to allocate
258  // a new one since the old one is in pointer-space.
259  // If in new space, we can reuse the old backing store because it is
260  // the same size.
261  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
262 
263  __ movp(r14, r8); // Destination array equals source array.
264 
265  // r8 : source FixedArray
266  // r9 : elements array length
267  // r14: destination FixedDoubleArray
268  // Set backing store's map
269  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
271 
272  __ bind(&allocated);
273  // Set transitioned map.
275  __ RecordWriteField(rdx,
277  rbx,
278  rdi,
282 
283  // Convert smis to doubles and holes to hole NaNs. The Array's length
284  // remains unchanged.
287 
288  Label loop, entry, convert_hole;
289  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
290  // r15: the-hole NaN
291  __ jmp(&entry);
292 
293  // Allocate new backing store.
294  __ bind(&new_backing_store);
295  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
296  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
297  // Set backing store's map
298  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
300  // Set receiver's backing store.
302  __ movp(r11, r14);
303  __ RecordWriteField(rdx,
305  r11,
306  r15,
310  // Set backing store's length.
311  __ Integer32ToSmi(r11, r9);
313  __ jmp(&allocated);
314 
315  __ bind(&only_change_map);
316  // Set transitioned map.
318  __ RecordWriteField(rdx,
320  rbx,
321  rdi,
325  __ jmp(&done);
326 
327  // Conversion loop.
328  __ bind(&loop);
329  __ movp(rbx,
331  // r9 : current element's index
332  // rbx: current element (smi-tagged)
333  __ JumpIfNotSmi(rbx, &convert_hole);
334  __ SmiToInteger32(rbx, rbx);
335  __ Cvtlsi2sd(xmm0, rbx);
337  xmm0);
338  __ jmp(&entry);
339  __ bind(&convert_hole);
340 
341  if (FLAG_debug_code) {
342  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
343  __ Assert(equal, kObjectFoundInSmiOnlyArray);
344  }
345 
347  __ bind(&entry);
348  __ decp(r9);
349  __ j(not_sign, &loop);
350 
351  __ bind(&done);
352 }
353 
354 
356  MacroAssembler* masm,
357  Register receiver,
358  Register key,
359  Register value,
360  Register target_map,
362  Label* fail) {
363  // Return address is on the stack.
364  DCHECK(receiver.is(rdx));
365  DCHECK(key.is(rcx));
366  DCHECK(value.is(rax));
367  DCHECK(target_map.is(rbx));
368 
369  Label loop, entry, convert_hole, gc_required, only_change_map;
370 
371  if (mode == TRACK_ALLOCATION_SITE) {
372  __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
373  }
374 
375  // Check for empty arrays, which only require a map transition and no changes
376  // to the backing store.
378  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
379  __ j(equal, &only_change_map);
380 
381  __ Push(rax);
382 
385  // r8 : source FixedDoubleArray
386  // r9 : number of elements
388  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
389  // r11: destination FixedArray
390  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
392  __ Integer32ToSmi(r14, r9);
394 
395  // Prepare for conversion loop.
396  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
397  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
398  // rsi: the-hole NaN
399  // rdi: pointer to the-hole
400  __ jmp(&entry);
401 
402  // Call into runtime if GC is required.
403  __ bind(&gc_required);
404  __ Pop(rax);
406  __ jmp(fail);
407 
408  // Box doubles into heap numbers.
409  __ bind(&loop);
410  __ movq(r14, FieldOperand(r8,
411  r9,
412  times_8,
414  // r9 : current element's index
415  // r14: current element
416  __ cmpq(r14, rsi);
417  __ j(equal, &convert_hole);
418 
419  // Non-hole double, copy value into a heap number.
420  __ AllocateHeapNumber(rax, r15, &gc_required);
421  // rax: new heap number
423  __ movp(FieldOperand(r11,
424  r9,
427  rax);
428  __ movp(r15, r9);
429  __ RecordWriteArray(r11,
430  rax,
431  r15,
435  __ jmp(&entry, Label::kNear);
436 
437  // Replace the-hole NaN with the-hole pointer.
438  __ bind(&convert_hole);
439  __ movp(FieldOperand(r11,
440  r9,
443  rdi);
444 
445  __ bind(&entry);
446  __ decp(r9);
447  __ j(not_sign, &loop);
448 
449  // Replace receiver's backing store with newly created and filled FixedArray.
451  __ RecordWriteField(rdx,
453  r11,
454  r15,
458  __ Pop(rax);
460 
461  __ bind(&only_change_map);
462  // Set transitioned map.
464  __ RecordWriteField(rdx,
466  rbx,
467  rdi,
471 }
472 
473 
474 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
475  Register string,
476  Register index,
477  Register result,
478  Label* call_runtime) {
479  // Fetch the instance type of the receiver into result register.
480  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
481  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
482 
483  // We need special handling for indirect strings.
484  Label check_sequential;
485  __ testb(result, Immediate(kIsIndirectStringMask));
486  __ j(zero, &check_sequential, Label::kNear);
487 
488  // Dispatch on the indirect string shape: slice or cons.
489  Label cons_string;
490  __ testb(result, Immediate(kSlicedNotConsMask));
491  __ j(zero, &cons_string, Label::kNear);
492 
493  // Handle slices.
494  Label indirect_string_loaded;
495  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
496  __ addp(index, result);
497  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
498  __ jmp(&indirect_string_loaded, Label::kNear);
499 
500  // Handle cons strings.
501  // Check whether the right hand side is the empty string (i.e. if
502  // this is really a flat string in a cons string). If that is not
503  // the case we would rather go to the runtime system now to flatten
504  // the string.
505  __ bind(&cons_string);
506  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
507  Heap::kempty_stringRootIndex);
508  __ j(not_equal, call_runtime);
509  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
510 
511  __ bind(&indirect_string_loaded);
512  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
513  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
514 
515  // Distinguish sequential and external strings. Only these two string
516  // representations can reach here (slices and flat cons strings have been
517  // reduced to the underlying sequential or external string).
518  Label seq_string;
519  __ bind(&check_sequential);
521  __ testb(result, Immediate(kStringRepresentationMask));
522  __ j(zero, &seq_string, Label::kNear);
523 
524  // Handle external strings.
525  Label one_byte_external, done;
526  if (FLAG_debug_code) {
527  // Assert that we do not have a cons or slice (indirect strings) here.
528  // Sequential strings have already been ruled out.
529  __ testb(result, Immediate(kIsIndirectStringMask));
530  __ Assert(zero, kExternalStringExpectedButNotFound);
531  }
532  // Rule out short external strings.
534  __ testb(result, Immediate(kShortExternalStringTag));
535  __ j(not_zero, call_runtime);
536  // Check encoding.
538  __ testb(result, Immediate(kStringEncodingMask));
539  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
540  __ j(not_equal, &one_byte_external, Label::kNear);
541  // Two-byte string.
542  __ movzxwl(result, Operand(result, index, times_2, 0));
543  __ jmp(&done, Label::kNear);
544  __ bind(&one_byte_external);
545  // One-byte string.
546  __ movzxbl(result, Operand(result, index, times_1, 0));
547  __ jmp(&done, Label::kNear);
548 
549  // Dispatch on the encoding: one-byte or two-byte.
550  Label one_byte;
551  __ bind(&seq_string);
554  __ testb(result, Immediate(kStringEncodingMask));
555  __ j(not_zero, &one_byte, Label::kNear);
556 
557  // Two-byte string.
558  // Load the two-byte character code into the result register.
559  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
560  __ movzxwl(result, FieldOperand(string,
561  index,
562  times_2,
564  __ jmp(&done, Label::kNear);
565 
566  // One-byte string.
567  // Load the byte into the result register.
568  __ bind(&one_byte);
569  __ movzxbl(result, FieldOperand(string,
570  index,
571  times_1,
573  __ bind(&done);
574 }
575 
576 
// Emits code computing exp(|input|) into |result| via a table-driven
// range-reduction scheme.  Clobbers |input|, |double_scratch|, |temp1|,
// |temp2| and kScratchRegister.  Reads the constant block from
// ExternalReference::math_exp_constants and the 2048-entry
// math_exp_log_table; the meaning of each constant slot is defined by
// InitializeMathExpData (not visible here) — slot comments below are
// inferred from usage, confirm against codegen.cc.
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  // Range check against constant slot 0: below that bound the result is 0.
  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  // Range check against slot 1: above it, return the value in slot 2
  // (presumably +infinity — TODO confirm against the table initializer).
  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  // Range reduction: combine the input with slots 3/4, extract an integer
  // key into temp1/temp2 for the log-table lookup.
  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));  // Low 11 bits index the 2048-entry table.
  __ shrq(temp1, Immediate(11));
  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  // Build the scale factor: exponent bits from temp1 merged with the
  // table entry's mantissa bits.
  __ shlq(temp1, Immediate(52));
  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  // Polynomial evaluation on the reduced argument (slots 7 and 8 are the
  // polynomial coefficients), then scale by the reconstructed power of two.
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ subsd(result, double_scratch);
  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ mulsd(result, input);

  __ bind(&done);
}
628 
629 #undef __
630 
631 
634  // The sequence of instructions that is patched out for aging code is the
635  // following boilerplate stack-building prologue that is found both in
636  // FUNCTION and OPTIMIZED_FUNCTION code:
637  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
638  patcher.masm()->pushq(rbp);
639  patcher.masm()->movp(rbp, rsp);
640  patcher.masm()->Push(rsi);
641  patcher.masm()->Push(rdi);
642 }
643 
644 
645 #ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  // Aged code is patched by PatchPlatformCodeAge to start with a call to
  // the age stub, so a leading call opcode identifies an old sequence.
  return *candidate == kCallOpcode;
}
649 #endif
650 
651 
652 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
653  bool result = isolate->code_aging_helper()->IsYoung(sequence);
654  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
655  return result;
656 }
657 
658 
659 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
660  MarkingParity* parity) {
661  if (IsYoungSequence(isolate, sequence)) {
662  *age = kNoAgeCodeAge;
663  *parity = NO_MARKING_PARITY;
664  } else {
665  sequence++; // Skip the kCallOpcode byte
666  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
668  Code* stub = GetCodeFromTargetAddress(target_address);
669  GetCodeAgeAndParity(stub, age, parity);
670  }
671 }
672 
673 
674 void Code::PatchPlatformCodeAge(Isolate* isolate,
675  byte* sequence,
676  Code::Age age,
677  MarkingParity parity) {
678  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
679  if (age == kNoAgeCodeAge) {
680  isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
681  CpuFeatures::FlushICache(sequence, young_length);
682  } else {
683  Code* stub = GetCodeAgeStub(isolate, age, parity);
684  CodePatcher patcher(sequence, young_length);
685  patcher.masm()->call(stub->instruction_start());
686  patcher.masm()->Nop(
688  }
689 }
690 
691 
692 Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
693  DCHECK(index >= 0);
694  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
695  int displacement_to_last_argument = base_reg_.is(rsp) ?
697  displacement_to_last_argument += extra_displacement_to_last_argument_;
698  if (argument_count_reg_.is(no_reg)) {
699  // argument[0] is at base_reg_ + displacement_to_last_argument +
700  // (argument_count_immediate_ + receiver - 1) * kPointerSize.
701  DCHECK(argument_count_immediate_ + receiver > 0);
702  return Operand(base_reg_, displacement_to_last_argument +
703  (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
704  } else {
705  // argument[0] is at base_reg_ + displacement_to_last_argument +
706  // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
707  return Operand(base_reg_, argument_count_reg_, times_pointer_size,
708  displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
709  }
710 }
711 
712 
713 } } // namespace v8::internal
714 
715 #endif // V8_TARGET_ARCH_X64
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
static void ProtectCode(void *address, const size_t size)
static const int kMinimalBufferSize
Definition: assembler.h:92
static const int kShortCallInstructionLength
static const int kCallTargetAddressOffset
const EmbeddedVector< byte, kNoCodeAgeSequenceLength > young_sequence_
Definition: codegen.h:171
static Code * GetCodeAgeStub(Isolate *isolate, Age age, MarkingParity parity)
Definition: objects.cc:10561
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:5018
static void PatchPlatformCodeAge(Isolate *isolate, byte *sequence, Age age, MarkingParity parity)
static bool IsYoungSequence(Isolate *isolate, byte *sequence)
static void GetCodeAgeAndParity(Code *code, Age *age, MarkingParity *parity)
Definition: objects.cc:10525
static const int kFirstOffset
Definition: objects.h:9061
static const int kSecondOffset
Definition: objects.h:9062
static void FlushICache(void *start, size_t size)
static void GenerateSmiToDouble(MacroAssembler *masm, Register receiver, Register key, Register value, Register target_map, AllocationSiteMode mode, Label *fail)
static void GenerateMapChangeElementsTransition(MacroAssembler *masm, Register receiver, Register key, Register value, Register target_map, AllocationSiteMode mode, Label *allocation_memento_found)
static void GenerateDoubleToObject(MacroAssembler *masm, Register receiver, Register key, Register value, Register target_map, AllocationSiteMode mode, Label *fail)
static const int kResourceDataOffset
Definition: objects.h:9138
static const int kLengthOffset
Definition: objects.h:2392
static const int kHeaderSize
Definition: objects.h:2393
static const int kValueOffset
Definition: objects.h:1506
static const int kMapOffset
Definition: objects.h:1427
static const int kElementsOffset
Definition: objects.h:2194
static const int kInstanceTypeOffset
Definition: objects.h:6229
static void EmitMathExp(MacroAssembler *masm, DwVfpRegister input, DwVfpRegister result, DwVfpRegister double_scratch1, DwVfpRegister double_scratch2, Register temp1, Register temp2, Register temp3)
static const int kHeaderSize
Definition: objects.h:8941
static const int kParentOffset
Definition: objects.h:9104
static const int kOffsetOffset
Definition: objects.h:9105
static const int kContextOffset
Definition: frames.h:162
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
virtual void AfterCall(MacroAssembler *masm) const
virtual void BeforeCall(MacroAssembler *masm) const
T * start() const
Definition: vector.h:47
int length() const
Definition: vector.h:41
#define __
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define CHECK(condition)
Definition: logging.h:36
#define DCHECK(condition)
Definition: logging.h:205
@ TAG_OBJECT
#define V8_INT64_C(x)
Definition: macros.h:358
static int Push(SpecialRPOStackFrame *stack, int depth, BasicBlock *child, int unvisited)
Definition: scheduler.cc:773
const int kPointerSize
Definition: globals.h:129
const uint32_t kStringEncodingMask
Definition: objects.h:555
const int KB
Definition: globals.h:106
@ TRACK_ALLOCATION_SITE
Definition: objects.h:8085
@ kSeqStringTag
Definition: objects.h:563
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const XMMRegister xmm1
const int kPCOnStackSize
Definition: globals.h:135
const Register kScratchRegister
const uint32_t kTwoByteStringTag
Definition: objects.h:556
const uint32_t kShortExternalStringTag
Definition: objects.h:590
const int kSmiTagSize
Definition: v8.h:5743
const XMMRegister xmm2
const int kDoubleSize
Definition: globals.h:127
Operand FieldOperand(Register object, int offset)
const Register rsi
const Register rbp
const Register r11
const uint32_t kOneByteStringTag
Definition: objects.h:557
const int kRegisterSize
Definition: globals.h:133
const Register rdi
UnaryMathFunction CreateExpFunction()
@ NO_MARKING_PARITY
Definition: objects.h:298
const Register r9
const XMMRegister xmm0
const Register rbx
const uint32_t kStringRepresentationMask
Definition: objects.h:561
@ ARGUMENTS_CONTAIN_RECEIVER
Definition: codegen-x64.h:49
byte * Address
Definition: globals.h:101
const uint32_t kSlicedNotConsMask
Definition: objects.h:579
const Register r8
const int kFPOnStackSize
Definition: globals.h:136
const Register no_reg
const Register rdx
static const byte kCallOpcode
const Register rax
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const uint64_t kHoleNanInt64
Definition: globals.h:660
const Register r15
UnaryMathFunction CreateSqrtFunction()
double(* UnaryMathFunction)(double x)
Definition: codegen.h:98
const int kSmiTag
Definition: v8.h:5742
static const int kNoCodeAgeSequenceLength
const Register rcx
const Register r14
const Register rsp
const uint32_t kIsIndirectStringMask
Definition: objects.h:568
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20