codegen-arm64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

#if defined(USE_SIMULATOR)
byte* fast_exp_arm64_machine_code = NULL;
double fast_exp_simulator(double x) {
  Simulator* simulator = Simulator::current(Isolate::Current());
  Simulator::CallArgument args[] = {
      Simulator::CallArgument(x),
      Simulator::CallArgument::End()
  };
  return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif
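
// Under USE_SIMULATOR the generated ARM64 code cannot be invoked directly
// from the non-ARM64 host process, so fast_exp_simulator() above marshals
// the double argument through Simulator::CallDouble instead of making a
// plain function call.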


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;

  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
  // create an AAPCS64-compliant exp() function. This will be faster than the
  // C library's exp() function, but probably less accurate.
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;

  ExternalReference::InitializeMathExpData();
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  masm.SetStackPointer(csp);

  // The argument will be in d0 on entry.
  DoubleRegister input = d0;
  // Use other caller-saved registers for all other values.
  DoubleRegister result = d1;
  DoubleRegister double_temp1 = d2;
  DoubleRegister double_temp2 = d3;
  Register temp1 = x10;
  Register temp2 = x11;
  Register temp3 = x12;

  MathExpGenerator::EmitMathExp(&masm, input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
  // Move the result to the return register.
  masm.Fmov(d0, result);
  masm.Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm64_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
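
// Usage sketch (hypothetical caller, not part of this file): the returned
// pointer is an ordinary C function and can be called like std::exp.
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.718281..., possibly less accurate than
//                              // the C library's exp().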


UnaryMathFunction CreateSqrtFunction() {
  return &std::sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
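
// BeforeCall()/AfterCall() bracket runtime calls made from stubs that have
// no frame of their own. has_frame() is only the MacroAssembler's
// bookkeeping flag; the DCHECKs verify that the two calls are correctly
// paired.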


// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  ASM_LOCATION(
      "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
  DCHECK(!AreAliased(receiver, key, value, target_map));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      x10,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
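
// Note on the write barrier above: storing a new map pointer into the
// receiver may create a reference the garbage collector must know about,
// so RecordWriteField emits the remembered-set update
// (EMIT_REMEMBERED_SET) rather than a bare store.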


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  Label gc_required, only_change_map;
  Register elements = x4;
  Register length = x5;
  Register array_size = x6;
  Register array = x7;

  Register scratch = x6;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array_size, array));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  __ Lsl(array_size, length, kDoubleSizeLog2);
  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
  // Register 'array' now holds a non-tagged heap object.

  // Set the destination FixedDoubleArray's length and map.
  Register map_root = array_size;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Add(x10, array, kHeapObjectTag);
  __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
                      scratch, kLRHasBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));

  FPRegister nan_d = d1;
  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));

  Label entry, done;
  __ B(&entry);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ B(&done);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(lr);
  __ B(fail);

  // Iterate over the array, copying and converting smis to doubles. If an
  // element is non-smi, write a hole to the destination.
  {
    Label loop;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
    __ Tst(x13, kSmiTagMask);
    __ Fcsel(d0, d0, nan_d, eq);
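    // Note: kSpeculativeUntag converts x13 unconditionally; the Tst/Fcsel
    // pair then keeps the converted value for smis (eq) and substitutes the
    // hole NaN for anything else, avoiding a branch in the loop body.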
    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(lr);
  __ Bind(&done);
}
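
// Tagging sketch (illustrative values): on arm64, a smi keeps its 32-bit
// payload in the upper word of the tagged 64-bit value, with tag bit 0
// clear, so for the element 42:
//
//   tagged value:  0x0000002a00000000  // Smi(42); (value & kSmiTagMask) == 0
//   stored double: 42.0                // written by the loop above
//   non-smi:       kHoleNanInt64 bits  // the hole NaN marks the gap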


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  Register elements = x4;
  Register array_size = x6;
  Register array = x7;
  Register length = x5;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array_size, array, length));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Label only_change_map;

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  // TODO(all): These registers may not need to be pushed. Examine
  // RecordWriteStub and check whether it's needed.
  __ Push(target_map, receiver, key, value);
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));
  // Allocate new FixedArray.
  Label gc_required;
  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);

  // Set the destination FixedArray's length and map.
  Register map_root = array_size;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedArray::kHeaderSize);
  __ Add(array, array, kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));

  Register the_hole = x14;
  Register heap_num_map = x15;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);

  Label entry;
  __ B(&entry);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  __ Pop(lr);
  __ B(fail);

  {
    Label loop, convert_hole;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ Cmp(x13, kHoleNanInt64);
    __ B(eq, &convert_hole);
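    // Note: the hole is recognised by comparing raw bit patterns in an
    // integer register; an FP compare would not work because the hole is a
    // NaN, and NaN never compares equal to itself.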

    // Non-hole double, copy value into a heap number.
    Register heap_num = length;
    Register scratch = array_size;
    Register scratch2 = elements;
    __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
                          x13, heap_num_map);
    __ Mov(x13, dst_elements);
    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    __ B(&entry);

    // Replace the-hole NaN with the-hole pointer.
    __ Bind(&convert_hole);
    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(value, key, receiver, target_map);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(lr);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
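
// Worked example of the transition above (hypothetical array contents): a
// FixedDoubleArray [1.5, <hole>, 2.5] becomes a FixedArray
// [HeapNumber(1.5), the_hole, HeapNumber(2.5)], with each non-hole double
// boxed in a freshly allocated HeapNumber.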


CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  PatchingAssembler patcher(young_sequence_.start(),
                            kNoCodeAgeSequenceLength / kInstructionSize);
  // The young sequence is the frame setup code for FUNCTION code types. It is
  // generated by FullCodeGenerator::Generate.
  MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);

#ifdef DEBUG
  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
  DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
  PatchingAssembler patcher_old(old_sequence_.start(), length);
  MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  return MacroAssembler::IsYoungSequence(isolate, sequence);
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    byte* target = sequence + kCodeAgeStubEntryOffset;
    Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  PatchingAssembler patcher(sequence,
                            kNoCodeAgeSequenceLength / kInstructionSize);
  if (age == kNoAgeCodeAge) {
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
  }
}
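
// Aging thus replaces the young frame-setup prologue with a code age
// sequence that enters the age stub; the stub's address, stored at
// kCodeAgeStubEntryOffset, is what GetCodeAgeAndParity() reads back to
// recover the age of non-young code.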


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
  // Fetch the instance type of the receiver into result register.
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ Ldr(result.W(),
         UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ Add(index, index, result.W());
  __ B(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ Bind(&cons_string);
  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ Bind(&indirect_string_loaded);
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ Bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ B(&check_encoding);

  // Handle external strings.
  __ Bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ Tst(result, kIsIndirectStringMask);
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
  // can be bound far away in deferred code.
  __ Tst(result, kShortExternalStringMask);
  __ B(ne, call_runtime);
  __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ Bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
  // Two-byte string.
  __ Ldrh(result, MemOperand(string, index, SXTW, 1));
  __ B(&done);
  __ Bind(&one_byte);
  // One-byte string.
  __ Ldrb(result, MemOperand(string, index, SXTW));
  __ Bind(&done);
}
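
// Addressing note for the loads above: MemOperand(string, index, SXTW, 1)
// sign-extends the 32-bit index and scales it by 2, so the Ldrh reads from
// string + (index << 1) for two-byte strings, while the one-byte Ldrb reads
// from string + index.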


static MemOperand ExpConstant(Register base, int index) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_temp1,
                                   DoubleRegister double_temp2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  // TODO(jbramley): There are several instances where fnmsub could be used
  // instead of fmul and fsub. Doing this changes the result, but since this is
  // an estimation anyway, does it matter?

  DCHECK(!AreAliased(input, result,
                     double_temp1, double_temp2,
                     temp1, temp2, temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;
  DoubleRegister double_temp3 = result;
  Register constants = temp3;

  // The algorithm used relies on some magic constants which are initialized in
  // ExternalReference::InitializeMathExpData().

  // Load the address of the start of the array.
  __ Mov(constants, ExternalReference::math_exp_constants(0));

  // We have to do a four-way split here:
  //  - If input <= about -708.4, the output always rounds to zero.
  //  - If input >= about 709.8, the output always rounds to +infinity.
  //  - If the input is NaN, the output is NaN.
  //  - Otherwise, the result needs to be calculated.
  Label result_is_finite_non_zero;
  // Assert that we can load offset 0 (the small input threshold) and offset 1
  // (the large input threshold) with a single ldp.
  DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
                       ExpConstant(constants, 0).offset()));
  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));

  __ Fcmp(input, double_temp1);
  __ Fccmp(input, double_temp2, NoFlag, hi);
  // At this point, the condition flags can be in one of five states:
  //   NZCV
  //   1000      -708.4 < input < 709.8    result = exp(input)
  //   0110      input == 709.8            result = +infinity
  //   0010      input > 709.8             result = +infinity
  //   0011      input is NaN              result = input
  //   0000      input <= -708.4           result = +0.0
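  // The table follows from Fccmp's semantics: the second comparison is only
  // performed when 'hi' holds after the first one (input above the small
  // threshold, or unordered); otherwise NZCV is set directly to NoFlag
  // (0000), which yields the final row.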

  // Continue the common case first. 'mi' tests N == 1.
  __ B(&result_is_finite_non_zero, mi);

  // TODO(jbramley): Consider adding a +infinity register for ARM64.
  __ Ldr(double_temp2, ExpConstant(constants, 2));  // Synthesize +infinity.

  // Select between +0.0 and +infinity. 'lo' tests C == 0.
  __ Fcsel(result, fp_zero, double_temp2, lo);
  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
  __ Fcsel(result, result, input, vc);
  __ B(&done);

  // The rest is magic, as described in InitializeMathExpData().
  __ Bind(&result_is_finite_non_zero);

  // Assert that we can load offset 3 and offset 4 with a single ldp.
  DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
                       ExpConstant(constants, 3).offset()));
  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
  __ Fmov(temp2.W(), double_temp1.S());
  __ Fsub(double_temp1, double_temp1, double_temp3);

  // Assert that we can load offset 5 and offset 6 with a single ldp.
  DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
                       ExpConstant(constants, 5).offset()));
  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp1, double_temp1, double_temp2);
  __ Fsub(double_temp1, double_temp1, input);

  __ Fmul(double_temp2, double_temp1, double_temp1);
  __ Fsub(double_temp3, double_temp3, double_temp1);
  __ Fmul(double_temp3, double_temp3, double_temp2);

  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));

  __ Ldr(double_temp2, ExpConstant(constants, 7));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp3, double_temp3, double_temp2);
  __ Fsub(double_temp3, double_temp3, double_temp1);

  // The 8th constant is 1.0, so use an immediate move rather than a load.
  // We can't generate a runtime assertion here as we would need to call Abort
  // in the runtime and we don't have an Isolate when we generate this code.
  __ Fmov(double_temp2, 1.0);
  __ Fadd(double_temp3, double_temp3, double_temp2);

  __ And(temp2, temp2, 0x7ff);
  __ Add(temp1, temp1, 0x3ff);

  // Do the final table lookup.
  __ Mov(temp3, ExternalReference::math_exp_log_table());

  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
  __ Bfi(temp2, temp1, 32, 32);
  __ Fmov(double_temp1, temp2);
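  // double_temp1 now holds a manufactured IEEE-754 double: the Ldp fetched
  // the two 32-bit halves of the table entry, the Orr spliced the biased
  // exponent (temp1, shifted to bit 20 of the high word, i.e. bit 52 of the
  // double) into the high half, and the Bfi merged that high word into bits
  // [63:32] above the low word before the Fmov.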

  __ Fmul(result, double_temp3, double_temp1);

  __ Bind(&done);
}
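
// In outline (the exact constants live in
// ExternalReference::InitializeMathExpData() and are not reproduced here):
// the code computes exp(x) as 2^k * T[j] * p(r), where x/ln(2) is scaled by
// 2^11 and rounded to an integer n = k*2^11 + j via the Fmadd
// add-magic-constant trick, T is a 2048-entry table (hence the And with
// 0x7ff), p(r) is a short polynomial in the remainder, and the exponent
// splice above applies the 2^k scaling.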

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64
Definition: accessors.cc:20