#if V8_TARGET_ARCH_ARM64

#define __ ACCESS_MASM(masm)
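// 'type' holds an instance type on entry and is not clobbered.
// Branches to 'global_object' if the receiver is a global object.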
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}
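// Helper function used from LoadIC GenerateNormal.
//
// elements: property dictionary; it is not clobbered if a jump to the miss
//           label is done.
// name:     property name; it is not clobbered if a jump to the miss label is
//           done.
// result:   register for the result; it is only updated if a miss does not
//           occur.
// The scratch registers must be different from elements, name and result.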
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);
  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, PropertyDetails::TypeField::kMask);
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
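// Helper function used from StoreIC::GenerateNormal.
//
// elements: property dictionary; it is not clobbered if a jump to the miss
//           label is done.
// name:     property name; it is not clobbered if a jump to the miss label is
//           done.
// value:    the value to store; it is not clobbered.
// The scratch registers must be different from elements, name and value.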
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property that
  // is not read-only.
  __ Bind(&done);
  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}
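// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the receiver
// in 'map_scratch' if the receiver is not a smi.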
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);

  // Get the map of the receiver and check the bit field: bail out to the slow
  // path if an access check is needed or the interceptor bit is set.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT a JS value type.
  // For value wrappers we enter the runtime, so that indexing into string
  // objects works as intended.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}
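// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.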
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register elements_map, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* slow) {
  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // elements_map is only used for the fast case check above; afterwards it is
  // re-used as a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // If the loaded value is the hole, defer to the runtime so that the
  // prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
  __ Mov(result, scratch2);
}
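// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name; the map of the key is returned
// in 'map_scratch'. Keys that are array index strings branch to
// 'index_string'; other non-unique keys branch to 'not_unique'.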
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map_scratch, Register hash_scratch,
                                 Label* index_string, Label* not_unique) {
  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  __ B(eq, &unique);  // Symbols are unique.

  // Is the string an array index with a cached numeric value? If so, extract
  // the index via 'index_string'.
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch,
                             Name::kContainsCachedArrayIndexMask, index_string);

  // Is the string internalized? We already know it's a string, so a single
  // bit test is enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}
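// Neither 'object' nor 'key' is modified by this function.
//
// On success, returns a MemOperand for the mapped (aliased) arguments slot
// that corresponds to 'key'. Jumps to 'unmapped_case' when the parameter map
// holds the hole for 'key', and to 'slow_case' when the receiver or key do
// not have the expected sloppy-arguments shape.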
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object, Register key,
                                                Register map, Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements map check
  // below, we do not need to check for interceptors or access checks here.
  __ JumpIfSmi(object, slow_case);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);

  // Load the elements object and check that it has the sloppy arguments
  // elements map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check whether the element is within the range of mapped arguments. If
  // not, jump to the unmapped lookup.
  __ Ldr(scratch1, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
  __ Sub(scratch1, scratch1, 2);
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load the element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load the value from the arguments context and return a MemOperand for it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  return MemOperand(scratch2, scratch1);
}
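// Returns a MemOperand for the element of the arguments backing store (the
// second element of the parameter map) that corresponds to 'key'. Jumps to
// 'slow_case' on a map mismatch or an out-of-bounds key.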
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // The element is in the arguments backing store, which is referenced by the
  // second element of the parameter map.
  Register backing_store = parameter_map;
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);

  // Check that the key is within the bounds of the backing store.
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store, backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}
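// LoadIC::GenerateNormal: probe the receiver's property dictionary and
// return the property value, falling back to the runtime on a miss.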
  Register dictionary = x0;
  Label slow;

  __ Ldr(dictionary,
         FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
  __ Ret();
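// LoadIC::GenerateMiss: push (receiver, name) and tail-call the runtime miss
// handler.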
  Isolate* isolate = masm->isolate();

  // Perform a tail call to the miss handler entry.
  __ Push(ReceiverRegister(), NameRegister());
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
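// LoadIC::GenerateRuntimeGetProperty: defer the load to the runtime.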
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
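// KeyedStoreIC::GenerateSloppyArguments: store through the mapped (aliased)
// arguments slot when the key is mapped, otherwise through the arguments
// backing store; both paths record the write for the GC.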
  Label slow, notin;

  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped = GenerateMappedArgumentsLookup(
      masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used to build a MemOperand; they are live for as long
  // as the MemOperand is live.
  Register unmapped1 = map;  // This is assumed to alias 'map'.
  Register unmapped2 = x4;

  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret();

  __ Bind(&slow);
  GenerateMiss(masm);
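// KeyedLoadIC::GenerateMiss: push (receiver, name) and tail-call the runtime
// miss handler.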
  Isolate* isolate = masm->isolate();

  __ Push(ReceiverRegister(), NameRegister());
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
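// KeyedLoadIC::GenerateRuntimeGetProperty: defer the keyed load to the
// runtime.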
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
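// Handles a keyed load when the key is a smi: try a fast array load first,
// then a number dictionary load.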
static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                        Register receiver, Register scratch1,
                                        Register scratch2, Register scratch3,
                                        Register scratch4, Register scratch5,
                                        Label* slow) {
  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;

  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
                        result, NULL, slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
                      scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, HeapObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
                              scratch4, scratch5);
  __ Ret();
}
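// Handles a keyed load when the key is a (unique) name: consult the keyed
// lookup cache first, then fall back to probing the receiver's property
// dictionary.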
static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
                                         Label* slow) {
  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;

  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the property dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Compute the keyed lookup cache hash from the receiver map and the name
  // hash, masked to the cache capacity.
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

  // Load the key (consisting of map and unique name) from the cache and
  // check for a match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, cache_keys);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Check the cached map entry first; scratch4 holds the cached entry.
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    // Then check the cached name.
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry: jump to the slow case on any mismatch.
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get the field offsets of the cache hits.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

  // Load an in-object property.
  __ Bind(&load_in_object_property);
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from the start of the object.
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Load a property array property.
  __ Bind(&property_array_property);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);

  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
                      scratch1, scratch2);
  __ Ret();
}
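// KeyedLoadIC::GenerateGeneric: dispatch on the key type: smi index, unique
// name, or string with a cached array index.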
  Label slow, check_name, index_smi, index_name;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // The key is known to be a smi here. This place is also jumped to from
  // below, where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  // Slow case.
  __ Bind(&slow);
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
                      x4, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
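// KeyedLoadIC::GenerateString: load a single character from a string
// receiver, falling back to the miss handler when the receiver is not a
// string or the index is not a number or is out of range.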
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register result = x0;
  Register scratch = x3;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
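// KeyedStoreIC::GenerateMiss: tail-call the runtime keyed-store miss handler.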
  // Push receiver, key and value for the runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
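// Emits the generic fast cases of a keyed store: fast object elements and
// fast double elements, including the elements kind transitions between
// them. The check_map and increment_length modes distinguish the in-bounds
// store from the array-growing store.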
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V". We have to go to the runtime if the current
  // value is the hole because there may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ Bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to the elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  // Store the value and, for heap objects, update the write barrier.
  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update the write barrier for the array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();

  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to
    // the runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?". We have to see whether the double
  // version of the hole is present. If so, go to the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ Cmp(x11, kHoleNanInt64);
  __ B(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // The value is a double: transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // The value is not a double: transition FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an object that is not
  // a HeapNumber: transition FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}
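// KeyedStoreIC::GenerateGeneric: generic keyed store. Stores into fast
// elements when possible, growing the backing store when allowed, and falls
// back to the runtime otherwise.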
  Label slow, fast_object, fast_double;
  Label fast_object_grow;
  Label fast_double_grow;

  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not; non-array objects store into
  // fast elements when the key is within bounds.
  Register instance_type = x10;
  __ B(hi, &fast_object);

  // When growing is required, pick the grow variant based on the elements map.
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
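// StoreIC::GenerateMegamorphic: probe the stub cache for a store handler and
// jump to the runtime on a miss.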
  Register receiver = ReceiverRegister();
  Register name = NameRegister();

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
                                               name, x3, x4, x5, x6);

  // Cache miss: jump to the runtime.
  GenerateMiss(masm);
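// StoreIC::GenerateMiss: tail-call the runtime store miss handler.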
  // Push receiver, name and value for the runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
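// StoreIC::GenerateNormal: store into the receiver's property dictionary,
// counting hits and misses.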
  Label miss;
  Register value = ValueRegister();
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register dictionary = x3;

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: jump to the runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
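// CompareIC::ComputeCondition maps a comparison token onto the ARM64
// condition code tested by the inlined comparison.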
Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}
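// Returns whether an inlined smi check sequence is present at 'address'.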
bool CompareIC::HasInlinedSmiCode(Address address) {
  InstructionSequence* patch_info = InstructionSequence::At(address);
  return patch_info->IsInlineData();
}
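// Enables or disables the inlined smi check emitted by JumpPatchSite by
// patching the recorded tbz/tbnz instruction in place.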
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded directly after the call to the helper
  // function that requested this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
           info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi(), changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>.
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}

#endif  // V8_TARGET_ARCH_ARM64