#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC GenerateNormal.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}
// Helper used from LoadIC::GenerateNormal: probes the property dictionary in
// |elements| for |name|; loads the value into |result| or jumps to |miss|.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  Label done;
  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);
  // Probing found an entry; check that the value is a normal property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);
  // Get the value at the masked, scaled index.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
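// Note on the offset arithmetic above: a NameDictionary stores each entry as
// a (key, value, details) triple in its elements array, so relative to the
// entry start the value lives at +1 * kPointerSize and the details word at
// +2 * kPointerSize. The details word is a smi, which is why the TypeField
// mask is shifted left by kSmiTagSize before testing; a non-zero type marks
// the entry as something other than a normal data property.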
// Helper used from StoreIC::GenerateNormal: probes the property dictionary in
// |elements| for |name| and stores |value|; jumps to |miss| if the entry is
// absent, not a normal property, or read-only.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  Label done;
  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);
  // Check that the found entry is a normal, writable property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);
  // Store the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));
  // Update the write barrier; make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}
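// The RecordWrite above is required because storing |value| into the
// dictionary creates a heap reference the incremental/generational GC must
// be told about. The value is copied into scratch1 first so the original
// register is preserved for the caller; scratch2 already points at the slot.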
// Checks the receiver for special cases (value type, slow-case bits).
// Falls through for regular JS objects; jumps to |slow| otherwise.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver; bail out if access checks are needed, the
  // given interceptor bit is set, or the instance type is below JS_OBJECT_TYPE.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}
// Loads an indexed element from a fast-case array into |result|.
// If |not_fast_array| is NULL the elements map check is skipped.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  // Fast case: do the load.
  __ add(scratch1, elements,
         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  // If the loaded value is the hole, bail out so the prototype chain is
  // searched by GetProperty.
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
}
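// MemOperand::PointerAddressFromSmiKey exploits the 32-bit smi encoding: a
// smi is the integer shifted left by one (kSmiTagSize == 1), so with 4-byte
// pointers the element address is base + (key << (kPointerSizeLog2 -
// kSmiTagSize)); the tagged key doubles as an almost-scaled index.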
// Checks whether |key| is an array-index string or a unique name. Falls
// through if the key is a unique name; jumps to |index_string| for cached
// array indices and to |not_unique| otherwise.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ b(hi, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ b(eq, &unique);
  // Is the string an array index with a cached numeric value?
  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);
  // Is the string internalized? A single bit test is enough.
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tst(hash, Operand(kIsNotInternalizedMask));
  __ b(ne, not_unique);
  __ bind(&unique);
}
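// Only unique names (internalized strings and symbols) fall through here:
// uniqueness guarantees that pointer identity implies name equality, which
// is what the keyed lookup cache and dictionary probes below rely on.
// Strings carrying a cached array index are instead rerouted through the
// smi-key path, and all other strings take the slow path.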
void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r0;
  Label slow;

  __ ldr(dictionary,
         FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}
// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }
void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss),
                                            isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
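// In the two tail calls above the trailing arguments (2, 1) are the number
// of arguments just pushed and the result size in words: the IC discards its
// own frame, so the runtime function returns directly to the IC's caller.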
static MemOperand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Register scratch3, Label* unmapped_case,
    Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. The map check below makes
  // explicit interceptor and access checks unnecessary.
  __ JumpIfSmi(object, slow_case);

  // Check that the key is a positive smi.
  __ tst(key, Operand(0x80000001));
  __ b(ne, slow_case);

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if the key is within the range of mapped arguments; if not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ cmp(key, Operand(scratch2));
  __ b(cs, unmapped_case);

  // Load the element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, key, scratch3);
  __ add(scratch3, scratch3, Operand(kOffset));
  __ ldr(scratch2, MemOperand(scratch1, scratch3));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, scratch3);
  __ b(eq, unmapped_case);

  // Load the value from the context and return its address. scratch1 can be
  // reused because the unmapped lookup is not reached from here.
  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, scratch2, scratch3);
  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  return MemOperand(scratch1, scratch3);
}
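// Layout assumed above: the sloppy-arguments elements array starts with the
// context and the arguments backing store, followed by one slot per mapped
// parameter (hence the Smi::FromInt(2) length bias and the kOffset of
// FixedArray::kHeaderSize + 2 * kPointerSize). A mapped slot holds the
// context index of the aliased parameter, or the hole if the mapping was
// deleted, in which case the lookup falls through to the backing store.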
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // The element is in the arguments backing store, referenced from the
  // second slot of the parameter map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch));
  __ b(cs, slow_case);
  __ mov(scratch, Operand(kPointerSize >> 1));
  __ mul(scratch, key, scratch);
  __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  return MemOperand(backing_store, scratch);
}
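// The unmapped case reduces to a bounds-checked FixedArray access into the
// arguments backing store. Note that |parameter_map| is overwritten with the
// backing store pointer, which is why callers must reload it before any
// further mapped lookup.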
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register value = ValueRegister();
  Label slow, notin;

  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, r3, r4, r5, &notin, &slow);
  __ str(value, mapped_location);
  __ add(r6, r3, r5);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
  __ str(value, unmapped_location);
  __ add(r6, r3, r4);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ bind(&slow);
  GenerateMiss(masm);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);

  __ Push(ReceiverRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(ReceiverRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // Return address is in lr.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from
  // below where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r0, r3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));

  // Check whether the elements is a number dictionary.
  // r3: elements map, r4: elements.
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ SmiUntag(r0, key);
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
  __ Ret();

  // Slow case: key and receiver are still intact.
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver and compute the keyed lookup cache hash
  // from the map pointer and the name hash.
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(r3, r3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check that both equal the map and name in question.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ mov(r4, Operand(cache_keys));
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and move r4 to the next entry.
    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
    __ cmp(r0, r5);
    __ b(ne, &try_next_entry);
    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name.
    __ cmp(key, r5);
    __ b(eq, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  // Last entry: load map and move r4 to the name.
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ cmp(r0, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(key, r5);
  __ b(ne, &slow);

  // Get the field offset.
  // r0: receiver's map, r3: lookup cache index.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ mov(r4, Operand(cache_field_offsets));
    if (i != 0) {
      __ add(r3, r3, Operand(i));
    }
    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
    __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
    __ sub(r5, r5, r6, SetCC);
    __ b(ge, &property_array_property);
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ add(receiver, receiver,
         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ bind(&probe_dictionary);
  // r3: elements.
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
  // Load the property into r0.
  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}
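// Shape of the keyed lookup cache assumed by the probing code above: the
// key table stores (map, name) pairs in buckets of kEntriesPerBucket
// consecutive entries, and a parallel table keeps the corresponding field
// offsets. A hit yields a property index that is either below the number of
// in-object properties (load from inside the object) or, after subtracting
// that count, an index into the out-of-line property array.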
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register scratch = r3;
  Register result = r0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = r4;
  Register address = r5;
  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V". Go to the runtime if the current value is
  // the hole because there may be a callback on the element.
  Label holecheck_passed1;
  __ add(address, elements,
         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);
  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether the array is smi-only when writing a smi.
  __ add(address, elements,
         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to the elements-kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array: store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements,
         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address; preserve the value.
  __ mov(scratch_value, value);
  __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to
    // the runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: go to the runtime if the double version of the hole is
  // present in the destination slot.
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
                 kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch_value, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double: transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
  // and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double: FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an object that is
  // not a HeapNumber: transition FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
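// The transitions above walk the usual elements-kind lattice:
// FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS when a heap number is stored,
// FAST_SMI_ELEMENTS -> FAST_ELEMENTS for any other non-smi, and
// FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS when a non-number lands in a double
// array. Each transition installs the transitioned map, migrates the backing
// store via the ElementsTransitionGenerator, reloads |elements|, and then
// re-enters the matching fast store path.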
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed; the generic stub does not support either.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JSObject.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: check the key against the length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case: jump to the runtime.
  __ bind(&slow);
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: check if there is extra capacity to perform the
  // store and update the length; used when writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: get the length and the elements array from the JS array.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register name = NameRegister();

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
                                               name, r3, r4, r5, r6);

  // Cache miss: jump to runtime.
  GenerateMiss(masm);
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register value = ValueRegister();
  Register dictionary = r3;

  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  GenerateMiss(masm);
}
Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) return;

  // The cmp immediate encodes the delta to the start of the inlined smi
  // check; delta == 0 signals that nothing was inlined.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
  if (delta == 0) return;

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
           cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // Patch the conditional "jump if (not) smi" site:
  //   cmp rx, rx; b eq/ne  <->  tst rx, #kSmiTagMask; b ne/eq
  CodePatcher patcher(patch_address, 2);
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);  // Activate a "jump if not smi" site.
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);  // Activate a "jump if smi" site.
  }
}
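// A sketch of the two states toggled by the patching above (exact operands
// depend on the emitted site):
//
//   cmp rx, rx                   tst rx, #kSmiTagMask
//   b   eq/ne, <target>   <->    b   ne/eq, <target>
//
// With the self-compare the branch outcome is decided statically (cmp rx, rx
// always sets Z), while the tst form actually dispatches on the smi tag bit;
// EmitCondition flips eq/ne so the branch keeps its intended sense. This
// lets the IC enable or disable the inlined smi fast path in place without
// relocating any code.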