#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;

double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif
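// CreateExpFunction() below returns a UnaryMathFunction (double (*)(double)):
// either &std::exp, or a pointer to freshly generated ARM machine code when
// --fast_math is enabled. A minimal usage sketch (hypothetical call site, for
// illustration only):
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // computes e^1.0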
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  DwVfpRegister input = d0;
  DwVfpRegister result = d1;
  DwVfpRegister double_scratch1 = d2;
  DwVfpRegister double_scratch2 = d3;
  Register temp1 = r4;
  Register temp2 = r5;
  Register temp3 = r6;
  if (masm.use_eabi_hardfloat()) {
    // With the hard-float ABI the argument is already in d0; the soft-float
    // path moves it there from r0/r1.
  }
  __ Push(temp3, temp2, temp1);
  MathExpGenerator::EmitMathExp(
      &masm, input, result, double_scratch1, double_scratch2,
      temp1, temp2, temp3);
  __ Pop(temp3, temp2, temp1);
  if (masm.use_eabi_hardfloat()) {
    // Move the result from d1 into the ABI return register.
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
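// The memcpy replacements below are only generated when V8 runs on real ARM
// hardware. CreateMemCopyUint8Function emits a NEON-based byte copier that
// dispatches on the remaining length (256/128/64/32/16/8-byte blocks) and
// falls back to the C++ `stub` argument when no code buffer is available.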
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
  Label size_less_than_8;

  __ cmp(chars, Operand(8));
  __ b(lt, &size_less_than_8);
  __ cmp(chars, Operand(32));
  __ cmp(chars, Operand(64));
  __ cmp(chars, Operand(128));
  __ cmp(chars, Operand(256));
  __ sub(chars, chars, Operand(256));
  __ sub(chars, chars, Operand(64), SetCC);
  __ add(chars, chars, Operand(256));
  __ sub(chars, chars, Operand(128));
  __ cmp(chars, Operand(64));
  __ sub(chars, chars, Operand(64));
  __ cmp(chars, Operand(32));
  __ sub(chars, chars, Operand(32));
  __ cmp(chars, Operand(16));
  __ b(le, &_16_or_less);
  __ sub(chars, chars, Operand(16));
  __ bind(&_16_or_less);
  __ cmp(chars, Operand(8));
  __ b(le, &_8_or_less);
  __ sub(chars, chars, Operand(8));

  __ bind(&_8_or_less);
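  // Tail handling trick: chars <= 8 here. `rsb chars, chars, #8` computes
  // 8 - chars, and backing src/dest up by that amount lets one full 8-byte
  // NEON load/store finish the copy; the first few bytes are simply
  // rewritten with the values they already hold.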
  __ rsb(chars, chars, Operand(8));
  __ sub(src, src, Operand(chars));
  __ sub(dest, dest, Operand(chars));
  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
  __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
  __ bind(&size_less_than_8);

  __ bic(temp1, chars, Operand(0x3), SetCC);
  __ bic(temp2, chars, Operand(0x3), SetCC);
  __ add(temp2, dest, temp2);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
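  // Copying the 0-3 remaining bytes without an explicit compare: LSL #31
  // with SetCC moves bit 1 of `chars` into the carry flag and bit 0 into the
  // sign flag, so the follow-up loads/stores can be predicated on cs (two
  // bytes left) and mi (one byte left).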
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
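// CreateMemCopyUint16Uint8Function emits a widening copy (e.g. for one-byte
// to two-byte string data): each source byte is zero-extended to a
// destination halfword, which is why `dest` advances twice as fast as `src`
// (note the LSL 1 on the destination offsets below).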
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  __ bic(temp, chars, Operand(0x7));
  __ sub(chars, chars, Operand(temp));
  __ add(temp, dest, Operand(temp, LSL, 1));
  __ rsb(chars, chars, Operand(8));
  __ sub(src, src, Operand(chars));
  __ sub(dest, dest, Operand(chars, LSL, 1));
  __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
  __ vmovl(NeonU8, q0, d0);
  __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
  __ bic(temp2, chars, Operand(0x3));
  __ add(temp2, dest, Operand(temp2, LSL, 1));
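  // ARMv6-style widening without NEON: uxtb16 with ROR #0 zero-extends bytes
  // 0 and 2 of a word into two halfwords, ROR #8 does the same for bytes 1
  // and 3, and pkhbt/pkhtb re-interleave the halfwords into the correct
  // little-endian order before they are stored.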
  __ uxtb16(temp3, Operand(temp1, ROR, 0));
  __ uxtb16(temp4, Operand(temp1, ROR, 8));
  __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
  __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
  __ add(dest, dest, Operand(8));
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  __ uxtb(temp3, Operand(temp1, ROR, 8));
  __ mov(temp3, Operand(temp3, LSL, 16));
  __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
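// CreateSqrtFunction is the simplest generator here: a single vsqrt between
// the ABI-aware parameter/result moves, with &std::sqrt as the fallback.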
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
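// StubRuntimeCallHelper brackets runtime calls made from stubs: BeforeCall
// sets up an internal frame and records that fact on the MacroAssembler,
// AfterCall tears it down again, so frame-requiring operations can DCHECK
// has_frame().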
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}

void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
#define __ ACCESS_MASM(masm)
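// ElementsTransitionGenerator: in-place transitions between JSArray elements
// kinds. The simplest case, below, only swaps the map: the existing backing
// store is already a valid representation for the target kind, so it
// suffices to store the new map and emit a write barrier for it.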
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;

  __ JumpIfJSArrayHasAllocationMemento(
      receiver, scratch_elements, allocation_memento_found);

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map,
                      scratch_elements, kLRHasNotBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
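// GenerateSmiToDouble allocates a FixedDoubleArray and walks the old
// FixedArray, converting each smi to a double. Holes (the-hole sentinels)
// are written as the canonical hole NaN bit pattern (kHoleNanUpper32 /
// kHoleNanLower32) rather than as converted numbers.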
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  Register scratch1 = target_map;
  Register scratch2 = r9;

  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));
  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);

  // Empty fixed arrays only require a map transition.
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ mov(lr, Operand(length, LSL, 2));
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ RecordWriteField(receiver,
  __ RecordWriteField(receiver,
  __ add(array_end, scratch2, Operand(length, LSL, 2));
  Register hole_lower = elements;
  Register hole_upper = length;
  __ bind(&only_change_map);
  __ RecordWriteField(receiver,

  __ bind(&gc_required);
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ cmp(scratch2, array_end);
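// GenerateDoubleToObject is the reverse transition: it allocates a new
// FixedArray and boxes every double from the FixedDoubleArray into a fresh
// HeapNumber, turning hole NaNs back into the-hole sentinels.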
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));
  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);

  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ Push(target_map, receiver, key, value);
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  Register upper_bits = key;
  __ b(eq, &convert_hole);
  // Non-hole double, copy the value into a newly allocated heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ RecordWrite(array,
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);

  __ cmp(dst_elements, dst_end);
  __ Pop(target_map, receiver, key, value);
  __ RecordWriteField(receiver,

  __ bind(&only_change_map);
  __ RecordWriteField(receiver,
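// StringCharLoadGenerator::Generate loads the character at `index` from any
// string representation: it first unwraps indirect strings (slices adjust
// the index by their offset; cons strings are only handled when flat, i.e.
// their second part is the empty string), then dispatches between sequential
// and external strings, and finally on one-byte vs. two-byte encoding.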
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // If the string is not indirect, it can only be sequential or external.
  Label check_sequential;
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ b(eq, &cons_string);
  Label indirect_string_loaded;
  // Handle slices: adjust the index by the slice offset and load the parent.
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);
  // Handle cons strings: only flat cons strings, whose second part is the
  // empty string, are handled here; otherwise go to the runtime.
  __ bind(&cons_string);
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  __ bind(&indirect_string_loaded);

  // Distinguish sequential strings from external strings.
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  __ b(ne, &external_string);

  __ jmp(&check_encoding);
  __ bind(&external_string);
  if (FLAG_debug_code) {
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  __ b(ne, call_runtime);
  Label one_byte, done;
  __ bind(&check_encoding);
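// MathExpGenerator::EmitMathExp computes exp(x) by table-driven range
// reduction, roughly: x * (2048 / ln 2) is split into an integer part m and
// an 11-bit index j, so that exp(x) = 2^m * T[j] * exp(r), with T[j] =
// 2^(j/2048) taken from math_exp_log_table() and exp(r) approximated by a
// short polynomial. Inputs at or below ExpConstant(0) produce 0; inputs at
// or above ExpConstant(1) produce +infinity (the `zero` and `infinity`
// labels below).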
static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());
  Label zero, infinity, done;
  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);
  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  DCHECK(*reinterpret_cast<double*>(
             ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));
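  // temp2 >> 11 is the integer part of x * 2048/ln(2); adding the 0x3ff
  // exponent bias turns it into the exponent field of the 2^m scale factor,
  // while the low 11 bits left in temp2 index the 2048-entry table.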
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
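  // ldm writes the lower-numbered register from the lower address, so which
  // register received the table entry's low word depends on how temp2 and
  // temp3 happen to be numbered; both orders are handled below.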
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}
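// Code aging support. Young code begins with the normal prologue emitted by
// CodeAgingHelper below. To mark code as old, the prologue is overwritten
// with a sequence that loads the sequence's own address into r0 and jumps to
// a code age stub; kCodeAgePatchFirstInstruction (0xe24f0008) encodes the
// first instruction of that patch, `sub r0, pc, #8`.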
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  SmartPointer<CodePatcher> patcher(
      new CodePatcher(young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize));
  patcher->masm()->PushFixedFrame(r1);
  patcher->masm()->nop(ip.code());
  patcher->masm()->add(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age, MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->emit_code_stub_address(stub);
  }
}