#if V8_TARGET_ARCH_MIPS64

#if defined(USE_SIMULATOR)
// Under the simulator the generated code cannot be called as a host function
// pointer, so a C++ trampoline forwards the call through Simulator::CallFP.
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);

// From UnaryMathFunction CreateExpFunction(): fall back to std::exp when fast
// math is disabled or an executable buffer cannot be allocated.
  if (!FLAG_fast_math) return &std::exp;
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ Move(input, a0, a1);
  __ Push(temp3, temp2, temp1);
  MathExpGenerator::EmitMathExp(
      &masm, input, result, double_scratch1, double_scratch2,
      temp1, temp2, temp3);
  __ Pop(temp3, temp2, temp1);
  __ Move(v0, v1, result);

  DCHECK(!RelocInfo::RequiresRelocation(desc));

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
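
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the excerpt above moves
// the double argument from a0/a1 into an FPU register, emits EmitMathExp, and
// moves the result back to v0/v1, so the generated stub behaves like a plain
// double(double) C function. A caller-side view, assuming only the
// declarations visible in this listing (UnaryMathFunction, CreateExpFunction);
// the wrapper name is illustrative.
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateExpFunction();

double FastExp(double x) {
  // The returned pointer is either generated code or &std::exp, so it can be
  // cached and called without distinguishing the two cases.
  static const UnaryMathFunction fast_exp = CreateExpFunction();
  return fast_exp(x);
}
// ---------------------------------------------------------------------------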

#if defined(V8_HOST_ARCH_MIPS)
// Optimized byte memcpy with prefetching (excerpt).
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Label lastb, unaligned, aligned, chkw,
      loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
      leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
      ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

  DCHECK(pref_chunk < max_pref_size);
  uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
         pref_chunk * 4 >= max_pref_size);

  // If the copy is shorter than two load/store chunks, copy byte by byte.
  __ slti(a6, a2, 2 * loadstore_chunk);
  __ bne(a6, zero_reg, &lastb);
  __ andi(t8, t8, loadstore_chunk - 1);
  __ bne(t8, zero_reg, &unaligned);
  __ subu(a3, zero_reg, a0);

  __ andi(a3, a3, loadstore_chunk - 1);
  __ andi(t8, a2, 0x3f);
  __ beq(a2, t8, &chkw);
  __ Subu(t9, a4, pref_limit);
  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
  __ addiu(a0, a0, 16 * loadstore_chunk);
  __ bne(a0, a3, &loop16w);
  __ addiu(a1, a1, 16 * loadstore_chunk);

  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ andi(t8, a2, 0x1f);
  __ beq(a2, t8, &chk1w);
  __ addiu(a1, a1, 8 * loadstore_chunk);
  __ addiu(a0, a0, 8 * loadstore_chunk);
  __ andi(a2, t8, loadstore_chunk - 1);
  __ beq(a2, t8, &lastb);

  __ bind(&wordCopy_loop);
  __ addiu(a0, a0, loadstore_chunk);
  __ addiu(a1, a1, loadstore_chunk);
  __ bne(a0, a3, &wordCopy_loop);

  __ Branch(&leave, le, a2, Operand(zero_reg));
  __ bne(a0, a3, &lastbloop);
  __ andi(a3, a3, loadstore_chunk - 1);
  __ beq(a3, zero_reg, &ua_chk16w);
  __ andi(t8, a2, 0x3f);
  __ beq(a2, t8, &ua_chkw);
  __ Subu(t9, a4, pref_limit);

  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
  __ bind(&ua_loop16w);
  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

  __ bind(&ua_skip_pref);

  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

  __ addiu(a0, a0, 16 * loadstore_chunk);
  __ bne(a0, a3, &ua_loop16w);
  __ addiu(a1, a1, 16 * loadstore_chunk);

  __ andi(t8, a2, 0x1f);
  __ beq(a2, t8, &ua_chk1w);
  __ addiu(a1, a1, 8 * loadstore_chunk);
  __ addiu(a0, a0, 8 * loadstore_chunk);
  __ andi(a2, t8, loadstore_chunk - 1);
  __ beq(a2, t8, &ua_smallCopy);

  __ bind(&ua_wordCopy_loop);
  __ addiu(a0, a0, loadstore_chunk);
  __ addiu(a1, a1, loadstore_chunk);
  __ bne(a0, a3, &ua_wordCopy_loop);

  __ bind(&ua_smallCopy);
  __ beq(a2, zero_reg, &leave);
  __ bind(&ua_smallCopy_loop);
  __ bne(a0, a3, &ua_smallCopy_loop);

  DCHECK(!RelocInfo::RequiresRelocation(desc));
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
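
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a rough C++ analogue of
// the copy structure emitted above -- copy bytes until the destination is
// word-aligned, copy 16-word blocks while prefetching the source ahead, then
// word-sized pieces, then the byte tail. The chunk size and prefetch distance
// below are placeholders, not the values baked into the generated code.
#include <cstddef>
#include <cstdint>
#include <cstring>

void MemCopyUint8Sketch(uint8_t* dst, const uint8_t* src, size_t count) {
  const size_t kWord = sizeof(uint32_t);  // loadstore_chunk analogue
  const size_t kPrefetchAhead = 128;      // pref_chunk analogue (placeholder)

  // Head: byte copy until dst is word-aligned.
  while (count > 0 && (reinterpret_cast<uintptr_t>(dst) & (kWord - 1)) != 0) {
    *dst++ = *src++;
    --count;
  }
  // Main loop: 16 words per iteration, prefetching the source ahead.
  while (count >= 16 * kWord) {
    __builtin_prefetch(src + kPrefetchAhead);
    for (int i = 0; i < 16; ++i) {
      uint32_t w;
      std::memcpy(&w, src, kWord);  // src may still be unaligned
      std::memcpy(dst, &w, kWord);
      src += kWord;
      dst += kWord;
    }
    count -= 16 * kWord;
  }
  // Word-sized tail.
  while (count >= kWord) {
    uint32_t w;
    std::memcpy(&w, src, kWord);
    std::memcpy(dst, &w, kWord);
    src += kWord;
    dst += kWord;
    count -= kWord;
  }
  // Byte tail.
  while (count > 0) {
    *dst++ = *src++;
    --count;
  }
}
// ---------------------------------------------------------------------------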

// --- CreateSqrtFunction (excerpt) ---
#if defined(USE_SIMULATOR)
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(f12);
  __ MovToFloatResult(f0);

  DCHECK(!RelocInfo::RequiresRelocation(desc));
  return FUNCTION_CAST<UnaryMathFunction>(buffer);

// --- StubRuntimeCallHelper::BeforeCall / AfterCall (excerpt): frame
//     bookkeeping around runtime calls ---
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);

  DCHECK(masm->has_frame());
  masm->set_has_frame(false);

#define __ ACCESS_MASM(masm)

// --- ElementsTransitionGenerator::GenerateMapChangeElementsTransition
//     (excerpt) ---
    MacroAssembler* masm,
    Label* allocation_memento_found) {
  Register scratch_elements = a4;
  __ JumpIfJSArrayHasAllocationMemento(
      receiver, scratch_elements, allocation_memento_found);
  __ RecordWriteField(receiver,

// --- ElementsTransitionGenerator::GenerateSmiToDouble (excerpt) ---
    MacroAssembler* masm,
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = a4;
  Register length = a5;
  Register array_end = array;
  Register scratch1 = target_map;
  Register scratch2 = t1;
  Register scratch3 = a7;
                     elements, length, array, scratch2));
  Register scratch = t2;
  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);

  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ RecordWriteField(receiver,
  __ RecordWriteField(receiver,
  __ Daddu(scratch1, elements,
  __ Daddu(array_end, array_end, scratch3);

  Register hole_lower = elements;
  Register hole_upper = length;

  __ bind(&only_change_map);
  __ RecordWriteField(receiver,

  __ bind(&gc_required);

  __ JumpIfNotSmi(scratch2, &convert_hole);
  __ SmiUntag(scratch2);
  __ mtc1(scratch2, f0);

  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ Or(scratch2, scratch2, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));

  __ Branch(&loop, lt, scratch3, Operand(array_end));
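
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): what the smi-to-double
// element loop above does per element, in plain C++. A smi is untagged and
// stored as a double; the hole is stored as a canonical "hole NaN" bit pattern
// (hole_nan_bits stands in for kHoleNanUpper32/kHoleNanLower32; the value used
// here is a placeholder, not the real constant).
#include <cstdint>
#include <cstring>

// Placeholder untag for 64-bit V8, where the smi payload sits in the upper
// 32 bits of the tagged word.
inline int32_t SmiUntagSketch(int64_t tagged) {
  return static_cast<int32_t>(tagged >> 32);
}

inline void ConvertSmiElement(int64_t element, bool is_smi, double* out,
                              uint64_t hole_nan_bits = 0x7FF7FFFFFFF7FFFFull) {
  if (is_smi) {
    *out = static_cast<double>(SmiUntagSketch(element));  // untag, convert
  } else {
    std::memcpy(out, &hole_nan_bits, sizeof(hole_nan_bits));  // write the hole
  }
}
// ---------------------------------------------------------------------------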

// --- ElementsTransitionGenerator::GenerateDoubleToObject (excerpt) ---
    MacroAssembler* masm,
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = a4;
  Register length = a5;
  Register scratch = t1;
                     elements, array, length, scratch));
  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);

  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  Register array_size = value;
  Register allocate_scratch = target_map;
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);

  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Daddu(src_elements, src_elements,
  __ Daddu(dst_end, dst_elements, dst_end);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  __ bind(&gc_required);
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  Register upper_bits = key;
  Register heap_number = receiver;
  Register scratch2 = value;
  Register scratch3 = t2;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
  __ mov(scratch2, dst_elements);
  __ RecordWrite(array,

  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);

  __ Branch(&loop, lt, dst_elements, Operand(dst_end));

  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
  __ RecordWriteField(receiver,

  __ bind(&only_change_map);
  __ RecordWriteField(receiver,
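
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the reverse direction
// of the loop above. A double element whose bits equal the hole NaN stays the
// hole; any other value is boxed into a freshly allocated heap number.
// ObjectSketch is a stand-in for a tagged V8 object, not V8 API.
#include <cstdint>
#include <cstring>

struct ObjectSketch {
  bool is_hole;
  double value;  // meaningful only when !is_hole
};

ObjectSketch ConvertDoubleElement(double element, uint64_t hole_nan_bits) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof(bits));
  if (bits == hole_nan_bits) return ObjectSketch{true, 0.0};  // keep the hole
  return ObjectSketch{false, element};  // heap-number analogue
}
// ---------------------------------------------------------------------------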

// --- StringCharLoadGenerator::Generate (excerpt): load a character from a
//     string, handling sliced, cons, external and sequential strings ---
    Label* call_runtime) {
  Label check_sequential;
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Sliced string: add the slice offset to the index.
  Label indirect_string_loaded;
  __ dsra32(at, result, 0);
  __ Daddu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Cons string: only flattened cons strings (second half empty) are handled
  // here; anything else goes to the runtime.
  __ bind(&cons_string);
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));

  __ bind(&indirect_string_loaded);

  Label external_string, check_encoding;
  __ bind(&check_sequential);
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  __ jmp(&check_encoding);

  __ bind(&external_string);
  if (FLAG_debug_code) {
    __ Assert(eq, kExternalStringExpectedButNotFound,
              at, Operand(zero_reg));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));

  // Dispatch on the encoding: two-byte strings index by 2 * index, one-byte
  // strings by index.
  Label one_byte, done;
  __ bind(&check_encoding);
  __ Branch(&one_byte, ne, at, Operand(zero_reg));
  __ dsll(at, index, 1);
  __ Daddu(at, string, at);

  __ Daddu(at, string, index);
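
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the final sequential-
// string load that the addressing code above prepares -- a two-byte string is
// indexed by 2 * index, a one-byte string by index. Plain C++, not V8 API.
#include <cstdint>
#include <cstring>

uint16_t LoadStringChar(const uint8_t* chars, bool is_two_byte, int index) {
  if (is_two_byte) {
    uint16_t ch;
    std::memcpy(&ch, chars + 2 * index, sizeof(ch));  // dsll(at, index, 1)
    return ch;
  }
  return chars[index];  // one-byte: Daddu(at, string, index)
}
// ---------------------------------------------------------------------------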

// --- Math exp code generation (excerpt): ExpConstant() addresses the table of
//     exp constants; MathExpGenerator::EmitMathExp() below uses it for a
//     table-driven approximation of exp(x) ---
static MemOperand ExpConstant(int index, Register base) {

  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());

  Label zero, infinity, done;
  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ FmoveLow(temp2, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
  __ mul_d(result, result, double_scratch2);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);
  DCHECK(*reinterpret_cast<double*>(
             ExternalReference::math_exp_constants(8).address()) == 1);
  __ Move(double_scratch2, 1);
  __ add_d(result, result, double_scratch2);
  __ dsrl(temp1, temp2, 11);
  __ Ext(temp2, temp2, 0, 11);
  // Add the IEEE-754 exponent bias (0x3ff) so temp1 holds the biased exponent
  // of the power-of-two scale factor.
  __ Daddu(temp1, temp1, Operand(0x3ff));

  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ dsll(at, temp2, 3);
  __ Daddu(temp3, temp3, Operand(at));
  if (temp2.code() < temp3.code()) {
    __ dsll(at, temp1, 20);
    __ Or(temp1, temp3, at);
    __ Move(double_scratch1, temp2, temp1);
    __ dsll(at, temp1, 20);
    __ Or(temp1, temp2, at);
    __ Move(double_scratch1, temp3, temp1);
  __ mul_d(result, result, double_scratch1);
  __ BranchShort(&done);

  __ BranchShort(&done);

  __ ldc1(result, ExpConstant(2, temp3));
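
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the scaling step above
// adds the IEEE-754 bias (0x3ff) to the integer exponent and shifts it into
// the high word of a double, i.e. it materializes 2^n from its bit pattern.
// Plain C++ equivalent of that idea:
#include <cstdint>
#include <cstring>

double PowerOfTwoFromBits(int n) {
  // Valid for normal doubles, i.e. -1022 <= n <= 1023.
  uint64_t bits = static_cast<uint64_t>(n + 1023) << 52;  // exponent field only
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;  // == 2^n
}
// ---------------------------------------------------------------------------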

// --- Code aging (excerpt): the young code sequence is patched to call a
//     code-age stub once the code gets old ---
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;

    SmartPointer<CodePatcher> patcher(
    patcher->masm()->Push(ra, fp, cp, a1);
    patcher->masm()->Daddu(

bool CodeAgingHelper::IsOld(byte* candidate) const {

  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));

  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
        Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
    patcher.masm()->nop();
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();
    patcher.masm()->nop();
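
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): an "old" sequence is
// one whose first word has been patched to kCodeAgePatchFirstInstruction, so
// the age check can be a single word compare. A sketch of that idea, assuming
// the first-instruction compare is all the check needs:
#include <cstdint>
#include <cstring>

bool LooksOldSketch(const uint8_t* sequence) {
  uint32_t first_instruction;
  std::memcpy(&first_instruction, sequence, sizeof(first_instruction));
  return first_instruction == 0x00010180;  // kCodeAgePatchFirstInstruction
}
// ---------------------------------------------------------------------------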