#if V8_TARGET_ARCH_MIPS

#if defined(USE_SIMULATOR)
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);
}
#endif
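// Under USE_SIMULATOR the generated stub cannot be called like a native
// function, so the raw buffer address is kept in fast_exp_mips_machine_code
// and fast_exp_simulator() routes the call through Simulator::CallFP.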
  if (!FLAG_fast_math) return &std::exp;
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
    __ MovFromFloatParameter(input);
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    __ MovToFloatResult(result);

  DCHECK(!RelocInfo::RequiresRelocation(desc));
#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
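// Illustrative usage sketch (not part of this file): CreateExpFunction()
// returns either the generated stub or &std::exp, so a caller holds a plain
// UnaryMathFunction pointer; the variable name below is hypothetical.
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // Falls back to std::exp if codegen failed.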
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
    defined(_MIPS_ARCH_MIPS32RX)
  return stub;
#else
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
    DCHECK(pref_chunk < max_pref_size);
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);
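    // Rationale for the checks above: kPrefHintPrepareForStore allocates and
    // zeroes whole cache lines on the destination without reading them first,
    // so a store prefetch must never cover memory past the end of the buffer.
    // Stores prefetch at most 5 chunks ahead and one prefetch spans at most
    // max_pref_size bytes, hence pref_limit = 5 * pref_chunk + max_pref_size
    // bytes before the end is the last safe point to issue one.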
    __ slti(t2, a2, 2 * loadstore_chunk);
    __ bne(t2, zero_reg, &lastb);  // Fewer than 8 bytes: copy byte by byte.

    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is the a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.

    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64 bytes remain?
    __ Subu(t9, t0, pref_limit);  // t9: last safe address for a store prefetch.
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));

    // Inside loop16w: keep prefetching ahead of both source and destination.
    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
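    // loop16w above copies 16 words (64 bytes) per iteration; the bne/addiu
    // pair uses the branch delay slot to advance the source pointer. skip_pref
    // is taken once a0 passes t9 so that PrepareForStore prefetches never
    // reach past the end of the destination.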
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32 bytes remain?

    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ addiu(a0, a0, 8 * loadstore_chunk);

    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);

    __ bind(&wordCopy_loop);
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ bne(a0, a3, &lastbloop);

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &ua_chk16w);
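    // Unaligned path: the ua_* labels mirror the structure above, but each
    // source word is assembled with an lwr/lwl pair, so the source pointer
    // does not need to be word-aligned.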
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);

    __ Subu(t9, t0, pref_limit);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    __ bind(&ua_loop16w);
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
    __ bind(&ua_skip_pref);
    // The same prefetch/skip sequence appears a second time because the
    // unaligned loop body is generated separately for little- and big-endian
    // targets.
    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
    __ bind(&ua_skip_pref);
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.

    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &ua_chk1w);

    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ addiu(a0, a0, 8 * loadstore_chunk);

    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);

    __ bind(&ua_wordCopy_loop);
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);

    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);

    __ bind(&ua_smallCopy_loop);
    __ bne(a0, a3, &ua_smallCopy_loop);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif  // defined(V8_HOST_ARCH_MIPS)
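// Illustrative usage sketch (not part of this file): the generated routine
// and the C++ fallback `stub` share the MemCopyUint8Function signature, so
// callers keep a single function pointer. MemCopyUint8Wrapper is assumed here
// to be the plain C++ fallback supplied by the caller.
//
//   MemCopyUint8Function copy = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
//   copy(dest, src, size_in_bytes);  // dest in a0, src in a1, size in a2.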
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);  // Square root of the double argument.
  __ MovToFloatResult(f0);

  DCHECK(!RelocInfo::RequiresRelocation(desc));
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);

  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
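// These has_frame() toggles are the bodies of StubRuntimeCallHelper::
// BeforeCall and AfterCall: BeforeCall enters an internal frame so a stub
// without a frame can call into the runtime, and AfterCall leaves it again.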
#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = t0;
  __ JumpIfJSArrayHasAllocationMemento(
      receiver, scratch_elements, allocation_memento_found);
  // Write barrier for the receiver's new map.
  __ RecordWriteField(receiver,
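// GenerateSmiToDouble transitions a JSObject's backing store from
// FAST_SMI_ELEMENTS to FAST_DOUBLE_ELEMENTS: it allocates a FixedDoubleArray,
// converts each smi element to an unboxed double, and writes the canonical
// hole NaN (kHoleNanUpper32 / kHoleNanLower32) for holes; allocation failure
// escapes through gc_required / fail.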
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = t0;
  Register length = t1;
  Register array = t2;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = t5;
  Register scratch3 = t3;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  Register scratch = t6;

  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  // Allocate new FixedDoubleArray (length is a smi; length << 2 is the
  // payload size in bytes).
  __ sll(scratch, length, 2);
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);

  // Write barrier for the receiver's new map.
  __ RecordWriteField(receiver,
  // Write barrier for the receiver's new elements pointer.
  __ RecordWriteField(receiver,

  // Prepare for conversion loop.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(at, length, 2);
  __ Addu(array_end, scratch3, at);

  Register hole_lower = elements;
  Register hole_upper = length;
  __ bind(&only_change_map);
  __ RecordWriteField(receiver,

  // Call into runtime if GC is required.
  __ bind(&gc_required);

  // Convert and copy elements (loop body).
  __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);

  // Normal smi: convert to double and store.
  __ mtc1(scratch2, f0);

  // Hole found: store the canonical hole NaN as two words.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Re-tag the value so it can be compared against the hole constant.
    __ Or(scratch2, scratch2, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
  }
  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));

  __ Branch(&loop, lt, scratch3, Operand(array_end));
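// GenerateDoubleToObject performs the reverse transition: each unboxed double
// in the FixedDoubleArray is boxed into a freshly allocated HeapNumber and
// stored into a new FixedArray, while hole NaNs become the-hole again.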
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = t0;
  Register array = t2;
  Register length = t1;
  Register scratch = t5;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);

  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ MultiPush(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  // Allocate the destination FixedArray.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ sll(array_size, length, 1);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);

  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Addu(src_elements, src_elements, Operand(
      FixedDoubleArray::kHeaderSize - kHeapObjectTag
      + Register::kExponentOffset));
  __ sll(dst_end, dst_end, 1);
  __ Addu(dst_end, dst_elements, dst_end);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  // Conversion loop.
  Register upper_bits = key;

  // Non-hole double: box the value into a new HeapNumber.
  Register heap_number = receiver;
  Register scratch2 = value;
  Register scratch3 = t6;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
      - Register::kExponentOffset)));
  __ mov(scratch2, dst_elements);
  __ Addu(dst_elements, dst_elements, kIntSize);
  __ RecordWrite(array,

  // Hole: store the-hole sentinel in the destination FixedArray.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ Addu(dst_elements, dst_elements, kIntSize);

  __ Branch(&loop, lt, dst_elements, Operand(dst_end));

  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
  // Replace the receiver's backing store with the filled FixedArray.
  __ RecordWriteField(receiver,

  __ bind(&only_change_map);
  // Update the receiver's map.
  __ RecordWriteField(receiver,
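// StringCharLoadGenerator::Generate loads the character at `index` of
// `string` into `result`. It first resolves indirect representations (slices
// adjust the index and continue with the parent; cons strings are handled
// only when the second part is the empty string), then distinguishes
// sequential from external strings and one-byte from two-byte encodings;
// anything it cannot handle inline bails out to `call_runtime`.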
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Indirect strings (slices and cons strings) need special handling.
  Label check_sequential;
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices: add the slice offset to the index and continue with the
  // parent string.
  Label indirect_string_loaded;
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings: only flat cons strings (second part is the empty
  // string) are handled inline; otherwise bail out to the runtime.
  __ bind(&cons_string);
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));

  __ bind(&indirect_string_loaded);

  // Distinguish sequential and external strings; only these two
  // representations can reach this point.
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    __ Assert(eq, kExternalStringExpectedButNotFound,
              at, Operand(zero_reg));
  }
  // Short external strings cannot be read inline; go to the runtime.
  __ Branch(call_runtime, ne, at, Operand(zero_reg));

  Label one_byte, done;
  __ bind(&check_encoding);
  __ Branch(&one_byte, ne, at, Operand(zero_reg));
  // Two-byte string: each character is two bytes, so scale the index by 2.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  // One-byte string.
  __ Addu(at, string, index);
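// MathExpGenerator::EmitMathExp computes exp(input) using the constant table
// set up by ExternalReference::InitializeMathExpData(); ExpConstant(i, base)
// below simply addresses the i-th double in that table. Roughly: inputs below
// the underflow threshold (constant 0) yield 0 and inputs above the overflow
// threshold (constant 1) yield +infinity (constant 2); otherwise the argument
// is split into an integral multiple of ln(2)/2048 plus a small remainder
// using a rounding trick (constants 3 and 4), exp of the remainder is
// approximated by a short polynomial (constants 5-8), and the result is
// scaled by an entry of the power-of-two table (math_exp_log_table) together
// with an exponent adjustment built from the high bits.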
static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.
  Label zero, infinity, done;

  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ BranchF(&zero, NULL, ge, double_scratch1, input);

  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ BranchF(&infinity, NULL, ge, input, double_scratch2);

  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ FmoveLow(temp2, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
  __ mul_d(result, result, double_scratch2);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);

  DCHECK(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ Move(double_scratch2, 1);
  __ add_d(result, result, double_scratch2);
  __ srl(temp1, temp2, 11);
  __ Ext(temp2, temp2, 0, 11);
  __ Addu(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ sll(at, temp2, 3);
  __ Addu(temp3, temp3, Operand(at));
  __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
  __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
  if (temp2.code() < temp3.code()) {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp3, at);
    __ Move(double_scratch1, temp2, temp1);
  } else {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp2, at);
    __ Move(double_scratch1, temp3, temp1);
  }
  __ mul_d(result, result, double_scratch1);
  __ BranchShort(&done);

  __ bind(&zero);
  __ Move(result, kDoubleRegZero);
  __ BranchShort(&done);

  // Overflow: return +infinity (constant index 2).
  __ bind(&infinity);
  __ ldc1(result, ExpConstant(2, temp3));
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;


CodeAgingHelper::CodeAgingHelper() {
  // Emit the young (normal) prologue sequence into young_sequence_.
  SmartPointer<CodePatcher> patcher(
  patcher->masm()->Push(ra, fp, cp, a1);
  patcher->masm()->Addu(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


bool CodeAgingHelper::IsOld(byte* candidate) const {


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));


void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age, MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);

  // Patch the prologue to call the code age stub instead.
  patcher.masm()->li(
      t9,
      Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
      CONSTANT_SIZE);
  patcher.masm()->nop();  // Prevent jalr to jal optimization.
  patcher.masm()->jalr(t9, a0);
  patcher.masm()->nop();  // Branch delay slot nop.
  patcher.masm()->nop();  // Pad the empty space.
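// Code aging sketch: a "young" code object starts with the normal prologue
// emitted by CodeAgingHelper (Push(ra, fp, cp, a1) followed by the frame
// setup). PatchPlatformCodeAge either restores that young sequence or
// overwrites it with a load of the age stub's entry point into t9 and a jalr
// through it, so aged code first passes through the stub;
// IsYoungSequence/IsOld inspect the first instruction to tell the two
// sequences apart.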