#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
void RelocInfo::set_target_address(Address target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
  // ...
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  list_ |= (1UL << code);   // CPURegList::Combine(int code)
  list_ &= ~(1UL << code);  // CPURegList::Remove(int code)
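// Illustrative usage sketch (not from the original file): CPURegList keeps the
// register set as a 64-bit mask with one bit per register code, so Combine()
// and Remove() reduce to single bit operations. Assuming x0..x2 carry the
// usual codes 0..2:
//
//   CPURegList regs(CPURegister::kRegister, kXRegSizeInBits, 0, 1);  // {x0, x1}
//   regs.Combine(2);  // list_ |= (1UL << 2)   -> {x0, x1, x2}
//   regs.Remove(0);   // list_ &= ~(1UL << 0)  -> {x1, x2}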
static const bool kIsIntType = false;
  return reinterpret_cast<int64_t>(t);
  // ...
  return reinterpret_cast<int64_t>(t.address());
// Operand::Operand(T t, RelocInfo::Mode rmode):
    : immediate_(t, rmode),
// Operand::Operand(Register reg, Shift shift, unsigned shift_amount):
      shift_amount_(shift_amount) {
// Operand::Operand(Register reg, Extend extend, unsigned shift_amount):
      shift_amount_(shift_amount) {
MemOperand::MemOperand()
    : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
      // ...

MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
      // ...

MemOperand::MemOperand(Register base, Register regoffset, Extend extend,
                       unsigned shift_amount)
    : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
      shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  // ...

MemOperand::MemOperand(Register base, Register regoffset, Shift shift,
                       unsigned shift_amount)
    : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
      // ...

MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base), addrmode_(addrmode) {
  // ...
  if (offset.IsImmediate()) {
    // ...
  } else if (offset.IsShiftedRegister()) {
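// Usage sketch (illustrative): the constructors above map onto the ARM64
// addressing modes. Assuming the usual register names:
//
//   MemOperand(x0, 8);             // immediate offset:  [x0, #8]
//   MemOperand(x0, 8, PreIndex);   // pre-indexed:       [x0, #8]!
//   MemOperand(x0, 8, PostIndex);  // post-indexed:      [x0], #8
//   MemOperand(x0, x1, LSL, 2);    // register offset:   [x0, x1, lsl #2]
//   MemOperand(x0, w1, SXTW);      // extended register: [x0, w1, sxtw]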
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsMovz()) {
    // Verify the movz/movk/movk/blr sequence.
    DCHECK(instr->following(1)->IsMovk());
    DCHECK(instr->following(2)->IsMovk());
    DCHECK(instr->following(3)->IsBranchAndLinkToRegister());
  } else {
    // Verify the ldr-literal/blr sequence.
    DCHECK(instr->IsLdrLiteralX());
    DCHECK(instr->following(1)->IsBranchAndLinkToRegister());
  }
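// For reference, the two call sequences distinguished above are (assuming the
// usual ip0 scratch register, as in IsPatchedReturnSequence() below):
//
//   Without relocation:            With relocation (constant pool):
//     movz  ip0, #..., lsl #0        ldr  ip0, <pc-relative literal>
//     movk  ip0, #..., lsl #16       blr  ip0
//     movk  ip0, #..., lsl #32
//     blr   ip0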
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
int RelocInfo::target_address_size() {

Address RelocInfo::target_address() {

Address RelocInfo::target_address_address() {

Address RelocInfo::constant_pool_entry_address() {

Object* RelocInfo::target_object() {
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  // ...
  return Handle<Object>(reinterpret_cast<Object**>(
void RelocInfo::set_target_object(Object* target,
                                  WriteBarrierMode write_barrier_mode,
                                  ICacheFlushMode icache_flush_mode) {
  // ...
      reinterpret_cast<Address>(target),
  // ...
      target->IsHeapObject()) {
Address RelocInfo::target_reference() {

Address RelocInfo::target_runtime_entry(Assembler* origin) {
  DCHECK(IsRuntimeEntry(rmode_));
  return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode write_barrier_mode,
                                         ICacheFlushMode icache_flush_mode) {
  DCHECK(IsRuntimeEntry(rmode_));
  if (target_address() != target) {
    set_target_address(target, write_barrier_mode, icache_flush_mode);
  }
}
Handle<Cell> RelocInfo::target_cell_handle() {
  // ...
  Cell* null_cell = NULL;
  return Handle<Cell>(null_cell);
}
Cell* RelocInfo::target_cell() {

void RelocInfo::set_target_cell(Cell* cell,

Code* RelocInfo::code_age_stub() {

void RelocInfo::set_code_age_stub(Code* stub,

Address RelocInfo::call_address() {
void RelocInfo::set_call_address(Address target) {
  // ...
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
void RelocInfo::WipeOut() {

bool RelocInfo::IsPatchedReturnSequence() {
  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
  Instruction* i2 = i1->following();
  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
         i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}
template <typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}
LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) return rt.Is64Bits() ? LDR_x : LDR_w;
  DCHECK(rt.IsFPRegister());
  return rt.Is64Bits() ? LDR_d : LDR_s;
}

LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  if (rt.IsRegister()) return rt.Is64Bits() ? LDP_x : LDP_w;
  DCHECK(rt.IsFPRegister());
  return rt.Is64Bits() ? LDP_d : LDP_s;
}

LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  if (rt.IsRegister()) return rt.Is64Bits() ? STR_x : STR_w;
  DCHECK(rt.IsFPRegister());
  return rt.Is64Bits() ? STR_d : STR_s;
}

LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  if (rt.IsRegister()) return rt.Is64Bits() ? STP_x : STP_w;
  DCHECK(rt.IsFPRegister());
  return rt.Is64Bits() ? STP_d : STP_s;
}
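// Example (illustrative): the selected opcode depends only on the register's
// type and width:
//   LoadOpFor(x0) == LDR_x    LoadOpFor(w0) == LDR_w
//   LoadOpFor(d0) == LDR_d    LoadOpFor(s0) == LDR_s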
  return 1 << FlagsUpdate_offset;   // Flags(SetFlags)
  return 0 << FlagsUpdate_offset;   // Flags(LeaveFlags)

  return cond << Condition_offset;  // Cond(cond)
Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
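// Worked example (illustrative): ADR's 21-bit signed offset is split into
// immlo (the low ImmPCRelLo_width == 2 bits, at instruction bits 30:29) and
// immhi (the high 19 bits, at bits 23:5). For imm21 = 0x1003:
//   immlo = 0x1003 & 0x3 = 0x3    -> placed at ImmPCRelLo_offset
//   immhi = 0x1003 >> 2  = 0x400  -> placed at ImmPCRelHi_offset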
Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}

Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}

Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}

Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}
Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  DCHECK(is_uint6(bit_pos));
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}
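// Worked example (illustrative): TBZ/TBNZ store the tested bit position in two
// fields: bit 5 of bit_pos becomes instruction bit 31 (b5) and bits 4:0 become
// bits 23:19 (b40). For bit_pos = 37 (0b100101):
//   b5  = (37 >> 5) & 1 = 1   -> a bit of a 64-bit (X) register is tested
//   b40 = 37 & 0x1f     = 5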
Instr Assembler::ImmAddSub(int64_t imm) {
  DCHECK(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    return imm << ImmAddSub_offset;
  } else {
    return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
}
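// Worked example (illustrative): add/sub immediates are 12 bits with an
// optional left shift by 12:
//   ImmAddSub(0xABC)     -> 0xABC in the immediate field, shift bit clear
//   ImmAddSub(0xABC000)  -> 0xABC in the immediate field, shift bit set
//   0xABC123 is not encodable and should be rejected by IsImmAddSub().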
  return imms << ImmS_offset;           // ImmS(imms, reg_size)
  return immr << ImmR_offset;           // ImmR(immr, reg_size)
  return imms << ImmSetBits_offset;     // ImmSetBits(imms, reg_size)
  return immr << ImmRotate_offset;      // ImmRotate(immr, reg_size)

  CHECK(is_int19(imm19));               // ImmLLiteral(imm19)
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
  return bitn << BitN_offset;           // BitN(bitn, reg_size)
  return shift << ShiftDP_offset;       // ShiftDP(shift)

  DCHECK(is_uint6(amount));             // ImmDPShift(amount)
  return amount << ImmDPShift_offset;

  return extend << ExtendMode_offset;             // ExtendMode(extend)
  return left_shift << ImmExtendShift_offset;     // ImmExtendShift(left_shift)
  return imm << ImmCondCmp_offset;                // ImmCondCmp(imm)
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;  // Nzcv(nzcv)

  DCHECK(is_uint12(imm12));             // ImmLSUnsigned(imm12)
  return imm12 << ImmLSUnsigned_offset;

  return truncate_to_int9(imm9) << ImmLS_offset;  // ImmLS(imm9)
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
  // The offset is stored scaled by the transfer size.
  int scaled_imm7 = imm7 >> size;
  DCHECK(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
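// Worked example (illustrative): LDP/STP offsets are stored as a signed 7-bit
// immediate in units of the access size, so for an X-register pair (size == 3)
// a byte offset of 16 is stored as 16 >> 3 == 2. Callers are expected to pass
// offsets that are multiples of the access size; an unaligned offset such as
// 12 would silently lose bits here.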
  DCHECK(is_uint1(shift_amount));       // ImmShiftLS(shift_amount)
  return shift_amount << ImmShiftLS_offset;

  DCHECK(is_uint16(imm16));             // ImmException(imm16)
  return imm16 << ImmException_offset;

  DCHECK(is_uint15(imm15));             // ImmSystemRegister(imm15)
  return imm15 << ImmSystemRegister_offset;

  return imm7 << ImmHint_offset;                  // ImmHint(imm7)
  return imm2 << ImmBarrierDomain_offset;         // ImmBarrierDomain(imm2)
  return imm2 << ImmBarrierType_offset;           // ImmBarrierType(imm2)

  return static_cast<LSDataSize>(op >> SizeLS_offset);  // CalcLSDataSize(op)

  return imm << ImmMoveWide_offset;               // ImmMoveWide(imm)
  return shift << ShiftMoveWide_offset;           // ShiftMoveWide(shift)

  return scale << FPScale_offset;                 // FPScale(scale)
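// Sketch (illustrative, field names approximate): the assembler composes an
// instruction word by OR-ing a fixed opcode pattern with these field
// encodings and passing the result to Emit(), in the spirit of:
//
//   Emit(SF(rd) | ADD | Flags(LeaveFlags) | ImmAddSub(16) | Rd(rd) | Rn(rn));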
// Assembler: instruction field encoding helpers.
static Instr Flags(FlagsUpdate S)
static Instr Cond(Condition cond)
static Instr SF(Register rd)
static Instr Rn(CPURegister rn)
static Instr ImmPCRelAddress(int imm21)
static Instr ImmUncondBranch(int imm26)
static Instr ImmCondBranch(int imm19)
static Instr ImmCmpBranch(int imm19)
static Instr ImmTestBranch(int imm14)
static Instr ImmTestBranchBit(unsigned bit_pos)
static Instr ImmAddSub(int64_t imm)
static bool IsImmAddSub(int64_t immediate)
static Instr ImmS(unsigned imms, unsigned reg_size)
static Instr ImmR(unsigned immr, unsigned reg_size)
static Instr ImmSetBits(unsigned imms, unsigned reg_size)
static Instr ImmRotate(unsigned immr, unsigned reg_size)
static Instr ImmLLiteral(int imm19)
static Instr BitN(unsigned bitn, unsigned reg_size)
static Instr ShiftDP(Shift shift)
static Instr ImmDPShift(unsigned amount)
static Instr ExtendMode(Extend extend)
static Instr ImmExtendShift(unsigned left_shift)
static Instr ImmCondCmp(unsigned imm)
static Instr Nzcv(StatusFlags nzcv)
static Instr ImmLSUnsigned(int imm12)
static Instr ImmLS(int imm9)
static Instr ImmLSPair(int imm7, LSDataSize size)
static Instr ImmShiftLS(unsigned shift_amount)
static Instr ImmException(int imm16)
static Instr ImmSystemRegister(int imm15)
static Instr ImmHint(int imm7)
static Instr ImmBarrierDomain(int imm2)
static Instr ImmBarrierType(int imm2)
static Instr ImmMoveWide(uint64_t imm)
static Instr ShiftMoveWide(int64_t shift)
static Instr FPType(FPRegister fd)
static Instr FPScale(unsigned scale)
static LSDataSize CalcLSDataSize(LoadStoreOp op)

// Assembler: load/store opcode selection.
static LoadStoreOp LoadOpFor(const CPURegister& rt)
static LoadStoreOp StoreOpFor(const CPURegister& rt)
static LoadStorePairOp LoadPairOpFor(const CPURegister& rt, const CPURegister& rt2)
static LoadStorePairOp StorePairOpFor(const CPURegister& rt, const CPURegister& rt2)
static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(const CPURegister& rt, const CPURegister& rt2)
static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(const CPURegister& rt, const CPURegister& rt2)
static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt)

// Assembler: code targets and patching support.
static Address target_pointer_address_at(Address pc)
static Address target_address_at(Address pc, ConstantPoolArray* constant_pool)
static void set_target_address_at(Address pc, ConstantPoolArray* constant_pool, Address target, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)
static Address target_address_from_return_address(Address pc)
static Address return_address_from_call_start(Address pc)
static Address break_address_from_return_address(Address pc)
static void deserialization_set_special_target_at(Address constant_pool_entry, Code* code, Address target)
static const int kCallSizeWithoutRelocation
static const int kCallSizeWithRelocation
static const int kPatchDebugBreakSlotReturnOffset

// Assembler: emission, labels and pools.
void Emit(Instr instruction)
void CheckConstPool(bool force_emit, bool require_jump)
void CheckVeneerPool(bool force_emit, bool require_jump, int margin = kVeneerDistanceMargin)
int LinkAndGetByteOffsetTo(Label* label)
int LinkAndGetInstructionOffsetTo(Label* label)
void debug(const char* message, uint32_t code, Instr params = BREAK)
void shift(Register dst, Immediate shift_amount, int subcode, int size)
const Register& AppropriateZeroRegFor(const CPURegister& reg) const
ConstantPoolArray* constant_pool()
TypeFeedbackId RecordedAstId()
void ClearRecordedAstId()
TypeFeedbackId recorded_ast_id_
int next_constant_pool_check_
int next_veneer_pool_check_
static const int kStartOfLabelLinkChain
STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size)

// CPURegList.
void Combine(const CPURegList& other)
void Remove(const CPURegList& other)
CPURegister::RegisterType type() const
unsigned RegisterSizeInBits() const
CPURegister::RegisterType type_

// MemOperand accessors.
const Register& base() const
const Register& regoffset() const
AddrMode addrmode() const
unsigned shift_amount() const
bool IsImmediateOffset() const
bool IsRegisterOffset() const
Operand OffsetAsOperand() const

// Operand.
Operand(Register reg, Shift shift = LSL, unsigned shift_amount = 0)
Immediate immediate() const
int64_t ImmediateValue() const
bool IsShiftedRegister() const
bool IsExtendedRegister() const
Operand ToExtendedRegister() const
unsigned shift_amount() const
static Operand UntagSmi(Register smi)
static Operand UntagSmiAndScale(Register smi, int scale)

// RelocInfo mode predicates.
static bool IsEmbeddedObject(Mode mode)
static bool IsCodeTarget(Mode mode)
static bool IsRuntimeEntry(Mode mode)
static bool IsExternalReference(Mode mode)
static bool IsJSReturn(Mode mode)
static bool IsDebugBreakSlot(Mode mode)
static TypeFeedbackId None()

// Support from other headers.
static Object*& Object_at(Address addr)
static Address& Address_at(Address addr)
static Cell* FromValueAddress(Address value)
static Code* GetCodeFromTargetAddress(Address address)
static bool IsYoungSequence(Isolate* isolate, byte* sequence)
static bool SupportsCrankshaft()
IncrementalMarking* incremental_marking()
Instruction* ImmPCOffsetTarget()
bool IsLdrLiteralX() const
#define DCHECK(condition)
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2, const CPURegister& reg3 = NoCPUReg, const CPURegister& reg4 = NoCPUReg, const CPURegister& reg5 = NoCPUReg, const CPURegister& reg6 = NoCPUReg, const CPURegister& reg7 = NoCPUReg, const CPURegister& reg8 = NoCPUReg)
const unsigned kDRegSizeInBits
const unsigned kXRegSizeInBits
LoadStorePairNonTemporalOp
const unsigned kSPRegInternalCode
const unsigned kWRegSizeInBits
const unsigned kSRegSizeInBits
const unsigned kNumberOfFPRegisters
STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register))
const unsigned kZeroRegCode
static const int kCodeAgeStubEntryOffset
const unsigned kNumberOfRegisters
static const int kNoCodeAgeSequenceLength
bool IsAligned(T value, U alignment)
const unsigned kInstructionSize
const unsigned kInstructionSizeLog2
bool Aliases(const CPURegister& other) const
RegisterType type() const
bool IsValidFPRegister() const
bool Is(const CPURegister& other) const
static CPURegister Create(unsigned code, unsigned size, RegisterType type)
bool IsFPRegister() const
unsigned SizeInBits() const
bool IsValidOrNone() const
bool IsValidRegister() const
bool IsSameSizeAndType(const CPURegister& other) const
static FPRegister SRegFromCode(unsigned code)
static FPRegister Create(unsigned code, unsigned size)
static FPRegister DRegFromCode(unsigned code)
static Register WRegFromCode(unsigned code)
static Register XRegFromCode(unsigned code)
static Register Create(unsigned code, unsigned size)