#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/assembler-arm-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"

namespace v8 {
namespace internal {
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) answer |= 1u << ARMv7;
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) answer |= 1u << VFP32DREGS;
#endif  // CAN_USE_VFP32DREGS
#ifdef CAN_USE_NEON
  if (FLAG_enable_neon) answer |= 1u << NEON;
#endif  // CAN_USE_NEON
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}
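

// Worked example (illustrative, not part of the original source): in a build
// with CAN_USE_VFP3_INSTRUCTIONS defined and FLAG_enable_vfp3 set, the
// returned mask contains both the VFP3 and the implied ARMv7 bit:
//   unsigned features = CpuFeaturesImpliedByCompiler();
//   bool has_vfp3 = (features & (1u << VFP3)) != 0;  // true in that build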
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  cache_line_size_ = 64;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

#ifndef __arm__
  // For the simulator build, use whatever the flags specify.
  if (FLAG_enable_armv7) {
    supported_ |= 1u << ARMv7;
    if (FLAG_enable_vfp3) supported_ |= 1u << VFP3;
    if (FLAG_enable_neon) supported_ |= 1u << NEON | 1u << VFP32DREGS;
    if (FLAG_enable_sudiv) supported_ |= 1u << SUDIV;
    if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
  }
  if (FLAG_enable_mls) supported_ |= 1u << MLS;
  if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;

#else  // __arm__
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime detection of VFP
    // returns true. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
    supported_ |= 1u << VFP3 | 1u << ARMv7;
  }

  if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
  if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;

  if (cpu.architecture() >= 7) {
    if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
    if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
    // Use movw/movt for QUALCOMM ARMv7 cores.
    if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
      supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
    }
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == base::CPU::ARM &&
      (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
       cpu.part() == base::CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
#endif

  DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
}
void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_target_type = "";
  const char* arm_no_probe = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if !defined __arm__
  arm_target_type = " simulator";
#endif

#if defined ARM_TEST_NO_FEATURE_PROBE
  arm_no_probe = " noprobe";
#endif

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#if defined CAN_USE_NEON
  arm_fpu = " neon";
#elif defined CAN_USE_VFP3_INSTRUCTIONS
#  if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
#  else
  arm_fpu = " vfp3-d16";
#  endif
#else
  arm_fpu = " vfp2";
#endif

#ifdef __arm__
  arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
#elif USE_EABI_HARDFLOAT
  arm_float_abi = "hard";
#else
  arm_float_abi = "softfp";
#endif

#if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
#endif

  printf("target%s%s %s%s%s %s\n",
         arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
         arm_float_abi);
}
187 "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
188 "MOVW_MOVT_IMMEDIATE_LOADS=%d",
198 #elif USE_EABI_HARDFLOAT
199 bool eabi_hardfloat =
true;
201 bool eabi_hardfloat =
false;
203 printf(
" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM means that it is a movw/movt instruction, or is an
  // out of line constant pool entry. These only occur if
  // FLAG_enable_ool_constant_pool is true.
  return FLAG_enable_ool_constant_pool;
}


bool RelocInfo::IsInConstantPool() {
  return Assembler::is_constant_pool_load(pc_);
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
// Implementation of Operand and MemOperand.
// See assembler-arm-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  DCHECK(is_uint5(shift_imm));

  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;

  if ((shift_op == ROR) && (shift_imm == 0)) {
    // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
    // RRX as ROR #0 (see below).
    shift_op = LSL;
  } else if (shift_op == RRX) {
    // RRX is encoded as ROR with shift_imm == 0.
    DCHECK(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  }
}


Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  DCHECK(shift_op != RRX);
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = shift_op;
  rs_ = rs;
}
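

// Usage sketch (illustrative): Operand(r1, LSR, 4) describes the shifter
// operand "r1, LSR #4", while Operand(r1, RRX) is stored as ROR with a zero
// shift_imm_, which is exactly how the ARM instruction encoding represents
// RRX.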
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  DCHECK(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  DCHECK((am == Offset) || (am == PostIndex));
  rn_ = rn;
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}


void NeonMemOperand::SetAlignment(int align) {
  switch (align) {
    case 0:   align_ = 0; break;
    case 64:  align_ = 1; break;
    case 128: align_ = 2; break;
    case 256: align_ = 3; break;
    default:
      UNREACHABLE();
      align_ = 0;
      break;
  }
}


NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  switch (registers_count) {
    case 1: type_ = nlt_1; break;
    case 2: type_ = nlt_2; break;
    case 3: type_ = nlt_3; break;
    case 4: type_ = nlt_4; break;
    default:
      UNREACHABLE();
      type_ = nlt_1;
      break;
  }
}
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
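

// How the "flip" constants above are used (illustrative): fits_shifter()
// below can XOR an instruction with kMovMvnFlip to turn a mov into the
// equivalent mvn (and vice versa) when only the complemented immediate is
// encodable; kCmpCmnFlip, kAddSubFlip and kAndBicFlip play the same role for
// the cmp/cmn, add/sub and and/bic pairs.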
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_32_bit_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  constant_pool_available_ = !FLAG_enable_ool_constant_pool;
  ClearRecordedAstId();
}


Assembler::~Assembler() {
  DCHECK(const_pool_blocked_nesting_ == 0);
}


void Assembler::GetCode(CodeDesc* desc) {
  if (!FLAG_enable_ool_constant_pool) {
    // Emit constant pool if necessary.
    CheckConstPool(true, false);
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}


Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}
int Assembler::GetBranchOffset(Instr instr) {
  DCHECK(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // by 4 to get the offset in bytes.
  return ((instr & kImm24Mask) << 8) >> 6;
}


bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}


int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;  // Zero extended offset.
  return positive ? offset : -offset;
}


int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff8Mask;  // Zero extended offset.
  offset <<= 2;
  return positive ? offset : -offset;
}


Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsVldrDRegisterImmediate(instr));
  DCHECK((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}


bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  DCHECK(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  DCHECK(IsAddRegisterImmediate(instr));
  DCHECK(offset >= 0);
  DCHECK(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}
Instr Assembler::GetConsantPoolLoadPattern() {
  if (FLAG_enable_ool_constant_pool) {
    return kLdrPpImmedPattern;
  } else {
    return kLdrPCImmedPattern;
  }
}


Instr Assembler::GetConsantPoolLoadMask() {
  if (FLAG_enable_ool_constant_pool) {
    return kLdrPpImmedMask;
  } else {
    return kLdrPCImmedMask;
  }
}
bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
}


bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp +/- offset_12].
  return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
}


bool Assembler::IsLdrPpRegOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pp, +/- <Rm>].
  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
}


Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}


bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pp +/- offset_10].
  return (instr & kVldrDPpMask) == kVldrDPpPattern;
}


bool Assembler::IsBlxReg(Instr instr) {
  // Check the instruction is indeed a
  // blxcc <Rm>.
  return (instr & kBlxRegMask) == kBlxRegPattern;
}


bool Assembler::IsBlxIp(Instr instr) {
  // Check the instruction is indeed a
  // blx ip.
  return instr == kBlxIp;
}


bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | TST | S);
}


bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | CMP | S);
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRn(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26.
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
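

// Offset arithmetic (illustrative): the 24-bit field of b/bl holds a word
// offset relative to pc + 8 (kPcLoadDelta, the ARM pipeline prefetch). An
// instruction at position 0 with imm24 == 1 therefore targets
// 0 + 8 + (1 << 2) == 12.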
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    DCHECK(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register. The destination register is extracted from the
    // nop instruction emitted right after the link.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    DCHECK(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26.
    DCHECK((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    DCHECK((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  DCHECK(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
// Low-level code emission routines depend on the addressing mode.
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            *instr ^= kMovwLeaveCCFlip;
            *instr |= Assembler::EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
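

// Worked example (illustrative): imm32 == 0xff000000 is matched at rot == 4,
// since rotating it left by 8 bits yields 0xff. The operand is then encoded
// as immed_8 == 0xff with rotate_imm == 4, i.e. 0xff rotated right by 8,
// which reproduces 0xff000000 at execution time.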
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return assembler->serializer_enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


static bool use_mov_immediate_load(const Operand& x,
                                   const Assembler* assembler) {
  if (assembler != NULL && !assembler->is_constant_pool_available()) {
    // If there is no constant pool available, we must use an mov immediate
    // load.
    return true;
  } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
             (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
    return true;
  } else if (x.must_output_reloc_info(assembler)) {
    // Prefer constant pool if data is likely to be patched.
    return false;
  } else {
    // Otherwise, use immediate load if movw / movt is available.
    return CpuFeatures::IsSupported(ARMv7);
  }
}
int Operand::instructions_required(const Assembler* assembler,
                                   Instr instr) const {
  if (rm_.is_valid()) return 1;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // the constant pool is required. First account for the instructions
    // required for the constant pool or immediate load.
    int instructions;
    if (use_mov_immediate_load(*this, assembler)) {
      // A movw / movt or mov / orr immediate load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
    } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
      // An extended constant pool load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
    } else {
      // A small constant pool load.
      instructions = 1;
    }

    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
      // For a mov or mvn instruction which doesn't set the condition code,
      // the constant pool or immediate load is enough, otherwise we need to
      // account for the actual instruction being requested.
      instructions += 1;
    }
    return instructions;
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return 1;
  }
}
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
  uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  if (use_mov_immediate_load(x, this)) {
    Register target = rd.code() == pc.code() ? ip : rd;
    if (CpuFeatures::IsSupported(ARMv7)) {
      if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      movw(target, imm32 & 0xffff, cond);
      movt(target, imm32 >> 16, cond);
    } else {
      DCHECK(FLAG_enable_ool_constant_pool);
      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
    }
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    DCHECK(is_constant_pool_available());
    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
    if (section == ConstantPoolArray::EXTENDED_SECTION) {
      DCHECK(FLAG_enable_ool_constant_pool);
      Register target = rd.code() == pc.code() ? ip : rd;
      // Emit instructions to load constant pool offset.
      if (CpuFeatures::IsSupported(ARMv7)) {
        movw(target, 0, cond);
        movt(target, 0, cond);
      } else {
        mov(target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
      }
      // Load from constant pool at offset.
      ldr(rd, MemOperand(pp, target), cond);
    } else {
      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
    }
  }
}
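

// Example sequence (illustrative): on ARMv7, a mov(r0, Operand(0x12345678))
// that takes the immediate-load path above emits
//   movw r0, #0x5678
//   movt r0, #0x1234
// while the pre-ARMv7 fallback builds the value with one mov and three orrs.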
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip.
      // rn (and rd in a load) should never be ip, or they will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
    // offset; the constructors make sure that both shift_imm_ and shift_op_
    // are initialized.
    DCHECK(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  DCHECK(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip.
      // rn (and rd in a load) should never be ip, or they will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_8 >= 0);  // no masking needed
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offsets are not supported, load the index first.
    // rn (and rd in a load) should never be ip, or they will be trashed.
    DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    DCHECK((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
  DCHECK(rl != 0);
  DCHECK(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}


void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  DCHECK_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  DCHECK((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  DCHECK(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // First entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolFor(1);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
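

// Note (illustrative): subtracting kPcLoadDelta accounts for the ARM
// pipeline. When a branch executes, pc already reads as the branch address
// plus 8, so a branch to the textually next instruction has offset -4.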
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
  DCHECK((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  DCHECK(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}


void Assembler::bl(int branch_offset, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  DCHECK(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}


void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  DCHECK((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  DCHECK(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}


void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  DCHECK(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}


void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  DCHECK(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
// Data-processing instructions.

void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | AND | s, src1, dst, src2);
}


void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | EOR | s, src1, dst, src2);
}


void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SUB | s, src1, dst, src2);
}


void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSB | s, src1, dst, src2);
}


void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADD | s, src1, dst, src2);
}


void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ADC | s, src1, dst, src2);
}


void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | SBC | s, src1, dst, src2);
}


void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | RSC | s, src1, dst, src2);
}


void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TST | S, src1, r0, src2);
}


void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | TEQ | S, src1, r0, src2);
}


void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMP | S, src1, r0, src2);
}


void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  DCHECK(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}


void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  addrmod1(cond | CMN | S, src1, r0, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | ORR | s, src1, dst, src2);
}


void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions.
    // If the label is not linked, then start a new link chain by linking it to
    // itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions (see target_at_put).
    // They will load the destination register with the position of the label
    // from the beginning of the code. The link is extracted from the first
    // instruction and the destination register from the following nop(s).
    CHECK(is_uint24(link));
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      nop(dst.code());
      nop(dst.code());
    }
  }
}


void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}


void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | BIC | s, src1, dst, src2);
}


void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  addrmod1(cond | MVN | s, r0, dst, src);
}
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  DCHECK(IsEnabled(MLS));
  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf*B12 |
       src2.code()*B8 | B4 | src1.code());
}


void Assembler::udiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
       src2.code() * B8 | B4 | src1.code());
}


void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}


void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  DCHECK(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}


// Saturating instructions.

// Unsigned saturate.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.rm_.is(pc));
  DCHECK((satpos >= 0) && (satpos <= 31));
  DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  DCHECK(src.rs_.is(no_reg));

  int sh = 0;
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
// Bitfield manipulation instructions.

// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
//   ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
//   sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}


// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
//   bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}


// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
//   bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  DCHECK(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}


void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  DCHECK(src2.shift_op() == ASR);
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}


void Assembler::uxtb(Register dst,
                     const Operand& src,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.rm().is(pc));
  DCHECK(!src.rm().is(no_reg));
  DCHECK(src.rs().is(no_reg));
  DCHECK((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}


void Assembler::uxtab(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ == 0) ||
         (src2.shift_imm_ == 8) ||
         (src2.shift_imm_ == 16) ||
         (src2.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src2.shift_op() == ROR) ||
         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}


void Assembler::uxtb16(Register dst,
                       const Operand& src,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.rm().is(pc));
  DCHECK(!src.rm().is(no_reg));
  DCHECK(src.rs().is(no_reg));
  DCHECK((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  DCHECK(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}


void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  DCHECK(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_output_reloc_info(this) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      move_32_bit_immediate(ip, src);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  if (dst.is(pc)) {
    positions_recorder()->WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);
}


void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
}


void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}


void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}


void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}


void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}


void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}


void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.
  DCHECK_EQ(0, dst1.code() % 2);
  DCHECK_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}


void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));
  DCHECK(!src1.is(lr));  // r14.
  DCHECK_EQ(0, src1.code() % 2);
  DCHECK_EQ(src1.code() + 1, src2.code());
  DCHECK(IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.128.
  // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
  // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0)
  DCHECK(address.rm().is(no_reg));
  DCHECK(address.am() == Offset);
  int U = B23;
  int offset = address.offset();
  if (offset < 0) {
    offset = -offset;
    U = 0;
  }
  DCHECK(offset < 4096);
  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
       address.rn().code()*B16 | 0xf*B12 | offset);
}
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
  DCHECK(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}


void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  DCHECK(code >= kDefaultStopCode);
  {
    // The Simulator will handle the stop instruction and get the message
    // address. It expects to find the address just after the svc instruction.
    BlockConstPoolScope block_const_pool(this);
    if (code >= 0) {
      svc(kStopCode + code, cond);
    } else {
      svc(kStopCode + kMaxStopCode, cond);
    }
    emit(reinterpret_cast<Instr>(msg));
  }
#else  // def __arm__
  if (cond != al) {
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#endif  // def __arm__
}


void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  DCHECK(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}


void Assembler::svc(uint32_t imm24, Condition cond) {
  DCHECK(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}


void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}


void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  DCHECK(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
// Support for VFP.

void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406C.b, A8-924.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1011(11-8) | offset
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int vd, d;
  dst.split_code(&vd, &d);

  DCHECK(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
  }
}


void Assembler::vldr(const DwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vldr(dst, ip, 0, cond);
  } else {
    vldr(dst, operand.rn(), operand.offset(), cond);
  }
}


void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1010(11-8) | offset
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  DCHECK(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}


void Assembler::vldr(const SwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vldr(dst, ip, 0, cond);
  } else {
    vldr(dst, operand.rn(), operand.offset(), cond);
  }
}


void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406C.b, A8-1082.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1011(11-8) | (offset/4)
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  DCHECK(offset >= 0);
  int vd, d;
  src.split_code(&vd, &d);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
         ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
  }
}


void Assembler::vstr(const DwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vstr(src, ip, 0, cond);
  } else {
    vstr(src, operand.rn(), operand.offset(), cond);
  }
}


void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1010(11-8) | (offset/4)
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  DCHECK(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}


void Assembler::vstr(const SwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vstr(src, ip, 0, cond);
  } else {
    vstr(src, operand.rn(), operand.offset(), cond);
  }
}
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-922.
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  DCHECK(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}


void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1080.
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  DCHECK(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}


void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}


void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}


// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
  DCHECK(CpuFeatures::IsSupported(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two 4-bit
  // fields. For an 8-bit immediate of the form [abcdefgh], where a is the MSB
  // and h is the LSB, a 64-bit double of the following form can be created:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //      00000000,00000000,00000000,00000000]
  //
  // where B = ~b.

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 62:55 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 63 must be NOT bit 62.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
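

// Worked example (illustrative): d == 1.0 has bit pattern
// 0x3FF0000000000000, so lo == 0 and hi == 0x3FF00000. All three checks pass
// and the resulting encoding is 0x70000 (abcd == 0b0111 in bits 19-16,
// efgh == 0 in bits 3-0), matching the ARM immediate 0x70 used by
// "vmov.f64 dX, #1.0".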
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Register scratch) {
  uint32_t enc;
  if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
    // The double can be encoded in the instruction.
    //
    // Dd = immediate
    // Instruction details available in ARM DDI 0406C.b, A8-936.
    // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
    // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
  } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
    // Load the double from the constant pool.
    RelocInfo rinfo(pc_, imm);
    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
    if (section == ConstantPoolArray::EXTENDED_SECTION) {
      DCHECK(FLAG_enable_ool_constant_pool);
      // Emit instructions to load constant pool offset.
      movw(ip, 0);
      movt(ip, 0);
      // Load from constant pool at offset.
      vldr(dst, MemOperand(pp, ip));
    } else {
      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
      vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
    }
  } else {
    // Synthesise the double from ARM immediates.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (scratch.is(no_reg)) {
      if (dst.code() < 16) {
        const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
        // Move the low part of the double into the lower of the corresponding
        // S registers of D register dst.
        mov(ip, Operand(lo));
        vmov(loc.low(), ip);

        // Move the high part of the double into the higher of the
        // corresponding S registers of D register dst.
        mov(ip, Operand(hi));
        vmov(loc.high(), ip);
      } else {
        // D16-D31 does not have S registers, so move the low and high parts
        // directly to the D register using vmov.32.
        // Note: This may be slower, so we only do this when we have to.
        mov(ip, Operand(lo));
        vmov(dst, VmovIndexLo, ip);
        mov(ip, Operand(hi));
        vmov(dst, VmovIndexHi, ip);
      }
    } else {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(ip, Operand(lo));
      mov(scratch, Operand(hi));
      vmov(dst, ip, scratch);
    }
  }
}
void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm
  // Instruction details available in ARM DDI 0406B, A8-642.
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Instruction details available in ARM DDI 0406C.b, A8-938.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
       vm);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const VmovIndex index,
                     const Register src,
                     const Condition cond) {
  // Dd[index] = Rt
  // Instruction details available in ARM DDI 0406C.b, A8-940.
  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  DCHECK(index.index == 0 || index.index == 1);
  int vd, d;
  dst.split_code(&vd, &d);
  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
       d*B7 | B4);
}


void Assembler::vmov(const Register dst,
                     const VmovIndex index,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Rt = Dn[index]
  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  DCHECK(index.index == 0 || index.index == 1);
  int vn, n;
  src.split_code(&vn, &n);
  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
       0xB*B8 | n*B7 | B4);
}


void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  DCHECK(!src1.is(pc) && !src2.is(pc));
  int vm, m;
  dst.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}


void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  DCHECK(!dst1.is(pc) && !dst2.is(pc));
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}


void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  DCHECK(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}


void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  DCHECK(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };


static bool IsSignedVFPType(VFPType type) {
  switch (type) {
    case S32:
      return true;
    case U32:
      return false;
    default:
      UNREACHABLE();
      return false;
  }
}


static bool IsIntegerVFPType(VFPType type) {
  switch (type) {
    case S32:
    case U32:
      return true;
    case F32:
    case F64:
      return false;
    default:
      UNREACHABLE();
      return false;
  }
}


static bool IsDoubleVFPType(VFPType type) {
  switch (type) {
    case F32:
      return false;
    case F64:
      return true;
    default:
      UNREACHABLE();
      return false;
  }
}


// Split five bit reg_code based on size of reg_type.
//  32-bit register codes are Vm:M
//  64-bit register codes are M:Vm
// where Vm is four bits, and M is a single bit.
static void SplitRegCode(VFPType reg_type,
                         int reg_code,
                         int* vm,
                         int* m) {
  DCHECK((reg_code >= 0) && (reg_code <= 31));
  if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
    // 32 bit type.
    *m  = reg_code & 0x1;
    *vm = reg_code >> 1;
  } else {
    // 64 bit type.
    *m  = (reg_code & 0x10) >> 4;
    *vm = reg_code & 0x0F;
  }
}


// Encode vcvt.src_type.dst_type instruction.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  DCHECK(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      DCHECK(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0x1D*B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0x1D*B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}


void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}


void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}


void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}


void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}


void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             int fraction_bits,
                             const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-874.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
  DCHECK(fraction_bits > 0 && fraction_bits <= 32);
  DCHECK(CpuFeatures::IsSupported(VFP3));
  int vd, d;
  dst.split_code(&vd, &d);
  int imm5 = 32 - fraction_bits;
  int i = imm5 & 1;
  int imm4 = (imm5 >> 1) & 0xf;
  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
       vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
void Assembler::vneg(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-968.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);

  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}


void Assembler::vabs(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-524.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}


void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Instruction details available in ARM DDI 0406C.b, A8-830.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}


void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Instruction details available in ARM DDI 0406C.b, A8-1086.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | B6 | m*B5 | vm);
}


void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Instruction details available in ARM DDI 0406C.b, A8-960.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}


void Assembler::vmla(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}


void Assembler::vmls(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
       m*B5 | vm);
}


void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Instruction details available in ARM DDI 0406C.b, A8-882.
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 |
       B6 | m*B5 | vm);
}


void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  DCHECK(src2 == 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}


void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  emit(cond | 0xE*B24 | 0xE*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}


void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  emit(cond | 0xE*B24 | 0xF*B20 | B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}


void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}
// Support for NEON.

void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
  DCHECK(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}


void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  DCHECK(CpuFeatures::IsSupported(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}


void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  DCHECK(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
// Pseudo instructions.
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
  // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
  // a type.
  DCHECK(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  emit(al | 13*B21 | type*B12 | type);
}
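

// Usage sketch (illustrative): nop() emits "mov r0, r0" and nop(2) emits
// "mov r2, r2"; IsNop(instr, type) below recognizes exactly these forms.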
bool Assembler::IsMovT(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
             ((kNumRegisters-1)*B12) |            // mask out register
             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
  return instr == kMovtPattern;
}


bool Assembler::IsMovW(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
             ((kNumRegisters-1)*B12) |            // mask out destination
             EncodeMovwImmediate(0xFFFF));        // mask out immediate value
  return instr == kMovwPattern;
}


Instr Assembler::GetMovTPattern() { return kMovtPattern; }


Instr Assembler::GetMovWPattern() { return kMovwPattern; }


Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
  DCHECK(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}


Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
  instruction &= ~EncodeMovwImmediate(0xffff);
  return instruction | EncodeMovwImmediate(immediate);
}
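

// Worked example (illustrative): EncodeMovwImmediate(0x1234) returns 0x10234,
// splitting the value into imm4 (0x1, placed at bits 19-16) and imm12 (0x234,
// at bits 11-0) as the movw/movt encodings require.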
int Assembler::DecodeShiftImm(Instr instr) {
  int rotate = Instruction::RotateValue(instr) * 2;
  int immed8 = Instruction::Immed8Value(instr);
  return (immed8 >> rotate) | (immed8 << (32 - rotate));
}


Instr Assembler::PatchShiftImm(Instr instr, int immed) {
  uint32_t rotate_imm = 0;
  uint32_t immed_8 = 0;
  bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
  DCHECK(immed_fits);
  USE(immed_fits);
  return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
}
bool Assembler::IsNop(Instr instr, int type) {
  DCHECK(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // Check for mov rx, rx where x = type.
  return instr == (al | 13*B21 | type*B12 | type);
}


bool Assembler::IsMovImmed(Instr instr) {
  return (instr & kMovImmedMask) == kMovImmedPattern;
}


bool Assembler::IsOrrImmed(Instr instr) {
  return (instr & kOrrImmedMask) == kOrrImmedPattern;
}


// static
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}


bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
  return is_uint12(abs(imm32));
}
// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
    DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
    DCHECK(rinfo.rmode() == RelocInfo::NONE64);
    rinfo.set_pc(rinfo.pc() + pc_delta);
  }
  constant_pool_builder_.Relocate(pc_delta);
}
void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  DCHECK(num_pending_32_bit_reloc_info_ == 0);
  DCHECK(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  DCHECK(num_pending_32_bit_reloc_info_ == 0);
  DCHECK(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data, NULL);
  RecordRelocInfo(rinfo);
}


void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(rinfo.pc(),
                                       rinfo.rmode(),
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
    const RelocInfo& rinfo) {
  if (FLAG_enable_ool_constant_pool) {
    return constant_pool_builder_.AddEntry(this, rinfo);
  } else {
    if (rinfo.rmode() == RelocInfo::NONE64) {
      DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
      if (num_pending_64_bit_reloc_info_ == 0) {
        first_const_pool_64_use_ = pc_offset();
      }
      pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
    } else {
      DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
      if (num_pending_32_bit_reloc_info_ == 0) {
        first_const_pool_32_use_ = pc_offset();
      }
      pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
    }
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
    return ConstantPoolArray::SMALL_SECTION;
  }
}
void Assembler::BlockConstPoolFor(int instructions) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
    DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
           (start - first_const_pool_32_use_ +
            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
    DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if ((num_pending_32_bit_reloc_info_ == 0) &&
      (num_pending_64_bit_reloc_info_ == 0)) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool, the constant pool marker and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  bool require_64_bit_align = false;
  if (has_fp_values) {
    require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
    if (require_64_bit_align) {
      size_after_marker += kInstrSize;
    }
    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
  }

  int size = size_up_to_marker + size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next time
  //    the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in-order so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
    bool need_emit = false;
    if (has_fp_values) {
      int dist64 = pc_offset() +
                   size -
                   num_pending_32_bit_reloc_info_ * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    int dist32 =
        pc_offset() + size - first_const_pool_32_use_;
    if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
        (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
      need_emit = true;
    }
    if (!need_emit) return;
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down constant pool marker "Undefined instruction".
    // The data size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // 32-bit entries.
    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];

      DCHECK(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
      DCHECK((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      DCHECK(is_uint10(delta));

      bool found = false;
      uint64_t value = rinfo.raw_data64();
      for (int j = 0; j < i; j++) {
        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
        if (value == rinfo2.raw_data64()) {
          found = true;
          DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
          Instr instr2 = instr_at(rinfo2.pc());
          DCHECK(IsVldrDPcImmediateOffset(instr2));
          delta = GetVldrDRegisterImmediateOffset(instr2);
          delta += rinfo2.pc() - rinfo.pc();
          break;
        }
      }

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      if (!found) {
        uint64_t uint_data = rinfo.raw_data64();
        emit(uint_data & 0xFFFFFFFF);
        emit(uint_data >> 32);
      }
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
      DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL &&
             rinfo.rmode() != RelocInfo::NONE64);

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      DCHECK(!IsVldrDPcImmediateOffset(instr));

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
        DCHECK(is_uint12(delta));
        // 0 is the smallest delta:
        //   ldr rd, [pc, #0]
        //   constant pool marker
        //   data

        bool found = false;
        if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
          for (int j = 0; j < i; j++) {
            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];

            if ((rinfo2.data() == rinfo.data()) &&
                (rinfo2.rmode() == rinfo.rmode())) {
              Instr instr2 = instr_at(rinfo2.pc());
              if (IsLdrPcImmediateOffset(instr2)) {
                delta = GetLdrRegisterImmediateOffset(instr2);
                delta += rinfo2.pc() - rinfo.pc();
                found = true;
                break;
              }
            }
          }
        }

        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));

        if (!found) {
          emit(rinfo.data());
        }
      } else {
        DCHECK(IsMovW(instr));
      }
    }

    num_pending_32_bit_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  if (!FLAG_enable_ool_constant_pool) {
    return isolate->factory()->empty_constant_pool_array();
  }
  return constant_pool_builder_.New(isolate);
}


void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  constant_pool_builder_.Populate(this, constant_pool);
}
ConstantPoolBuilder::ConstantPoolBuilder()
    : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}


bool ConstantPoolBuilder::IsEmpty() {
  return entries_.size() == 0;
}


ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
    RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::NONE64) {
    return ConstantPoolArray::INT64;
  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT32;
  } else if (RelocInfo::IsCodeTarget(rmode)) {
    return ConstantPoolArray::CODE_PTR;
  } else {
    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
    return ConstantPoolArray::HEAP_PTR;
  }
}
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
    Assembler* assm, const RelocInfo& rinfo) {
  RelocInfo::Mode rmode = rinfo.rmode();
  DCHECK(rmode != RelocInfo::COMMENT &&
         rmode != RelocInfo::POSITION &&
         rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);

  // Try to merge entries which won't be patched.
  int merged_index = -1;
  ConstantPoolArray::LayoutSection entry_section = current_section_;
  if (RelocInfo::IsNone(rmode) ||
      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
    int i;
    std::vector<ConstantPoolEntry>::const_iterator it;
    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
        // Merge with found entry.
        merged_index = i;
        entry_section = entries_[i].section_;
        break;
      }
    }
  }
  DCHECK(entry_section <= current_section_);
  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));

  if (merged_index == -1) {
    // Not merged, so update the appropriate count.
    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
  }

  // Check if we still have room for another entry in the small section given
  // ARM's ldr and vldr immediate offset range.
  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
      !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
        is_uint10(ConstantPoolArray::MaxInt64Offset(
            small_entries()->count_of(ConstantPoolArray::INT64))))) {
    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
  }
  return entry_section;
}
void ConstantPoolBuilder::Relocate(int pc_delta) {
  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
       entry != entries_.end(); entry++) {
    DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
  }
}
Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
  if (IsEmpty()) {
    return isolate->factory()->empty_constant_pool_array();
  } else if (extended_entries()->is_empty()) {
    return isolate->factory()->NewConstantPoolArray(*small_entries());
  } else {
    DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
    return isolate->factory()->NewExtendedConstantPoolArray(
        *small_entries(), *extended_entries());
  }
}
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  DCHECK_EQ(extended_entries()->is_empty(),
            !constant_pool->is_extended_layout());
  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
      constant_pool, ConstantPoolArray::SMALL_SECTION)));
  if (constant_pool->is_extended_layout()) {
    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
  }

  // Set up initial offsets for each type of entry in each section.
  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
             [ConstantPoolArray::NUMBER_OF_TYPES];
  for (int section = 0; section <= constant_pool->final_section(); section++) {
    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
                            ? small_entries()->total_count()
                            : 0;
    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
      if (number_of_entries_[section].count_of(type) != 0) {
        offsets[section][type] = constant_pool->OffsetOfElementAt(
            number_of_entries_[section].base_of(type) + section_start);
      }
    }
  }

  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
       entry != entries_.end(); entry++) {
    RelocInfo rinfo = entry->rinfo_;
    RelocInfo::Mode rmode = entry->rinfo_.rmode();
    ConstantPoolArray::Type type = GetConstantPoolType(rmode);

    // Update constant pool if necessary and get the entry's offset.
    int offset;
    if (entry->merged_index_ == -1) {
      offset = offsets[entry->section_][type];
      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
      if (type == ConstantPoolArray::INT64) {
        constant_pool->set_at_offset(offset, rinfo.data64());
      } else if (type == ConstantPoolArray::INT32) {
        constant_pool->set_at_offset(offset,
                                     static_cast<int32_t>(rinfo.data()));
      } else if (type == ConstantPoolArray::CODE_PTR) {
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Address>(rinfo.data()));
      } else {
        DCHECK(type == ConstantPoolArray::HEAP_PTR);
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Object*>(rinfo.data()));
      }
      offset -= kHeapObjectTag;
      entry->merged_index_ = offset;  // Stash offset for merged entries.
    } else {
      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
      offset = entries_[entry->merged_index_].merged_index_;
    }

    // Patch load instruction with correct offset.
    Instr instr = assm->instr_at(rinfo.pc());
    if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
        Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
        DCHECK((Assembler::IsMovW(instr) &&
                Instruction::ImmedMovwMovtValue(instr) == 0));
        DCHECK((Assembler::IsMovT(next_instr) &&
                Instruction::ImmedMovwMovtValue(next_instr) == 0));
        assm->instr_at_put(
            rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
        assm->instr_at_put(
            rinfo.pc() + Assembler::kInstrSize,
            Assembler::PatchMovwImmediate(next_instr, offset >> 16));
      } else {
        // Instructions to patch must be 'mov rd, [#0]' and three
        // 'orr rd, rd, [#0]'.
        Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
        Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
        Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
        DCHECK((Assembler::IsMovImmed(instr) &&
                Instruction::Immed8Value(instr) == 0));
        DCHECK((Assembler::IsOrrImmed(instr_2) &&
                Instruction::Immed8Value(instr_2) == 0) &&
               Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
        DCHECK((Assembler::IsOrrImmed(instr_3) &&
                Instruction::Immed8Value(instr_3) == 0) &&
               Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
        DCHECK((Assembler::IsOrrImmed(instr_4) &&
                Instruction::Immed8Value(instr_4) == 0) &&
               Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
        assm->instr_at_put(
            rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
        assm->instr_at_put(
            rinfo.pc() + Assembler::kInstrSize,
            Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
        assm->instr_at_put(
            rinfo.pc() + 2 * Assembler::kInstrSize,
            Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
        assm->instr_at_put(
            rinfo.pc() + 3 * Assembler::kInstrSize,
            Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
      }
    } else if (type == ConstantPoolArray::INT64) {
      // Instruction to patch must be 'vldr rd, [pp, #0]'.
      DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
      DCHECK(is_uint10(offset));
      assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
                                         instr, offset));
    } else {
      // Instruction to patch must be 'ldr rd, [pp, #0]'.
      DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
      DCHECK(is_uint12(offset));
      assm->instr_at_put(
          rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
    }
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM