#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS
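
// CpuFeatures::ProbeImpl (excerpt below): ALWAYS_ALIGN_CSP keeps csp 16-byte
// aligned at all times. It is turned on when probing detects an NVIDIA
// implementer, or whenever --debug-code is set.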

if (FLAG_enable_always_align_csp &&
    (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
  // ...
}

bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}

void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }
  // ...
}

Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  // ...
}
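
// AreAliased returns true if any of the named (valid) registers overlap.
// General-purpose and FP registers are tracked separately: if a register
// appears more than once, the count of unique register bits of that type
// falls below the count of valid registers of that type.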

bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    }
  }

  int number_of_unique_regs = CountSetBits(unique_regs, kRegListSizeInBits);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, kRegListSizeInBits);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
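
// AreSameSizeAndType returns true if all of the specified registers have the
// same size and type as reg1. Invalid registers (the NoCPUReg defaults) are
// ignored, so at least reg1 must be valid.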

bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}

if (obj->IsHeapObject()) {
  DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
  // ...
} else {
  value_ = reinterpret_cast<intptr_t>(obj);
}

return assembler->serializer_enabled();

uint64_t raw_data = static_cast<uint64_t>(data);
// ...
std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);

Assembler::BlockPoolsScope block_pools(assm_);
// ...
if (after_pool.is_linked()) {
  // ...
}
// ...
DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
       static_cast<unsigned>(size));

MemOperand::PairResult MemOperand::AreConsistentForPair(
    const MemOperand& operandA, const MemOperand& operandB,
    int access_size_log2) {
  DCHECK(access_size_log2 >= 0);
  DCHECK(access_size_log2 <= 3);
  // Step one: check that they share the same base, that the mode is Offset
  // and that the offset is a multiple of the access size.
  if (!operandA.base().Is(operandB.base()) ||
      (operandA.addrmode() != Offset) ||
      (operandB.addrmode() != Offset) ||
      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
    return kNotPair;
  }
  // Step two: check that the offsets are contiguous and in ldp/stp range.
  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
      is_int7(operandA.offset() >> access_size_log2)) {
    return kPairAB;
  }
  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
      is_int7(operandB.offset() >> access_size_log2)) {
    return kPairBA;
  }
  return kNotPair;
}

Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
DCHECK(instr->preceding()->IsLdrLiteralX() &&
       instr->preceding()->Rt() == xzr.code());
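
// ConstPool::EmitEntries (excerpt below): shared entries (one pool slot
// referenced by several loads) are emitted first, then unique entries. Each
// pending 'ldr <reg>, #0' literal load is patched to point at its pool slot.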

typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
SharedEntriesIterator value_it;
// ...
  std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
  uint64_t data = value_it->first;
  // ...
  SharedEntriesIterator offset_it;
  for (offset_it = range.first; offset_it != range.second; offset_it++) {
    // ...
    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
    instr->SetImmPCOffsetTarget(assm_->pc());
  }
// ...
std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
// ...
  DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
  instr->SetImmPCOffsetTarget(assm_->pc());

Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      constpool_(this),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}

Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
}

void Assembler::Reset() {
  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
  // ...
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
  ClearRecordedAstId();
}

void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(constpool_.IsEmpty());

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                       reloc_info_writer.pos();
  }
}

while ((pc_offset() & (m - 1)) != 0) {
  nop();
}

void Assembler::CheckLabelLinkChain(Label const* label) {
  if (label->is_linked()) {
    int linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      Instruction* link = InstructionAt(linkoffset);
      int linkpcoffset = link->ImmPCOffset();
      int prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
}
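
// A label's link chain threads through the instructions that refer to it:
// each linked instruction's ImmPCOffset points at the previous link, and the
// first link in the chain points at itself. RemoveBranchFromLabelLinkChain
// unlinks one branch, optionally redirecting the remaining links through a
// veneer when they could no longer reach each other directly.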

void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  DCHECK(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  DCHECK(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only
      // branch currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
    }
  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the
    // chain.
    prev_link->SetImmPCOffsetTarget(prev_link);
  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(label_veneer);
        link = next_link;
      }
    } else {
      // Without a veneer, the previous link must be able to reach the next.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
    }
  }

  CheckLabelLinkChain(label);
}

void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.
  DCHECK(!label->is_near_linked());
  DCHECK(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // Walk the label's link chain, updating each link to point to the label.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + link->ImmPCOffset();

    CheckLabelLinkChain(label);

    DCHECK(linkoffset < pc_offset());
    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    DCHECK(prevlinkoffset >= 0);

    // Update the link to point to the label.
    link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  DCHECK(label->is_bound());
  DCHECK(!label->is_linked());
}
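
// LinkAndGetByteOffsetTo returns the byte offset from pc_ to the label:
// negative and final for a bound label, the previous link for a linked label,
// or kStartOfLabelLinkChain for a fresh label. In the unbound cases the
// instruction about to be emitted becomes the new end of the link chain.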

int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  DCHECK(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    offset = label->pos() - pc_offset();
    DCHECK(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added
      // onto the end of the label's link chain.
      offset = label->pos() - pc_offset();
      DCHECK(offset != kStartOfLabelLinkChain);
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}

void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction* link = InstructionAt(link_offset);
    link_pcoffset = link->ImmPCOffset();

    // Branches are stored in unresolved_branches_ keyed by the highest pc
    // they can reach; find and erase the entry for this link.
    if (link->IsImmBranch()) {
      int max_reachable_pc = InstructionOffset(link) +
                             Instruction::ImmBranchRange(link->BranchType());
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}

void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label are pending resolution; remove their entries
    // from unresolved_branches_.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}

void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks from happening by setting the next check
    // to the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}

void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    DCHECK(pc_offset() < constpool_.MaxPcOffset());
    // Trigger a check on the next emitted instruction.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}

bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}
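
// The constant pool marker is 'ldr xzr, #<pool size>' followed by a
// 'blr xzr' guard: the JIT never emits either pattern as ordinary code, so
// falling into a pool traps instead of executing data as instructions.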

bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. They will never be
  // emitted by the JIT, so checking for the first one is enough:
  //   0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());

  // It is still worth asserting the marker is complete.
  //   4: blr xzr
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == xzr.code()));

  return result;
}

int Assembler::ConstantPoolSizeAt(Instruction* instr) {
  // Assembler::debug() embeds constants directly into the instruction
  // stream; skip over the embedded message when computing the size.
  // ...
  const char* message =
      reinterpret_cast<const char*>(
          instr->InstructionAtOffset(kDebugMessageOffset));
  // ...
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}

void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}

void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}

void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}

void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}

void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  // 'blr xzr' is the constant pool guard pattern; it must not be emitted
  // here.
  DCHECK(xn.Is64Bits() && !xn.Is(xzr));
  Emit(BLR | Rn(xn));
}

void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}

void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}

void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}

void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}

void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}

void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}

void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}

void Assembler::cbz(const Register& rt, int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbz(const Register& rt, Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::cbnz(const Register& rt, int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbnz(const Register& rt, Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
  positions_recorder()->WriteRecordedPositions();
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
  positions_recorder()->WriteRecordedPositions();
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::adr(const Register& rd, int imm21) {
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}

void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}

void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}

void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}

void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}

void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}

void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}

void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}

void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}

void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}

void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}

void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}

void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}

void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}

void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}

void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}

void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}

void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}

void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}

void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}

void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}

void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}

void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}

void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}

void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::bfm(const Register& rd,
                    const Register& rn,
                    unsigned immr,
                    unsigned imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}

void Assembler::sbfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  DCHECK(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}

void Assembler::ubfm(const Register& rd,
                     const Register& rn,
                     unsigned immr,
                     unsigned imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}

void Assembler::extr(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     unsigned lsb) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}

void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}

void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}

void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}

void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}

void Assembler::cset(const Register& rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, NegateCondition(cond));
}

void Assembler::csetm(const Register& rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, NegateCondition(cond));
}

void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinc(rd, rn, rn, NegateCondition(cond));
}

void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinv(rd, rn, rn, NegateCondition(cond));
}

void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csneg(rd, rn, rn, NegateCondition(cond));
}

void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}

void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}

void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}

void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}

void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}

void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}

void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}

void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}

void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}

void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}

void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}

void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}

void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(rd.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}

void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}

void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}

void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}

void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  DCHECK(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV32_x);
}

void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}

void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}

void Assembler::cls(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}

void Assembler::ldp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}

void Assembler::stp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}

void Assembler::ldpsw(const Register& rt,
                      const Register& rt2,
                      const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}

void Assembler::LoadStorePair(const CPURegister& rt,
                              const CPURegister& rt2,
                              const MemOperand& addr,
                              LoadStorePairOp op) {
  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(addr.offset(), CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    DCHECK(!rt2.Is(addr.base()));
    DCHECK(addr.offset() != 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      DCHECK(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}

void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}

void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}

void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  DCHECK(addr.IsImmediateOffset());

  LSDataSize size = CalcLSPairDataSize(
      static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
       ImmLSPair(addr.offset(), size));
}

void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}

void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}

void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}

void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}

void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}

void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}

void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}

void Assembler::str(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, StoreOpFor(rt));
}

void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}

void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
  // constant pool. It should not be emitted.
  DCHECK(!rt.IsZero());
  Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}

void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
  RecordRelocInfo(imm.rmode(), imm.value());
  BlockConstPoolFor(1);
  // The load will be patched when the constant pool is emitted; patching
  // code expects a load literal with offset 0.
  ldr_pcrel(rt, 0);
}

void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with
  // second operand of zero. Otherwise, orr with first operand zr is used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}

void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}

void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}

void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}

void Assembler::hint(SystemHint code) {
  Emit(HINT | ImmHint(code) | Rt(xzr));
}

void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}

void Assembler::fmov(FPRegister fd, double imm) {
  DCHECK(fd.Is64Bits());
  DCHECK(IsImmFP64(imm));
  Emit(FPType(fd) | FMOV_d_imm | Rd(fd) | ImmFP64(imm));
}

void Assembler::fmov(FPRegister fd, float imm) {
  DCHECK(fd.Is32Bits());
  DCHECK(IsImmFP32(imm));
  Emit(FPType(fd) | FMOV_s_imm | Rd(fd) | ImmFP32(imm));
}

void Assembler::fmov(Register rd, FPRegister fn) {
  DCHECK(rd.SizeInBits() == fn.SizeInBits());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}

void Assembler::fmov(FPRegister fd, Register rn) {
  DCHECK(fd.SizeInBits() == rn.SizeInBits());
  FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(fd) | Rn(rn));
}

void Assembler::fmov(FPRegister fd, FPRegister fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
}

void Assembler::fadd(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FADD);
}

void Assembler::fsub(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FSUB);
}

void Assembler::fmul(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMUL);
}

void Assembler::fmadd(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}

void Assembler::fmsub(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}

void Assembler::fnmadd(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa,
                          fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}

void Assembler::fnmsub(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm,
                       const FPRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa,
                          fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}

void Assembler::fdiv(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FDIV);
}

void Assembler::fmax(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAX);
}

void Assembler::fmaxnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMAXNM);
}

void Assembler::fmin(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMIN);
}

void Assembler::fminnm(const FPRegister& fd,
                       const FPRegister& fn,
                       const FPRegister& fm) {
  FPDataProcessing2Source(fd, fn, fm, FMINNM);
}

void Assembler::fabs(const FPRegister& fd,
                     const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FABS);
}

void Assembler::fneg(const FPRegister& fd,
                     const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FNEG);
}

void Assembler::fsqrt(const FPRegister& fd,
                      const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FSQRT);
}

void Assembler::frinta(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTA);
}

void Assembler::frintm(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTM);
}

void Assembler::frintn(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTN);
}

void Assembler::frintz(const FPRegister& fd,
                       const FPRegister& fn) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  FPDataProcessing1Source(fd, fn, FRINTZ);
}

void Assembler::fcmp(const FPRegister& fn,
                     const FPRegister& fm) {
  DCHECK(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}

void Assembler::fcmp(const FPRegister& fn,
                     double value) {
  // ...
}

void Assembler::fccmp(const FPRegister& fn,
                      const FPRegister& fm,
                      StatusFlags nzcv,
                      Condition cond) {
  DCHECK(fn.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}

void Assembler::fcsel(const FPRegister& fd,
                      const FPRegister& fn,
                      const FPRegister& fm,
                      Condition cond) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}

void Assembler::FPConvertToInt(const Register& rd,
                               const FPRegister& fn,
                               FPIntegerConvertOp op) {
  Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
}

void Assembler::fcvt(const FPRegister& fd,
                     const FPRegister& fn) {
  if (fd.Is64Bits()) {
    // Convert float to double.
    FPDataProcessing1Source(fd, fn, FCVT_ds);
  } else {
    // Convert double to float.
    FPDataProcessing1Source(fd, fn, FCVT_sd);
  }
}

void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAU);
}

void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTAS);
}

void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMU);
}

void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTMS);
}

void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNU);
}

void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTNS);
}

void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZU);
}

void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
  FPConvertToInt(rd, fn, FCVTZS);
}

void Assembler::scvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}

void Assembler::ucvtf(const FPRegister& fd,
                      const Register& rn,
                      unsigned fbits) {
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}
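
// ImmFP32/ImmFP64 assemble the 8-bit FMOV immediate field from the raw bit
// pattern of the value: bit 7 is the sign, bit 6 comes from the exponent,
// and bits 5..0 are the six most significant remaining exponent/fraction
// bits. Only values already validated by IsImmFP32/IsImmFP64 fit this form.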

Instr Assembler::ImmFP32(float imm) {
  DCHECK(IsImmFP32(imm));
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}

Instr Assembler::ImmFP64(double imm) {
  DCHECK(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;

  return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
}
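
// MoveWide emits movz/movn/movk. When no shift is specified the 16-bit
// payload and its hw shift (0, 16, 32 or 48) are derived from the immediate
// itself, as in the chain of tests below.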

void Assembler::MoveWide(const Register& rd,
                         uint64_t imm,
                         int shift,
                         MoveWideImmediateOp mov_op) {
  if (rd.Is32Bits()) {
    // ...
    imm &= kWRegMask;
  }
  // ...
    // Calculate a new immediate and shift combination to encode the
    // immediate argument.
    if ((imm & ~0xffffUL) == 0) {
      shift = 0;
    } else if ((imm & ~(0xffffUL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xffffUL << 32)) == 0) {
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xffffUL << 48)) == 0) {
      imm >>= 48;
      shift = 3;
    }
  // ...
  Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
       Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
}

void Assembler::AddSub(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(immediate) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    // ...
    if (rn.IsSP() || rd.IsSP()) {
      // Convert the operand from shifted to extended register mode and emit
      // an add/sub extended instruction.
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    DCHECK(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}

void Assembler::AddSubWithCarry(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
  DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}

void Assembler::hlt(int code) {
  DCHECK(is_uint16(code));
  Emit(HLT | ImmException(code));
}

void Assembler::brk(int code) {
  DCHECK(is_uint16(code));
  Emit(BRK | ImmException(code));
}

void Assembler::EmitStringData(const char* string) {
  size_t len = strlen(string) + 1;
  EmitData(string, len);
  // Pad with NULL characters until pc_ is aligned.
  const char pad[] = {'\0', '\0', '\0', '\0'};
  STATIC_ASSERT(sizeof(pad) == kInstructionSize);
  EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
}

void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
  // Don't generate simulator specific code if we are building a snapshot,
  // which might be run on real hardware.
  if (!serializer_enabled()) {
    // The arguments to the debug marker need to be contiguous in memory, so
    // make sure we don't try to emit pools.
    BlockPoolsScope scope(this);
    // ...
    EmitStringData(message);
    // ...
  }
#endif

  if (params & BREAK) {
    // ...
  }
}

void Assembler::Logical(const Register& rd,
                        const Register& rn,
                        const Operand& operand,
                        LogicalOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else {
    DCHECK(operand.IsShiftedRegister());
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}

void Assembler::LogicalImmediate(const Register& rd,
                                 const Register& rn,
                                 unsigned n,
                                 unsigned imm_s,
                                 unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.SizeInBits();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}

void Assembler::ConditionalCompare(const Register& rn,
                                   const Operand& operand,
                                   StatusFlags nzcv,
                                   Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op |
             ImmCondCmp(static_cast<unsigned>(immediate));
  } else {
    DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}

void Assembler::DataProcessing1Source(const Register& rd,
                                      const Register& rn,
                                      DataProcessing1SourceOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}

void Assembler::FPDataProcessing1Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
}

void Assembler::FPDataProcessing2Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}

void Assembler::FPDataProcessing3Source(const FPRegister& fd,
                                        const FPRegister& fn,
                                        const FPRegister& fm,
                                        const FPRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}

void Assembler::EmitShift(const Register& rd,
                          const Register& rn,
                          Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL: lsl(rd, rn, shift_amount); break;
    case LSR: lsr(rd, rn, shift_amount); break;
    case ASR: asr(rd, rn, shift_amount); break;
    case ROR: ror(rd, rn, shift_amount); break;
    default: UNREACHABLE();
  }
}

void Assembler::EmitExtendShift(const Register& rd,
                                const Register& rn,
                                Extend extend,
                                unsigned left_shift) {
  DCHECK(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();
  // Use the correct size of register.
  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
      case SXTB:
      case SXTH:
      case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
      case UXTX:
      case SXTX:
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      default: UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}

void Assembler::DataProcShiftedRegister(const Register& rd,
                                        const Register& rn,
                                        const Operand& operand,
                                        FlagsUpdate S,
                                        Instr op) {
  DCHECK(operand.IsShiftedRegister());
  DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) |
       ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
       Rm(operand.reg()) | Rn(rn) | Rd(rd));
}

void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
  DCHECK(!operand.NeedsRelocation(this));
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}

bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
}
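
// LoadStore selects among the ARM64 addressing modes: scaled unsigned
// immediate, unscaled signed immediate (9 bits), register offset with
// optional extend/shift, and pre-/post-index writeback.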

void Assembler::LoadStore(const CPURegister& rt,
                          const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());
  int64_t offset = addr.offset();

  if (addr.IsImmediateOffset()) {
    LSDataSize size = CalcLSDataSize(op);
    if (IsImmLSScaled(offset, size)) {
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(offset)) {
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    DCHECK((shift_amount == 0) ||
           (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    if (IsImmLSUnscaled(offset)) {
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        DCHECK(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  }
}

bool Assembler::IsImmLSUnscaled(int64_t offset) {
  return is_int9(offset);
}

bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_uint12(offset >> size);
}

bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
  return offset_is_size_multiple && is_int7(offset >> size);
}
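
// IsImmLogical tests whether 'value' is representable as a logical-immediate
// bitmask: a pattern of 2, 4, 8, 16, 32 or 64 bits, each a rotated contiguous
// run of set bits, repeated across the register. On success the n:imms:immr
// encoding fields are returned through the out parameters.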

bool Assembler::IsImmLogical(uint64_t value,
                             unsigned width,
                             unsigned* n,
                             unsigned* imm_s,
                             unsigned* imm_r) {
  bool negate = false;
  // ...
  uint64_t a = LargestPowerOf2Divisor(value);
  uint64_t value_plus_a = value + a;
  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
  uint64_t value_plus_a_minus_b = value_plus_a - b;
  // ...
  int d, clz_a, out_n;
  // ...
  if (((b - a) & ~mask) != 0) {
    return false;
  }
  // ...
  static const uint64_t multipliers[] = {
    0x0000000000000001UL,
    0x0000000100000001UL,
    0x0001000100010001UL,
    0x0101010101010101UL,
    0x1111111111111111UL,
    0x5555555555555555UL,
  };
  // ...
  DCHECK((multiplier_idx >= 0) &&
         (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
  uint64_t multiplier = multipliers[multiplier_idx];
  uint64_t candidate = (b - a) * multiplier;

  if (value != candidate) {
    // The candidate pattern doesn't match our input value, so fail.
    return false;
  }
  // ...
  int s = clz_a - clz_b;
  // ...
    r = (clz_b + 1) & (d - 1);
  // ...
    r = (clz_a + 1) & (d - 1);
  // ...
  *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
  // ...
}

bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}
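
// IsImmFP32/IsImmFP64 test whether a value fits the 8-bit FMOV immediate
// form: the low-order mantissa bits clear, the replicated exponent bits all
// equal, and the two top exponent bits opposite.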

bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form:
  //   aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = float_to_rawbits(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7ffff) != 0) {
    return false;
  }
  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00) {
    return false;
  }
  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return false;
  }
  return true;
}

bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  //   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //   0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = double_to_rawbits(imm);
  // bits[47..0] are cleared.
  if ((bits & 0xffffffffffffL) != 0) {
    return false;
  }
  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0) {
    return false;
  }
  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
    return false;
  }
  return true;
}

void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }

  byte* buffer = reinterpret_cast<byte*>(buffer_);

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);
  // ...
}
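
// RecordRelocInfo: pseudo relocations (positions, comments, pool markers)
// are written straight to the reloc info stream; everything else gets an
// entry in the constant pool, and pool emission is blocked for one
// instruction so the pool cannot land between the reloc info and the
// instruction it describes.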

void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      (rmode == RelocInfo::VENEER_POOL)) {
    // These modes do not need an entry in the constant pool.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || RelocInfo::IsVeneerPool(rmode));
  } else {
    constpool_.RecordEntry(data, rmode);
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }

  if (!RelocInfo::IsNone(rmode)) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(
          reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}

void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;
  if (no_const_pool_before_ < pc_limit) {
    no_const_pool_before_ = pc_limit;
    // Make sure the pool won't be blocked for too long.
    DCHECK(pc_limit < constpool_.MaxPcOffset());
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}

void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by a BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (constpool_.IsEmpty()) {
    // Calculate the offset of the next check.
    SetNextConstPoolCheckIn(kCheckConstPoolInterval);
    return;
  }

  // Emit the pool if it is forced, if the distance to the first use is
  // getting large, or if the pool holds many entries.
  int dist = constpool_.DistanceToFirstUse();
  int count = constpool_.EntryCount();
  if (!force_emit &&
      (dist < kApproxMaxDistToConstPool) &&
      (count < kApproxMaxPoolEntryCount)) {
    return;
  }

  // Emit veneers for branches that would go out of range during emission of
  // the constant pool.
  int worst_case_size = constpool_.WorstCaseSize();
  CheckVeneerPool(false, require_jump,
                  kVeneerDistanceMargin + worst_case_size);

  // Check that the code buffer is large enough before emitting the pool.
  int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  Label size_check;
  bind(&size_check);
  constpool_.Emit(require_jump);
  DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
         static_cast<unsigned>(worst_case_size));

  SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}
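
// Veneer pools: branches with limited range (tbz/tbnz: +/-32KB, cbz/cbnz and
// conditional branches: +/-1MB) are tracked in unresolved_branches_, keyed
// by the highest pc they can reach. When a branch risks going out of range,
// a veneer (an unconditional branch to the label) is emitted and the
// original branch is retargeted to it.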

bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  // Account for the branch around the veneers and the guard.
  int protection_offset = 2 * kInstructionSize;
  return pc_offset() > max_reachable_pc - margin - protection_offset -
         static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}

void Assembler::RecordVeneerPool(int location_offset, int size) {
  RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL,
                  static_cast<intptr_t>(size), NULL);
  reloc_info_writer.Write(&rinfo);
}

void Assembler::EmitVeneers(bool force_emit, bool need_protection,
                            int margin) {
  BlockPoolsScope scope(this);
  RecordComment("[ Veneers");

  // The exact size of the veneer pool is recorded after emission, via
  // RecordVeneerPool.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    b(&end);
  }
  // ...

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    if (force_emit || ShouldEmitVeneer(it->first, margin)) {
      Instruction* branch = InstructionAt(it->second.pc_offset_);
      Label* label = it->second.label_;

      bind(&veneer_size_check);

      // Patch the branch to point to the current position, and emit a branch
      // to the label.
      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
      RemoveBranchFromLabelLinkChain(branch, label, veneer);
      branch->SetImmPCOffsetTarget(veneer);
      b(label);

      DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
             static_cast<uint64_t>(kMaxVeneerCodeSize));
      veneer_size_check.Unuse();

      it_to_delete = it++;
      unresolved_branches_.erase(it_to_delete);
    } else {
      it++;
    }
  }

  // Record the veneer pool size.
  int pool_size = SizeOfCodeGeneratedSince(&size_check);
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  bind(&end);
  RecordComment("]");
}

void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                int margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  DCHECK(pc_offset() < unresolved_branches_first_limit());

  // Some short sequences of instructions must not be broken up by veneer
  // pool emission; such sequences are protected by a BlockVeneerPoolScope.
  if (is_veneer_pool_blocked()) {
    DCHECK(!force_emit);
    return;
  }

  if (!require_jump) {
    // Prefer emitting veneers protected by an existing instruction.
    margin *= kVeneerNoProtectionFactor;
  }
  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}

void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}

int Assembler::buffer_space() const {
  return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
}

void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}

void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}

void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}

Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
}

void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
}
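
// PatchAdrFar rewrites a patchable 'adr rd, 0' plus nop sequence into
// adr/movz/movk/add so the resulting address can lie outside adr's +/-1MB
// immediate range; only 48-bit targets are supported.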

void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
  // Verify the expected code: adr rd, 0 / nops / movz scratch, 0.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    // ...
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
  CHECK(expected_movz->IsMovz() &&
        (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();

  // Patch to load the correct address.
  Register rd = Register::XRegFromCode(rd_code);
  Register scratch = Register::XRegFromCode(scratch_code);
  // Addresses are only 48 bits.
  adr(rd, target_offset & 0xFFFF);
  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
  DCHECK((target_offset >> 48) == 0);
  add(rd, rd, scratch);
}

#endif  // V8_TARGET_ARCH_ARM64