#if V8_TARGET_ARCH_ARM64

    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
      allow_macro_instructions_(true),
      use_real_aborts_(true),
      tmp_list_(DefaultTmpList()),
      fptmp_list_(DefaultFPTmpList()) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),


CPURegList MacroAssembler::DefaultTmpList() {
  return CPURegList(ip0, ip1);
}


CPURegList MacroAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}
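

// ip0 and ip1 (x16 and x17, the architectural intra-procedure-call scratch
// registers) form the default temporary list handed out by
// UseScratchRegisterScope, and fp_scratch1/fp_scratch2 are the floating-point
// counterparts. Any macro instruction below may clobber these registers, so
// generated code must not keep live values in them across a macro call.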
void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Operand& operand,
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();
      immediate = ~immediate;
    DCHECK(rd.Is64Bits() || is_uint32(immediate));
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      unsigned n, imm_s, imm_r;
      if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
        LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
        Register temp = temps.AcquireSameSizeAs(rn);
        Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
        Logical(temp, rn, imm_operand, op);
        AssertStackConsistency();
        Logical(rd, rn, imm_operand, op);

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
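

// A note on the immediate path above: AArch64 logical instructions can only
// encode "bitmask immediates" (a rotated, repeating run of contiguous set
// bits), and IsImmLogical() performs that encodability check. For example,
// 0x0000ffff and 0x00ff00ff are encodable, while an arbitrary value such as
// 0x12345678 is not and has to be materialised in a scratch register first
// (or folded into a shifted operand via MoveImmediateForShiftedOp).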
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions_);
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());

  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;

    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    DCHECK((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          movn(temp, (~imm16) & 0xffffL, 16 * i);
          movz(temp, imm16, 16 * i);
          first_mov_done = true;
          movk(temp, imm16, 16 * i);

    AssertStackConsistency();
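

// The loop above synthesises a wide immediate halfword by halfword. The
// halfword value that occurs most often (0x0000 or 0xffff) is skipped; the
// first emitted instruction is movz (or movn when the ignored halfword is
// 0xffff) and every further halfword is patched in with movk. For example,
// Mov(x0, 0x0000cafe00001234) becomes roughly:
//   movz x0, #0x1234
//   movk x0, #0xcafe, lsl #32
// while Mov(x0, 0xffff5678ffff1234), whose halfwords are mostly 0xffff, is:
//   movn x0, #0xedcb            // ~0xedcb == 0xffffffffffff1234
//   movk x0, #0x5678, lsl #32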
void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
  DCHECK(allow_macro_instructions_);

  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    Ldr(dst, operand.immediate());

  } else if (operand.IsImmediate()) {
    Mov(dst, operand.ImmediateValue());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
      Assembler::mov(rd, operand.reg());

    Assembler::mov(rd, dst);
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  DCHECK(allow_macro_instructions_);

  if (operand.NeedsRelocation(this)) {
    Ldr(rd, operand.immediate());

  } else if (operand.IsImmediate()) {
    Mov(rd, ~operand.ImmediateValue());

  } else if (operand.IsExtendedRegister()) {
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size % 8) == 0);
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {


bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}
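

// IsImmMovz() holds when at most one halfword of the value is non-zero, so a
// single movz can produce it; IsImmMovn() holds when at most one halfword of
// the bitwise complement is non-zero, so a single movn suffices. For instance,
// 0x00000000cafe0000 satisfies IsImmMovz, and 0xffffffffffffedcb satisfies
// IsImmMovn.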
void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    ConditionalCompare(rn, operand, nzcv, cond, op);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    ConditionalCompare(rn, temp, nzcv, cond, op);
void MacroAssembler::Csel(const Register& rd,
                          const Operand& operand,
  DCHECK(allow_macro_instructions_);

  if (operand.IsImmediate()) {
    int64_t imm = operand.ImmediateValue();
    Register zr = AppropriateZeroRegFor(rn);
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      csel(rd, rn, temp, cond);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    csel(rd, rn, operand.reg(), cond);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    csel(rd, rn, temp, cond);
bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();

  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
  int reg_size = dst.SizeInBits();

  if (TryOneInstrMoveImmediate(dst, imm)) {
    int64_t imm_low = imm >> shift_low;
    int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      return Operand(dst, LSR, shift_high);
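

// When the whole immediate cannot be produced by a single instruction, this
// helper tries two variants that push the difference into the operand shift
// of the consuming instruction: a copy with the trailing zero bits stripped
// (moved in one instruction, then used as "dst, LSL #shift_low") and a copy
// with the leading bits shifted out (used as "dst, LSR #shift_high"). If
// neither variant is encodable, the immediate is simply materialised with a
// full Mov sequence and returned as a plain register operand.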
void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Operand& operand,
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {

  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
      AddSub(rd, rn, imm_operand, S, op);
      AddSub(rd, rn, temp, S, op);

    AddSub(rd, rn, operand, S, op);
void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Operand& operand,
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    Register temp = temps.AcquireSameSizeAs(rn);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    DCHECK(operand.shift_amount() <= 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

    AddSubWithCarry(rd, rn, operand, S, op);
void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
  int64_t offset = addr.offset();

  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    add(addr.base(), addr.base(), offset);

    LoadStore(rt, addr, op);
void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();

  if (IsImmLSPair(offset, size)) {
    LoadStorePair(rt, rt2, addr, op);

    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
    } else if (addr.IsPostIndex()) {
      Add(base, base, offset);

      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
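

// ldp/stp encode only a signed 7-bit offset scaled by the access size, so
// IsImmLSPair() accepts, for 64-bit registers, offsets that are multiples of 8
// in the range [-512, 504]. Anything else is rewritten above: the out-of-range
// offset is first added into the base (or a scratch register) and the pair
// access then goes through the adjusted base.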
void MacroAssembler::Load(const Register& rt,
  if (r.IsInteger8()) {
  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {
  } else if (r.IsInteger32()) {


void MacroAssembler::Store(const Register& rt,
  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {
  } else if (r.IsInteger32()) {

  if (r.IsHeapObject()) {
  } else if (r.IsSmi()) {
bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
  bool need_longer_range = false;
  if (label->is_bound() || label->is_linked()) {
        !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());

  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    next_veneer_pool_check_ =
        Min(next_veneer_pool_check_,
            max_reachable_pc - kVeneerDistanceCheckMargin);

  return need_longer_range;
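

// The immediate branch forms have limited reach: roughly +/-32KB for
// tbz/tbnz, +/-1MB for cbz/cbnz and conditional b, and +/-128MB for
// unconditional b. When a label may end up out of range, the branch site is
// recorded in unresolved_branches_ so the veneer pool can later insert a
// trampoline, and the Tbz/Cbz/B macros below emit an inverted short branch
// around an unconditional one instead of the plain instruction.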
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
  DCHECK(allow_macro_instructions_);

  if (hint == kAdrNear) {
  if (label->is_bound()) {
    int label_offset = label->pos() - pc_offset();
    if (Instruction::IsValidPCRelOffset(label_offset)) {
      DCHECK(label_offset <= 0);
      int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
      adr(rd, min_adr_offset);
      Add(rd, rd, label_offset - min_adr_offset);

    UseScratchRegisterScope temps(this);
    Register scratch = temps.AcquireX();

    InstructionAccurateScope scope(
        this, PatchingAssembler::kAdrFarPatchableNInstrs);
    for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {

    case reg_zero: Cbz(reg, label); break;
  DCHECK(allow_macro_instructions_);

  bool need_extra_instructions =
  if (need_extra_instructions) {


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  bool need_extra_instructions =
  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    tbnz(rt, bit_pos, label);


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  DCHECK(allow_macro_instructions_);

  bool need_extra_instructions =
  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    tbz(rt, bit_pos, label);


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  bool need_extra_instructions =
  if (need_extra_instructions) {


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  DCHECK(allow_macro_instructions_);

  bool need_extra_instructions =
  if (need_extra_instructions) {
                                 Label* is_not_representable,
                                 Label* is_representable) {
  DCHECK(allow_macro_instructions_);

  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
                          const CPURegister& src2, const CPURegister& src3) {
  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(count, size, src0, src1, src2, src3);


                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PushPreamble(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PopHelper(count, size, dst0, dst1, dst2, dst3);
  PopPostamble(count, size);


  int size = src0.SizeInBytes() + src1.SizeInBytes();
  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
void MacroAssembler::PushPopQueue::PushQueued(
    PreambleDirective preamble_directive) {
  if (queued_.empty()) return;

  if (preamble_directive == WITH_PREAMBLE) {
    masm_->PushPreamble(size_);

  int count = queued_.size();
  while (index < count) {
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  int count = queued_.size();
  while (index < count) {
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);

  masm_->PopPostamble(size_);
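

// The queue defers pushes and pops so that consecutive requests can be
// merged. When flushed, registers are grouped into batches of at most four
// with the same size and type (the do/while condition above), because
// PushHelper and PopHelper lower each batch to stp/ldp pairs, plus a single
// str/ldr when the batch size is odd.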
void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PushPreamble(registers.Count(), size);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);

  PopPostamble(registers.Count(), size);
void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PushPreamble(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    __ Mov(temp, count / 2);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);

  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    PushHelper(2, size, src, src, NoReg, NoReg);
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Subs(temp, count, 1);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Label loop, leftover2, leftover1, done;
    Subs(temp, count, 4);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);

    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  InstructionAccurateScope scope(this);
      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
      DCHECK(src2.IsNone() && src3.IsNone());


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  InstructionAccurateScope scope(this);
      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      DCHECK(dst2.IsNone() && dst3.IsNone());
void MacroAssembler::PushPreamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);

    BumpSystemStackPointer(total_size);


void MacroAssembler::PopPostamble(Operand total_size) {
  if (csp.Is(StackPointer())) {
    if (total_size.IsImmediate()) {
      DCHECK((total_size.ImmediateValue() % 16) == 0);

  } else if (emit_debug_code()) {
    SyncSystemStackPointer();
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Check(le, kStackAccessBelowStackPointer);

  Str(src, MemOperand(StackPointer(), offset));


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    DCHECK(offset.ImmediateValue() >= 0);
  } else if (emit_debug_code()) {
    Check(le, kStackAccessBelowStackPointer);

  Ldr(dst, MemOperand(StackPointer(), offset));
void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));


void MacroAssembler::PushCalleeSavedRegisters() {
  InstructionAccurateScope scope(this);
  DCHECK(csp.Is(StackPointer()));


void MacroAssembler::PopCalleeSavedRegisters() {
  InstructionAccurateScope scope(this);
  DCHECK(csp.Is(StackPointer()));
void MacroAssembler::AssertStackConsistency() {
  if (emit_debug_code() && use_real_aborts()) {
    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);
      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
        Abort(kTheCurrentStackPointerIsBelowCsp);
      sub(StackPointer(), csp, StackPointer());


void MacroAssembler::AssertFPCRState(Register fpcr) {
  if (emit_debug_code()) {
    Label unexpected_mode, done;
    UseScratchRegisterScope temps(this);
    if (fpcr.IsNone()) {
      fpcr = temps.AcquireX();

    Tbz(fpcr, DN_offset, &unexpected_mode);
    Tbnz(fpcr, FZ_offset, &unexpected_mode);
    Tst(fpcr, RMode_mask);

    Bind(&unexpected_mode);
    Abort(kUnexpectedFPCRMode);


void MacroAssembler::ConfigureFPCR() {
  UseScratchRegisterScope temps(this);
  Register fpcr = temps.AcquireX();

  Label no_write_required;
  Tbnz(fpcr, DN_offset, &no_write_required);

  Orr(fpcr, fpcr, DN_mask);

  Bind(&no_write_required);
  AssertFPCRState(fpcr);
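

// Generated code relies on a particular FPCR configuration: the DN (default
// NaN) bit set so NaN results are canonicalised in hardware, FZ (flush to
// zero) clear, and round-to-nearest rounding. ConfigureFPCR() sets DN if it
// is not already set, and AssertFPCRState() checks those bits in debug code.
// CanonicalizeNaN() below exploits the DN bit: subtracting zero leaves normal
// values unchanged but turns any NaN input into the default NaN.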
void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
                                     const FPRegister& src) {
  Fsub(dst, src, fp_zero);


void MacroAssembler::LoadRoot(CPURegister destination,
                              Heap::RootListIndex index) {


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));

    Mov(result, Operand(object));
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  And(dst, dst, Map::EnumLengthBits::kMask);


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  EnumLengthUntagged(dst, map);


void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Label* call_runtime) {
  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Mov(current_object, object);

  Register map = scratch2;
  Register enum_length = scratch3;
  EnumLengthUntagged(enum_length, map);
  B(eq, call_runtime);

  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

                                 JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Cmp(current_object, null_value);
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Add(scratch1, receiver,
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

  Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
      Operand(isolate()->factory()->allocation_memento_map()));
void MacroAssembler::JumpToHandlerEntry(Register exception,
                                        Register scratch2) {
  DCHECK(exception.Is(x0));

  Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  Lsr(scratch2, state, StackHandler::kKindWidth);
  Add(scratch1, scratch1, Operand::UntagSmi(scratch2));


void MacroAssembler::InNewSpace(Register object,
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
                           Register scratch4) {
  DCHECK(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,

  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  Cbz(cp, &not_js_frame);
  Bind(&not_js_frame);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);


void MacroAssembler::ThrowUncatchable(Register value,
                                      Register scratch4) {
  DCHECK(jssp.Is(StackPointer()));
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,

  Label fetch_next, check_kind;
  Peek(jssp, StackHandlerConstants::kNextOffset);

  Peek(scratch2, StackHandlerConstants::kStateOffset);
  TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);

  Register object = scratch1;
  Register state = scratch2;
  Pop(object, state, cp, fp);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {


void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
  if (emit_debug_code()) {


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    AssertNotSmi(object, kOperandIsASmiAndNotAName);

    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Check(ls, kOperandIsNotAName);


void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    Bind(&done_checking);


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Check(ne, kOperandIsASmiAndNotAString);
    Check(lo, kOperandIsNotAString);
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  DCHECK(AllowThisStubCall(stub));
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::CallRuntime(const Runtime::Function* f,
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  Mov(x0, num_arguments);
  Mov(x1, ExternalReference(f, isolate()));

  CEntryStub stub(isolate(), 1, save_doubles);


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),

  DCHECK(function_address.is(x1) || function_address.is(x2));

  Label profiler_disabled;
  Label end_profiler_check;
  Mov(x10, ExternalReference::is_profiling_address(isolate()));
  Cbz(w10, &profiler_disabled);
  B(&end_profiler_check);

  Bind(&profiler_disabled);
  Mov(x3, function_address);
  Bind(&end_profiler_check);

  Poke(x19, (spill_offset + 0) * kXRegSize);
  Poke(x20, (spill_offset + 1) * kXRegSize);
  Poke(x21, (spill_offset + 2) * kXRegSize);
  Poke(x22, (spill_offset + 3) * kXRegSize);

  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  Mov(handle_scope_base, next_address);
  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Add(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();

  DirectCEntryStub stub(isolate());
  stub.GenerateCall(this, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    Mov(x0, ExternalReference::isolate_address(isolate()));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  Ldr(x0, return_value_operand);
  Bind(&return_value_loaded);

  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  if (emit_debug_code()) {
    Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);

  Sub(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
  B(ne, &delete_allocated_handles);

  Bind(&leave_exit_frame);
  Peek(x19, (spill_offset + 0) * kXRegSize);
  Peek(x20, (spill_offset + 1) * kXRegSize);
  Peek(x21, (spill_offset + 2) * kXRegSize);
  Peek(x22, (spill_offset + 3) * kXRegSize);

  Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
  Bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    Ldr(cp, *context_restore_operand);

  LeaveExitFrame(false, x1, !restore_context);

  Bind(&promote_scheduled_exception);
  CallExternalReference(
          Runtime::kPromoteScheduledException, isolate()), 0);
  B(&exception_handled);

  Bind(&delete_allocated_handles);
  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Register saved_result = x19;
  Mov(saved_result, x0);
  Mov(x0, ExternalReference::isolate_address(isolate()));
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  Mov(x0, saved_result);
  B(&leave_exit_frame);
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  Mov(x0, num_arguments);

  CEntryStub stub(isolate(), 1);


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));


void MacroAssembler::GetBuiltinEntry(Register target,
                                     Builtins::JavaScript id) {
  GetBuiltinFunction(function, id);


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(x2, x1, id);
  call_wrapper.BeforeCall(CallSize(x2));
  call_wrapper.AfterCall();


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
  Mov(x0, num_arguments);
  JumpToExternalReference(ext);


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
  TailCallExternalReference(ExternalReference(fid, isolate()),


void MacroAssembler::InitializeNewString(Register string,
                                         Heap::RootListIndex map_index,
                                         Register scratch2) {
  LoadRoot(scratch2, map_index);
  SmiTag(scratch1, length);

  Mov(scratch2, String::kEmptyHashField);
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  return base::OS::ActivationFrameAlignment();
  return FLAG_sim_stack_alignment;


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  DCHECK(num_of_reg_args <= 8);

  if (num_of_double_args > 0) {
    DCHECK(num_of_reg_args <= 1);
    DCHECK((num_of_double_args + num_of_reg_args) <= 2);

  const Register old_stack_pointer = StackPointer();
  if (!csp.Is(old_stack_pointer)) {
    AssertStackConsistency();

    int sp_alignment = ActivationFrameAlignment();
    DCHECK(sp_alignment >= 16);
    Bic(csp, old_stack_pointer, sp_alignment - 1);
    SetStackPointer(csp);

  if (!csp.Is(old_stack_pointer)) {
    if (emit_debug_code()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      DCHECK(ActivationFrameAlignment() == 16);
      Sub(temp, csp, old_stack_pointer);
      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);

    SetStackPointer(old_stack_pointer);
void MacroAssembler::Jump(Register target) {


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, Operand(target, rmode));


void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}


void MacroAssembler::Call(Register target) {
  BlockPoolsScope scope(this);
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));


void MacroAssembler::Call(Label* target) {
  BlockPoolsScope scope(this);
  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));


void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
  BlockPoolsScope scope(this);
  positions_recorder()->WriteRecordedPositions();

  DCHECK(rmode != RelocInfo::NONE32);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (rmode == RelocInfo::NONE64) {
    uint64_t imm = reinterpret_cast<uint64_t>(target);
    DCHECK(((imm >> 48) & 0xffff) == 0);
    movz(temp, (imm >> 0) & 0xffff, 0);
    movk(temp, (imm >> 16) & 0xffff, 16);
    movk(temp, (imm >> 32) & 0xffff, 32);

    Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));

  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
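

// Call(Address) has two lowerings. Without relocation (RelocInfo::NONE64) the
// 48 significant bits of the address are synthesised inline with a
// movz/movk/movk sequence (the DCHECK above guarantees the top 16 bits are
// zero). With relocation the address is instead loaded from the constant pool
// via Ldr so the GC or the patcher can rewrite it; both forms then
// branch-and-link through the scratch register.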
void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;

  Call(reinterpret_cast<Address>(code.location()), rmode);

  AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));


int MacroAssembler::CallSize(Register target) {


int MacroAssembler::CallSize(Label* target) {


int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
  DCHECK(rmode != RelocInfo::NONE32);

  if (rmode == RelocInfo::NONE64) {
    return kCallSizeWithoutRelocation;

  return kCallSizeWithRelocation;


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id) {
  DCHECK(rmode != RelocInfo::NONE32);

  if (rmode == RelocInfo::NONE64) {
    return kCallSizeWithoutRelocation;

  return kCallSizeWithRelocation;
void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
  Label on_not_heap_number;

  JumpIfSmi(object, &on_not_heap_number);

  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);

  Bind(&on_not_heap_number);


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Label* on_not_heap_number,
  JumpIfSmi(object, on_not_heap_number);

  AssertNotSmi(object);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
void MacroAssembler::LookupNumberStringCache(Register object,
  Register number_string_cache = result;
  Register mask = scratch3;

  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
                                 FixedArray::kLengthOffset));

  Label load_result_from_cache;

  JumpIfSmi(object, &is_smi);
  JumpIfNotHeapNumber(object, not_found);

  Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
  Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
  Eor(scratch1, scratch1, scratch2);
  And(scratch1, scratch1, mask);

  Add(scratch1, number_string_cache,

  Register probe = mask;
  JumpIfSmi(probe, not_found);
  B(&load_result_from_cache);

  Register scratch = scratch1;
  And(scratch, mask, Operand::UntagSmi(object));
  Add(scratch, number_string_cache,

  Bind(&load_result_from_cache);
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
                   scratch1, scratch2);
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
                                             FPRegister scratch_d,
                                             Label* on_successful_conversion,
                                             Label* on_failed_conversion) {
  Fcvtzs(as_int, value);
  Scvtf(scratch_d, as_int);
  Fcmp(value, scratch_d);

  if (on_successful_conversion) {
    B(on_successful_conversion, eq);

  if (on_failed_conversion) {
    B(on_failed_conversion, ne);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

                                     Label* on_negative_zero) {
  TestForMinusZero(input);
  B(vs, on_negative_zero);


void MacroAssembler::JumpIfMinusZero(Register input,
                                     Label* on_negative_zero) {
  DCHECK(input.Is64Bits());
  B(vs, on_negative_zero);
void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
  Cmp(input.W(), Operand(input.W(), UXTB));
  Csel(output.W(), wzr, input.W(), lt);
  Csel(output.W(), output.W(), 255, le);
}


void MacroAssembler::ClampInt32ToUint8(Register in_out) {
  ClampInt32ToUint8(in_out, in_out);
}


void MacroAssembler::ClampDoubleToUint8(Register output,
  Fmov(dbl_scratch, 255);
  Fmin(dbl_scratch, dbl_scratch, input);
  Fcvtnu(output, dbl_scratch);
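

// ClampInt32ToUint8 compares the value with its own zero-extended low byte:
// equal means it is already in [0, 255], "lt" means the value is negative
// (select 0), and "gt" means it is above 255 (select 255). ClampDoubleToUint8
// bounds the upper end with Fmin against 255.0 and then relies on Fcvtnu,
// which rounds to nearest and saturates negative inputs at zero.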
void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
                                               Register scratch5) {
                     scratch1, scratch2, scratch3, scratch4, scratch5));

  const Register& remaining = scratch3;
  Mov(remaining, count / 2);

  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;

  Ldp(scratch4, scratch5,
  Stp(scratch4, scratch5,
  Sub(remaining, remaining, 1);
  Cbnz(remaining, &loop);


void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
                                                   Register scratch4) {
  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;

  for (unsigned i = 0; i < count / 2; i++) {


void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
                                              Register scratch3) {
  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;

  for (unsigned i = 0; i < count; i++) {
void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
  DCHECK(!temps.IncludesAliasOf(dst));
  DCHECK(!temps.IncludesAliasOf(src));
  DCHECK(!temps.IncludesAliasOf(xzr));

  if (emit_debug_code()) {
    Check(ne, kTheSourceAndDestinationAreTheSame);

  static const unsigned kLoopThreshold = 8;

  UseScratchRegisterScope masm_temps(this);
  if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
    CopyFieldsLoopPairsHelper(dst, src, count,
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              masm_temps.AcquireX(),
                              masm_temps.AcquireX());
  } else if (temps.Count() >= 2) {
    CopyFieldsUnrolledPairsHelper(dst, src, count,
                                  Register(temps.PopLowestIndex()),
                                  Register(temps.PopLowestIndex()),
                                  masm_temps.AcquireX(),
                                  masm_temps.AcquireX());
  } else if (temps.Count() == 1) {
    CopyFieldsUnrolledHelper(dst, src, count,
                             Register(temps.PopLowestIndex()),
                             masm_temps.AcquireX(),
                             masm_temps.AcquireX());
  UseScratchRegisterScope temps(this);
  Register tmp1 = temps.AcquireX();
  Register tmp2 = temps.AcquireX();

  if (emit_debug_code()) {
    Assert(ge, kUnexpectedNegativeValue);

    Add(scratch, src, length);
    Add(scratch, dst, length);
    Ccmp(scratch, src, ZFlag, gt);
    Assert(le, kCopyBuffersOverlap);

  Label short_copy, short_loop, bulk_loop, done;

  Register bulk_length = scratch;
  int pair_mask = pair_size - 1;

  Bic(bulk_length, length, pair_mask);
  Cbz(bulk_length, &short_copy);
  Sub(bulk_length, bulk_length, pair_size);
  Cbnz(bulk_length, &bulk_loop);
  And(length, length, pair_mask);

  Sub(length, length, 1);
  Cbnz(length, &short_loop);
void MacroAssembler::FillFields(Register dst,
                                Register field_count,
  UseScratchRegisterScope temps(this);
  Register field_ptr = temps.AcquireX();
  Register counter = temps.AcquireX();

  Subs(counter, field_count, 1);
  And(field_ptr, counter, 1);
  Subs(counter, counter, 2);
void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
    Register first, Register second, Register scratch1, Register scratch2,
    JumpIfEitherSmi(first, second, failure);
  } else if (emit_debug_code()) {
    JumpIfEitherSmi(first, second, NULL, &not_smi);
    Abort(kUnexpectedSmi);

  JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,


void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
  static const int kFlatOneByteStringMask =
  And(scratch1, first, kFlatOneByteStringMask);
  And(scratch2, second, kFlatOneByteStringMask);
  Cmp(scratch1, kFlatOneByteStringTag);
  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
  const int kFlatOneByteStringMask =
  const int kFlatOneByteStringTag =
  And(scratch, type, kFlatOneByteStringMask);
  Cmp(scratch, kFlatOneByteStringTag);


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
    Register first, Register second, Register scratch1, Register scratch2,
  const int kFlatOneByteStringMask =
  const int kFlatOneByteStringTag =
  And(scratch1, first, kFlatOneByteStringMask);
  And(scratch2, second, kFlatOneByteStringMask);
  Cmp(scratch1, kFlatOneByteStringTag);
  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);


void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
                                                     Label* not_unique_name) {
  B(ne, not_unique_name);
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  DCHECK(actual.is_immediate() || actual.reg().is(x0));
  DCHECK(expected.is_immediate() || expected.reg().is(x2));
  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;

      Mov(x0, actual.immediate());
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        definitely_matches = true;

        *definitely_mismatches = true;
        Mov(x2, expected.immediate());

    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
                                              : Operand(actual.reg());
    Cmp(expected.reg(), actual_op);
    B(eq, &regular_invoke);

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      Mov(x3, Operand(code_constant));

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
      call_wrapper.BeforeCall(CallSize(adaptor));
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {

      Jump(adaptor, RelocInfo::CODE_TARGET);

  Bind(&regular_invoke);
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                const CallWrapper& call_wrapper) {
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 &definitely_mismatches, call_wrapper);

  if (!definitely_mismatches) {
      call_wrapper.BeforeCall(CallSize(code));
      call_wrapper.AfterCall();


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Register expected_reg = x2;
  Register code_reg = x3;
                                 JSFunction::kSharedFunctionInfoOffset));
                 SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Register code_reg = x3;

  Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  __ LoadObject(x1, function);
  InvokeFunction(x1, expected, actual, flag, call_wrapper);
}
void MacroAssembler::TryConvertDoubleToInt64(Register result,
  Fcvtzs(result.X(), double_input);
  Ccmp(result.X(), -1, VFlag, vc);


void MacroAssembler::TruncateDoubleToI(Register result,
  TryConvertDoubleToInt64(result, double_input, &done);

  const Register old_stack_pointer = StackPointer();
  if (csp.Is(old_stack_pointer)) {
    SetStackPointer(jssp);

  DoubleToIStub stub(isolate(),
  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());

  if (csp.Is(old_stack_pointer)) {
    SetStackPointer(csp);
    AssertStackConsistency();


void MacroAssembler::TruncateHeapNumberToI(Register result,
  DCHECK(!result.is(object));
  DCHECK(jssp.Is(StackPointer()));

  TryConvertDoubleToInt64(result, fp_scratch, &done);

  DoubleToIStub stub(isolate(),
void MacroAssembler::StubPrologue() {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);


void MacroAssembler::Prologue(bool code_pre_aging) {
  if (code_pre_aging) {
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    __ EmitCodeAgeSequence(stub);

    __ EmitFrameSetupForCodeAgePatching();


  DCHECK(jssp.Is(StackPointer()));
  UseScratchRegisterScope temps(this);
  Register type_reg = temps.AcquireX();
  Register code_reg = temps.AcquireX();

  Mov(type_reg, Smi::FromInt(type));
  Mov(code_reg, Operand(CodeObject()));
  Push(type_reg, code_reg);
  Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);

  DCHECK(jssp.Is(StackPointer()));
  AssertStackConsistency();
void MacroAssembler::ExitFramePreserveFPRegs() {


void MacroAssembler::ExitFrameRestoreFPRegs() {
  DCHECK(saved_fp_regs.Count() % 2 == 0);

  int offset = ExitFrameConstants::kLastExitFrameField;
  while (!saved_fp_regs.IsEmpty()) {
    const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
    const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();


void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    const Register& scratch,
  DCHECK(jssp.Is(StackPointer()));

  Mov(scratch, Operand(CodeObject()));
  Mov(fp, StackPointer());
        ExitFrameConstants::kCallerSPDisplacement);

  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
          ExitFrameConstants::kLastExitFrameField);
    ExitFramePreserveFPRegs();

  AlignAndSetCSPForFrame();
  DCHECK(csp.Is(StackPointer()));

  Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));


void MacroAssembler::LeaveExitFrame(bool restore_doubles,
                                    const Register& scratch,
                                    bool restore_context) {
  DCHECK(csp.Is(StackPointer()));

  if (restore_doubles) {
    ExitFrameRestoreFPRegs();

  if (restore_context) {
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,

  if (emit_debug_code()) {
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,

  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,

  SetStackPointer(jssp);
  AssertStackConsistency();
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch1, value);
    Mov(scratch2, ExternalReference(counter));


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch2, ExternalReference(counter));
    Add(scratch1, scratch1, value);


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  IncrementCounter(counter, -value, scratch1, scratch2);
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
void MacroAssembler::DebugBreak() {
  Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));


void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  DCHECK(jssp.Is(StackPointer()));
                    StackHandler::IndexField::encode(handler_index) |
                    StackHandler::KindField::encode(kind);

  Mov(x10, Operand(CodeObject()));

  if (kind == StackHandler::JS_ENTRY) {
    DCHECK(Smi::FromInt(0) == 0);
    Push(xzr, xzr, x11, x10);

  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));


void MacroAssembler::PopTryHandler() {
  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
void MacroAssembler::Allocate(int object_size,
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {

  UseScratchRegisterScope temps(this);
  Register scratch3 = temps.AcquireX();

  DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());

  ExternalReference heap_allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference heap_allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());

  Register top_address = scratch1;
  Register allocation_limit = scratch2;
  Mov(top_address, Operand(heap_allocation_top));

    Ldp(result, allocation_limit, MemOperand(top_address));

    if (emit_debug_code()) {
      Cmp(result, scratch3);
      Check(eq, kUnexpectedAllocationTop);

    Ldr(allocation_limit, MemOperand(top_address, limit - top));

  Adds(scratch3, result, object_size);
  Ccmp(scratch3, allocation_limit, CFlag, cc);

  ObjectTag(result, result);


void MacroAssembler::Allocate(Register object_size,
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {

  UseScratchRegisterScope temps(this);
  Register scratch3 = temps.AcquireX();

  DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
         scratch1.Is64Bits() && scratch2.Is64Bits());

  ExternalReference heap_allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference heap_allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());

  Register top_address = scratch1;
  Register allocation_limit = scratch2;
  Mov(top_address, heap_allocation_top);

    Ldp(result, allocation_limit, MemOperand(top_address));

    if (emit_debug_code()) {
      Cmp(result, scratch3);
      Check(eq, kUnexpectedAllocationTop);

    Ldr(allocation_limit, MemOperand(top_address, limit - top));

  Adds(scratch3, result, object_size);

  if (emit_debug_code()) {
    Check(eq, kUnalignedAllocationInNewSpace);

  Ccmp(scratch3, allocation_limit, CFlag, cc);

  ObjectTag(result, result);
void MacroAssembler::UndoAllocationInNewSpace(Register object,
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Mov(scratch, new_space_allocation_top);
  Cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);

  Mov(scratch, new_space_allocation_top);
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Label* gc_required) {
  Add(scratch1, length, length);

  InitializeNewString(result,
                      Heap::kStringMapRootIndex,


void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Label* gc_required) {
  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,

  InitializeNewString(result,
                      Heap::kConsStringMapRootIndex,


void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
                                               Label* gc_required) {
  Allocate(ConsString::kSize,

  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                      scratch1, scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,

  InitializeNewString(result,
                      Heap::kSlicedStringMapRootIndex,


void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,

  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                      scratch1, scratch2);
}
3628 void MacroAssembler::AllocateHeapNumber(Register result,
3633 CPURegister heap_number_map,
3635 DCHECK(!value.IsValid() || value.Is64Bits());
3636 UseScratchRegisterScope temps(
this);
3640 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3644 ? Heap::kMutableHeapNumberMapRootIndex
3645 : Heap::kHeapNumberMapRootIndex;
3648 if (!heap_number_map.IsValid()) {
3651 if (value.IsValid() && value.IsFPRegister()) {
3652 heap_number_map = temps.AcquireD();
3654 heap_number_map = scratch1;
3656 LoadRoot(heap_number_map, map_index);
3658 if (emit_debug_code()) {
3660 if (heap_number_map.IsFPRegister()) {
3664 map = Register(heap_number_map);
3666 AssertRegisterIsRoot(map, map_index);
3670 if (value.IsSameSizeAndType(heap_number_map)) {
3672 HeapNumber::kValueOffset);
3673 Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
3675 Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3676 if (value.IsValid()) {
3677 Str(value, MemOperand(result, HeapNumber::kValueOffset));
3680 ObjectTag(result, result);
3684 void MacroAssembler::JumpIfObjectType(Register object,
3688 Label* if_cond_pass,
3690 CompareObjectType(object, map, type_reg, type);
3691 B(cond, if_cond_pass);
3695 void MacroAssembler::JumpIfNotObjectType(Register object,
3699 Label* if_not_object) {
3700 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3705 void MacroAssembler::CompareObjectType(Register object,
3710 CompareInstanceType(map, type_reg, type);
3715 void MacroAssembler::CompareInstanceType(Register map,
3719 Cmp(type_reg, type);
3723 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3724 UseScratchRegisterScope temps(this);
3725 Register obj_map = temps.AcquireX();
3727 CompareRoot(obj_map, index);
3731 void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3734 CompareMap(scratch, map);
3738 void MacroAssembler::CompareMap(Register obj_map,
3740 Cmp(obj_map, Operand(map));
3744 void MacroAssembler::CheckMap(Register obj,
3750 JumpIfSmi(obj, fail);
3753 CompareObjectMap(obj, scratch, map);
3758 void MacroAssembler::CheckMap(Register obj,
3760 Heap::RootListIndex index,
3764 JumpIfSmi(obj, fail);
3767 JumpIfNotRoot(scratch, index, fail);
3771 void MacroAssembler::CheckMap(Register obj_map,
3776 JumpIfSmi(obj_map, fail);
3779 CompareMap(obj_map, map);
3784 void MacroAssembler::DispatchMap(Register obj,
3787 Handle<Code> success,
3791 JumpIfSmi(obj, &fail);
3794 Cmp(scratch, Operand(map));
3796 Jump(success, RelocInfo::CODE_TARGET);
3801 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3802 UseScratchRegisterScope temps(this);
3803 Register temp = temps.AcquireX();
3810 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3814 DecodeField<Map::ElementsKindBits>(result);
3818 void MacroAssembler::TryGetFunctionPrototype(Register function,
3822 BoundFunctionAction action) {
3826 if (action == kMissOnBoundFunction) {
3828 JumpIfSmi(function, miss);
3833 Register scratch_w = scratch.W();
3840 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
3844 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
3854 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3858 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3863 if (action == kMissOnBoundFunction) {
3868 Bind(&non_instance);
3877 void MacroAssembler::CompareRoot(const Register& obj,
3878 Heap::RootListIndex index) {
3879 UseScratchRegisterScope temps(this);
3880 Register temp = temps.AcquireX();
3882 LoadRoot(temp, index);
3887 void MacroAssembler::JumpIfRoot(const Register& obj,
3888 Heap::RootListIndex index,
3890 CompareRoot(obj, index);
3895 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3896 Heap::RootListIndex index,
3897 Label* if_not_equal) {
3898 CompareRoot(obj, index);
3899 B(ne, if_not_equal);
3903 void MacroAssembler::CompareAndSplit(const Register& lhs,
3908 Label* fall_through) {
3909 if ((if_true == if_false) && (if_false == fall_through)) {
3911 } else if (if_true == if_false) {
3913 } else if (if_false == fall_through) {
3914 CompareAndBranch(lhs, rhs, cond, if_true);
3915 } else if (if_true == fall_through) {
3918 CompareAndBranch(lhs, rhs, cond, if_true);
3924 void MacroAssembler::TestAndSplit(const Register& reg,
3925 uint64_t bit_pattern,
3926 Label* if_all_clear,
3928 Label* fall_through) {
3929 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3931 } else if (if_all_clear == if_any_set) {
3933 } else if (if_all_clear == fall_through) {
3934 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3935 } else if (if_any_set == fall_through) {
3936 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3938 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
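// CompareAndSplit and TestAndSplit pick the cheapest branch shape for the
// three labels they receive: whenever a target equals fall_through no branch
// is emitted for it, and when both targets coincide a single branch suffices.
// A hypothetical use (sketch only), where execution continues at 'fast' unless
// any of the low three bits of x0 is set:
//
//   Label fast, slow;
//   __ TestAndSplit(x0, 0x7, &fast, &slow, &fast);  // if_all_clear, if_any_set, fall_through
//   __ Bind(&fast);
//   // ... fast path ...
//   __ Bind(&slow);
//   // ... slow path ...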
3944 void MacroAssembler::CheckFastElements(Register map,
3952 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3957 void MacroAssembler::CheckFastObjectElements(Register map,
3965 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3968 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3975 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3977 Register elements_reg,
3979 FPRegister fpscratch1,
3981 int elements_offset) {
3990 JumpIfSmi(value_reg, &store_num);
3993 JumpIfNotHeapNumber(value_reg, fail);
3995 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3998 CanonicalizeNaN(fpscratch1);
4002 Add(scratch1, elements_reg,
4006 FixedDoubleArray::kHeaderSize - elements_offset));
4010 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4011 return has_frame_ || !stub->SometimesSetsUpAFrame();
4015 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4021 (1 << String::kArrayIndexValueBits));
4022 DecodeField<String::ArrayIndexValueBits>(index, hash);
4023 SmiTag(index, index);
4027 void MacroAssembler::EmitSeqStringSetCharCheck(
4040 AssertNotSmi(string, kNonObject);
4047 Cmp(scratch, encoding_mask);
4048 Check(eq, kUnexpectedStringType);
4051 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
4052 Check(lt, kIndexIsTooLarge);
4056 Check(ge, kIndexIsNegative);
4060 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4065 Label same_contexts;
4068 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
4072 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
4077 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
4079 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
4082 if (emit_debug_code()) {
4085 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
4086 Check(eq, kExpectedNativeContext);
4091 JSGlobalProxy::kNativeContextOffset));
4092 Cmp(scratch1, scratch2);
4093 B(&same_contexts, eq);
4096 if (emit_debug_code()) {
4099 Register scratch3 = holder_reg;
4101 CompareRoot(scratch2, Heap::kNullValueRootIndex);
4102 Check(ne, kExpectedNonNullContext);
4105 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
4106 Check(eq, kExpectedNativeContext);
4113 int token_offset = Context::kHeaderSize +
4118 Cmp(scratch1, scratch2);
4121 Bind(&same_contexts);
4128 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
4132 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4133 Eor(key, key, Operand::UntagSmi(scratch));
4137 scratch = scratch.W();
4144 Add(key, scratch, Operand(key, LSL, 15));
4146 Eor(key, key, Operand(key, LSR, 12));
4148 Add(key, key, Operand(key, LSL, 2));
4150 Eor(key, key, Operand(key, LSR, 4));
4152 Mov(scratch, Operand(key, LSL, 11));
4153 Add(key, key, Operand(key, LSL, 3));
4154 Add(key, key, scratch);
4156 Eor(key, key, Operand(key, LSR, 16));
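// The Eor/Add/Mov sequence above is the usual seeded integer hash V8 applies
// to number-dictionary keys (the untag, seed load and complement step sit in
// the elided lines). A C++ sketch of the same mixing arithmetic, writing the
// multiply-by-2057 in place of the shift-add form emitted above:
#include <cstdint>

uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // Eor(key, key, Operand::UntagSmi(scratch))
  hash = ~hash + (hash << 15);  // Add(key, scratch, Operand(key, LSL, 15))
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // key + (key << 3) + (key << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}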
4160 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4167 Register scratch3) {
4168 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
4172 SmiUntag(scratch0, key);
4173 GetNumberHash(scratch0, scratch1);
4178 SeededNumberDictionary::kCapacityOffset));
4179 Sub(scratch1, scratch1, 1);
4185 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
4187 Mov(scratch2, scratch0);
4189 And(scratch2, scratch2, scratch1);
4192 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4193 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4199 SeededNumberDictionary::kElementsStartOffset));
4210 const int kDetailsOffset =
4211     SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4213 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
4216 const int kValueOffset =
4217     SeededNumberDictionary::kElementsStartOffset + kPointerSize;
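// The loop above is open-addressed probing over entries that are kEntrySize
// (3) pointers wide: mask the hash by capacity - 1, read the key at
// entry * 3, and retry with a growing probe offset on a mismatch. A rough C++
// sketch; the quadratic offset i * (i + 1) / 2 is an assumption standing in
// for SeededNumberDictionary::GetProbeOffset(i):
#include <cstdint>

int FindNumberDictionaryEntry(uint32_t hash, uint32_t capacity /* power of 2 */,
                              const uint32_t* keys, uint32_t wanted_key,
                              int max_probes /* kNumberDictionaryProbes */) {
  const int kEntrySize = 3;  // key, value, details
  uint32_t mask = capacity - 1;
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + i * (i + 1) / 2) & mask;  // And(scratch2, scratch2, scratch1)
    int entry = index * kEntrySize;                    // scratch2 + (scratch2 << 1)
    if (keys[entry] == wanted_key) return entry;
  }
  return -1;  // caller branches to 'miss'
}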
4222 void MacroAssembler::RememberedSetHelper(Register object,
4226 RememberedSetFinalAction and_then) {
4228 Label done, store_buffer_overflow;
4229 if (emit_debug_code()) {
4231 JumpIfNotInNewSpace(object, &ok);
4232 Abort(kRememberedSetPointerInNewSpace);
4235 UseScratchRegisterScope temps(this);
4236 Register scratch2 = temps.AcquireX();
4239 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
4247 DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
4249 if (and_then == kFallThroughAtEnd) {
4252 DCHECK(and_then == kReturnAtEnd);
4257 Bind(&store_buffer_overflow);
4259 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
4260 CallStub(&store_buffer_overflow_stub);
4264 if (and_then == kReturnAtEnd) {
4270 void MacroAssembler::PopSafepointRegisters() {
4277 void MacroAssembler::PushSafepointRegisters() {
4281 DCHECK(num_unsaved >= 0);
4287 void MacroAssembler::PushSafepointRegistersAndDoubles() {
4288 PushSafepointRegisters();
4290 FPRegister::kAllocatableFPRegisters));
4294 void MacroAssembler::PopSafepointRegistersAndDoubles() {
4296 FPRegister::kAllocatableFPRegisters));
4297 PopSafepointRegisters();
4301 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4303 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4316 if ((reg_code >= 0) && (reg_code <= 15)) {
4318 } else if ((reg_code >= 18) && (reg_code <= 27)) {
4320 return reg_code - 2;
4321 } else if ((reg_code == 29) || (reg_code == 30)) {
4323 return reg_code - 3;
4332 void MacroAssembler::CheckPageFlagSet(const Register& object,
4333 const Register& scratch,
4335 Label* if_any_set) {
4336 And(scratch, object, ~Page::kPageAlignmentMask);
4337 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4338 TestAndBranchIfAnySet(scratch, mask, if_any_set);
4342 void MacroAssembler::CheckPageFlagClear(const Register& object,
4343 const Register& scratch,
4345 Label* if_all_clear) {
4346 And(scratch, object, ~Page::kPageAlignmentMask);
4347 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4348 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
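// Both helpers rely on MemoryChunk headers living at page-aligned addresses:
// masking an object pointer with ~kPageAlignmentMask yields its page header,
// whose flag word can then be tested against the mask. A small C++ sketch
// (page_alignment_mask and flags_offset stand in for the constants used above):
#include <cstddef>
#include <cstdint>

bool PageFlagsAnySet(uintptr_t object_address, uintptr_t page_alignment_mask,
                     size_t flags_offset, uintptr_t mask) {
  uintptr_t chunk = object_address & ~page_alignment_mask;  // And(scratch, object, ...)
  uintptr_t flags =
      *reinterpret_cast<const uintptr_t*>(chunk + flags_offset);  // Ldr(scratch, ...)
  return (flags & mask) != 0;  // TestAndBranchIfAnySet / ...IfAllClear
}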
4352 void MacroAssembler::RecordWriteField(
4368 JumpIfSmi(value, &done);
4376 if (emit_debug_code()) {
4380 Abort(kUnalignedCellInWriteBarrier);
4389 remembered_set_action,
4391 pointers_to_here_check_for_value);
4397 if (emit_debug_code()) {
4398 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
4399 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
4406 void MacroAssembler::RecordWriteForMap(Register object,
4414 if (emit_debug_code()) {
4415 UseScratchRegisterScope temps(this);
4416 Register temp = temps.AcquireX();
4418 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
4419 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4422 if (!FLAG_incremental_marking) {
4426 if (emit_debug_code()) {
4427 UseScratchRegisterScope temps(this);
4428 Register temp = temps.AcquireX();
4432 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4443 CheckPageFlagClear(map,
4445 MemoryChunk::kPointersToHereAreInterestingMask,
4463 isolate()->counters()->write_barriers_static()->Increment();
4464 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
4469 if (emit_debug_code()) {
4470 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
4481 void MacroAssembler::RecordWrite(
4493 if (emit_debug_code()) {
4494 UseScratchRegisterScope temps(this);
4495 Register temp = temps.AcquireX();
4499 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4508 JumpIfSmi(value, &done);
4512 CheckPageFlagClear(value,
4514 MemoryChunk::kPointersToHereAreInterestingMask,
4517 CheckPageFlagClear(object,
4519 MemoryChunk::kPointersFromHereAreInterestingMask,
4526 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
4536 isolate()->counters()->write_barriers_static()->Increment();
4537 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
4542 if (emit_debug_code()) {
4543 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
4544 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
4549 void MacroAssembler::AssertHasValidColor(const Register& reg) {
4550 if (emit_debug_code()) {
4553 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4555 Label color_is_valid;
4556 Tbnz(reg, 0, &color_is_valid);
4557 Tbz(reg, 1, &color_is_valid);
4558 Abort(kUnexpectedColorFound);
4559 Bind(&color_is_valid);
4564 void MacroAssembler::GetMarkBits(Register addr_reg,
4565 Register bitmap_reg,
4566 Register shift_reg) {
4568 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4574 UseScratchRegisterScope temps(this);
4575 Register temp = temps.AcquireX();
4576 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4577 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4578 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4585 void MacroAssembler::HasColor(Register object,
4586 Register bitmap_scratch,
4587 Register shift_scratch,
4594 GetMarkBits(object, bitmap_scratch, shift_scratch);
4595 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4597 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4599 AssertHasValidColor(bitmap_scratch);
4603 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4604 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4605 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4608 if (first_bit == 0) {
4612 Tbz(bitmap_scratch, 0, has_color);
4616 Tbz(bitmap_scratch, 0, &other_color);
4617 if (second_bit == 0) {
4618 Tbz(bitmap_scratch, 1, has_color);
4620 Tbnz(bitmap_scratch, 1, has_color);
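// The marking bitmap keeps two bits per mark position; the DCHECKs above spell
// out the encoding HasColor relies on (first bit = bit 0 of the shifted word,
// second bit = bit 1). A small sketch of decoding a color from those two bits,
// assuming white = "00", black = "10", grey = "11" and "01" impossible:
enum class MarkColor { kWhite, kBlack, kGrey, kImpossible };

MarkColor ColorFromMarkBits(bool first_bit, bool second_bit) {
  if (!first_bit) return second_bit ? MarkColor::kImpossible : MarkColor::kWhite;
  return second_bit ? MarkColor::kGrey : MarkColor::kBlack;
}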
4629 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
4631 Label* if_deprecated) {
4632 if (map->CanBeDeprecated()) {
4633 Mov(scratch, Operand(map));
4635 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
4640 void MacroAssembler::JumpIfBlack(Register object,
4644 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4645 HasColor(object, scratch0, scratch1, on_black, 1, 0);
4649 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4655 Factory* factory = isolate()->factory();
4656 Register current = scratch0;
4660 Mov(current, object);
4666 DecodeField<Map::ElementsKindBits>(scratch1);
4669 CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
4673 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
4675 DCHECK(!result.Is(ldr_location));
4676 const uint32_t kLdrLitOffset_lsb = 5;
4677 const uint32_t kLdrLitOffset_width = 19;
4679 if (emit_debug_code()) {
4682 Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
4686 Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
4691 void MacroAssembler::EnsureNotWhite(
4693 Register bitmap_scratch,
4694 Register shift_scratch,
4695 Register load_scratch,
4696 Register length_scratch,
4697 Label* value_is_white_and_not_data) {
4699 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4703 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4704 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4705 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4707 GetMarkBits(value, bitmap_scratch, shift_scratch);
4708 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4709 Lsr(load_scratch, load_scratch, shift_scratch);
4711 AssertHasValidColor(load_scratch);
4717 Tbnz(load_scratch, 0, &done);
4720 Register map = load_scratch;
4721 Label is_data_object;
4725 Mov(length_scratch, HeapNumber::kSize);
4726 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
4733 Register instance_type = load_scratch;
4735 TestAndBranchIfAnySet(instance_type,
4737 value_is_white_and_not_data);
4746 Mov(length_scratch, ExternalString::kSize);
4755 String::kLengthOffset));
4757 Cset(load_scratch, eq);
4758 Lsl(length_scratch, length_scratch, load_scratch);
4764 Bind(&is_data_object);
4767 Register mask = shift_scratch;
4768 Mov(load_scratch, 1);
4769 Lsl(mask, load_scratch, shift_scratch);
4771 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4772 Orr(load_scratch, load_scratch, mask);
4773 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4775 Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
4776 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4777 Add(load_scratch, load_scratch, length_scratch);
4778 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4785 if (emit_debug_code()) {
4786 Check(cond, reason);
4792 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4793 if (emit_debug_code()) {
4794 CheckRegisterIsClear(reg, reason);
4799 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4800 Heap::RootListIndex index,
4802 if (emit_debug_code()) {
4803 CompareRoot(reg, index);
4809 void MacroAssembler::AssertFastElements(Register elements) {
4810 if (emit_debug_code()) {
4811 UseScratchRegisterScope temps(this);
4812 Register temp = temps.AcquireX();
4815 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4816 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4817 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4818 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4824 void MacroAssembler::AssertIsString(const Register& object) {
4825 if (emit_debug_code()) {
4826 UseScratchRegisterScope temps(this);
4827 Register temp = temps.AcquireX();
4830 Check(ne, kOperandIsNotAString);
4833 Check(lo, kOperandIsNotAString);
4847 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4858 RecordComment("Abort message: ");
4861 if (FLAG_trap_on_abort) {
4870 Register old_stack_pointer = StackPointer();
4871 SetStackPointer(jssp);
4872 Mov(jssp, old_stack_pointer);
4876 RegList old_tmp_list = TmpList()->list();
4877 TmpList()->Combine(MacroAssembler::DefaultTmpList());
4879 if (use_real_aborts()) {
4881 NoUseRealAbortsScope no_real_aborts(this);
4883 Mov(x0, Smi::FromInt(reason));
4890 CallRuntime(Runtime::kAbort, 1);
4892 CallRuntime(Runtime::kAbort, 1);
4897 Adr(x0, &msg_address);
4908 BlockPoolsScope scope(this);
4914 SetStackPointer(old_stack_pointer);
4915 TmpList()->set_list(old_tmp_list);
4919 void MacroAssembler::LoadTransitionedArrayMapConditional(
4922 Register map_in_out,
4925 Label* no_map_match) {
4928 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
4932 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4934 Cmp(map_in_out, scratch2);
4935 B(ne, no_map_match);
4938 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4943 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4948 GlobalObject::kNativeContextOffset));
4954 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4959 if (emit_debug_code()) {
4964 Abort(kGlobalFunctionsMustHaveInitialMap);
4972 void MacroAssembler::PrintfNoPreserve(const char* format,
4973 const CPURegister& arg0,
4974 const CPURegister& arg1,
4975 const CPURegister& arg2,
4976 const CPURegister& arg3) {
4989 static const CPURegList kPCSVarargs =
4991 static const CPURegList kPCSVarargsFP =
4992 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
4997 tmp_list.Remove(x0);
4998 tmp_list.Remove(kPCSVarargs);
4999 tmp_list.Remove(arg0, arg1, arg2, arg3);
5002 fp_tmp_list.Remove(kPCSVarargsFP);
5003 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
5007 UseScratchRegisterScope temps(this);
5008 TmpList()->set_list(tmp_list.list());
5009 FPTmpList()->set_list(fp_tmp_list.list());
5012 CPURegList pcs_varargs = kPCSVarargs;
5013 CPURegList pcs_varargs_fp = kPCSVarargsFP;
5021 if (args[i].IsRegister()) {
5022 pcs[i] = pcs_varargs.PopLowestIndex().X();
5025 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
5026 } else if (args[i].IsFPRegister()) {
5028 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
5036 if (args[i].Aliases(pcs[i])) continue;
5040 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
5041     kPCSVarargsFP.IncludesAliasOf(args[i])) {
5042 if (args[i].IsRegister()) {
5043 Register old_arg = Register(args[i]);
5044 Register new_arg = temps.AcquireSameSizeAs(old_arg);
5045 Mov(new_arg, old_arg);
5048 FPRegister old_arg = FPRegister(args[i]);
5049 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
5050 Fmov(new_arg, old_arg);
5058 for (int i = 0; i < arg_count; i++) {
5059 DCHECK(pcs[i].type() == args[i].type());
5060 if (pcs[i].IsRegister()) {
5063 DCHECK(pcs[i].IsFPRegister());
5064 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
5065 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
5067 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
5078 Label format_address;
5079 Adr(x0, &format_address);
5082 { BlockPoolsScope scope(this);
5085 Bind(&format_address);
5086 EmitStringData(format);
5093 if (!csp.Is(StackPointer())) {
5094 Bic(csp, StackPointer(), 0xf);
5097 CallPrintf(arg_count, pcs);
5101 void MacroAssembler::CallPrintf(int arg_count, const CPURegister* args) {
5105 #ifdef USE_SIMULATOR
5112 for (int i = 0; i < arg_count; i++) {
5114 if (args[i].IsRegister()) {
5123 dc32(arg_pattern_list);
5131 void MacroAssembler::Printf(const char* format,
5137 if (!csp.Is(StackPointer())) {
5138 DCHECK(!csp.Aliases(arg0));
5139 DCHECK(!csp.Aliases(arg1));
5140 DCHECK(!csp.Aliases(arg2));
5141 DCHECK(!csp.Aliases(arg3));
5146 RegList old_tmp_list = TmpList()->list();
5147 RegList old_fp_tmp_list = FPTmpList()->list();
5148 TmpList()->set_list(0);
5149 FPTmpList()->set_list(0);
5160 tmp_list.Remove(arg0, arg1, arg2, arg3);
5161 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
5162 TmpList()->set_list(tmp_list.list());
5163 FPTmpList()->set_list(fp_tmp_list.list());
5165 { UseScratchRegisterScope temps(this);
5169 bool arg0_sp = StackPointer().Aliases(arg0);
5170 bool arg1_sp = StackPointer().Aliases(arg1);
5171 bool arg2_sp = StackPointer().Aliases(arg2);
5172 bool arg3_sp = StackPointer().Aliases(arg3);
5173 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
5176 Register arg_sp = temps.AcquireX();
5177 Add(arg_sp, StackPointer(),
5179 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
5180 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
5181 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
5182 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
5186 { UseScratchRegisterScope temps(this);
5187 Register tmp = temps.AcquireX();
5192 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
5195 { UseScratchRegisterScope temps(this);
5196 Register tmp = temps.AcquireX();
5205 TmpList()->set_list(old_tmp_list);
5206 FPTmpList()->set_list(old_fp_tmp_list);
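// Printf saves and restores the caller-saved and scratch state it touches, so
// it can be dropped into generated code while debugging. A hypothetical use
// (example only; the format string follows CallPrintf's printf-style
// contract):
//
//   __ Printf("x0 = 0x%" PRIx64 "\n", x0);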
5210 void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
5214 InstructionAccurateScope scope(this,
5216 DCHECK(jssp.Is(StackPointer()));
5217 EmitFrameSetupForCodeAgePatching(this);
5222 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
5223 InstructionAccurateScope scope(this,
5225 DCHECK(jssp.Is(StackPointer()));
5226 EmitCodeAgeSequence(this, stub);
5234 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) {
5246 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
5252 void MacroAssembler::EmitCodeAgeSequence(Assembler* assm,
5270 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
5276 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
5277 bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
5279 isolate->code_aging_helper()->IsOld(sequence));
5284 void MacroAssembler::TruncatingDiv(Register result,
5288 DCHECK(result.Is32Bits() && dividend.Is32Bits());
5289 base::MagicNumbersForDivision<uint32_t> mag =
5291 Mov(result, mag.multiplier);
5292 Smull(result.X(), dividend, result);
5293 Asr(result.X(), result.X(), 32);
5294 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
5295 if (divisor > 0 && neg) Add(result, result, dividend);
5296 if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
5297 if (mag.shift > 0) Asr(result, result, mag.shift);
5298 Add(result, result, Operand(dividend, LSR, 31));
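// TruncatingDiv divides by a compile-time constant using a precomputed "magic"
// multiplier from base::SignedDivisionByConstant: take the high 32 bits of the
// 64-bit product, correct for the sign of the multiplier, shift, and add the
// dividend's sign bit so the result truncates toward zero. A scalar C++ sketch
// of the same recipe (assumes arithmetic right shifts on signed values, as on
// the target):
#include <cstdint>

int32_t TruncatingDivByConstant(int32_t dividend, int32_t divisor,
                                uint32_t multiplier, int shift) {
  int64_t product =
      static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier);  // Smull
  int32_t result = static_cast<int32_t>(product >> 32);                   // Asr #32
  bool neg = (multiplier & 0x80000000u) != 0;
  if (divisor > 0 && neg) result += dividend;
  if (divisor < 0 && !neg && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;
  result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);  // LSR #31
  return result;
}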
5305 UseScratchRegisterScope::~UseScratchRegisterScope() {
5306 available_->set_list(old_available_);
5307 availablefp_->set_list(old_availablefp_);
5311 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
5312 int code = AcquireNextAvailable(available_).code();
5313 return Register::Create(code, reg.SizeInBits());
5317 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
5318 int code = AcquireNextAvailable(availablefp_).code();
5319 return FPRegister::Create(code, reg.SizeInBits());
5323 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
5326 CPURegister result = available->PopLowestIndex();
5332 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
5333 const CPURegister& reg) {
5343 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
5344 const Label* smi_check) {
5345 Assembler::BlockPoolsScope scope(masm);
5346 if (reg.IsValid()) {
5347 DCHECK(smi_check->is_bound());
5354 uint32_t delta = __ InstructionsGeneratedSince(smi_check);
5355 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
5357 DCHECK(!smi_check->is_bound());
5365 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
5366     : reg_(NoReg), smi_check_(NULL) {
5367 InstructionSequence* inline_data = InstructionSequence::At(info);
5368 DCHECK(inline_data->IsInlineData());
5369 if (inline_data->IsInlineData()) {
5370 uint64_t payload = inline_data->InlineData();
5373 DCHECK(is_uint32(payload));
5375 int reg_code = RegisterBits::decode(payload);
5376 reg_ = Register::XRegFromCode(reg_code);
5377 uint64_t smi_check_delta = DeltaBits::decode(payload);
5378 DCHECK(smi_check_delta != 0);
5379 smi_check_ = inline_data->preceding(smi_check_delta);