#if V8_TARGET_ARCH_MIPS

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ =
        Handle<Object>(isolate()->heap()->undefined_value(), isolate());
  }
}
void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else {
    lw(dst, src);
  }
}

void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    sw(src, dst);
  }
}
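// Load/Store pick the narrowest MIPS load/store that matches the value's
// Representation. A minimal usage sketch (illustrative only; the field
// offset name is hypothetical and not part of this file):
//
//   masm->Load(a0, FieldMemOperand(a1, kFieldOffset),
//              Representation::UInteger8());   // emits lbu
//   masm->Store(a0, FieldMemOperand(a1, kFieldOffset),
//               Representation::UInteger8());  // emits sb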
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond, Register src1,
                              const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond, Register src1,
                               const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack,
  // so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}

void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // ...
}

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  DCHECK(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}
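// InNewSpace reduces the membership test to one And plus one Branch: the
// object address is masked with the new-space mask and compared against the
// new-space start. Illustrative sketch with made-up numbers (not from this
// file): if the semispace starts at 0x40000000 and the mask is ~(1 MB - 1),
// any address in [0x40000000, 0x40100000) satisfies
// (addr & mask) == 0x40000000.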
void MacroAssembler::RecordWriteField(
    Register object, int offset, Register value, Register dst,
    RAStatus ra_status, SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action, SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  // Skip the barrier if the value being written is a smi.
  Label done;
  JumpIfSmi(value, &done);

  // In debug code, verify that the destination slot is pointer-aligned.
  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, ra_status, save_fp,
              remembered_set_action, OMIT_SMI_CHECK,
              pointers_to_here_check_for_value);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite, dst,
          Operand(isolate()->factory()->meta_map()));
  }

  if (!FLAG_incremental_marking) {
    return;
  }

  if (emit_debug_code()) {
    lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq, kWrongAddressOrValuePassedToRecordWrite, map, Operand(at));
  }

  Label done;
  CheckPageFlag(map, map, MemoryChunk::kPointersToHereAreInterestingMask, eq,
                &done);

  Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  // ... record the write via the RecordWriteStub ...

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);

  // Clobber clobbered registers when running with the debug-code flag on.
  if (emit_debug_code()) {
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}
void MacroAssembler::RecordWrite(
    Register object, Register address, Register value, RAStatus ra_status,
    SaveFPRegsMode fp_mode, RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  if (emit_debug_code()) {
    lw(at, MemOperand(address));
    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed.
  Label done;
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value, value, MemoryChunk::kPointersToHereAreInterestingMask,
                eq, &done);
  CheckPageFlag(object, value,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  RecordWriteStub stub(isolate(), object, value, address,
                       remembered_set_action, fp_mode);
  CallStub(&stub);

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
                   value);

  // Clobber clobbered registers when running with the debug-code flag on.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address, Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  // ... store the slot address, bump the store buffer top and test for
  // overflow against StoreBuffer::kStoreBufferOverflowBit ...
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    DCHECK(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  DCHECK(!holder_reg.is(scratch));
  DCHECK(!holder_reg.is(at));
  DCHECK(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
  if (emit_debug_code()) {
    Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
          scratch, Operand(zero_reg));
  }

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, scratch);
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    pop(holder_reg);
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a native context.
  if (emit_debug_code()) {
    push(holder_reg);      // Temporarily save holder on the stack.
    mov(holder_reg, at);   // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
          holder_reg, Operand(at));
    pop(holder_reg);
  }

  // Check that the security token in the calling global object is compatible
  // with the security token in the receiving global object.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));

  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(scratch, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, scratch, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  sll(scratch, reg0, 11);
  sll(at, reg0, 3);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
}
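// The instruction sequence above computes the integer hash sketched below in
// plain C++ (illustrative only; it mirrors ComputeIntegerHash but is not
// copied from this file):
//
//   uint32_t Hash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11)
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }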
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  Label done;

  GetNumberHash(reg0, reg1);

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    mov(reg2, reg0);
    if (i > 0) {
      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    DCHECK(SeededNumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);      // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kNumberDictionaryProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a field property; reg2 points at the entry.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}

void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instruction, use addiu(x, y, -imm).
    } else {
      DCHECK(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}

void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    mul(rd, rs, rt.rm());
    // ...
  } else {
    // ... load the immediate into at first ...
  }
}

void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
                         Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
    muh(rd_hi, rs, rt.rm());
    mul(rd_lo, rs, rt.rm());
    // ... or, when rd_lo aliases an input:
    DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
    mul(rd_lo, rs, rt.rm());
    muh(rd_hi, rs, rt.rm());
    // ...
  } else {
    DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
    // ...
    DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
    // ...
  }
}

void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    muh(rd, rs, rt.rm());
    // ...
  } else {
    // ...
  }
}

void MacroAssembler::Mult(Register rs, const Operand& rt) {
  // Same register/immediate dispatch as Addu above, emitting mult.
  // ...
}

void MacroAssembler::Multu(Register rs, const Operand& rt) {
  // Same dispatch, emitting multu.
  // ...
}

void MacroAssembler::Div(Register rs, const Operand& rt) {
  // Same dispatch, emitting div.
  // ...
}

void MacroAssembler::Div(Register rem, Register res,
                         Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    div(res, rs, rt.rm());
    mod(rem, rs, rt.rm());
    // ...
  } else {
    // ...
  }
}

void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    div(res, rs, rt.rm());
    // ...
  } else {
    // ...
  }
}

void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    mod(rd, rs, rt.rm());
    // ...
  } else {
    // ...
  }
}

void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    modu(rd, rs, rt.rm());
    // ...
  } else {
    // ...
  }
}

void MacroAssembler::Divu(Register rs, const Operand& rt) {
  // Same dispatch, emitting divu.
  // ...
}

void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    // ...
    divu(res, rs, rt.rm());
    // ...
  } else {
    // ...
  }
}

void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      DCHECK(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}

void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      DCHECK(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}

void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      DCHECK(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}

void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    DCHECK(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}

void MacroAssembler::Neg(Register rs, const Operand& rt) {
  DCHECK(rt.is_reg());
  // ...
  li(at, -1);
  xor_(rs, rt.rm(), at);
}

void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      DCHECK(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}

void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      DCHECK(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}

void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  // On cores with the rotate instructions this is rotrv/rotr; otherwise the
  // rotate is synthesized from shifts and an or.
  if (rt.is_reg()) {
    rotrv(rd, rs, rt.rm());
  } else {
    rotr(rd, rs, rt.imm32_);
  }
  // Fallback path without rotate instructions:
  //   register operand:
  //     subu(at, zero_reg, rt.rm());
  //     sllv(at, rs, at);
  //     srlv(rd, rs, rt.rm());
  //     or_(rd, rd, at);
  //   immediate operand:
  //     if (rt.imm32_ == 0) { srl(rd, rs, 0); }
  //     else {
  //       srl(at, rs, rt.imm32_);
  //       sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
  //       or_(rd, rd, at);
  //     }
}
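// All of these macro-instructions share one dispatch: a register operand maps
// directly onto the native instruction, a small immediate uses the immediate
// form (addiu/andi/slti/...), and anything else is first materialized into
// the assembler temporary `at` via li. Illustrative sketch (not from this
// file): Addu(v0, a0, Operand(100000)) cannot use addiu (100000 does not fit
// in 16 signed bits), so it expands to roughly
//   lui  at, 0x1
//   ori  at, at, 0x86a0
//   addu v0, a0, at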
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  lwr(rd, rs);
  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}

void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  swr(rd, rs);
  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
}

void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      li(dst, Operand(cell));
      lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
    } else {
      li(dst, Operand(value));
    }
  }
}

void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need relocation.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    } else {
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // Always use the two-instruction sequence so the value can be patched.
    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
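// Sketch of the size-optimized expansion above (illustrative constants, not
// from this file):
//   li(t0, Operand(12))         -> addiu t0, zero_reg, 12
//   li(t0, Operand(0x8000))     -> ori   t0, zero_reg, 0x8000
//   li(t0, Operand(0x12345678)) -> lui   t0, 0x1234
//                                  ori   t0, t0, 0x5678
// Values carrying relocation information always take the two-instruction
// lui/ori form so the embedded constant can be patched later.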
void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}

void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}

void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}

void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}

void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}

void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}

void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}

void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}
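// Usage sketch (illustrative): MultiPush(a0.bit() | a1.bit() | ra.bit())
// reserves 3 * kPointerSize bytes in one Subu and stores the highest-numbered
// register at the highest address; a matching MultiPop with the same RegList
// walks the slots in the opposite order and releases the stack space with a
// single addiu.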
void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  RegList saved_regs = kJSCallerSaved | ra.bit();
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);

  // Set up arguments for the C function call.
  PrepareCallCFunction(2, t0);

  mov(a0, address);
  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
}

void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the desired bitfield
    // on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}

void MacroAssembler::Ins(Register rt, Register rs, uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    ins_(rt, rs, pos, size);
  } else {
    DCHECK(!rt.is(t8) && !rs.is(t8));
    // Build a mask of `size` ones, place the source bits at `pos` and merge
    // them into rt.
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    and_(t8, rs, at);
    sll(t8, t8, pos);
    sll(at, at, pos);
    nor(at, at, zero_reg);
    and_(rt, rt, at);
    or_(rt, rt, t8);
  }
}
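// The pre-r2 Ext fallback isolates bits [pos, pos + size) with two shifts:
// shifting left by 32 - (pos + size) discards the bits above the field, and
// the logical right shift by 32 - size drops the bits below it. Worked
// example (illustrative): extracting 8 bits at position 4 from 0xABCD1234
// gives (0xABCD1234 << 20) >> 24 == 0x23, the same as
// (0xABCD1234 >> 4) & 0xFF.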
void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              const MemOperand& rs,
                              FPURegister scratch) {
  lw(t8, rs);
  Cvt_d_uw(fd, t8, scratch);
}

void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert the unsigned word in rs: cvt.d.w treats the word as signed, so
  // values with the top bit set need 2^31 added back afterwards.
  // ...
  Label conversion_done;
  // ...
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into scratch and add it to fd.
  mtc1(zero_reg, scratch);
  // ...
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}

void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}

void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  trunc_w_d(fd, fs);
  // ...
}

void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  round_w_d(fd, fs);
  // ...
}

void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  floor_w_d(fd, fs);
  // ...
}

void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  ceil_w_d(fd, fs);
  // ...
}

void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  Mthc1(at, scratch);

  // Test if scratch > fd; if fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First subtract 2^31 from fd, truncate, then OR the sign bit back in.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}
void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
  if (IsFp64Mode()) {
    mthc1(rt, fs);
  } else {
    mtc1(rt, fs.high());
  }
}

void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
  if (IsFp64Mode()) {
    mfhc1(rt, fs);
  } else {
    mfc1(rt, fs.high());
  }
}

void MacroAssembler::BranchF(Label* target,
                             Label* nan,
                             Condition cc,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cc == al) {
    Branch(bd, target);
    return;
  }
  DCHECK(nan || target);
  if (nan) {
    // Unordered compare: branch to the NaN label if either operand is NaN.
    c(UN, D, cmp1, cmp2);
    bc1t(nan);
  }
  if (target) {
    // Dispatch on the condition, pairing each c.cond.d compare with a
    // bc1t/bc1f branch:
    switch (cc) {
      case lt:  c(OLT, D, cmp1, cmp2); bc1t(target); break;
      case gt:  c(ULE, D, cmp1, cmp2); bc1f(target); break;
      case ge:  c(ULT, D, cmp1, cmp2); bc1f(target); break;
      case le:  c(OLE, D, cmp1, cmp2); bc1t(target); break;
      case eq:  c(EQ, D, cmp1, cmp2);  bc1t(target); break;
      case ueq: c(UEQ, D, cmp1, cmp2); bc1t(target); break;
      case ne:  c(EQ, D, cmp1, cmp2);  bc1f(target); break;
      case nue: c(UEQ, D, cmp1, cmp2); bc1f(target); break;
      default:
        CHECK(0);
    }
  }
  if (bd == PROTECT) {
    nop();
  }
}
void MacroAssembler::Move(FPURegister dst, double imm) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  // Handle special values first.
  bool force_load = dst.is(kDoubleRegZero);
  if (value_rep == zero && !force_load) {
    mov_d(dst, kDoubleRegZero);
  } else if (value_rep == minus_zero && !force_load) {
    neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower of the corresponding
    // FPU register pair.
    if (lo != 0) {
      li(at, Operand(lo));
      mtc1(at, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the higher of the corresponding
    // FPU register pair.
    if (hi != 0) {
      li(at, Operand(hi));
      Mthc1(at, dst);
    } else {
      Mthc1(zero_reg, dst);
    }
  }
}
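// DoubleAsTwoUInt32 splits the IEEE-754 bit pattern so each half can be
// materialized separately. Illustrative example (not from this file): 1.0 has
// the bit pattern 0x3FF0000000000000, so lo == 0 (handled with
// mtc1(zero_reg, dst)) and hi == 0x3FF00000 (loaded with li + Mthc1).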
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
  // On cores without a conditional move, branch around the move.
  Label done;
  Branch(&done, ne, rt, Operand(zero_reg));
  mov(rd, rs);
  bind(&done);
  // Otherwise: movz(rd, rs, rt);
}

void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
  Label done;
  Branch(&done, eq, rt, Operand(zero_reg));
  mov(rd, rs);
  bind(&done);
  // Otherwise: movn(rd, rs, rt);
}

void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
  if (IsMipsArchVariant(kLoongson)) {
    // Tests an FP condition code and conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    DCHECK(cc == 0);
    DCHECK(!(rs.is(t8) || rd.is(t8)));
    Label done;
    Register scratch = t8;
    // Fetch FCSR and isolate the condition code bit (bit 23).
    cfc1(scratch, FCSR);
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, eq, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movt(rd, rs, cc);
  }
}

void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
  if (IsMipsArchVariant(kLoongson)) {
    // Same FCSR test as Movt, but move when the condition bit is clear.
    DCHECK(cc == 0);
    DCHECK(!(rs.is(t8) || rd.is(t8)));
    Label done;
    Register scratch = t8;
    cfc1(scratch, FCSR);
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, ne, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movf(rd, rs, cc);
  }
}

void MacroAssembler::Clz(Register rd, Register rs) {
  if (IsMipsArchVariant(kLoongson)) {
    DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
    Register mask = t8;
    Register scratch = t9;
    Label loop, end;
    // Count leading zeros with a mask-and-shift loop.
    // ...
    bind(&loop);
    and_(scratch, at, mask);
    Branch(&end, ne, scratch, Operand(zero_reg));
    // ...
    bind(&end);
  } else {
    clz(rd, rs);
  }
}
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                     Register result,
                                     DoubleRegister double_input,
                                     Register scratch,
                                     DoubleRegister double_scratch,
                                     Register except_flag,
                                     CheckForInexactConversion check_inexact) {
  DCHECK(!result.is(scratch));
  DCHECK(!double_input.is(double_scratch));
  DCHECK(!except_flag.is(scratch));

  Label done;

  // Clear the except flag (0 = no exception).
  mov(except_flag, zero_reg);

  // Test for values that can be exactly represented as a signed 32-bit
  // integer.
  cvt_w_d(double_scratch, double_input);
  mfc1(result, double_scratch);
  cvt_d_w(double_scratch, double_scratch);
  BranchF(&done, NULL, eq, double_input, double_scratch);

  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
  if (check_inexact == kDontCheckForInexactConversion) {
    // Ignore inexact exceptions.
    except_mask &= ~kFCSRInexactFlagMask;
  }

  // Save FCSR and disable FPU exceptions.
  cfc1(scratch, FCSR);
  ctc1(zero_reg, FCSR);

  // Do the conversion based on the rounding mode.
  switch (rounding_mode) {
    case kRoundToNearest:  Round_w_d(double_scratch, double_input); break;
    case kRoundToZero:     Trunc_w_d(double_scratch, double_input); break;
    case kRoundToPlusInf:  Ceil_w_d(double_scratch, double_input);  break;
    case kRoundToMinusInf: Floor_w_d(double_scratch, double_input); break;
  }

  // Retrieve the exception bits and restore FCSR.
  cfc1(except_flag, FCSR);
  ctc1(scratch, FCSR);
  // Move the converted value into the result register.
  mfc1(result, double_scratch);

  // Check for fpu exceptions.
  And(except_flag, except_flag, Operand(except_mask));

  bind(&done);
}
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister single_scratch = kLithiumScratchDouble.low();
  Register scratch = at;
  Register scratch2 = t9;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch, scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(done, eq, scratch, Operand(zero_reg));
}

void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through, the inline version didn't succeed: call the stub.
  push(ra);
  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  sdc1(double_input, MemOperand(sp, 0));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  Addu(sp, sp, Operand(kDoubleSize));
  pop(ra);

  bind(&done);
}

void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = f12;
  DCHECK(!result.is(object));

  ldc1(double_scratch,
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through, the inline version didn't succeed: call the stub.
  push(ra);
  DoubleToIStub stub(isolate(),
                     object,
                     result,
                     HeapNumber::kValueOffset - kHeapObjectTag,
                     true,
                     true);
  CallStub(&stub);
  pop(ra);

  bind(&done);
}
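// The inline truncation path relies on the FCSR flag bits: after trunc_w_d,
// any of the invalid-operation, overflow or underflow flags being set means
// the value did not fit in an int32 (for example NaN or |x| >= 2^31), so the
// code falls through to the slower DoubleToIStub path instead of branching
// to `done`.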
void MacroAssembler::TruncateNumberToI(Register object,
                                       Register result,
                                       Register heap_number_map,
                                       Register scratch,
                                       Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}

void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}

void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}
// Emulated condition code branches do not emit a branch if the condition is
// cc_always with non-zero registers, or vice versa.
#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                        \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||  \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))

void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}

void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt, BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}

void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, bdslot);
    } else {
      Jr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jr(L, bdslot);
    } else {
      BranchShort(L, bdslot);
    }
  }
}

void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, cond, rs, rt, bdslot);
    } else {
      // Out-of-range target: branch over an unconditional jump on the
      // negated condition.
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    } else {
      BranchShort(L, cond, rs, rt, bdslot);
    }
  }
}

void MacroAssembler::Branch(Label* L,
                            Condition cond,
                            Register rs,
                            Heap::RootListIndex index,
                            BranchDelaySlot bdslot) {
  LoadRoot(at, index);
  Branch(L, cond, rs, Operand(at), bdslot);
}
void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt, BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  DCHECK(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // The comparison operand is already in register r2: emit the compare and
    // the branch directly.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparisons.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      // greater_equal and less_equal use the same slt result with beq, and
      // the unsigned conditions (Ugreater, Ugreater_equal, Uless,
      // Uless_equal) use sltu in place of slt, e.g.
      //   sltu(scratch, r2, rs);
      //   bne(scratch, zero_reg, offset);
      // ...
      default:
        UNREACHABLE();
    }
  } else {
    // The comparison operand is an immediate: use the zero register where
    // possible, fold small immediates into slti/sltiu, and otherwise load
    // the constant into r2 = scratch first.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    switch (cond) {
      case eq:
        // ... r2 = zero_reg or a loaded constant ...
        beq(rs, r2, offset);
        break;
      case ne:
        // ...
        bne(rs, r2, offset);
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      // greater, greater_equal, less_equal and the unsigned conditions follow
      // the same three-way pattern, using slt/slti or sltu/sltiu followed by
      // beq/bne against zero_reg.
      // ...
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}

void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}
void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt, BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    r2 = rt.rm_;
    // Because the offset is only computed once the label is bound (or the
    // branch position is known), shifted_branch_offset(L, false) is re-read
    // immediately before every emitted branch instruction.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      // greater_equal and less_equal reuse the slt result with beq; the
      // unsigned conditions (Ugreater, Ugreater_equal, Uless, Uless_equal)
      // use sltu in place of slt, e.g.
      //   sltu(scratch, r2, rs);
      //   offset = shifted_branch_offset(L, false);
      //   bne(scratch, zero_reg, offset);
      // ...
      default:
        UNREACHABLE();
    }
  } else {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // The operand is an immediate: use zero_reg for 0, fold small immediates
    // into slti/sltiu, and otherwise load the constant into r2 = scratch.
    switch (cond) {
      case eq:
        // ...
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        // ...
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      // greater, greater_equal, less_equal and the unsigned conditions follow
      // the same three-way pattern with slt/slti or sltu/sltiu followed by
      // beq/bne against zero_reg.
      // ...
      default:
        UNREACHABLE();
    }
  }
  // Check that offset could actually hold on an int16_t.
  DCHECK(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}
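// BranchShort emits a single PC-relative branch whose offset must fit in 16
// bits (hence shifted_branch_offset and the is_int16 check above). Targets
// that may be out of range go through Branch(), which falls back to a full
// jump (Jr) when the label is far away or a trampoline pool has already been
// emitted.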
void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, bdslot);
}

void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}

void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, bdslot);
    } else {
      Jalr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jalr(L, bdslot);
    } else {
      BranchAndLinkShort(L, bdslot);
    }
  }
}

void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    } else {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    }
  }
}
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        BranchDelaySlot bdslot) {
  bal(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}

void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
                                        Register rs, const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // Two emulation strategies appear here, depending on the architecture
    // variant. One turns the 0/1 result of slt/sltu into a sign with
    // addiu(scratch, scratch, -1) and then uses bgezal/bltzal, e.g. for
    // `greater`:
    //   slt(scratch, r2, rs);
    //   addiu(scratch, scratch, -1);
    //   bgezal(scratch, offset);
    // The other skips two instructions on the failing condition and then
    // emits an unconditional bal, e.g.:
    //   slt(scratch, r2, rs);
    //   beq(scratch, zero_reg, 2);
    //   nop();
    //   bal(offset);
    // The remaining signed and unsigned conditions swap the operands or use
    // sltu instead of slt in the same patterns.
    // ...
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}

void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset = 0;
  Register r2 = no_reg;
  Register scratch = at;

  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // As in the int16_t overload, the label offset is recomputed with
    // shifted_branch_offset(L, false) immediately before each linking branch.
    // Depending on the architecture variant the conditions are emulated
    // either with the slt/sltu + addiu(-1) + bgezal/bltzal trick, e.g. for
    // `greater`:
    //   slt(scratch, r2, rs);
    //   addiu(scratch, scratch, -1);
    //   offset = shifted_branch_offset(L, false);
    //   bgezal(scratch, offset);
    // or by skipping two instructions on the failing condition and then
    // emitting bal:
    //   slt(scratch, r2, rs);
    //   beq(scratch, zero_reg, 2);
    //   nop();
    //   offset = shifted_branch_offset(L, false);
    //   bal(offset);
    // The other signed and unsigned conditions use the same patterns with
    // the operands swapped or sltu in place of slt.
    // ...
  }
  // Check that offset could actually hold on an int16_t.
  DCHECK(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) {
    nop();
  }
}
void MacroAssembler::Jump(Register target,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cond == cc_always) {
    jr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT) {
    nop();
  }
}

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  Label skip;
  if (cond != cc_always) {
    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
  }
  // The first instruction of 'li' may be placed in the delay slot.
  // This is not an issue, t9 is expected to be clobbered anyway.
  li(t9, Operand(target, rmode));
  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
  bind(&skip);
}

void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}

void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  AllowDeferredHandleDereference embedding_raw_address;
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
int MacroAssembler::CallSize(Register target,
                             Condition cond, Register rs, const Operand& rt,
                             BranchDelaySlot bd) {
  int size = 0;
  // ... one instruction for an unconditional call, three when the condition
  // has to be emulated, plus one for a protected delay slot ...
  return size * kInstrSize;
}

// Note: To call gcc-compiled C code on mips, you must call thru t9.
void MacroAssembler::Call(Register target,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  if (cond == cc_always) {
    jalr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jalr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT) {
    nop();
  }
  DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}

int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond, Register rs, const Operand& rt,
                             BranchDelaySlot bd) {
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 2 * kInstrSize;
}

void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  // Must record previous source positions before the li() generates a new
  // code-target sequence.
  positions_recorder()->WriteRecordedPositions();
  li(t9, Operand(reinterpret_cast<int32_t>(target), rmode), CONSTANT_SIZE);
  Call(t9, cond, rs, rt, bd);
  DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}

int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond, Register rs, const Operand& rt,
                             BranchDelaySlot bd) {
  AllowDeferredHandleDereference using_raw_address;
  return CallSize(reinterpret_cast<Address>(code.location()),
                  rmode, cond, rs, rt, bd);
}

void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond, Register rs, const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  AllowDeferredHandleDereference embedding_raw_address;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
  DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}
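// Each Call overload is paired with a CallSize overload, and the
// DCHECK_EQ(CallSize(...), SizeOfCodeGeneratedSince(&start)) assertions keep
// the two in sync: code patching on MIPS assumes a call sequence of a
// statically known length (the lui/ori constant load plus the jalr).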
void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt,
                         BranchDelaySlot bd) {
  Jump(ra, cond, rs, rt, bd);
}

void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm28;
  imm28 = jump_address(L);
  {
    BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until the associated instructions are emitted and can be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    j(imm28);
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) nop();
}

void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  {
    BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jr(at);
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) nop();
}

void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  {
    BlockGrowBufferScope block_buf_growth(this);
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jalr(at);
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT) nop();
}

void MacroAssembler::DropAndRet(int drop) {
  Ret(USE_DELAY_SLOT);
  addiu(sp, sp, drop * kPointerSize);
}

void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1,
                                const Operand& r2) {
  // Both Drop and Ret need to be conditional.
  Label skip;
  if (cond != cc_always) {
    Branch(&skip, NegateCondition(cond), r1, r2);
  }
  Drop(drop);
  Ret();
  if (cond != cc_always) {
    bind(&skip);
  }
}

void MacroAssembler::Drop(int count, Condition cond, Register reg,
                          const Operand& op) {
  if (count <= 0) {
    return;
  }
  Label skip;
  if (cond != al) {
    Branch(&skip, NegateCondition(cond), reg, op);
  }
  addiu(sp, sp, count * kPointerSize);
  if (cond != al) {
    bind(&skip);
  }
}

void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
  if (scratch.is(no_reg)) {
    Xor(reg1, reg1, Operand(reg2));
    Xor(reg2, reg2, Operand(reg1));
    Xor(reg1, reg1, Operand(reg2));
  } else {
    mov(scratch, reg1);
    mov(reg1, reg2);
    mov(reg2, scratch);
  }
}

void MacroAssembler::Call(Label* target) {
  BranchAndLink(target);
}

void MacroAssembler::DebugBreak() {
  PrepareCEntryArgs(0);
  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
  CEntryStub ces(isolate(), 1);
  DCHECK(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // ...
  // Set up the code object (t1) and the state (t2) for pushing.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
  li(t2, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    // The frame pointer and context slots are zero for a JS entry frame.
    Push(zero_reg, zero_reg, t2, t1);
  } else {
    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
  }

  // Link the current handler as the next handler and set this new handler as
  // the current one.
  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(t1, MemOperand(t2));
  push(t1);
  sw(sp, MemOperand(t2));
}

void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a1);
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  sw(a1, MemOperand(at));
}

void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // ...
  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
  // ...
}

void MacroAssembler::Throw(Register value) {
  // ...
  // Fetch and pop the top handler, then restore its saved frame state.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  // ...
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // A zero cp indicates a non-JS frame.
  Label done;
  Branch(&done, eq, cp, Operand(zero_reg));
  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

  JumpToHandlerEntry();
}
void MacroAssembler::ThrowUncatchable(Register value) {
  // The exception is expected in v0.
  if (!value.is(v0)) {
    mov(v0, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(sp, MemOperand(a3));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  And(a2, a2, Operand(StackHandler::KindField::kMask));
  Branch(&fetch_next, ne, a2, Operand(zero_reg));

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}
void MacroAssembler::Allocate(int object_size,
                              Register result,
                              Register scratch1,
                              Register scratch2,
                              Label* gc_required,
                              AllocationFlags flags) {
  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!scratch1.is(t9));
  DCHECK(!scratch2.is(t9));
  DCHECK(!result.is(t9));

  // Set up allocation top address and allocation limit registers.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);

  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
  DCHECK((limit - top) == kPointerSize);

  // Set up the allocation top address register.
  Register topaddr = scratch1;
  li(topaddr, Operand(allocation_top));

  // ... load the current allocation top into result and the limit into t9 ...
  if (emit_debug_code()) {
    // Assert that result actually contains top on entry.
    lw(t9, MemOperand(topaddr));
    Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Insert a one-pointer filler when the allocation top is not
    // double-aligned.
    Label aligned;
    And(scratch2, result, Operand(kDoubleAlignmentMask));
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
    // ...
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    sw(scratch2, MemOperand(result));
    Addu(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
  }

  // Calculate the new top and bail out if new space is exhausted.
  Addu(scratch2, result, Operand(object_size));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
  // ... store the new top and tag the result if requested ...
}
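// Allocate is a bump-pointer allocator: it loads the current allocation top
// and limit, optionally inserts a one-pointer filler to reach double
// alignment, adds the requested size, and branches to gc_required when the
// new top would exceed the limit (the unsigned Ugreater compare above). Only
// when the check passes is the new top written back and the result tagged.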
void MacroAssembler::Allocate(Register object_size, Register result,
                              Register scratch1, Register scratch2,
                              Label* gc_required, AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  DCHECK(!result.is(scratch1));
  DCHECK(!result.is(scratch2));
  DCHECK(!scratch1.is(scratch2));
  DCHECK(!object_size.is(t9));
  DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));

  // Set up allocation top address and allocation limit registers.
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());

  Register topaddr = scratch1;
  li(topaddr, Operand(allocation_top));

  // ... load the current allocation top and limit, as in the fixed-size
  // Allocate above ...
  if (emit_debug_code()) {
    lw(t9, MemOperand(topaddr));
    Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
  }

  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    Label aligned;
    And(scratch2, result, Operand(kDoubleAlignmentMask));
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    sw(scratch2, MemOperand(result));
    Addu(result, result, Operand(kDoubleSize / 2));
    bind(&aligned);
  }

  // The size is either in words (SIZE_IN_WORDS) or already in bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    sll(scratch2, object_size, kPointerSizeLog2);
    Addu(scratch2, result, scratch2);
  } else {
    Addu(scratch2, result, Operand(object_size));
  }
  Branch(gc_required, Ugreater, scratch2, Operand(t9));

  // Update allocation top; in debug code check that the new top is
  // object-aligned.
  if (emit_debug_code()) {
    And(t9, scratch2, Operand(kObjectAlignmentMask));
    Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
  }
  sw(scratch2, MemOperand(topaddr));
  // ... tag the result if requested ...
}

void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  And(object, object, Operand(~kHeapObjectTagMask));
  // Check that the object un-allocated is below the current top.
  li(scratch, Operand(new_space_allocation_top));
  lw(scratch, MemOperand(scratch));
  Check(less, kUndoAllocationOfNonAllocatedMemory, object, Operand(scratch));
  // Write the address of the object to un-allocate as the current top.
  li(scratch, Operand(new_space_allocation_top));
  sw(object, MemOperand(scratch));
}
void MacroAssembler::AllocateTwoByteString(Register result, Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  sll(scratch1, length, 1);  // Length in bytes, not chars.
  addiu(scratch1, scratch1,
        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate the two-byte string in new space, then set map and fields.
  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
  InitializeNewString(result, length, Heap::kStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateOneByteString(Register result, Register length,
                                           Register scratch1, Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // ... same size calculation with one byte per character, then allocate ...
  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);
  InitializeNewString(result, length, Heap::kConsStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);
  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);
  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateOneByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
           TAG_OBJECT);
  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                     Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  Branch(&succeed, eq, at, Operand(zero_reg));
  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));

  bind(&succeed);
}
void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* need_gc,
                                        TaggingMode tagging_mode,
                                        MutableMode mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);

  Heap::RootListIndex map_index = mode == MUTABLE
      ? Heap::kMutableHeapNumberMapRootIndex
      : Heap::kHeapNumberMapRootIndex;
  AssertIsRoot(heap_number_map, map_index);

  // Store the heap number map in the allocated object.
  if (tagging_mode == TAG_RESULT) {
    sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
  } else {
    sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  }
}

void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 FPURegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
                                int field_count) {
  DCHECK((temps & dst.bit()) == 0);
  DCHECK((temps & src.bit()) == 0);
  // Pick a temporary register from the given list.
  Register tmp = no_reg;
  for (int i = 0; i < kNumRegisters; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.code_ = i;
      break;
    }
  }
  DCHECK(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    lw(tmp, FieldMemOperand(src, i * kPointerSize));
    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}

void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
                               Register scratch) {
  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word-size chunks.
  Branch(&byte_loop, le, length, Operand(kPointerSize));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&align_loop_1, ne, length, Operand(zero_reg));

  // Copy bytes in word-size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, kExpectingAlignmentForCopyBytes,
           scratch, Operand(zero_reg));
  }
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  lw(scratch, MemOperand(src));
  Addu(src, src, kPointerSize);

  // Store the loaded word into dst one byte at a time, shifting the word
  // right by 8 between stores (the byte order depends on endianness):
  //   sb(scratch, MemOperand(dst, ...));
  //   srl(scratch, scratch, 8);
  //   ... repeated for all four bytes ...
  Addu(dst, dst, kPointerSize);
  Subu(length, length, Operand(kPointerSize));
  Branch(&word_loop);

  // Copy the last bytes if any are left.
  bind(&byte_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&byte_loop_1);
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  bind(&done);
}

void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  Branch(&entry);
  bind(&loop);
  sw(filler, MemOperand(start_offset));
  Addu(start_offset, start_offset, kPointerSize);
  bind(&entry);
  Branch(&loop, lt, start_offset, Operand(end_offset));
}
void MacroAssembler::CheckFastElements(Register map, Register scratch,
                                       Label* fail) {
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));
}

void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
                                             Label* fail) {
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));
}

void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
                                          Label* fail) {
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register scratch3,
                                                 Label* fail,
                                                 int elements_offset) {
  Label smi_value, maybe_nan, have_double_value, is_nan, done;
  Register mantissa_reg = scratch2;
  Register exponent_reg = scratch3;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, fail,
           DONT_DO_SMI_CHECK);

  // Check for NaN: all NaNs must be converted to the canonical NaN value.
  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));

  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));

  bind(&have_double_value);
  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  Addu(scratch1, scratch1, elements_reg);
  sw(mantissa_reg,
     FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
                     + kHoleNanLower32Offset));
  sw(exponent_reg,
     FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
                     + kHoleNanUpper32Offset));
  jmp(&done);

  bind(&maybe_nan);
  // Could be NaN, Infinity or -Infinity; NaN has a non-zero fraction.
  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));

  bind(&is_nan);
  // Load the canonical NaN for storing into the double array.
  LoadRoot(at, Heap::kNanValueRootIndex);
  lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
  lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
  jmp(&have_double_value);

  bind(&smi_value);
  Addu(scratch1, elements_reg,
       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
               elements_offset));
  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  Addu(scratch1, scratch1, scratch2);
  // scratch1 is now the effective address of the double element.

  Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  mtc1(untagged_value, f2);
  cvt_d_w(f2, f2);
  sdc1(f2, MemOperand(scratch1, 0));
  bind(&done);
}
void MacroAssembler::CompareMapAndBranch(Register obj, Register scratch,
                                         Handle<Map> map,
                                         Label* early_success,
                                         Condition cond, Label* branch_to) {
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
}

void MacroAssembler::CompareMapAndBranch(Register obj_map, Handle<Map> map,
                                         Label* early_success,
                                         Condition cond, Label* branch_to) {
  Branch(branch_to, cond, obj_map, Operand(map));
}

void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
                              Label* fail, SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Label success;
  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
  bind(&success);
}

void MacroAssembler::DispatchMap(Register obj, Register scratch,
                                 Handle<Map> map, Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
  bind(&fail);
}

void MacroAssembler::CheckMap(Register obj, Register scratch,
                              Heap::RootListIndex index, Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(at, index);
  Branch(fail, ne, scratch, Operand(at));
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // The calling convention: a0 holds the actual argument count, a2 the
  // expected count and a3 the code to invoke (unless it is a constant).
  DCHECK(actual.is_immediate() || actual.reg().is(a0));
  DCHECK(expected.is_immediate() || expected.reg().is(a2));
  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      li(a0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that don't want
        // that done; make it look like a match.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        li(a2, Operand(expected.immediate()));
      }
    }
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
  } else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      li(a3, Operand(code_constant));
      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      Call(adaptor, RelocInfo::CODE_TARGET);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        Branch(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag, call_wrapper);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      Call(code);
      call_wrapper.AfterCall();
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }
    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Contract with called JS functions requires that function is passed in a1.
  DCHECK(function.is(a1));
  Register expected_reg = a2;
  Register code_reg = a3;

  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  lw(expected_reg,
     FieldMemOperand(code_reg,
                     SharedFunctionInfo::kFormalParameterCountOffset));
  sra(expected_reg, expected_reg, kSmiTagSize);
  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  // Contract with called JS functions requires that function is passed in a1.
  DCHECK(function.is(a1));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  InvokeCode(a3, expected, actual, flag, call_wrapper);
}

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
  li(a1, function);
  InvokeFunction(a1, expected, actual, flag, call_wrapper);
}
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}

void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  // ... fail unless the instance type is in the JS object range ...
}

void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  DCHECK(kNotStringTag != 0);
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(scratch, scratch, Operand(kIsNotStringMask));
  Branch(fail, ne, scratch, Operand(zero_reg));
}

void MacroAssembler::IsObjectNameType(Register object,
                                      Register scratch,
                                      Label* fail) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
}

void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  Label non_instance;
  if (miss_on_bound_function) {
    // Check that the receiver isn't a smi and that it is a function.
    JumpIfSmi(function, miss);
    GetObjectType(function, result, scratch);
    Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));

    // If a bound function, go to the miss label.
    lw(scratch,
       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    lw(scratch,
       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    And(scratch, scratch,
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
    Branch(miss, ne, scratch, Operand(zero_reg));

    // Make sure that the function has an instance prototype.
    lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
    And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
    Branch(&non_instance, ne, scratch, Operand(zero_reg));
  }

  // Get the prototype or initial map from the function.
  lw(result,
     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, the function has no
  // prototype.
  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
  Branch(miss, eq, result, Operand(t8));

  // If the function does not have an initial map, we're done.
  Label done;
  GetObjectType(result, scratch, scratch);
  Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));

  if (miss_on_bound_function) {
    jmp(&done);

    // Non-instance prototype: fetch the prototype from the constructor field
    // in the initial map.
    bind(&non_instance);
    lw(result, FieldMemOperand(result, Map::kConstructorOffset));
  }

  bind(&done);
}

void MacroAssembler::GetObjectType(Register object,
                                   Register map,
                                   Register type_reg) {
  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}

void MacroAssembler::CallStub(CodeStub* stub,
                              TypeFeedbackId ast_id,
                              Condition cond, Register r1, const Operand& r2,
                              BranchDelaySlot bd) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond, r1, r2, bd);
}

void MacroAssembler::TailCallStub(CodeStub* stub,
                                  Condition cond, Register r1,
                                  const Operand& r2, BranchDelaySlot bd) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}
void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
    int stack_space,
    MemOperand return_value_operand,
    MemOperand* context_restore_operand) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
      next_address);

  DCHECK(function_address.is(a1) || function_address.is(a2));

  Label profiler_disabled;
  Label end_profiler_check;
  li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
  lb(t9, MemOperand(t9, 0));
  Branch(&profiler_disabled, eq, t9, Operand(zero_reg));

  // Additional parameter is the address of the actual callback.
  li(t9, Operand(thunk_ref));
  jmp(&end_profiler_check);

  bind(&profiler_disabled);
  mov(t9, function_address);
  bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  li(s3, Operand(next_address));
  lw(s0, MemOperand(s3, kNextOffset));
  lw(s1, MemOperand(s3, kLimitOffset));
  lw(s2, MemOperand(s3, kLevelOffset));
  Addu(s2, s2, Operand(1));
  sw(s2, MemOperand(s3, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, a0);
    li(a0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on the stack (could have moved after GC).
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(this, t9);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, a0);
    li(a0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  lw(v0, return_value_operand);
  bind(&return_value_loaded);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  sw(s0, MemOperand(s3, kNextOffset));
  if (emit_debug_code()) {
    lw(a1, MemOperand(s3, kLevelOffset));
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
  }
  Subu(s2, s2, Operand(1));
  sw(s2, MemOperand(s3, kLevelOffset));
  lw(at, MemOperand(s3, kLimitOffset));
  Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Check if the function scheduled an exception.
  bind(&leave_exit_frame);
  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
  lw(t1, MemOperand(at));
  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
  bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    lw(cp, *context_restore_operand);
  }
  li(s0, Operand(stack_space));
  LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);

  bind(&promote_scheduled_exception);
  {
    FrameScope frame(this, StackFrame::INTERNAL);
    CallExternalReference(
        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
        0);
  }
  jmp(&exception_handled);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  sw(s1, MemOperand(s3, kLimitOffset));
  mov(s0, v0);
  PrepareCallCFunction(1, s1);
  li(a0, Operand(ExternalReference::isolate_address(isolate())));
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
                1);
  mov(v0, s0);
  jmp(&leave_exit_frame);
}

bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If not a valid array index, the value must have been a heap number hash
  // and be converted to an index through the runtime.
  STATIC_ASSERT(String::kHashShift + String::kArrayIndexValueBits <=
                (1 << String::kArrayIndexValueBits) +
                    String::kArrayIndexValueBits);
  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}

void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               FPURegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done, not_smi;
  JumpIfNotSmi(object, &not_smi);
  // Remove smi tag and convert to double.
  sra(scratch1, object, kSmiTagSize);
  mtc1(scratch1, result);
  cvt_d_w(result, result);
  Branch(&done);

  bind(&not_smi);
  // Check for heap number and load double value from it.
  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  Branch(not_number, ne, scratch1, Operand(heap_number_map));

  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either NaN or +/-Infinity.
    Register exponent = scratch1;
    Register mask_reg = scratch2;
    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
    li(mask_reg, HeapNumber::kExponentMask);

    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));
  }
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}

void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            FPURegister value,
                                            Register scratch1) {
  sra(scratch1, smi, kSmiTagSize);
  mtc1(scratch1, value);
  cvt_d_w(value, value);
}
void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
                                             const Operand& right,
                                             Register overflow_dst,
                                             Register scratch) {
  if (right.is_reg()) {
    AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
  } else {
    if (dst.is(left)) {
      mov(scratch, left);                      // Preserve left.
      addiu(dst, left, right.immediate());     // Left is overwritten.
      xor_(scratch, dst, scratch);             // Original left.
      // Load right since xori takes uint16 as immediate.
      addiu(t9, zero_reg, right.immediate());
      xor_(overflow_dst, dst, t9);
      and_(overflow_dst, overflow_dst, scratch);
    } else {
      addiu(dst, left, right.immediate());
      xor_(overflow_dst, dst, left);
      // Load right since xori takes uint16 as immediate.
      addiu(t9, zero_reg, right.immediate());
      xor_(scratch, dst, t9);
      and_(overflow_dst, scratch, overflow_dst);
    }
  }
}

void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!dst.is(scratch));
  DCHECK(!overflow_dst.is(scratch));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));

  if (left.is(right) && dst.is(left)) {
    DCHECK(!dst.is(t9));
    DCHECK(!scratch.is(t9));
    DCHECK(!left.is(t9));
    DCHECK(!right.is(t9));
    DCHECK(!overflow_dst.is(t9));
    mov(t9, right);
    right = t9;
  }

  if (dst.is(left)) {
    mov(scratch, left);           // Preserve left.
    addu(dst, left, right);       // Left is overwritten.
    xor_(scratch, dst, scratch);  // Original left.
    xor_(overflow_dst, dst, right);
    and_(overflow_dst, overflow_dst, scratch);
  } else if (dst.is(right)) {
    mov(scratch, right);          // Preserve right.
    addu(dst, left, right);       // Right is overwritten.
    xor_(scratch, dst, scratch);  // Original right.
    xor_(overflow_dst, dst, left);
    and_(overflow_dst, overflow_dst, scratch);
  } else {
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}

void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
                                             const Operand& right,
                                             Register overflow_dst,
                                             Register scratch) {
  if (right.is_reg()) {
    SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
  } else {
    if (dst.is(left)) {
      mov(scratch, left);                       // Preserve left.
      addiu(dst, left, -(right.immediate()));   // Left is overwritten.
      xor_(overflow_dst, dst, scratch);         // scratch is original left.
      // Load right since xori takes uint16 as immediate.
      addiu(t9, zero_reg, right.immediate());
      xor_(scratch, scratch, t9);  // scratch is original left.
      and_(overflow_dst, scratch, overflow_dst);
    } else {
      addiu(dst, left, -(right.immediate()));
      xor_(overflow_dst, dst, left);
      // Load right since xori takes uint16 as immediate.
      addiu(t9, zero_reg, right.immediate());
      xor_(scratch, left, t9);
      and_(overflow_dst, scratch, overflow_dst);
    }
  }
}

void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  DCHECK(!dst.is(overflow_dst));
  DCHECK(!dst.is(scratch));
  DCHECK(!overflow_dst.is(scratch));
  DCHECK(!overflow_dst.is(left));
  DCHECK(!overflow_dst.is(right));
  DCHECK(!scratch.is(left));
  DCHECK(!scratch.is(right));

  // This happens with some crankshaft code. Since Subu works fine if
  // left == right, let's not make that restriction here.
  if (left.is(right)) {
    mov(dst, zero_reg);
    mov(overflow_dst, zero_reg);
    return;
  }

  if (dst.is(left)) {
    mov(scratch, left);                // Preserve left.
    subu(dst, left, right);            // Left is overwritten.
    xor_(overflow_dst, dst, scratch);  // scratch is original left.
    xor_(scratch, scratch, right);     // scratch is original left.
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);     // Preserve right.
    subu(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);  // Original right.
    and_(overflow_dst, scratch, overflow_dst);
  } else {
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
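// The overflow detection above is the classic sign-bit trick: for addition,
// overflow occurred iff both operands have the same sign and the result has
// a different one; for subtraction, iff the operands have different signs and
// the result's sign differs from the minuend's. The xor/and sequences leave
// that condition in the sign bit of overflow_dst. Sketch in plain C++
// (illustrative only):
//
//   bool AddOverflows(int32_t a, int32_t b, int32_t sum) {
//     return ((sum ^ a) & (sum ^ b)) < 0;
//   }
//   bool SubOverflows(int32_t a, int32_t b, int32_t diff) {
//     return ((diff ^ a) & (a ^ b)) < 0;
//   }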
4682 void MacroAssembler::CallRuntime(
const Runtime::Function* f,
4690 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4696 PrepareCEntryArgs(num_arguments);
4697 PrepareCEntryFunction(ExternalReference(f, isolate()));
4698 CEntryStub stub(isolate(), 1, save_doubles);
4703 void MacroAssembler::CallExternalReference(
const ExternalReference& ext,
4706 PrepareCEntryArgs(num_arguments);
4707 PrepareCEntryFunction(ext);
4709 CEntryStub stub(isolate(), 1);
4714 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4721 PrepareCEntryArgs(num_arguments);
4722 JumpToExternalReference(ext);
4726 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4729 TailCallExternalReference(ExternalReference(fid, isolate()),
4735 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4737 PrepareCEntryFunction(builtin);
4738 CEntryStub stub(isolate(), 1);
4739 Jump(stub.GetCode(),
4740 RelocInfo::CODE_TARGET,
4748 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4750 const CallWrapper& call_wrapper) {
4754 GetBuiltinEntry(t9, id);
4756 call_wrapper.BeforeCall(CallSize(t9));
4758 call_wrapper.AfterCall();
4766 void MacroAssembler::GetBuiltinFunction(Register target,
4767 Builtins::JavaScript id) {
4769 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4773 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4777 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4779 GetBuiltinFunction(a1, id);
4785 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4786 Register scratch1, Register scratch2) {
4787 if (FLAG_native_code_counters && counter->Enabled()) {
4788 li(scratch1, Operand(value));
4789 li(scratch2, Operand(ExternalReference(counter)));
4795 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4796 Register scratch1, Register scratch2) {
4798 if (FLAG_native_code_counters && counter->Enabled()) {
4799 li(scratch2, Operand(ExternalReference(counter)));
4801 Addu(scratch1, scratch1, Operand(value));
4807 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4808 Register scratch1, Register scratch2) {
4810 if (FLAG_native_code_counters && counter->Enabled()) {
4811 li(scratch2, Operand(ExternalReference(counter)));
4813 Subu(scratch1, scratch1, Operand(value));
4823 Register rs, Operand rt) {
4824 if (emit_debug_code())
4825 Check(cc, reason, rs, rt);
4829 void MacroAssembler::AssertFastElements(Register elements) {
4830 if (emit_debug_code()) {
4831 DCHECK(!elements.is(at));
4835 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4836 Branch(&ok, eq, elements, Operand(at));
4837 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4838 Branch(&ok, eq, elements, Operand(at));
4839 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4840 Branch(&ok, eq, elements, Operand(at));
4841 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4849 Register rs, Operand rt) {
4851 Branch(&L, cc, rs, rt);
4864 RecordComment("Abort message: ");
4868 if (FLAG_trap_on_abort) {
4874 li(a0, Operand(Smi::FromInt(reason)));
4881 CallRuntime(Runtime::kAbort, 1);
4883 CallRuntime(Runtime::kAbort, 1);
4886 if (is_trampoline_pool_blocked()) {
4892 static const int kExpectedAbortInstructions = 10;
4893 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4894 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4895 while (abort_instructions++ < kExpectedAbortInstructions) {
4902 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4903 if (context_chain_length > 0) {
4905 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4906 for (int i = 1; i < context_chain_length; i++) {
4907 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4918 void MacroAssembler::LoadTransitionedArrayMapConditional(
4921 Register map_in_out,
4923 Label* no_map_match) {
4926 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4927 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4932 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4934 FixedArrayBase::kHeaderSize;
4936 Branch(no_map_match, ne, map_in_out, Operand(at));
4940 FixedArrayBase::kHeaderSize;
4945 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4948 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4951 GlobalObject::kNativeContextOffset));
4953 lw(function, MemOperand(function, Context::SlotOffset(index)));
4957 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4962 if (emit_debug_code()) {
4967 Abort(kGlobalFunctionsMustHaveInitialMap);
4973 void MacroAssembler::StubPrologue() {
4977 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4981 void MacroAssembler::Prologue(bool code_pre_aging) {
4982 PredictableCodeSizeScope predictible_code_size_scope(
4986 if (code_pre_aging) {
4988 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4989 nop(Assembler::CODE_AGE_MARKER_NOP);
4993 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
5001 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5003 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
5010 li(t8, Operand(Smi::FromInt(type)));
5019 Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
5031 void MacroAssembler::EnterExitFrame(bool save_doubles,
5053 if (emit_debug_code()) {
5054 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5059 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5062 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5064 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5067 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5071 if (frame_alignment > 0) {
5073 And(sp, sp, Operand(-frame_alignment));
5078 for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
5079 FPURegister reg = FPURegister::from_code(i);
5087 DCHECK(stack_space >= 0);
5089 if (frame_alignment > 0) {
5091 And(sp, sp, Operand(-frame_alignment));
5101 void MacroAssembler::LeaveExitFrame(bool save_doubles,
5102 Register argument_count,
5103 bool restore_context,
5109 for (int i = 0; i < FPURegister::kMaxNumRegisters; i += 2) {
5110 FPURegister reg = FPURegister::from_code(i);
5116 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5120 if (restore_context) {
5121 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5125 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5132 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5134 if (argument_count.is_valid()) {
5147 void MacroAssembler::InitializeNewString(Register string,
5149 Heap::RootListIndex map_index,
5151 Register scratch2) {
5153 LoadRoot(scratch2, map_index);
5155 li(scratch1, Operand(String::kEmptyHashField));
5161 int MacroAssembler::ActivationFrameAlignment() {
5162 #if V8_HOST_ARCH_MIPS
5167 return base::OS::ActivationFrameAlignment();
5173 return FLAG_sim_stack_alignment;
5178 void MacroAssembler::AssertStackIsAligned() {
5179 if (emit_debug_code()) {
5180 const int frame_alignment = ActivationFrameAlignment();
5181 const int frame_alignment_mask = frame_alignment - 1;
5184 Label alignment_as_expected;
5186 andi(at, sp, frame_alignment_mask);
5187 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5189 stop("Unexpected stack alignment");
5190 bind(&alignment_as_expected);
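// Added note (not part of the original file): the activation frame alignment
// is a power of two, so masking sp with (alignment - 1) isolates the low
// bits; a non-zero result means the stack pointer is misaligned.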
5196 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5199 Label* not_power_of_two_or_zero) {
5200 Subu(scratch, reg, Operand(1));
5202 scratch, Operand(zero_reg));
5203 and_(at, scratch, reg);
5204 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5208 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5216 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5221 SmiTagCheckOverflow(dst, overflow);
5232 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5240 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5242 Label* non_smi_case) {
5247 void MacroAssembler::JumpIfSmi(Register value,
5253 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5256 void MacroAssembler::JumpIfNotSmi(Register value,
5257 Label* not_smi_label,
5262 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5266 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5268 Label* on_not_both_smi) {
5271 or_(at, reg1, reg2);
5272 JumpIfNotSmi(at, on_not_both_smi);
5276 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5278 Label* on_either_smi) {
5282 and_(at, reg1, reg2);
5283 JumpIfSmi(at, on_either_smi);
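// Added note (not part of the original file): on this 32-bit target a smi has
// tag bit 0 equal to zero, so or-ing two values and testing the tag catches
// the case where either operand is a heap object (JumpIfNotBothSmi), while
// and-ing them catches the case where either operand is a smi
// (JumpIfEitherSmi); a single tag test covers both registers at once.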
5287 void MacroAssembler::AssertNotSmi(Register object) {
5288 if (emit_debug_code()) {
5291 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
5296 void MacroAssembler::AssertSmi(Register object) {
5297 if (emit_debug_code()) {
5300 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
5305 void MacroAssembler::AssertString(Register object) {
5306 if (emit_debug_code()) {
5309 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
5319 void MacroAssembler::AssertName(Register object) {
5320 if (emit_debug_code()) {
5323 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
5333 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5335 if (emit_debug_code()) {
5336 Label done_checking;
5337 AssertNotSmi(object);
5338 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5339 Branch(&done_checking, eq, object, Operand(scratch));
5342 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5343 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5345 bind(&done_checking);
5350 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5351 if (emit_debug_code()) {
5353 LoadRoot(at, index);
5354 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5359 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5360 Register heap_number_map,
5362 Label* on_not_heap_number) {
5364 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
5365 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
5369 void MacroAssembler::LookupNumberStringCache(Register object,
5376 Register number_string_cache = result;
5377 Register mask = scratch3;
5380 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
5384 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
5387 Addu(mask, mask, -1);
5394 Label load_result_from_cache;
5395 JumpIfSmi(object, &is_smi);
5398 Heap::kHeapNumberMapRootIndex,
5408 Xor(scratch1, scratch1, Operand(scratch2));
5409 And(scratch1, scratch1, Operand(mask));
5414 Addu(scratch1, number_string_cache, scratch1);
5416 Register probe = mask;
5418 JumpIfSmi(probe, not_found);
5425 Register scratch = scratch1;
5426 sra(scratch, object, 1);
5427 And(scratch, mask, Operand(scratch));
5432 Addu(scratch, number_string_cache, scratch);
5436 Branch(not_found, ne, object, Operand(probe));
5439 bind(&load_result_from_cache);
5442 IncrementCounter(isolate()->counters()->number_to_string_native(),
5449 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
5450 Register first, Register second, Register scratch1, Register scratch2,
5459 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
5464 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
5471 And(scratch1, first, Operand(second));
5472 JumpIfSmi(scratch1, failure);
5473 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
5478 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
5479 Register first, Register second, Register scratch1, Register scratch2,
5481 const int kFlatOneByteStringMask =
5483 const int kFlatOneByteStringTag =
5485 DCHECK(kFlatOneByteStringTag <= 0xffff);
5486 andi(scratch1, first, kFlatOneByteStringMask);
5487 Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
5488 andi(scratch2, second, kFlatOneByteStringMask);
5489 Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
5493 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
5496 const int kFlatOneByteStringMask =
5498 const int kFlatOneByteStringTag =
5500 And(scratch, type, Operand(kFlatOneByteStringMask));
5501 Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
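// Added note (not part of the original file): the mask/tag pair above appears
// to combine the string, representation and encoding bits of the instance
// type, so a single and-and-compare verifies that a value is a sequential
// one-byte string instead of branching on each property separately.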
5505 static const int kRegisterPassedArguments = 4;
5507 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5508 int num_double_arguments) {
5509 int stack_passed_words = 0;
5510 num_reg_arguments += 2 * num_double_arguments;
5513 if (num_reg_arguments > kRegisterPassedArguments) {
5514 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5517 return stack_passed_words;
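// Added note (not part of the original file): this follows the MIPS o32
// calling convention as used here: the first four argument words travel in
// registers (kRegisterPassedArguments == 4), each double argument counts as
// two words, and only the excess words need stack slots.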
5521 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5528 Check(ne, kNonObject, at, Operand(zero_reg));
5534 li(scratch, Operand(encoding_mask));
5535 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5540 Label index_tag_ok, index_tag_bad;
5541 TrySmiTag(index, scratch, &index_tag_bad);
5542 Branch(&index_tag_ok);
5543 bind(&index_tag_bad);
5544 Abort(kIndexIsTooLarge);
5545 bind(&index_tag_ok);
5548 Check(lt, kIndexIsTooLarge, index, Operand(at));
5550 DCHECK(Smi::FromInt(0) == 0);
5551 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5553 SmiUntag(index, index);
5557 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5558 int num_double_arguments,
5560 int frame_alignment = ActivationFrameAlignment();
5567 int stack_passed_arguments = CalculateStackPassedWords(
5568 num_reg_arguments, num_double_arguments);
5575 And(sp, sp, Operand(-frame_alignment));
5583 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5585 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5589 void MacroAssembler::CallCFunction(ExternalReference function,
5590 int num_reg_arguments,
5591 int num_double_arguments) {
5592 li(t8, Operand(function));
5593 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5597 void MacroAssembler::CallCFunction(Register function,
5598 int num_reg_arguments,
5599 int num_double_arguments) {
5600 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5604 void MacroAssembler::CallCFunction(ExternalReference function,
5605 int num_arguments) {
5606 CallCFunction(function, num_arguments, 0);
5610 void MacroAssembler::CallCFunction(Register function,
5611 int num_arguments) {
5612 CallCFunction(function, num_arguments, 0);
5616 void MacroAssembler::CallCFunctionHelper(Register function,
5617 int num_reg_arguments,
5618 int num_double_arguments) {
5626 #if V8_HOST_ARCH_MIPS
5627 if (emit_debug_code()) {
5628 int frame_alignment = base::OS::ActivationFrameAlignment();
5629 int frame_alignment_mask = frame_alignment - 1;
5632 Label alignment_as_expected;
5633 And(at, sp, Operand(frame_alignment_mask));
5634 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5637 stop("Unexpected alignment in CallCFunction");
5638 bind(&alignment_as_expected);
5647 if (!function.is(t9)) {
5654 int stack_passed_arguments = CalculateStackPassedWords(
5655 num_reg_arguments, num_double_arguments);
5657 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5665 #undef BRANCH_ARGS_CHECK
5668 void MacroAssembler::PatchRelocatedValue(Register li_location,
5670 Register new_value) {
5673 if (emit_debug_code()) {
5675 Check(eq, kTheInstructionToPatchShouldBeALui,
5676 scratch, Operand(LUI));
5683 lw(scratch, MemOperand(li_location, kInstrSize));
5685 if (emit_debug_code()) {
5687 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5688 scratch, Operand(ORI));
5689 lw(scratch, MemOperand(li_location, kInstrSize));
5692 sw(scratch, MemOperand(li_location, kInstrSize));
5695 FlushICache(li_location, 2);
5698 void MacroAssembler::GetRelocatedValue(Register li_location,
5702 if (emit_debug_code()) {
5704 Check(eq, kTheInstructionShouldBeALui,
5705 value, Operand(LUI));
5712 lw(scratch, MemOperand(li_location, kInstrSize));
5713 if (emit_debug_code()) {
5715 Check(eq, kTheInstructionShouldBeAnOri,
5716 scratch, Operand(ORI));
5717 lw(scratch, MemOperand(li_location, kInstrSize));
5723 or_(value, value, scratch);
5727 void MacroAssembler::CheckPageFlag(
5732 Label* condition_met) {
5733 And(scratch, object, Operand(~Page::kPageAlignmentMask));
5734 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5735 And(scratch, scratch, Operand(mask));
5736 Branch(condition_met, cc, scratch, Operand(zero_reg));
5740 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5742 Label* if_deprecated) {
5743 if (map->CanBeDeprecated()) {
5744 li(scratch, Operand(map));
5746 And(scratch, scratch, Operand(Map::Deprecated::kMask));
5747 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5752 void MacroAssembler::JumpIfBlack(Register object,
5756 HasColor(object, scratch0, scratch1, on_black, 1, 0);
5757 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5761 void MacroAssembler::HasColor(Register object,
5762 Register bitmap_scratch,
5763 Register mask_scratch,
5770 GetMarkBits(object, bitmap_scratch, mask_scratch);
5772 Label other_color, word_boundary;
5773 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5774 And(t8, t9, Operand(mask_scratch));
5775 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5777 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5778 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5779 And(t8, t9, Operand(mask_scratch));
5780 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5783 bind(&word_boundary);
5785 And(t9, t9, Operand(1));
5786 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5794 void MacroAssembler::JumpIfDataObject(Register value,
5796 Label* not_data_object) {
5798 Label is_data_object;
5800 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5801 Branch(&is_data_object, eq, t8, Operand(scratch));
5808 Branch(not_data_object, ne, t8, Operand(zero_reg));
5809 bind(&is_data_object);
5813 void MacroAssembler::GetMarkBits(Register addr_reg,
5814 Register bitmap_reg,
5815 Register mask_reg) {
5817 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5822 Addu(bitmap_reg, bitmap_reg, t8);
5824 sllv(mask_reg, t8, mask_reg);
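// Added note (not part of the original file): GetMarkBits appears to mask the
// object address down to its page start to locate the marking bitmap, use the
// higher address bits (via t8) to select the bitmap cell that is added to the
// page-relative bitmap pointer, and shift a single bit into place with sllv
// to form the in-cell mask for that object.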
5828 void MacroAssembler::EnsureNotWhite(
5830 Register bitmap_scratch,
5831 Register mask_scratch,
5832 Register load_scratch,
5833 Label* value_is_white_and_not_data) {
5835 GetMarkBits(value, bitmap_scratch, mask_scratch);
5838 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5839 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5840 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5841 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5847 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5848 And(t8, mask_scratch, load_scratch);
5849 Branch(&done, ne, t8, Operand(zero_reg));
5851 if (emit_debug_code()) {
5855 sll(t8, mask_scratch, 1);
5856 And(t8, load_scratch, t8);
5857 Branch(&ok, eq, t8, Operand(zero_reg));
5858 stop("Impossible marking bit pattern");
5864 Register map = load_scratch;
5865 Register length = load_scratch;
5866 Label is_data_object;
5870 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5873 Branch(&skip, ne, t8, Operand(map));
5874 li(length, HeapNumber::kSize);
5875 Branch(&is_data_object);
5884 Register instance_type = load_scratch;
5887 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5898 Branch(&skip, eq, t8, Operand(zero_reg));
5899 li(length, ExternalString::kSize);
5900 Branch(&is_data_object);
5914 Branch(&skip, eq, t8, Operand(zero_reg));
5921 bind(&is_data_object);
5924 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5925 Or(t8, t8, Operand(mask_scratch));
5926 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5928 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5929 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5930 Addu(t8, t8, Operand(length));
5931 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
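// Added note (not part of the original file): once the value is known to be a
// data object, its mark bit is set (so it is no longer white) and its size is
// added to the page's live-bytes counter, keeping the incremental marker's
// accounting consistent.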
5937 void MacroAssembler::LoadInstanceDescriptors(Register map,
5938 Register descriptors) {
5943 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5945 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5949 void MacroAssembler::EnumLength(Register dst, Register map) {
5952 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5957 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5958 Register empty_fixed_array_value = t2;
5959 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5978 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5986 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5989 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5990 Branch(call_runtime, ne, a2, Operand(at));
5994 Branch(&next, ne, a2, Operand(null_value));
5998 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5999 DCHECK(!output_reg.is(input_reg));
6001 li(output_reg, Operand(255));
6003 Branch(&done, gt, input_reg, Operand(output_reg));
6006 mov(output_reg, zero_reg);
6007 mov(output_reg, input_reg);
6012 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6019 Move(temp_double_reg, 0.0);
6020 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6023 mov(result_reg, zero_reg);
6028 Move(temp_double_reg, 255.0);
6029 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6030 li(result_reg, Operand(255));
6035 cvt_w_d(temp_double_reg, input_reg);
6036 mfc1(result_reg, temp_double_reg);
6041 void MacroAssembler::TestJSArrayForAllocationMemento(
6042 Register receiver_reg,
6043 Register scratch_reg,
6044 Label* no_memento_found,
6046 Label* allocation_memento_present) {
6047 ExternalReference new_space_start =
6048 ExternalReference::new_space_start(isolate());
6049 ExternalReference new_space_allocation_top =
6050 ExternalReference::new_space_allocation_top_address(isolate());
6051 Addu(scratch_reg, receiver_reg,
6052 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
6053 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
6054 li(at, Operand(new_space_allocation_top));
6056 Branch(no_memento_found, gt, scratch_reg, Operand(at));
6057 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
6058 if (allocation_memento_present) {
6059 Branch(allocation_memento_present, cond, scratch_reg,
6060 Operand(isolate()->factory()->allocation_memento_map()));
6072 if (reg1.is_valid()) regs |= reg1.bit();
6073 if (reg2.is_valid()) regs |= reg2.bit();
6074 if (reg3.is_valid()) regs |= reg3.bit();
6075 if (reg4.is_valid()) regs |= reg4.bit();
6076 if (reg5.is_valid()) regs |= reg5.bit();
6077 if (reg6.is_valid()) regs |= reg6.bit();
6079 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
6080 Register candidate = Register::FromAllocationIndex(i);
6081 if (regs & candidate.bit()) continue;
6089 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
6094 DCHECK(!scratch1.is(scratch0));
6095 Factory* factory = isolate()->factory();
6096 Register current = scratch0;
6100 Move(current, object);
6106 DecodeField<Map::ElementsKindBits>(scratch1);
6109 Branch(&loop_again, ne, current, Operand(factory->null_value()));
6121 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
6122 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
6123 reg7.is_valid() + reg8.is_valid();
6126 if (reg1.is_valid()) regs |= reg1.bit();
6127 if (reg2.is_valid()) regs |= reg2.bit();
6128 if (reg3.is_valid()) regs |= reg3.bit();
6129 if (reg4.is_valid()) regs |= reg4.bit();
6130 if (reg5.is_valid()) regs |= reg5.bit();
6131 if (reg6.is_valid()) regs |= reg6.bit();
6132 if (reg7.is_valid()) regs |= reg7.bit();
6133 if (reg8.is_valid()) regs |= reg8.bit();
6134 int n_of_non_aliasing_regs = NumRegs(regs);
6136 return n_of_valid_regs != n_of_non_aliasing_regs;
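// Added note (not part of the original file): AreAliased compares the number
// of valid registers passed in with the number of distinct bits in the
// combined register mask; any duplicated register makes the two counts
// differ, which is reported as aliasing.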
6140 CodePatcher::CodePatcher(byte* address,
6142 FlushICache flush_cache)
6143 : address_(address),
6144 size_(instructions * Assembler::kInstrSize),
6145 masm_(NULL, address, size_ + Assembler::kGap),
6146 flush_cache_(flush_cache) {
6150 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6154 CodePatcher::~CodePatcher() {
6156 if (flush_cache_ == FLUSH) {
6157 CpuFeatures::FlushICache(address_, size_);
6161 DCHECK(masm_.pc_ == address_ + size_);
6162 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6166 void CodePatcher::Emit(Instr instr) {
6167 masm()->emit(instr);
6171 void CodePatcher::Emit(Address addr) {
6172 masm()->emit(reinterpret_cast<Instr>(addr));
6176 void CodePatcher::ChangeBranchCondition(Condition cond) {
6177 Instr instr = Assembler::instr_at(masm_.pc_);
6179 uint32_t opcode = Assembler::GetOpcodeField(instr);
6198 void MacroAssembler::TruncatingDiv(Register result,
6201 DCHECK(!dividend.is(result));
6202 DCHECK(!dividend.is(at));
6204 base::MagicNumbersForDivision<uint32_t> mag =
6206 li(at, Operand(mag.multiplier));
6207 Mulh(result, dividend, Operand(at));
6208 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6209 if (divisor > 0 && neg) {
6210 Addu(result, result, Operand(dividend));
6212 if (divisor < 0 && !neg && mag.multiplier > 0) {
6213 Subu(result, result, Operand(dividend));
6215 if (mag.shift > 0) sra(result, result, mag.shift);
6216 srl(at, dividend, 31);
6217 Addu(result, result, Operand(at));
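// Added note (not part of the original file): TruncatingDiv implements signed
// division by a constant using a precomputed magic multiplier (the
// multiply-high / shift technique, obtained here from
// base::SignedDivisionByConstant). An illustrative host-side C++ sketch of
// what the emitted sequence computes, given the precomputed (multiplier,
// shift) pair (hypothetical helper, not used by this file):
static inline int32_t TruncatingDivByConstant(int32_t dividend, int32_t divisor,
                                              uint32_t multiplier, int shift) {
  // High 32 bits of the signed 64-bit product, as Mulh produces.
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier)) >> 32);
  bool neg = (multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) result += dividend;                     // Addu above.
  if (divisor < 0 && !neg && multiplier > 0) result -= dividend;  // Subu above.
  if (shift > 0) result >>= shift;           // Arithmetic shift, like sra.
  result += static_cast<uint32_t>(dividend) >> 31;  // srl + Addu of sign bit.
  return result;
}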