37 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
38 #define V8_MIPS_ASSEMBLER_MIPS_INL_H_
65 imm64_ =
reinterpret_cast<int64_t
>(f.address());
72 imm64_ =
reinterpret_cast<intptr_t
>(value);
82 bool Operand::is_reg()
const {
83 return rm_.is_valid();
108 DCHECK(reg.code() % 2 == 0);
113 return (reg.code() / 2);
120 void RelocInfo::apply(intptr_t delta,
ICacheFlushMode icache_flush_mode) {
123 byte* p =
reinterpret_cast<byte*
>(
pc_);
130 Address RelocInfo::target_address() {
136 Address RelocInfo::target_address_address() {
157 return reinterpret_cast<Address>(
162 Address RelocInfo::constant_pool_entry_address() {
168 int RelocInfo::target_address_size() {
173 void RelocInfo::set_target_address(
Address target,
182 host(),
this, HeapObject::cast(target_code));
197 Object* RelocInfo::target_object() {
203 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
205 return Handle<Object>(
reinterpret_cast<Object**
>(
210 void RelocInfo::set_target_object(
Object* target,
215 reinterpret_cast<Address>(target),
219 target->IsHeapObject()) {
226 Address RelocInfo::target_reference() {
232 Address RelocInfo::target_runtime_entry(Assembler* origin) {
234 return target_address();
238 void RelocInfo::set_target_runtime_entry(
Address target,
242 if (target_address() != target)
243 set_target_address(target, write_barrier_mode, icache_flush_mode);
247 Handle<Cell> RelocInfo::target_cell_handle() {
250 return Handle<Cell>(
reinterpret_cast<Cell**
>(address));
254 Cell* RelocInfo::target_cell() {
260 void RelocInfo::set_target_cell(Cell* cell,
284 Code* RelocInfo::code_age_stub() {
291 void RelocInfo::set_code_age_stub(Code* stub,
300 Address RelocInfo::call_address() {
310 void RelocInfo::set_call_address(
Address target) {
320 host(),
this, HeapObject::cast(target_code));
325 Object* RelocInfo::call_object() {
326 return *call_object_address();
330 Object** RelocInfo::call_object_address() {
337 void RelocInfo::set_call_object(
Object* target) {
338 *call_object_address() = target;
342 void RelocInfo::WipeOut() {
351 bool RelocInfo::IsPatchedReturnSequence() {
363 return patched_return;
367 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
376 visitor->VisitEmbeddedPointer(
this);
378 visitor->VisitCodeTarget(
this);
380 visitor->VisitCell(
this);
382 visitor->VisitExternalReference(
this);
384 visitor->VisitCodeAgeSequence(
this);
386 IsPatchedReturnSequence()) ||
388 IsPatchedDebugBreakSlotSequence())) &&
389 isolate->debug()->has_break_points()) {
390 visitor->VisitDebugTarget(
this);
392 visitor->VisitRuntimeEntry(
this);
397 template<
typename StaticVisitor>
401 StaticVisitor::VisitEmbeddedPointer(heap,
this);
403 StaticVisitor::VisitCodeTarget(heap,
this);
405 StaticVisitor::VisitCell(heap,
this);
407 StaticVisitor::VisitExternalReference(
this);
409 StaticVisitor::VisitCodeAgeSequence(heap,
this);
410 }
else if (heap->isolate()->debug()->has_break_points() &&
412 IsPatchedReturnSequence()) ||
414 IsPatchedDebugBreakSlotSequence()))) {
415 StaticVisitor::VisitDebugTarget(heap,
this);
417 StaticVisitor::VisitRuntimeEntry(
this);
454 *
reinterpret_cast<uint64_t*
>(
pc_) = x;
#define kLithiumScratchDouble
static const int kSpecialTargetSize
bool is_buffer_growth_blocked() const
static const int kInstrSize
void CheckTrampolinePool()
static Address target_address_at(Address pc, ConstantPoolArray *constant_pool)
void CheckTrampolinePoolQuick()
static bool IsNop(Instr instr, int type=NON_MARKING_NOP)
static const int kPatchDebugBreakSlotReturnOffset
static Address break_address_from_return_address(Address pc)
static void set_target_address_at(Address pc, ConstantPoolArray *constant_pool, Address target, ICacheFlushMode icache_flush_mode=FLUSH_ICACHE_IF_NEEDED)
static Address target_address_from_return_address(Address pc)
static const int kCallTargetAddressOffset
static const int kInstructionsFor64BitConstant
static int RelocateInternalReference(byte *pc, intptr_t pc_delta)
static const int kValueOffset
static Cell * FromValueAddress(Address value)
static Code * GetCodeFromTargetAddress(Address address)
byte * instruction_start()
static void FlushICache(void *start, size_t size)
static bool IsSupported(CpuFeature f)
static bool SupportsCrankshaft()
IncrementalMarking * incremental_marking()
static Object *& Object_at(Address addr)
static Address & Address_at(Address addr)
Operand(Register reg, Shift shift=LSL, unsigned shift_amount=0)
Immediate immediate() const
static bool IsDebugBreakSlot(Mode mode)
static bool IsJSReturn(Mode mode)
static bool IsEmbeddedObject(Mode mode)
static bool IsRuntimeEntry(Mode mode)
static bool IsCodeTarget(Mode mode)
static bool IsExternalReference(Mode mode)
static bool IsInternalReference(Mode mode)
static bool IsCodeAgeSequence(Mode mode)
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define DCHECK(condition)
kSerializedDataOffset Object
const int kFunctionFieldMask
static const int kNoCodeAgeSequenceLength
Debugger support for the V8 JavaScript engine.
static int NumAllocatableRegisters()
static int NumRegisters()
static int NumAllocatableAliasedRegisters()
static const int kMaxNumAllocatableRegisters
static int ToAllocationIndex(FPURegister reg)
static const int kMaxNumRegisters
static int NumAllocatableRegisters()
static const int kMaxNumAllocatableRegisters