#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS()                                                  \
  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n",   \
                       __FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
#endif
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
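// Illustrative usage sketch (added here; not part of the original header).
// A port-in-progress routes unfinished code paths through the macro so that
// debug builds print the file, line and function while release builds compile
// it away. The helper function below is hypothetical.
//
//   void PortedHelperNotDoneYet() {
//     UNIMPLEMENTED_MIPS();  // Debug: "...cc, \tline N: \tfunction
//                            // PortedHelperNotDoneYet not implemented."
//   }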
#ifdef _MIPS_ARCH_MIPS64R2
  static const ArchVariants kArchVariant = kMips64r2;
#elif _MIPS_ARCH_MIPS64R6
  static const ArchVariants kArchVariant = kMips64r6;
#endif
#define MIPS_ABI_N64 1
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
const bool IsMipsSoftFloatABI = true;
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
const uint64_t kFPU64InvalidResult =
    static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
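// Worked note (added; a sketch, not from the original header): the shift
// yields 0x8000000000000000 and subtracting 1 gives 0x7fffffffffffffff, the
// largest positive int64, used as the marker for failed FPU conversions.
//
//   static_assert(kFPU64InvalidResult == 0x7fffffffffffffffULL,
//                 "invalid-result marker is the max positive int64");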
  // Registers: return the name of the CPU register.
  static const char* Name(int reg);
  // FPURegisters: return the name of the FPU register.
  static const char* Name(int reg);

  static const RegisterAlias aliases_[];
const int kHiMask = 0xffff << 16;
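// Illustrative sketch (added; not part of the original header): kHiMask keeps
// the upper halfword of a 32-bit value, e.g. when a constant is split for a
// lui/ori pair. The variable names below are for illustration only.
//
//   uint32_t imm = 0x12345678;
//   uint32_t hi = (imm & kHiMask) >> 16;  // 0x1234, loaded with lui.
//   uint32_t lo = imm & 0xffff;           // 0x5678, merged with ori.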
  SLL = ((0 << 3) + 0),
  MOVCI = ((0 << 3) + 1),
  SRL = ((0 << 3) + 2),
  SRA = ((0 << 3) + 3),
  SLLV = ((0 << 3) + 4),
  SRLV = ((0 << 3) + 6),
  SRAV = ((0 << 3) + 7),
  JALR = ((1 << 3) + 1),
  MOVZ = ((1 << 3) + 2),
  MOVN = ((1 << 3) + 3),
  BREAK = ((1 << 3) + 5),
  MFHI = ((2 << 3) + 0),
  MFLO = ((2 << 3) + 2),
  MULT = ((3 << 3) + 0),
  MULTU = ((3 << 3) + 1),
  DIV = ((3 << 3) + 2),
  DIVU = ((3 << 3) + 3),
  ADD = ((4 << 3) + 0),
  ADDU = ((4 << 3) + 1),
  SUB = ((4 << 3) + 2),
  SUBU = ((4 << 3) + 3),
  AND = ((4 << 3) + 4),
  XOR = ((4 << 3) + 6),
  NOR = ((4 << 3) + 7),
  SLT = ((5 << 3) + 2),
  SLTU = ((5 << 3) + 3),
  TGE = ((6 << 3) + 0),
  TGEU = ((6 << 3) + 1),
  TLT = ((6 << 3) + 2),
  TLTU = ((6 << 3) + 3),
  TEQ = ((6 << 3) + 4),
  TNE = ((6 << 3) + 6),
  MUL = ((0 << 3) + 2),
  CLZ = ((4 << 3) + 0),
  CLO = ((4 << 3) + 1),
  EXT = ((0 << 3) + 0),
  INS = ((0 << 3) + 4),
  BLTZ = ((0 << 3) + 0) << 16,
  BGEZ = ((0 << 3) + 1) << 16,
  BLTZAL = ((2 << 3) + 0) << 16,
  BGEZAL = ((2 << 3) + 1) << 16,
  BGEZALL = ((2 << 3) + 3) << 16,
  MFC1 = ((0 << 3) + 0) << 21,
  CFC1 = ((0 << 3) + 2) << 21,
  MFHC1 = ((0 << 3) + 3) << 21,
  MTC1 = ((0 << 3) + 4) << 21,
  CTC1 = ((0 << 3) + 6) << 21,
  MTHC1 = ((0 << 3) + 7) << 21,
  BC1 = ((1 << 3) + 0) << 21,
  S = ((2 << 3) + 0) << 21,
  D = ((2 << 3) + 1) << 21,
  W = ((2 << 3) + 4) << 21,
  L = ((2 << 3) + 5) << 21,
  PS = ((2 << 3) + 6) << 21,
  ADD_D = ((0 << 3) + 0),
  SUB_D = ((0 << 3) + 1),
  MUL_D = ((0 << 3) + 2),
  DIV_D = ((0 << 3) + 3),
  ABS_D = ((0 << 3) + 5),
  MOV_D = ((0 << 3) + 6),
  NEG_D = ((0 << 3) + 7),
  MIN = ((3 << 3) + 4),
  MINA = ((3 << 3) + 5),
  MAX = ((3 << 3) + 6),
  MAXA = ((3 << 3) + 7),
  C_F_D = ((6 << 3) + 0),
  BC1EQZ = ((2 << 2) + 1) << 21,
  BC1NEZ = ((3 << 2) + 1) << 21,
  SEL = ((2 << 3) + 0),
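// Illustrative sketch (added; not part of the original header): for
// SPECIAL-class (R-type) instructions the 6-bit function field above sits in
// the low bits of the word, with rs/rt/rd at fixed positions. The shift
// amounts 21/16/11 follow the MIPS encoding but are assumed here rather than
// taken from this fragment.
//
//   // Encodes "addu rd, rs, rt": major opcode SPECIAL (0), funct ADDU (0x21).
//   inline uint32_t EncodeAddu(uint32_t rd, uint32_t rs, uint32_t rt) {
//     return (rs << 21) | (rt << 16) | (rd << 11) | ADDU;
//   }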
  // Get the raw instruction bits.
  inline Instr InstructionBits() const {
    return *reinterpret_cast<const Instr*>(this);
  }
  // Set the raw instruction bits to value.
  inline void SetInstructionBits(Instr value) {
    *reinterpret_cast<Instr*>(this) = value;
  }
  // Read one particular bit out of the instruction bits.
  inline int Bit(int nr) const {
    return (InstructionBits() >> nr) & 1;
  }
  inline Opcode OpcodeValue() const {
    return static_cast<Opcode>(
        Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
  }
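// Illustrative sketch (added; not part of the original header): decoding a raw
// instruction word through the accessors of this class. Instruction::At(),
// OpcodeFieldRaw() and FunctionFieldRaw() are declared elsewhere in the class;
// SPECIAL comes from the (elided) Opcode enum, and pc is hypothetical.
//
//   Instruction* instr = Instruction::At(pc);
//   if (instr->OpcodeFieldRaw() == SPECIAL &&
//       instr->FunctionFieldRaw() == ADDU) {
//     // An R-type addu: operands live in the rs/rt/rd register fields.
//   }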