#ifndef V8_LITHIUM_ALLOCATOR_H_
#define V8_LITHIUM_ALLOCATOR_H_

if (pos != NULL) return pos->hint();

return &fixed_live_ranges_;
return &fixed_double_live_ranges_;

LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }

allocation_ok_ = false;
return next_virtual_register_++;
has_osr_entry_ = true;
return assigned_registers_;
return assigned_double_registers_;
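The fragments above are the bodies of small inline accessors. The next_virtual_register_++ line, for example, plausibly belongs to a bounds-checked counter along the following lines (a hedged reconstruction, not verbatim; the bound and the overflow handling are assumptions):

// Plausible reconstruction: hand out fresh virtual register numbers and
// flag failure via allocation_ok_ instead of silently overflowing.
struct VirtualRegisterCounter {
  static const int kMaxVirtualRegisters = 1 << 16;  // illustrative bound
  int next_virtual_register_ = 0;
  bool allocation_ok_ = true;

  int GetVirtualRegister() {
    if (next_virtual_register_ >= kMaxVirtualRegisters) {
      allocation_ok_ = false;  // caller checks this and aborts allocation
    }
    return next_virtual_register_++;
  }
};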
ZoneList< LiveRange * > unhandled_live_ranges_
bool CanEagerlyResolveControlFlow(HBasicBlock *block) const
BitVector * assigned_registers_
Isolate * isolate() const
void SplitAndSpillIntersecting(LiveRange *range)
void AddToUnhandledUnsorted(LiveRange *range)
ZoneList< BitVector * > live_in_sets_
LiveRange * LiveRangeFor(LOperand *operand)
GrowableBitVector double_artificial_registers_
void ActiveToHandled(LiveRange *range)
int next_virtual_register_
bool IsBlockBoundary(LifetimePosition pos)
LiveRange * SplitRangeAt(LiveRange *range, LifetimePosition pos)
int RegisterCount() const
BitVector * ComputeLiveOut(HBasicBlock *block)
void AllocateDoubleRegisters()
void PopulatePointerMaps()
LInstruction * InstructionAt(int index)
void SetLiveRangeAssignedRegister(LiveRange *range, int reg)
void InitializeLivenessAnalysis()
LPlatformChunk * chunk() const
ZoneList< LiveRange * > active_live_ranges_
void ResolveControlFlow(LiveRange *range, HBasicBlock *block, HBasicBlock *pred)
void AllocateGeneralRegisters()
BitVector * assigned_registers()
void ResolvePhis(HBasicBlock *block)
LOperand * TryReuseSpillSlot(LiveRange *range)
LifetimePosition FindOptimalSplitPos(LifetimePosition start, LifetimePosition end)
HBasicBlock * GetBlock(LifetimePosition pos)
LiveRange * FixedDoubleLiveRangeFor(int index)
ZoneList< LiveRange * > inactive_live_ranges_
void InactiveToActive(LiveRange *range)
const Vector< LiveRange * > * fixed_live_ranges() const
EmbeddedVector< LiveRange *, Register::kMaxNumAllocatableRegisters > fixed_live_ranges_
void AddConstraintsGapMove(int index, LOperand *from, LOperand *to)
LParallelMove * GetConnectingParallelMove(LifetimePosition pos)
void Use(LifetimePosition block_start, LifetimePosition position, LOperand *operand, LOperand *hint)
void ProcessInstructions(HBasicBlock *block, BitVector *live)
BitVector * assigned_double_registers_
void AddInitialIntervals(HBasicBlock *block, BitVector *live_out)
ZoneList< LiveRange * > reusable_slots_
void InactiveToHandled(LiveRange *range)
LAllocator(int first_virtual_register, HGraph *graph)
RegisterKind RequiredRegisterKind(int virtual_register) const
const ZoneList< LiveRange * > * live_ranges() const
void SpillBetweenUntil(LiveRange *range, LifetimePosition start, LifetimePosition until, LifetimePosition end)
LOperand * AllocateFixed(LUnallocated *operand, int pos, bool is_tagged)
void MeetConstraintsBetween(LInstruction *first, LInstruction *second, int gap_index)
bool TryAllocateFreeReg(LiveRange *range)
LGap * GetLastGap(HBasicBlock *block)
void SpillBetween(LiveRange *range, LifetimePosition start, LifetimePosition end)
LifetimePosition FindOptimalSpillingPos(LiveRange *range, LifetimePosition pos)
EmbeddedVector< LiveRange *, DoubleRegister::kMaxNumAllocatableRegisters > fixed_double_live_ranges_
LiveRange * LiveRangeFor(int index)
static int FixedLiveRangeID(int index)
void Spill(LiveRange *range)
void MeetRegisterConstraints(HBasicBlock *block)
void Define(LifetimePosition position, LOperand *operand, LOperand *hint)
void AddToActive(LiveRange *range)
void ActiveToInactive(LiveRange *range)
void FreeSpillSlot(LiveRange *range)
static void TraceAlloc(const char *msg,...)
LiveRange * FixedLiveRangeFor(int index)
HPhi * LookupPhi(LOperand *operand) const
ZoneList< LiveRange * > live_ranges_
void SpillAfter(LiveRange *range, LifetimePosition pos)
const char * RegisterName(int allocation_index)
void MeetRegisterConstraints()
void AddToInactive(LiveRange *range)
bool HasTaggedValue(int virtual_register) const
static int FixedDoubleLiveRangeID(int index)
DISALLOW_COPY_AND_ASSIGN(LAllocator)
void AddToUnhandledSorted(LiveRange *range)
bool Allocate(LChunk *chunk)
bool SafePointsAreInOrder() const
const Vector< LiveRange * > * fixed_double_live_ranges() const
int first_artificial_register_
BitVector * assigned_double_registers()
LiveRange * SplitBetween(LiveRange *range, LifetimePosition start, LifetimePosition end)
void ResolveControlFlow()
void AllocateBlockedReg(LiveRange *range)
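The unhandled/active/inactive lists and the transition helpers above (AddToActive, ActiveToHandled, InactiveToActive, ...) are the standard linear-scan bookkeeping. A self-contained sketch of the main loop they imply, simplified to one register class with no range splitting; all names here are illustrative, not V8's:

#include <algorithm>
#include <vector>

// Simplified linear scan: pull ranges off a start-sorted unhandled list,
// retire finished ranges from active, then try a free register and fall
// back to spilling (where V8 would instead split via AllocateBlockedReg).
struct RangeSketch { int start, end, reg = -1; bool spilled = false; };

void LinearScan(std::vector<RangeSketch*>& unhandled, int num_registers) {
  std::sort(unhandled.begin(), unhandled.end(),
            [](const RangeSketch* a, const RangeSketch* b) {
              return a->start < b->start;
            });
  std::vector<RangeSketch*> active;
  std::vector<bool> free_reg(num_registers, true);
  for (RangeSketch* current : unhandled) {
    // ActiveToHandled: ranges that ended before current starts free a reg.
    for (auto it = active.begin(); it != active.end();) {
      if ((*it)->end <= current->start) {
        free_reg[(*it)->reg] = true;
        it = active.erase(it);
      } else {
        ++it;
      }
    }
    // TryAllocateFreeReg: take any register not held by an active range.
    for (int r = 0; r < num_registers; ++r) {
      if (free_reg[r]) { current->reg = r; free_reg[r] = false; break; }
    }
    if (current->reg >= 0) {
      active.push_back(current);  // AddToActive
    } else {
      current->spilled = true;    // stand-in for AllocateBlockedReg
    }
  }
}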
LAllocatorPhase(const char *name, LAllocator *allocator)
DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase)
unsigned allocator_zone_start_allocation_size_
static const int kMaxVirtualRegisters
static LifetimePosition MaxPosition()
LifetimePosition InstructionStart() const
bool IsInstructionStart() const
static LifetimePosition Invalid()
STATIC_ASSERT(IS_POWER_OF_TWO(kStep))
LifetimePosition(int value)
LifetimePosition InstructionEnd() const
static LifetimePosition FromInstructionIndex(int index)
int InstructionIndex() const
LifetimePosition NextInstruction() const
LifetimePosition PrevInstruction() const
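LifetimePosition packs two positions per instruction, a start half and an end half, with kStep a power of two so the arithmetic reduces to masks and shifts. A minimal self-contained sketch of that encoding, assuming kStep == 2 (consistent with the STATIC_ASSERT(IS_POWER_OF_TWO(kStep)) in this listing):

#include <cassert>

// Sketch of the encoding: value = instruction_index * kStep; even values
// are instruction starts, odd values instruction ends. kStep == 2 is an
// assumption, not confirmed by the listing.
class PositionSketch {
 public:
  static const int kStep = 2;
  static PositionSketch FromInstructionIndex(int index) {
    return PositionSketch(index * kStep);
  }
  static PositionSketch Invalid() { return PositionSketch(-1); }
  int InstructionIndex() const { return value_ / kStep; }
  bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
  PositionSketch InstructionStart() const {
    return PositionSketch(value_ & ~(kStep - 1));
  }
  PositionSketch InstructionEnd() const {
    return PositionSketch(InstructionStart().value_ + kStep / 2);
  }
  PositionSketch NextInstruction() const {
    return PositionSketch(InstructionStart().value_ + kStep);
  }
  int Value() const { return value_; }
 private:
  explicit PositionSketch(int value) : value_(value) {}
  int value_;
};

int main() {
  PositionSketch p = PositionSketch::FromInstructionIndex(5);
  assert(p.Value() == 10 && p.IsInstructionStart());
  assert(p.InstructionEnd().Value() == 11);             // the odd "end" half
  assert(p.NextInstruction().InstructionIndex() == 6);  // following start
}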
bool HasRegisterAssigned() const
UsePosition * NextUsePositionRegisterIsBeneficial(LifetimePosition start)
LOperand * current_hint_operand() const
void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone *zone)
bool Covers(LifetimePosition position)
void ShortenTo(LifetimePosition start)
LOperand * FirstHint() const
void SetSpillOperand(LOperand *operand)
int assigned_register() const
static const int kInvalidAssignment
RegisterKind Kind() const
LOperand * current_hint_operand_
LOperand * spill_operand_
LifetimePosition Start() const
UseInterval * last_interval_
LOperand * GetSpillOperand() const
LOperand * CreateAssignedOperand(Zone *zone)
UsePosition * PreviousUsePositionRegisterIsBeneficial(LifetimePosition start)
bool HasAllocatedSpillOperand() const
LifetimePosition FirstIntersection(LiveRange *other)
UseInterval * current_interval_
UsePosition * NextRegisterPosition(LifetimePosition start)
UseInterval * first_interval() const
LiveRange(int id, Zone *zone)
void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone *zone)
void SetSpillStartIndex(int start)
LiveRange * parent() const
UseInterval * first_interval_
void AdvanceLastProcessedMarker(UseInterval *to_start_of, LifetimePosition but_not_past) const
void ConvertOperands(Zone *zone)
bool ShouldBeAllocatedBefore(const LiveRange *other) const
LifetimePosition End() const
int spill_start_index() const
void set_assigned_register(int reg, Zone *zone)
bool CanCover(LifetimePosition position) const
UseInterval * FirstSearchIntervalForPosition(LifetimePosition position) const
UsePosition * first_pos() const
UsePosition * last_processed_use_
bool CanBeSpilled(LifetimePosition pos)
void SplitAt(LifetimePosition position, LiveRange *result, Zone *zone)
void MakeSpilled(Zone *zone)
UsePosition * NextUsePosition(LifetimePosition start)
void AddUsePosition(LifetimePosition pos, LOperand *operand, LOperand *hint, Zone *zone)
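A LiveRange chains sorted UseIntervals (first_interval_/last_interval_) and position-ordered UsePositions; Covers presumably walks the interval chain. A minimal sketch assuming half-open [start, end) intervals sorted by start:

// Sketch: report whether any interval in a start-sorted, singly linked
// chain of half-open [start, end) intervals covers the queried position.
struct IntervalNode {
  int start, end;
  IntervalNode* next;
};

bool Covers(const IntervalNode* first, int position) {
  for (const IntervalNode* i = first; i != nullptr; i = i->next) {
    if (position < i->start) return false;  // sorted: no later interval hits
    if (position < i->end) return true;
  }
  return false;
}

The last_processed_use_ and AdvanceLastProcessedMarker members suggest the real implementation caches a walk cursor so repeated queries at increasing positions stay cheap; this sketch omits that optimization.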
UseInterval * next() const
void set_next(UseInterval *next)
UseInterval(LifetimePosition start, LifetimePosition end)
LifetimePosition end() const
void set_start(LifetimePosition start)
LifetimePosition start() const
void SplitAt(LifetimePosition pos, Zone *zone)
bool Contains(LifetimePosition point) const
LifetimePosition Intersect(const UseInterval *other) const
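Intersect on half-open intervals presumably returns the first position common to both, or Invalid() when they are disjoint. A hedged standalone sketch over plain ints, with -1 standing in for Invalid():

#include <cassert>

// Sketch: [start, end) intervals; Intersect returns the first position in
// both, or -1 when disjoint. Normalizing so *this starts first keeps the
// logic to a single comparison.
struct IntervalSketch {
  int start, end;
  bool Contains(int point) const { return start <= point && point < end; }
  int Intersect(const IntervalSketch& other) const {
    if (other.start < start) return other.Intersect(*this);
    return other.start < end ? other.start : -1;
  }
};

int main() {
  IntervalSketch a{2, 8}, b{5, 12}, c{9, 10};
  assert(a.Intersect(b) == 5);   // overlap begins where b starts
  assert(b.Intersect(a) == 5);   // symmetric
  assert(a.Intersect(c) == -1);  // disjoint
}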
LifetimePosition const pos_
bool register_beneficial_
LifetimePosition pos() const
void set_next(UsePosition *next)
bool RequiresRegister() const
bool RegisterIsBeneficial() const
UsePosition * next() const
LOperand * operand() const
UsePosition(LifetimePosition pos, LOperand *operand, LOperand *hint)
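UsePositions form a chain ordered by position; NextUsePosition and NextRegisterPosition on LiveRange presumably scan it for the first use at or after a given point. A sketch with illustrative field names:

// Sketch: scan a position-ordered use chain for the first use at or after
// `start`, optionally requiring one that must live in a register.
struct UseNode {
  int pos;
  bool requires_register;
  UseNode* next;
};

UseNode* NextUse(UseNode* first, int start) {
  while (first != nullptr && first->pos < start) first = first->next;
  return first;
}

UseNode* NextRegisterUse(UseNode* first, int start) {
  UseNode* use = NextUse(first, start);
  while (use != nullptr && !use->requires_register) use = use->next;
  return use;
}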
#define DCHECK(condition)
#define IS_POWER_OF_TWO(x)
static LifetimePosition Min(LifetimePosition a, LifetimePosition b)