#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_
INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
  return MarkBitFrom(reinterpret_cast<Address>(obj));
}
// Impossible markbits: 01
INLINE(static bool IsImpossible(MarkBit mark_bit)) {
  return !mark_bit.Get() && mark_bit.Next().Get();
}
// Black markbits: 10
INLINE(static bool IsBlack(MarkBit mark_bit)) {
  return mark_bit.Get() && !mark_bit.Next().Get();
}
// Grey markbits: 11
INLINE(static bool IsGrey(MarkBit mark_bit)) {
  return mark_bit.Get() && mark_bit.Next().Get();
}
INLINE(static void BlackToGrey(HeapObject* obj)) {
  BlackToGrey(MarkBitFrom(obj));
}
static const char* ColorName(ObjectColor color) {
  switch (color) {
    case BLACK_OBJECT: return "black";
    case WHITE_OBJECT: return "white";
    case GREY_OBJECT: return "grey";
    case IMPOSSIBLE_COLOR: return "impossible";
  }
  return "error";
}

static ObjectColor Color(HeapObject* obj) {
  return Color(Marking::MarkBitFrom(obj));
}

static ObjectColor Color(MarkBit mark_bit) {
  if (IsBlack(mark_bit)) return BLACK_OBJECT;
  if (IsWhite(mark_bit)) return WHITE_OBJECT;
  if (IsGrey(mark_bit)) return GREY_OBJECT;
  UNREACHABLE();
  return IMPOSSIBLE_COLOR;
}
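// Illustration (not part of the original header): a standalone sketch of the
// two-bit tri-color encoding the predicates above imply -- white 00, grey 11,
// black 10, leaving 01 impossible. Names below are hypothetical, not V8's.
enum class SketchColor { WHITE, GREY, BLACK, IMPOSSIBLE };

constexpr SketchColor DecodeMarkBits(bool bit, bool next_bit) {
  // Mirrors the order of tests in Marking::Color() above.
  return (bit && !next_bit)  ? SketchColor::BLACK
       : (!bit && !next_bit) ? SketchColor::WHITE
       : (bit && next_bit)   ? SketchColor::GREY
                             : SketchColor::IMPOSSIBLE;
}

static_assert(DecodeMarkBits(false, false) == SketchColor::WHITE, "00 = white");
static_assert(DecodeMarkBits(true, false) == SketchColor::BLACK, "10 = black");
static_assert(DecodeMarkBits(true, true) == SketchColor::GREY, "11 = grey");
static_assert(DecodeMarkBits(false, true) == SketchColor::IMPOSSIBLE,
              "01 = impossible");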
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
  MarkBit from_mark_bit = MarkBitFrom(from);
  MarkBit to_mark_bit = MarkBitFrom(to);
  bool is_black = false;
  if (from_mark_bit.Get()) {
    to_mark_bit.Set();
    is_black = true;  // Looks black so far.
  }
  if (from_mark_bit.Next().Get()) {
    to_mark_bit.Next().Set();
    is_black = false;  // Was actually grey.
  }
  return is_black;
}
// In MarkingDeque::Initialize -- deque capacity is rounded to a power of two:
mask_ = base::bits::RoundDownToPowerOfTwo32(
            static_cast<uint32_t>(obj_high - obj_low)) - 1;
// MarkingDeque::PushBlack -- a full deque demotes the pushed object back to
// grey and records overflow:
DCHECK(object->IsHeapObject());
Marking::BlackToGrey(object);
// MarkingDeque::PushGrey, Pop, and UnshiftGrey each begin with the same check:
DCHECK(object->IsHeapObject());
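// Illustration (not part of the original header): a minimal fixed-capacity
// marking-deque sketch, assuming only what the fragments above show -- a
// power-of-two backing array indexed with "& mask_". V8's overflow handling
// (demoting the object to grey, as PushBlack does above) is reduced here to
// setting overflowed_. All names are hypothetical.
#include <cstdint>

template <typename T, uint32_t kCapacity>  // kCapacity must be a power of two
class SimpleMarkingDeque {
 public:
  SimpleMarkingDeque() : top_(0), bottom_(0), overflowed_(false) {}

  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }
  bool IsEmpty() const { return top_ == bottom_; }

  void Push(T* object) {
    if (IsFull()) {
      overflowed_ = true;  // V8 would also demote the object back to grey.
      return;
    }
    array_[top_] = object;
    top_ = (top_ + 1) & mask_;  // wrap around via the power-of-two mask
  }

  T* Pop() {
    if (IsEmpty()) return nullptr;
    top_ = (top_ - 1) & mask_;  // unsigned arithmetic wraps cleanly
    return array_[top_];
  }

 private:
  static const uint32_t mask_ = kCapacity - 1;
  T* array_[kCapacity];
  uint32_t top_;
  uint32_t bottom_;
  bool overflowed_;
};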
static const char* SlotTypeToString(SlotType type) {
  switch (type) {
    case EMBEDDED_OBJECT_SLOT: return "EMBEDDED_OBJECT_SLOT";
    case RELOCATED_CODE_OBJECT: return "RELOCATED_CODE_OBJECT";
    case CODE_TARGET_SLOT: return "CODE_TARGET_SLOT";
    case CODE_ENTRY_SLOT: return "CODE_ENTRY_SLOT";
    case DEBUG_TARGET_SLOT: return "DEBUG_TARGET_SLOT";
    case JS_RETURN_SLOT: return "JS_RETURN_SLOT";
    case NUMBER_OF_SLOT_TYPES: return "NUMBER_OF_SLOT_TYPES";
  }
  return "UNKNOWN SlotType";
}
static int SizeOfChain(SlotsBuffer* buffer) {
  if (buffer == NULL) return 0;
  return static_cast<int>(buffer->idx_ +
                          (buffer->chain_length_ - 1) * kNumberOfElements);
}
static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                  bool code_slots_filtering_required) {
  while (buffer != NULL) {
    if (code_slots_filtering_required) {
      buffer->UpdateSlotsWithFilter(heap);
    } else {
      buffer->UpdateSlots(heap);
    }
    buffer = buffer->next();
  }
}
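// Illustration (not part of the original header): the arithmetic behind
// SizeOfChain above. Only the head buffer is partially filled (idx_ slots
// used); every older link in the chain is full, so the total is
// idx_ + (chain_length_ - 1) * kNumberOfElements. A hypothetical miniature:
struct MiniSlotsBuffer {
  static const int kNumberOfElements = 1021;  // V8 uses a constant of this shape
  int idx_;                  // slots used in this (head) buffer
  int chain_length_;         // buffers in the chain, including this one
  MiniSlotsBuffer* next_;
};

inline int MiniSizeOfChain(const MiniSlotsBuffer* buffer) {
  if (buffer == nullptr) return 0;
  return buffer->idx_ +
         (buffer->chain_length_ - 1) * MiniSlotsBuffer::kNumberOfElements;
}
// e.g. a head with idx_ == 42 chained to one full link yields
// 42 + 1 * 1021 == 1063 recorded slots.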
// In SlotsBuffer::AddTo -- link a freshly allocated buffer into the chain:
*buffer_address = buffer;
// CodeFlusher::AddCandidate(JSFunction* function):
DCHECK(function->code() == function->shared()->code());

// CodeFlusher::GetNextCandidate(JSFunction* candidate) -- the list is threaded
// through the function's next_function_link field:
Object* next_candidate = candidate->next_function_link();
return reinterpret_cast<JSFunction*>(next_candidate);

// CodeFlusher::SetNextCandidate(JSFunction*):
candidate->set_next_function_link(next_candidate);

// CodeFlusher::ClearNextCandidate(JSFunction*, Object* undefined):
DCHECK(undefined->IsUndefined());

// CodeFlusher::GetNextCandidate(SharedFunctionInfo*) -- this list is threaded
// through the candidate's code object's gc_metadata slot:
Object* next_candidate = candidate->code()->gc_metadata();

// CodeFlusher::SetNextCandidate(SharedFunctionInfo*):
candidate->code()->set_gc_metadata(next_candidate);

// CodeFlusher::GetNextCodeMap, SetNextCodeMap, and ClearNextCodeMap all begin
// by loading the holder's optimized code map:
FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
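// Illustration (not part of the original header): CodeFlusher keeps its
// candidate lists intrusive -- JSFunction candidates are threaded through
// next_function_link(), SharedFunctionInfo candidates through their code
// object's gc_metadata slot -- so no side allocation is needed during GC.
// A generic sketch of the pattern, with hypothetical names:
struct Candidate {
  Candidate* next_link = nullptr;  // field borrowed for list threading
};

class IntrusiveCandidateList {
 public:
  void Add(Candidate* candidate) {
    // Push onto the head, reusing the candidate's own link field.
    candidate->next_link = head_;
    head_ = candidate;
  }

  Candidate* PopAll() {
    // Detach the whole list in O(1); callers then walk next_link.
    Candidate* list = head_;
    head_ = nullptr;
    return list;
  }

 private:
  Candidate* head_ = nullptr;
};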
class ThreadLocalTop;

bool in_use() { return state_ > PREPARE_GC; }
bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
void VerifyMarkbitsAreClean();
void VerifyWeakEmbeddedObjectsInCode();
void VerifyOmittedMapChecks();
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
  return Page::FromAddress(reinterpret_cast<Address>(anchor))
      ->ShouldSkipEvacuationSlotRecording();
}
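// Illustration (not part of the original header): ShouldSkipEvacuationSlotRecording
// and MemoryChunk::FromAddress rely on pages being aligned, so any interior
// address can be masked down to its page header. A standalone sketch of that
// masking trick, with a hypothetical page size:
#include <cstdint>

static const uintptr_t kSketchPageSize = uintptr_t(1) << 20;  // hypothetical 1 MB
static const uintptr_t kSketchPageAlignmentMask = kSketchPageSize - 1;

struct SketchPageHeader {
  bool should_skip_slot_recording;
};

// Every object on a page shares the page's header, found by masking off the
// low bits of any address inside the page.
inline SketchPageHeader* SketchPageFromAddress(uintptr_t addr) {
  return reinterpret_cast<SketchPageHeader*>(addr & ~kSketchPageAlignmentMask);
}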
// In MarkCompactCollector::EvictEvacuationCandidate(Page* page):
if (FLAG_trace_fragmentation) {
  PrintF("Page %p is too popular. Disabling evacuation.\n",
         reinterpret_cast<void*>(page));
}
enum CollectorState {
  IDLE, PREPARE_GC, MARK_LIVE_OBJECTS, SWEEP_SPACES,
  ENCODE_FORWARDING_ADDRESSES, UPDATE_POINTERS, RELOCATE_OBJECTS
};

// The current stage of the collector.
CollectorState state_;
void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                         int number_of_own_descriptors);
int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                     int start, int end, int new_start);
friend class MarkObjectVisitor;
friend class UnmarkObjectVisitor;
last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
    chunk_->AddressToMarkbitIndex(chunk_->area_end())));
cell_base_ = chunk_->area_start();
cell_index_ = Bitmap::IndexToCell(
    Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
cells_ = chunk_->markbits()->cells();
inline bool Done() { return cell_index_ == last_cell_index_; }

inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
inline MarkBit::CellType* CurrentCell() {
  DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                            chunk_->AddressToMarkbitIndex(cell_base_))));
  return &cells_[cell_index_];
}

inline Address CurrentCellBase() {
  DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                            chunk_->AddressToMarkbitIndex(cell_base_))));
  return cell_base_;
}
explicit SequentialSweepingScope(MarkCompactCollector* collector)
    : collector_(collector) {
  collector_->set_sequential_sweeping(true);
}
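// Illustration (not part of the original header): the scope above is a
// standard RAII toggle -- the constructor flips the collector into sequential
// sweeping and the destructor (see the member index below) is expected to flip
// it back. A self-contained sketch of the same pattern, with hypothetical
// names:
class SequentialFlagScope {
 public:
  explicit SequentialFlagScope(bool* flag) : flag_(flag) { *flag_ = true; }
  ~SequentialFlagScope() { *flag_ = false; }  // restored even on early return

 private:
  bool* flag_;
};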
// ---------------------------------------------------------------------------
// Member index (regrouped from the extracted class listings):

// MarkBitCellIterator members:
MarkBitCellIterator(MemoryChunk* chunk);
Address CurrentCellBase();
MarkBit::CellType* CurrentCell();
MarkBit::CellType* cells_;
unsigned int last_cell_index_;

// SequentialSweepingScope members:
SequentialSweepingScope(MarkCompactCollector* collector);
~SequentialSweepingScope();
MarkCompactCollector* collector_;
// CodeFlusher members:
CodeFlusher(Isolate* isolate);

void AddCandidate(SharedFunctionInfo* shared_info);
void AddCandidate(JSFunction* function);
void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictAllCandidates();
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
void EvictOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
void ProcessOptimizedCodeMaps();
void IteratePointersToFromSpace(ObjectVisitor* v);

static JSFunction** GetNextCandidateSlot(JSFunction* candidate);
static JSFunction* GetNextCandidate(JSFunction* candidate);
static void SetNextCandidate(JSFunction* candidate, JSFunction* next_candidate);
static void ClearNextCandidate(JSFunction* candidate, Object* undefined);
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate);
static void SetNextCandidate(SharedFunctionInfo* candidate,
                             SharedFunctionInfo* next_candidate);
static void ClearNextCandidate(SharedFunctionInfo* candidate);
static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
static void SetNextCodeMap(SharedFunctionInfo* holder,
                           SharedFunctionInfo* next_holder);
static void ClearNextCodeMap(SharedFunctionInfo* holder);

JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
SharedFunctionInfo* optimized_code_map_holder_head_;

DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
// Cross-referenced declarations from other V8 headers:
void set(int index, Object* value);                     // FixedArray
void set_undefined(int index);                          // FixedArray
static Object** RawField(HeapObject* obj, int offset);  // HeapObject
static const int kNextFunctionLinkOffset;               // JSFunction
// MarkCompactCollector members:
explicit MarkCompactCollector(Heap* heap);

// Compaction setup.
enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
bool StartCompaction(CompactionMode mode);
void CollectEvacuationCandidates(PagedSpace* space);
void AddEvacuationCandidate(Page* p);
void MoveEvacuationCandidatesToEndOfPagesList();
INLINE(void EvictEvacuationCandidate(Page* page));
INLINE(static bool IsOnEvacuationCandidate(Object* obj));
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor));
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host));
bool is_compacting() const;

// Marking.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
void MarkAllocationSite(AllocationSite* site);
void MarkRoots(RootMarkingVisitor* visitor);
void MarkStringTable(RootMarkingVisitor* visitor);
void MarkImplicitRefGroups();
void MarkWeakObjectToCodeTable();
void ProcessMarkingDeque();
void RefillMarkingDeque();
void ProcessEphemeralMarking(ObjectVisitor* visitor);
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
static bool IsMarked(Object* obj);
static bool IsUnmarkedHeapObject(Object** p);
static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

// Code flushing.
void PrepareForCodeFlushing();
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
void EnableCodeFlushing(bool enable);
bool is_code_flushing_enabled() const;
CodeFlusher* code_flusher();

// Clearing non-live references.
void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
void ClearMapTransitions(Map* map);
bool ClearMapBackPointer(Map* map);
void ClearDependentCode(DependentCode* dependent_code);
void ClearDependentICList(Object* head);
void ClearNonLiveDependentCode(DependentCode* dependent_code);
int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                     int start, int end, int new_start);
void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                         int number_of_own_descriptors);
void TrimEnumCache(Map* map, DescriptorArray* descriptors);

// Weak collections.
void ProcessWeakCollections();
void ClearWeakCollections();
void AbortWeakCollections();

// Invalidated code.
void InvalidateCode(Code* code);
bool MarkInvalidatedCode();
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
bool WillBeDeoptimized(Code* code);

// Evacuation and slot recording.
void EvacuateNewSpaceAndCandidates();
void EvacuateLiveObjectsFromPage(Page* p);
int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space, NewSpacePage* p);
bool TryPromoteObject(HeapObject* object, int object_size);
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                   AllocationSpace to_old_space);
void ReleaseEvacuationCandidates();
INLINE(void RecordSlot(
    Object** anchor_slot, Object** slot, Object* object,
    SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
void RecordMigratedSlot(Object* value, Address slot);
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);

// Sweeping.
void SweepSpace(PagedSpace* space, SweeperType sweeper);
int SweepInParallel(PagedSpace* space, int required_freed_bytes);
void StartSweeperThreads();
bool AreSweeperThreadsActivated();
void EnsureSweepingCompleted();
bool IsSweepingCompleted();
void ParallelSweepSpaceComplete(PagedSpace* space);
void ParallelSweepSpacesComplete();
void RefillFreeList(PagedSpace* space);
bool sweeping_in_progress();
void set_sequential_sweeping(bool sequential_sweeping);
bool sequential_sweeping() const;
static const uint32_t kSingleFreeEncoding;
static const uint32_t kMultiFreeEncoding;

// Miscellaneous accessors and helpers.
Isolate* isolate() const;
bool abort_incremental_marking() const;
MarkingParity marking_parity();
static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

// Data members.
bool reduce_memory_footprint_;
bool abort_incremental_marking_;
MarkingParity marking_parity_;
bool was_marked_incrementally_;
bool sweeping_in_progress_;
base::Semaphore pending_sweeper_jobs_semaphore_;
bool sequential_sweeping_;
SlotsBufferAllocator slots_buffer_allocator_;
SlotsBuffer* migration_slots_buffer_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
SmartPointer<FreeList> free_list_old_data_space_;
SmartPointer<FreeList> free_list_old_pointer_space_;

friend class MarkingVisitor;
// MarkingDeque members:
void Initialize(Address low, Address high);
INLINE(void PushBlack(HeapObject* object));
INLINE(void PushGrey(HeapObject* object));
INLINE(HeapObject* Pop());
INLINE(void UnshiftGrey(HeapObject* object));
DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
// Marking members:
INLINE(static MarkBit MarkBitFrom(Address addr));
INLINE(static MarkBit MarkBitFrom(HeapObject* obj));
INLINE(static bool IsImpossible(MarkBit mark_bit));  // pattern 01
INLINE(static bool IsBlack(MarkBit mark_bit));       // pattern 10
INLINE(static bool IsWhite(MarkBit mark_bit));       // pattern 00
INLINE(static bool IsGrey(MarkBit mark_bit));        // pattern 11
static const char* kImpossibleBitPattern;
static const char* kBlackBitPattern;
static const char* kWhiteBitPattern;
static const char* kGreyBitPattern;
INLINE(static void MarkBlack(MarkBit mark_bit));
INLINE(static void BlackToGrey(MarkBit markbit));
INLINE(static void BlackToGrey(HeapObject* obj));
INLINE(static void WhiteToGrey(MarkBit markbit));
INLINE(static void GreyToBlack(MarkBit markbit));
INLINE(static void AnyToGrey(MarkBit markbit));
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to));
void TransferMark(Address old_start, Address new_start);
// MemoryChunk / Page members referenced above:
static void IncrementLiveBytesFromGC(Address address, int by);
static MemoryChunk* FromAddress(Address a);
bool IsEvacuationCandidate();
bool ShouldSkipEvacuationSlotRecording();
void ClearEvacuationCandidate();
static const int kNextMapIndex;  // referenced constant
// SlotsBufferAllocator members:
SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
void DeallocateBuffer(SlotsBuffer* buffer);
void DeallocateChain(SlotsBuffer** buffer_address);

// SlotsBuffer members:
SlotsBuffer(SlotsBuffer* next_buffer);
void Add(ObjectSlot slot);
static const char* SlotTypeToString(SlotType type);
void UpdateSlots(Heap* heap);
void UpdateSlotsWithFilter(Heap* heap);
bool HasSpaceForTypedSlot();
static bool IsTypedSlot(ObjectSlot slot);
static bool ChainLengthThresholdReached(SlotsBuffer* buffer);
static int SizeOfChain(SlotsBuffer* buffer);
static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                  bool code_slots_filtering_required);
INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                         SlotsBuffer** buffer_address, ObjectSlot slot,
                         AdditionMode mode));
static bool AddTo(SlotsBufferAllocator* allocator,
                  SlotsBuffer** buffer_address, SlotType type, Address addr,
                  AdditionMode mode);
static const int kNumberOfElements;
static const int kChainLengthThreshold;
ObjectSlot slots_[kNumberOfElements];

// Space member referenced above:
AllocationSpace identity();
// Cross-referenced declarations (defined elsewhere in V8):
#define DCHECK(condition) ((void) 0)  // expands to a no-op in release builds
uint32_t RoundDownToPowerOfTwo32(uint32_t value);  // base::bits
void PrintF(const char* format, ...);
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
const char* AllocationSpaceName(AllocationSpace space);
#endif  // V8_HEAP_MARK_COMPACT_H_