      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {}
// IncrementalMarking::RecordWriteSlow
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
// IncrementalMarking::RecordWriteFromCode
  DCHECK(obj->IsHeapObject());
  marking->RecordWrite(obj, slot, *slot);
// IncrementalMarking::RecordCodeTargetPatch (both overloads end in the same call)
  RecordWriteIntoCode(host, &rinfo, value);

  RecordWriteIntoCode(host, &rinfo, value);
// IncrementalMarking::RecordWriteOfCodeEntrySlow
  if (BaseRecordWrite(host, slot, value)) {
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
// IncrementalMarking::RecordWriteIntoCodeSlow
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsBlack(obj_bit)) {
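// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of the tri-color write barrier these slow paths
// implement: when a black (fully scanned) object receives a pointer to a
// white (unmarked) object, the target is greyed and pushed so the marker
// revisits it. All names below (Color, Obj, marking_deque, RecordWrite) are
// illustrative stand-ins, not V8's types.
#include <cstdint>
#include <deque>
#include <vector>

enum class Color : uint8_t { kWhite, kGrey, kBlack };

struct Obj {
  Color color = Color::kWhite;
  std::vector<Obj*> fields;
};

// Work list of grey objects still to be scanned by the marker.
static std::deque<Obj*> marking_deque;

// Barrier run on every recorded write of `value` into a field of `obj` while
// incremental marking is active.
void RecordWrite(Obj* obj, Obj* value) {
  if (obj->color == Color::kBlack && value->color == Color::kWhite) {
    value->color = Color::kGrey;
    marking_deque.push_back(value);
  }
}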
// MarkObjectGreyDoNotEnqueue
  if (obj->IsHeapObject()) {
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      // (elided)
    }
    Marking::AnyToGrey(mark_bit);
// MarkBlackOrKeepGrey
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  // ...
  DCHECK(Marking::IsBlack(mark_bit));
// MarkBlackOrKeepBlack
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  // ...
  DCHECK(Marking::IsBlack(mark_bit));
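// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of the idempotent "mark black and account live
// bytes once" pattern used by MarkBlackOrKeepGrey / MarkBlackOrKeepBlack.
// MarkBitSketch and PageCounters are illustrative stand-ins.
#include <cstdint>

struct MarkBitSketch {
  bool black = false;  // stand-in for the object's mark bit
};

struct PageCounters {
  int64_t live_bytes = 0;  // per-page count of bytes known to be live
};

// Marks the object black unless it already is, crediting its size to the
// page's live-byte counter exactly once, so repeated calls are harmless.
void MarkBlackOrKeep(MarkBitSketch* bit, PageCounters* page, int size) {
  if (bit->black) return;  // already marked: keep as is
  bit->black = true;
  page->live_bytes += size;
}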
  table_.Register(kVisitJSRegExp, &VisitJSRegExp);
// IncrementalMarkingMarkingVisitor::VisitFixedArrayIncremental: large
// FixedArrays are scanned one kProgressBarScanningChunk at a time, with the
// current position remembered in the page's progress bar.
  if (FLAG_use_marking_progress_bar &&
      ...) {
    int already_scanned_offset = start_offset;
    bool scan_until_end = false;
    do {
      // ...
      start_offset = end_offset;
      // ...
    } while (scan_until_end && start_offset < object_size);
    if (start_offset < object_size) {
      // ...
          object_size - (start_offset - already_scanned_offset));
    }
  } else {
    FixedArrayVisitor::Visit(map, object);
  }
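// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of progress-bar style chunked scanning, the
// technique VisitFixedArrayIncremental uses so one huge array cannot blow a
// single incremental step's budget. BigArray, kChunk and ScanOneChunk are
// illustrative stand-ins; in the fragment above the progress bar lives on the
// MemoryChunk rather than on the object.
#include <algorithm>
#include <cstddef>
#include <vector>

constexpr std::size_t kChunk = 32 * 1024;  // elements scanned per call (illustrative)

struct BigArray {
  std::vector<int> elements;
  std::size_t progress_bar = 0;  // index of the first element not yet scanned
};

void VisitElement(int /*element*/) {}  // stand-in for marking the referenced object

// Scans at most one chunk per call, remembers where it stopped, and reports
// whether the array has now been fully scanned.
bool ScanOneChunk(BigArray* array) {
  std::size_t start = array->progress_bar;
  std::size_t end = std::min(array->elements.size(), start + kChunk);
  for (std::size_t i = start; i < end; i++) VisitElement(array->elements[i]);
  array->progress_bar = end;
  return end == array->elements.size();
}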
// IncrementalMarkingMarkingVisitor::VisitNativeContextIncremental
  if (!cache->IsUndefined()) {
  // ...
  VisitNativeContext(map, context);
// VisitPointer
  Object* obj = *p;
  if (obj->IsHeapObject()) {
    MarkObject(heap, obj);
// VisitPointers and VisitPointersWithAnchor use the same loop body.
  for (Object** p = start; p < end; p++) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      MarkObject(heap, obj);

  for (Object** p = start; p < end; p++) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      MarkObject(heap, obj);
// MarkObject
  HeapObject* heap_object = HeapObject::cast(obj);
  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
  // ...
  } else if (Marking::IsWhite(mark_bit)) {
// MarkObjectWithoutPush
  HeapObject* heap_object = HeapObject::cast(obj);
  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
  if (Marking::IsWhite(mark_bit)) {
    MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                          heap_object->Size());
// IncrementalMarkingRootMarkingVisitor::MarkObjectByPointer
  if (!obj->IsHeapObject()) return;
  HeapObject* heap_object = HeapObject::cast(obj);
  MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
  if (Marking::IsWhite(mark_bit)) {
                                   bool is_compacting) {
// DeactivateIncrementalWriteBarrierForSpace(PagedSpace*)
  PageIterator it(space);
  while (it.has_next()) {

// DeactivateIncrementalWriteBarrierForSpace(NewSpace*)
  NewSpacePageIterator it(space);
  while (it.has_next()) {

// ActivateIncrementalWriteBarrier(PagedSpace*)
  PageIterator it(space);
  while (it.has_next()) {

// ActivateIncrementalWriteBarrier(NewSpace*)
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
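// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of what the Activate/Deactivate helpers above
// do: walk every page of a space and flip the flags that make generated
// write-barrier code take its slow path. Page, Space and the flag names are
// illustrative stand-ins modeled on POINTERS_TO/FROM_HERE_ARE_INTERESTING.
#include <cstdint>
#include <vector>

struct Page {
  static constexpr uint32_t kPointersToHereAreInteresting = 1u << 0;
  static constexpr uint32_t kPointersFromHereAreInteresting = 1u << 1;
  uint32_t flags = 0;
};

struct Space {
  std::vector<Page*> pages;
};

// Sets the flags on every page when incremental marking starts and clears
// them again when it stops or aborts.
void SetMarkingFlags(Space* space, bool is_marking) {
  const uint32_t mask =
      Page::kPointersToHereAreInteresting | Page::kPointersFromHereAreInteresting;
  for (Page* p : space->pages) {
    if (is_marking) {
      p->flags |= mask;
    } else {
      p->flags &= ~mask;
    }
  }
}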
// IncrementalMarking::WorthActivating
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  static const intptr_t kActivationThreshold = 0;
#endif
  // ...
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
// PatchIncrementalMarkingRecordWriteStubs: walk the heap's code-stubs table
// and re-patch every RecordWrite stub to the requested mode.
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);
      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
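// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of the stub-patching idea: when incremental
// marking starts or stops, every generated RecordWrite stub is switched to
// the matching mode so compiled code starts (or stops) taking the
// incremental slow path. StubMode, Stub and the map type are illustrative
// stand-ins, not V8's code-stub table.
#include <cstdint>
#include <unordered_map>

enum class StubMode { kStoreBufferOnly, kIncremental, kIncrementalCompaction };

struct Stub {
  bool is_record_write = false;
  StubMode mode = StubMode::kStoreBufferOnly;
};

void PatchRecordWriteStubs(std::unordered_map<uint32_t, Stub*>* stubs,
                           StubMode mode) {
  for (auto& entry : *stubs) {
    if (entry.second->is_record_write) entry.second->mode = mode;
  }
}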
// IncrementalMarking::Start
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);

  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start sweeping.\n");

// IncrementalMarking::StartMarking
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");

  if (FLAG_verify_heap) {

  if (FLAG_cleanup_code_caches_at_gc) {

  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
// IncrementalMarking::PrepareForScavenge
  if (!IsMarking()) return;
  while (it.has_next()) {
// IncrementalMarking::UpdateMarkingDequeAfterScavenge: compact the deque ring
// buffer in place, following forwarding pointers for objects the scavenger
// moved and dropping entries that now refer to fillers.
  if (!IsMarking()) return;

  int new_top = current;
  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (map_word.IsForwardingAddress()) {
      HeapObject* dest = map_word.ToForwardingAddress();
      array[new_top] = dest;
      new_top = ((new_top + 1) & mask);
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
    } else if (obj->map() != filler_map) {
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             // ...
             Marking::IsBlack(mark_bit)));
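// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of the post-scavenge deque fix-up above: the
// work list is a power-of-two ring buffer (indices wrap with `& mask`), and
// after a scavenge each entry is either replaced by its forwarding address,
// kept as is, or dropped if the slot now holds a filler. Obj and the
// parameter names are illustrative stand-ins.
#include <cstddef>
#include <vector>

struct Obj {
  Obj* forwarding = nullptr;  // non-null if the scavenger moved the object
  bool is_filler = false;     // true if the entry no longer refers to a live object
};

std::size_t UpdateAfterScavenge(std::vector<Obj*>& array, std::size_t bottom,
                                std::size_t top, std::size_t mask) {
  std::size_t new_top = bottom;
  for (std::size_t current = bottom; current != top;
       current = (current + 1) & mask) {
    Obj* obj = array[current];
    if (obj->forwarding != nullptr) {
      array[new_top] = obj->forwarding;  // object moved: keep the new copy
      new_top = (new_top + 1) & mask;
    } else if (!obj->is_filler) {
      array[new_top] = obj;              // object survived in place: keep it
      new_top = (new_top + 1) & mask;
    }
    // Fillers are simply dropped.
  }
  return new_top;  // new logical top of the ring buffer
}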
// IncrementalMarking::VisitObject
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
  // ...
  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  // ...
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              Marking::IsBlack(mark_bit)));
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  // (loop that pops grey objects off the marking deque elided)
    Map* map = obj->map();
    if (map == filler_map) continue;
    int size = obj->SizeFromMap(map);
    // ...
    bytes_processed += delta;

  return bytes_processed;
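// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of the byte-budgeted drain that
// ProcessMarkingDeque(intptr_t) performs: pop grey objects and visit them
// until the deque is empty or the budget is spent, returning the bytes
// actually scanned so the caller can tune the next step. Obj and
// VisitAndReturnSize are illustrative stand-ins.
#include <cstdint>
#include <deque>

struct Obj {
  int size_in_bytes = 0;
};

int VisitAndReturnSize(Obj* obj) { return obj->size_in_bytes; }  // stand-in for VisitObject

int64_t ProcessWithBudget(std::deque<Obj*>* deque, int64_t bytes_to_process) {
  int64_t bytes_processed = 0;
  while (!deque->empty() && bytes_processed < bytes_to_process) {
    Obj* obj = deque->front();
    deque->pop_front();
    bytes_processed += VisitAndReturnSize(obj);
  }
  return bytes_processed;
}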
void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  // (loop that pops grey objects off the marking deque elided)
    Map* map = obj->map();
    if (map == filler_map) continue;
    VisitObject(map, obj, obj->SizeFromMap(map));
// IncrementalMarking::Hurry: finish the marking phase synchronously.
  if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
    }

  ProcessMarkingDeque();

  if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
    double delta = end - start;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(delta));
  if (FLAG_cleanup_code_caches_at_gc) {
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));

  while (!context->IsUndefined()) {
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
// IncrementalMarking::Abort
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");

// IncrementalMarking::MarkingComplete
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
// IncrementalMarking::SpeedUp: heuristics that raise the marking speed when
// the marker risks losing the race against allocation.
  bool speed_up = false;

    PrintPID("Speed up marking after %d steps\n",

  bool space_left_is_very_small =
  bool only_1_nth_of_space_that_was_available_still_left =

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");

  bool size_of_old_space_multiplied_by_n_during_marking =

  if (size_of_old_space_multiplied_by_n_during_marking) {
    PrintPID("Speed up marking because of heap size increase\n");

  int64_t promoted_during_marking =

  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    PrintPID("Speed up marking because marker was not keeping up\n");

  PrintPID("Postponing speeding up marking until marking starts\n");
// IncrementalMarking::Step
    bool force_marking) {
  !FLAG_incremental_marking_steps ||

  HistogramTimerScope incremental_marking_scope(

  intptr_t bytes_to_process =
  intptr_t bytes_processed = 0;

  bytes_processed = ProcessMarkingDeque(bytes_to_process);

  double duration = (end - start);
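// --- Illustrative example (not part of the original file) ---
// A minimal standalone sketch of how a step budget can be derived from the
// two signals Step() tracks: bytes allocated by the mutator and write-barrier
// invocations since the last step, scaled by the current marking speed. This
// is one plausible combination under stated assumptions, not necessarily the
// exact formula used in Step(); the constant is illustrative.
#include <algorithm>
#include <cstdint>

constexpr int64_t kAllocatedThresholdSketch = 65536;  // illustrative floor

int64_t ComputeBytesToProcess(int64_t allocated_since_last_step,
                              int64_t write_barriers_invoked_since_last_step,
                              int marking_speed) {
  int64_t driver = std::max(allocated_since_last_step,
                            write_barriers_invoked_since_last_step);
  return std::max<int64_t>(driver * marking_speed, kAllocatedThresholdSketch);
}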
#define SLOW_DCHECK(condition)
static double TimeCurrentMillis()
bool Commit(void *address, size_t size, bool is_executable)
bool Uncommit(void *address, size_t size)
static void Clear(MemoryChunk *chunk)
void MarkCompactPrologue()
static Context * cast(Object *context)
NORMALIZED_MAP_CACHE_INDEX
Object * ValueAt(int entry)
static int SizeOf(Map *map, HeapObject *object)
static const int kStartOffset
void AddIncrementalMarkingStep(double duration, intptr_t bytes)
void AddMarkingTime(double duration)
Object * KeyAt(int entry)
static Object ** RawField(HeapObject *obj, int offset)
intptr_t MaxOldGenerationSize()
OldSpace * old_pointer_space()
PropertyCellSpace * property_cell_space()
Object * native_contexts_list() const
LargeObjectSpace * lo_space()
bool InNewSpace(Object *object)
bool NextGCIsLikelyToBeFull()
intptr_t PromotedTotalSize()
OldSpace * old_data_space()
void IterateStrongRoots(ObjectVisitor *v, VisitMode mode)
IncrementalMarking * incremental_marking()
void CompletelyClearInstanceofCache()
intptr_t PromotedSpaceSizeOfObjects()
MarkCompactCollector * mark_compact_collector()
static void VisitFixedArrayIncremental(Map *map, HeapObject *object)
INLINE(static void MarkObject(Heap *heap, Object *obj))
INLINE(static void VisitPointersWithAnchor(Heap *heap, Object **anchor, Object **start, Object **end))
static const int kProgressBarScanningChunk
static void VisitNativeContextIncremental(Map *map, HeapObject *object)
INLINE(static void VisitPointers(Heap *heap, Object **start, Object **end))
INLINE(static void VisitPointer(Heap *heap, Object **p))
INLINE(static bool MarkObjectWithoutPush(Heap *heap, Object *obj))
void VisitPointer(Object **p)
void VisitPointers(Object **start, Object **end)
IncrementalMarkingRootMarkingVisitor(IncrementalMarking *incremental_marking)
IncrementalMarking * incremental_marking_
void MarkObjectByPointer(Object **p)
void RecordWriteSlow(HeapObject *obj, Object **slot, Object *value)
static const intptr_t kAllocatedThreshold
int unscanned_bytes_of_large_object_
void WhiteToGreyAndPush(HeapObject *obj, MarkBit mark_bit)
void Step(intptr_t allocated, CompletionAction action, bool force_marking=false)
base::VirtualMemory * marking_deque_memory_
MarkingDeque * marking_deque()
void OldSpaceStep(intptr_t allocated)
static const intptr_t kMaxMarkingSpeed
void ActivateIncrementalWriteBarrier()
void RecordWriteOfCodeEntrySlow(JSFunction *host, Object **slot, Code *value)
static void RecordWriteFromCode(HeapObject *obj, Object **slot, Isolate *isolate)
static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace *space)
int64_t old_generation_space_available_at_start_of_incremental_
void SetOldSpacePageFlags(MemoryChunk *chunk)
int64_t old_generation_space_used_at_start_of_incremental_
void RecordCodeTargetPatch(Code *host, Address pc, HeapObject *value)
void RestartIfNotMarking()
intptr_t write_barriers_invoked_since_last_step_
void NotifyIncompleteScanOfObject(int unscanned_bytes)
void BlackToGreyAndUnshift(HeapObject *obj, MarkBit mark_bit)
static const intptr_t kMarkingSpeedAccellerationInterval
MarkingDeque marking_deque_
bool marking_deque_memory_committed_
void Start(CompactionFlag flag=ALLOW_COMPACTION)
void PrepareForScavenge()
void set_should_hurry(bool val)
static const intptr_t kMarkingSpeedAccelleration
void DeactivateIncrementalWriteBarrier()
int64_t SpaceLeftInOldSpace()
void StartMarking(CompactionFlag flag)
void EnsureMarkingDequeIsCommitted()
static const intptr_t kWriteBarriersInvokedThreshold
void UpdateMarkingDequeAfterScavenge()
IncrementalMarking(Heap *heap)
void RecordWriteIntoCodeSlow(HeapObject *obj, RelocInfo *rinfo, Object *value)
void UncommitMarkingDeque()
void SetNewSpacePageFlags(NewSpacePage *chunk)
static const intptr_t kInitialMarkingSpeed
void MarkingComplete(CompletionAction action)
static const intptr_t kFastMarking
void ActivateGeneratedStub(Code *stub)
int no_marking_scope_depth_
Code * GcSafeFindCodeForInnerPointer(Address inner_pointer)
bool serializer_enabled() const
StackGuard * stack_guard()
InnerPointerToCodeCache * inner_pointer_to_code_cache()
CompilationCache * compilation_cache()
LargePage * next_page() const
bool sweeping_in_progress()
void MarkWeakObjectToCodeTable()
void RecordCodeEntrySlot(Address slot, Code *target)
bool IsSweepingCompleted()
void EnsureSweepingCompleted()
bool StartCompaction(CompactionMode mode)
void RecordRelocSlot(RelocInfo *rinfo, Object *target)
void Initialize(Address low, Address high)
void set_write_barrier_counter(int counter)
static void IncrementLiveBytesFromGC(Address address, int by)
POINTERS_FROM_HERE_ARE_INTERESTING
POINTERS_TO_HERE_ARE_INTERESTING
static const int kWriteBarrierCounterGranularity
int write_barrier_counter()
static MemoryChunk * FromAddress(Address a)
void set_progress_bar(int progress_bar)
void LowerInlineAllocationLimit(intptr_t step)
static const int kPageSize
static void Patch(Code *stub, Mode mode)
static Mode GetMode(Code *stub)
AllocationSpace identity()
static VisitorDispatchTable< Callback > table_
#define DCHECK(condition)
void PrintPID(const char *format,...)
static void MarkBlackOrKeepGrey(HeapObject *heap_object, MarkBit mark_bit, int size)
static LifetimePosition Min(LifetimePosition a, LifetimePosition b)
static void MarkBlackOrKeepBlack(HeapObject *heap_object, MarkBit mark_bit, int size)
static LifetimePosition Max(LifetimePosition a, LifetimePosition b)
static void PatchIncrementalMarkingRecordWriteStubs(Heap *heap, RecordWriteStub::Mode mode)
void PrintF(const char *format,...)
uint32_t NumberToUint32(Object *number)
static void MarkObjectGreyDoNotEnqueue(Object *obj)