V8 Project
incremental-marking.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {}


void IncrementalMarking::TearDown() { delete marking_deque_memory_; }

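// Slow path of the incremental write barrier. A black host object has already
// been scanned, so the updated slot has to be recorded explicitly; grey and
// white objects will still be (re)scanned.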
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned; we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}

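// Write barrier entry point used by generated code. Besides forwarding to
// RecordWrite(), it tops up the per-chunk write barrier counter and tracks how
// many barriers have fired since the last incremental step, which feeds into
// the step size computed in Step().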
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}

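// Write barrier for pointers embedded in code objects. A white target keeps
// the host grey (or pushes a black host back to grey) so the code object is
// rescanned; otherwise, when compacting, the relocation slot of a black host
// is recorded.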
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}

static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}

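// Visitor that performs the incremental marking work proper: it transfers
// grey objects to black while pushing the objects they reference onto the
// marking deque.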
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

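  // Large fixed arrays in large-object space are scanned in chunks of
  // kProgressBarScanningChunk bytes; the per-chunk progress bar remembers how
  // far the scan has come so the object can be re-queued and resumed later.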
  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};

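// Visitor used to mark the strong roots grey when incremental marking starts
// (see StartMarking); the objects themselves are scanned later from the
// marking deque.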
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};

void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}

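// The incremental write barrier is switched on and off per page via the
// POINTERS_TO/FROM_HERE_ARE_INTERESTING flags set below.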
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}

void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}

void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
}

bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->isolate()->IsInitialized() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}

void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // The stub is initially generated in STORE_BUFFER_ONLY mode, so nothing
    // needs to be done if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}

static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}

void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}

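// Entry point: either begins marking right away or waits in the SWEEPING
// state until the concurrent sweeper has finished.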
void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());
  DCHECK(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}

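// Sets up everything needed for marking: decides whether to compact, patches
// the record-write stubs, commits and initializes the marking deque, activates
// the write barrier, and marks the strong roots grey.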
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

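// After a scavenge, entries in the marking deque may point to objects that
// were promoted or moved; rebuild the deque in place, following forwarding
// addresses and dropping one-word fillers.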
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);
}

void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}

intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    int delta = (size - unscanned_bytes_of_large_object_);
    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
    CHECK_LT(0, delta);
    bytes_processed += delta;
  }
  return bytes_processed;
}

void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}

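// Drains the marking deque completely. This runs with the mutator stopped, so
// objects can be marked black immediately.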
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}

void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then,
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

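// Heuristics that increase marking_speed_ when the marker is falling behind
// the mutator or old space is running out of head room.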
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}

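// Performs one increment of marking work. The step size is derived from the
// number of bytes allocated (and write barriers invoked) since the last step,
// scaled by marking_speed_.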
void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
                              bool force_marking) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (!force_marking && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier.
    // It is possible for a tight non-allocating loop to run a lot of write
    // barriers before we get here and check them (marking can only take place
    // on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;
    intptr_t bytes_processed = 0;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          heap_->mark_compact_collector()->IsSweepingCompleted()) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (marking_deque_.IsEmpty()) MarkingComplete(action);
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
}

void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
}
}  // namespace v8::internal