V8 Project
mark-compact.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_MARK_COMPACT_H_
6 #define V8_HEAP_MARK_COMPACT_H_
7 
8 #include "src/base/bits.h"
9 #include "src/heap/spaces.h"
10 
11 namespace v8 {
12 namespace internal {
13 
14 // Callback function, returns whether an object is alive. The heap size
15 // of the object is returned in size. It optionally updates the offset
16 // to the first live object in the page (only used for old and map objects).
17 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
18 
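As an illustration, a hypothetical callback matching IsAliveFunction could report a marked object's size as sketched below. The name IsMarkedObject is illustrative and not part of this header; it assumes V8-internal context and uses the Marking helpers declared later in this file.
static bool IsMarkedObject(HeapObject* obj, int* size, int* offset) {
  *size = obj->Size();  // report the object's heap size to the caller
  // |offset| is only updated for old and map space pages; untouched here.
  return Marking::IsBlack(Marking::MarkBitFrom(obj));
}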
19 // Forward declarations.
20 class CodeFlusher;
21 class MarkCompactCollector;
22 class MarkingVisitor;
23 class RootMarkingVisitor;
24 
25 
26 class Marking {
27  public:
28  explicit Marking(Heap* heap) : heap_(heap) {}
29 
30  INLINE(static MarkBit MarkBitFrom(Address addr));
31 
32  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
33  return MarkBitFrom(reinterpret_cast<Address>(obj));
34  }
35 
36  // Impossible markbits: 01
37  static const char* kImpossibleBitPattern;
38  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
39  return !mark_bit.Get() && mark_bit.Next().Get();
40  }
41 
42  // Black markbits: 10 - this is required by the sweeper.
43  static const char* kBlackBitPattern;
44  INLINE(static bool IsBlack(MarkBit mark_bit)) {
45  return mark_bit.Get() && !mark_bit.Next().Get();
46  }
47 
48  // White markbits: 00 - this is required by the mark bit clearer.
49  static const char* kWhiteBitPattern;
50  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
51 
52  // Grey markbits: 11
53  static const char* kGreyBitPattern;
54  INLINE(static bool IsGrey(MarkBit mark_bit)) {
55  return mark_bit.Get() && mark_bit.Next().Get();
56  }
57 
58  INLINE(static void MarkBlack(MarkBit mark_bit)) {
59  mark_bit.Set();
60  mark_bit.Next().Clear();
61  }
62 
63  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
64 
65  INLINE(static void WhiteToGrey(MarkBit markbit)) {
66  markbit.Set();
67  markbit.Next().Set();
68  }
69 
70  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
71 
72  INLINE(static void BlackToGrey(HeapObject* obj)) {
73  BlackToGrey(MarkBitFrom(obj));
74  }
75 
76  INLINE(static void AnyToGrey(MarkBit markbit)) {
77  markbit.Set();
78  markbit.Next().Set();
79  }
80 
81  void TransferMark(Address old_start, Address new_start);
82 
83 #ifdef DEBUG
84  enum ObjectColor {
85  BLACK_OBJECT,
86  WHITE_OBJECT,
87  GREY_OBJECT,
88  IMPOSSIBLE_COLOR
89  };
90 
91  static const char* ColorName(ObjectColor color) {
92  switch (color) {
93  case BLACK_OBJECT:
94  return "black";
95  case WHITE_OBJECT:
96  return "white";
97  case GREY_OBJECT:
98  return "grey";
99  case IMPOSSIBLE_COLOR:
100  return "impossible";
101  }
102  return "error";
103  }
104 
105  static ObjectColor Color(HeapObject* obj) {
106  return Color(Marking::MarkBitFrom(obj));
107  }
108 
109  static ObjectColor Color(MarkBit mark_bit) {
110  if (IsBlack(mark_bit)) return BLACK_OBJECT;
111  if (IsWhite(mark_bit)) return WHITE_OBJECT;
112  if (IsGrey(mark_bit)) return GREY_OBJECT;
113  UNREACHABLE();
114  return IMPOSSIBLE_COLOR;
115  }
116 #endif
117 
118  // Returns true if the transferred color is black.
119  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
120  MarkBit from_mark_bit = MarkBitFrom(from);
121  MarkBit to_mark_bit = MarkBitFrom(to);
122  bool is_black = false;
123  if (from_mark_bit.Get()) {
124  to_mark_bit.Set();
125  is_black = true; // Looks black so far.
126  }
127  if (from_mark_bit.Next().Get()) {
128  to_mark_bit.Next().Set();
129  is_black = false; // Was actually gray.
130  }
131  return is_black;
132  }
133 
134  private:
135  Heap* heap_;
136 };
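A minimal sketch of how the two-bit color encoding above is typically exercised during marking. This is not code from the header; it assumes a valid HeapObject* in V8-internal code and the hypothetical helper name ColorObjectForMarking.
void ColorObjectForMarking(HeapObject* obj) {
  MarkBit bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(bit)) {   // 00: not yet discovered
    Marking::WhiteToGrey(bit);   // 11: discovered, fields not yet scanned
  }
  // ... after the object's fields have been pushed on the marking deque ...
  if (Marking::IsGrey(bit)) {
    Marking::GreyToBlack(bit);   // 10: object and its fields fully processed
  }
  DCHECK(!Marking::IsImpossible(bit));  // 01 never occurs for a valid object
}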
137 
138 // ----------------------------------------------------------------------------
139 // Marking deque for tracing live objects.
140 class MarkingDeque {
141  public:
142  MarkingDeque()
143  : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
144 
145  void Initialize(Address low, Address high) {
146  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
147  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
148  array_ = obj_low;
149  mask_ = base::bits::RoundDownToPowerOfTwo32(
150  static_cast<uint32_t>(obj_high - obj_low)) -
151  1;
152  top_ = bottom_ = 0;
153  overflowed_ = false;
154  }
155 
156  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
157 
158  inline bool IsEmpty() { return top_ == bottom_; }
159 
160  bool overflowed() const { return overflowed_; }
161 
162  void ClearOverflowed() { overflowed_ = false; }
163 
164  void SetOverflowed() { overflowed_ = true; }
165 
166  // Push the (marked) object on the marking stack if there is room,
167  // otherwise mark the object as overflowed and wait for a rescan of the
168  // heap.
169  INLINE(void PushBlack(HeapObject* object)) {
170  DCHECK(object->IsHeapObject());
171  if (IsFull()) {
172  Marking::BlackToGrey(object);
173  MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
174  SetOverflowed();
175  } else {
176  array_[top_] = object;
177  top_ = ((top_ + 1) & mask_);
178  }
179  }
180 
181  INLINE(void PushGrey(HeapObject* object)) {
182  DCHECK(object->IsHeapObject());
183  if (IsFull()) {
184  SetOverflowed();
185  } else {
186  array_[top_] = object;
187  top_ = ((top_ + 1) & mask_);
188  }
189  }
190 
191  INLINE(HeapObject* Pop()) {
192  DCHECK(!IsEmpty());
193  top_ = ((top_ - 1) & mask_);
194  HeapObject* object = array_[top_];
195  DCHECK(object->IsHeapObject());
196  return object;
197  }
198 
199  INLINE(void UnshiftGrey(HeapObject* object)) {
200  DCHECK(object->IsHeapObject());
201  if (IsFull()) {
202  SetOverflowed();
203  } else {
204  bottom_ = ((bottom_ - 1) & mask_);
205  array_[bottom_] = object;
206  }
207  }
208 
209  HeapObject** array() { return array_; }
210  int bottom() { return bottom_; }
211  int top() { return top_; }
212  int mask() { return mask_; }
213  void set_top(int top) { top_ = top; }
214 
215  private:
216  HeapObject** array_;
217  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
218  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
219  // (mod mask_ + 1).
220  int top_;
221  int bottom_;
222  int mask_;
223  bool overflowed_;
224 
225  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
226 };
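A sketch of the intended processing loop for the deque above. The backing store would come from a heap-reserved region; DrainMarkingDeque, low, high and the visit step are placeholders, not part of the header.
void DrainMarkingDeque(MarkingDeque* deque, Address low, Address high) {
  deque->Initialize(low, high);      // ring buffer over [low, high)
  while (!deque->IsEmpty()) {
    HeapObject* obj = deque->Pop();  // top_ = (top_ - 1) & mask_
    // ... visit obj's fields, calling PushBlack()/PushGrey() for newly
    //     discovered objects ...
  }
  if (deque->overflowed()) {
    // Some objects were dropped: rescan the heap for grey objects and
    // refill the deque before calling ClearOverflowed().
  }
}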
227 
228 
229 class SlotsBufferAllocator {
230  public:
231  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
232  void DeallocateBuffer(SlotsBuffer* buffer);
233 
234  void DeallocateChain(SlotsBuffer** buffer_address);
235 };
236 
237 
238 // SlotsBuffer records a sequence of slots that has to be updated
239 // after live objects were relocated from evacuation candidates.
240 // All slots are either untyped or typed:
241 // - Untyped slots are expected to contain a tagged object pointer.
242 // They are recorded by an address.
243 // - Typed slots are expected to contain an encoded pointer to a heap
244 // object where the way of encoding depends on the type of the slot.
245 // They are recorded as a pair (SlotType, slot address).
246 // We assume that the zero page is never mapped; this allows us to distinguish
247 // untyped slots from typed slots during iteration by a simple comparison:
248 // if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then it
249 // is the first element of a typed slot's pair.
250 class SlotsBuffer {
251  public:
252  typedef Object** ObjectSlot;
253 
254  explicit SlotsBuffer(SlotsBuffer* next_buffer)
255  : idx_(0), chain_length_(1), next_(next_buffer) {
256  if (next_ != NULL) {
257  chain_length_ = next_->chain_length_ + 1;
258  }
259  }
260 
261  ~SlotsBuffer() {}
262 
263  void Add(ObjectSlot slot) {
264  DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
265  slots_[idx_++] = slot;
266  }
267 
268  enum SlotType {
269  EMBEDDED_OBJECT_SLOT,
270  RELOCATED_CODE_OBJECT,
271  CODE_TARGET_SLOT,
272  CODE_ENTRY_SLOT,
273  DEBUG_TARGET_SLOT,
274  JS_RETURN_SLOT,
275  NUMBER_OF_SLOT_TYPES
276  };
277 
278  static const char* SlotTypeToString(SlotType type) {
279  switch (type) {
280  case EMBEDDED_OBJECT_SLOT:
281  return "EMBEDDED_OBJECT_SLOT";
282  case RELOCATED_CODE_OBJECT:
283  return "RELOCATED_CODE_OBJECT";
284  case CODE_TARGET_SLOT:
285  return "CODE_TARGET_SLOT";
286  case CODE_ENTRY_SLOT:
287  return "CODE_ENTRY_SLOT";
288  case DEBUG_TARGET_SLOT:
289  return "DEBUG_TARGET_SLOT";
290  case JS_RETURN_SLOT:
291  return "JS_RETURN_SLOT";
292  case NUMBER_OF_SLOT_TYPES:
293  return "NUMBER_OF_SLOT_TYPES";
294  }
295  return "UNKNOWN SlotType";
296  }
297 
298  void UpdateSlots(Heap* heap);
299 
300  void UpdateSlotsWithFilter(Heap* heap);
301 
302  SlotsBuffer* next() { return next_; }
303 
304  static int SizeOfChain(SlotsBuffer* buffer) {
305  if (buffer == NULL) return 0;
306  return static_cast<int>(buffer->idx_ +
307  (buffer->chain_length_ - 1) * kNumberOfElements);
308  }
309 
310  inline bool IsFull() { return idx_ == kNumberOfElements; }
311 
312  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
313 
314  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
315  bool code_slots_filtering_required) {
316  while (buffer != NULL) {
317  if (code_slots_filtering_required) {
318  buffer->UpdateSlotsWithFilter(heap);
319  } else {
320  buffer->UpdateSlots(heap);
321  }
322  buffer = buffer->next();
323  }
324  }
325 
326  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
327 
328  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
329  return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
330  }
331 
332  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
333  SlotsBuffer** buffer_address, ObjectSlot slot,
334  AdditionMode mode)) {
335  SlotsBuffer* buffer = *buffer_address;
336  if (buffer == NULL || buffer->IsFull()) {
337  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
338  allocator->DeallocateChain(buffer_address);
339  return false;
340  }
341  buffer = allocator->AllocateBuffer(buffer);
342  *buffer_address = buffer;
343  }
344  buffer->Add(slot);
345  return true;
346  }
347 
348  static bool IsTypedSlot(ObjectSlot slot);
349 
350  static bool AddTo(SlotsBufferAllocator* allocator,
351  SlotsBuffer** buffer_address, SlotType type, Address addr,
352  AdditionMode mode);
353 
354  static const int kNumberOfElements = 1021;
355 
356  private:
357  static const int kChainLengthThreshold = 15;
358 
359  intptr_t idx_;
360  intptr_t chain_length_;
361  SlotsBuffer* next_;
362  ObjectSlot slots_[kNumberOfElements];
363 };
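Illustration of the zero-page trick described in the comment above SlotsBuffer: during iteration, an element smaller than NUMBER_OF_SLOT_TYPES cannot be a real slot address, so it must be the type half of a typed-slot pair. This is only a sketch of the idea, not the actual UpdateSlots implementation; the function name is hypothetical.
void IterateRecordedSlots(SlotsBuffer::ObjectSlot* slots, int count) {
  for (int i = 0; i < count; i++) {
    intptr_t raw = reinterpret_cast<intptr_t>(slots[i]);
    if (raw < SlotsBuffer::NUMBER_OF_SLOT_TYPES) {
      // Typed slot: slots[i] encodes the SlotType, slots[i + 1] the address.
      SlotsBuffer::SlotType type = static_cast<SlotsBuffer::SlotType>(raw);
      Address addr = reinterpret_cast<Address>(slots[i + 1]);
      i++;  // consume the second half of the pair
      // ... decode the pointer stored at addr according to type and update it ...
    } else {
      // Untyped slot: slots[i] is the address of a tagged object pointer.
      // ... update *slots[i] if it points into an evacuation candidate ...
    }
  }
}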
364 
365 
366 // CodeFlusher collects candidates for code flushing during marking and
367 // processes those candidates after marking has completed in order to
368 // reset those functions referencing code objects that would otherwise
369 // be unreachable. Code objects can be referenced in three ways:
370 // - SharedFunctionInfo references unoptimized code.
371 // - JSFunction references either unoptimized or optimized code.
372 // - OptimizedCodeMap references optimized code.
373 // We are not allowed to flush unoptimized code for functions that got
374 // optimized or inlined into optimized code, because we might bail out
375 // into the unoptimized code again during deoptimization.
376 class CodeFlusher {
377  public:
378  explicit CodeFlusher(Isolate* isolate)
379  : isolate_(isolate),
380  jsfunction_candidates_head_(NULL),
381  shared_function_info_candidates_head_(NULL),
382  optimized_code_map_holder_head_(NULL) {}
383 
384  void AddCandidate(SharedFunctionInfo* shared_info) {
385  if (GetNextCandidate(shared_info) == NULL) {
386  SetNextCandidate(shared_info, shared_function_info_candidates_head_);
387  shared_function_info_candidates_head_ = shared_info;
388  }
389  }
390 
391  void AddCandidate(JSFunction* function) {
392  DCHECK(function->code() == function->shared()->code());
393  if (GetNextCandidate(function)->IsUndefined()) {
394  SetNextCandidate(function, jsfunction_candidates_head_);
395  jsfunction_candidates_head_ = function;
396  }
397  }
398 
399  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
400  if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
401  SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
402  optimized_code_map_holder_head_ = code_map_holder;
403  }
404  }
405 
406  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
407  void EvictCandidate(SharedFunctionInfo* shared_info);
408  void EvictCandidate(JSFunction* function);
409 
414  }
415 
420  }
421 
423 
424  private:
428  void EvictOptimizedCodeMaps();
431 
432  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
433  return reinterpret_cast<JSFunction**>(
434  HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
435  }
436 
437  static JSFunction* GetNextCandidate(JSFunction* candidate) {
438  Object* next_candidate = candidate->next_function_link();
439  return reinterpret_cast<JSFunction*>(next_candidate);
440  }
441 
442  static void SetNextCandidate(JSFunction* candidate,
443  JSFunction* next_candidate) {
444  candidate->set_next_function_link(next_candidate);
445  }
446 
447  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
448  DCHECK(undefined->IsUndefined());
449  candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
450  }
451 
452  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
453  Object* next_candidate = candidate->code()->gc_metadata();
454  return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
455  }
456 
457  static void SetNextCandidate(SharedFunctionInfo* candidate,
458  SharedFunctionInfo* next_candidate) {
459  candidate->code()->set_gc_metadata(next_candidate);
460  }
461 
462  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
463  candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
464  }
465 
466  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
467  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
468  Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
469  return reinterpret_cast<SharedFunctionInfo*>(next_map);
470  }
471 
472  static void SetNextCodeMap(SharedFunctionInfo* holder,
473  SharedFunctionInfo* next_holder) {
474  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
475  code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
476  }
477 
478  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
479  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
480  code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
481  }
482 
483  Isolate* isolate_;
484  JSFunction* jsfunction_candidates_head_;
485  SharedFunctionInfo* shared_function_info_candidates_head_;
486  SharedFunctionInfo* optimized_code_map_holder_head_;
487 
488  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
489 };
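The candidate lists above are intrusive: JSFunction candidates are chained through next_function_link() and SharedFunctionInfo candidates through their code object's gc_metadata field, so enqueuing never allocates. A usage sketch follows; the helper name is hypothetical and V8-internal context is assumed.
void EnqueueClosureForFlushing(CodeFlusher* flusher, JSFunction* function) {
  // Only functions still running their SharedFunctionInfo's code are
  // eligible; AddCandidate() DCHECKs this and links the function into the
  // head of the intrusive list via next_function_link().
  flusher->AddCandidate(function);
}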
490 
491 
492 // Defined in isolate.h.
493 class ThreadLocalTop;
494 
495 
496 // -------------------------------------------------------------------------
497 // Mark-Compact collector
498 class MarkCompactCollector {
499  public:
500  // Set the global flags; this must be called before Prepare to take effect.
501  inline void SetFlags(int flags);
502 
503  static void Initialize();
504 
505  void SetUp();
506 
507  void TearDown();
508 
509  void CollectEvacuationCandidates(PagedSpace* space);
510 
511  void AddEvacuationCandidate(Page* p);
512 
513  // Prepares for GC by resetting relocation info in old and map spaces and
514  // choosing spaces to compact.
515  void Prepare();
516 
517  // Performs a global garbage collection.
518  void CollectGarbage();
519 
520  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
521 
522  bool StartCompaction(CompactionMode mode);
523 
524  void AbortCompaction();
525 
526 #ifdef DEBUG
527  // Checks whether a mark-compact collection is currently in progress.
528  bool in_use() { return state_ > PREPARE_GC; }
529  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
530 #endif
531 
532  // Determine type of object and emit deletion log event.
533  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
534 
535  // Distinguishable invalid map encodings (for single word and multiple words)
536  // that indicate free regions.
537  static const uint32_t kSingleFreeEncoding = 0;
538  static const uint32_t kMultiFreeEncoding = 1;
539 
540  static inline bool IsMarked(Object* obj);
541 
542  inline Heap* heap() const { return heap_; }
543  inline Isolate* isolate() const;
544 
546  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
547  void EnableCodeFlushing(bool enable);
548 
549  enum SweeperType {
550  PARALLEL_SWEEPING,
551  CONCURRENT_SWEEPING,
552  SEQUENTIAL_SWEEPING
553  };
554 
556 
557 #ifdef VERIFY_HEAP
558  void VerifyMarkbitsAreClean();
559  static void VerifyMarkbitsAreClean(PagedSpace* space);
560  static void VerifyMarkbitsAreClean(NewSpace* space);
561  void VerifyWeakEmbeddedObjectsInCode();
562  void VerifyOmittedMapChecks();
563 #endif
564 
565  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
566  return Page::FromAddress(reinterpret_cast<Address>(anchor))
567  ->ShouldSkipEvacuationSlotRecording();
568  }
569 
570  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
571  return Page::FromAddress(reinterpret_cast<Address>(host))
572  ->ShouldSkipEvacuationSlotRecording();
573  }
574 
575  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
576  return Page::FromAddress(reinterpret_cast<Address>(obj))
577  ->IsEvacuationCandidate();
578  }
579 
580  INLINE(void EvictEvacuationCandidate(Page* page)) {
581  if (FLAG_trace_fragmentation) {
582  PrintF("Page %p is too popular. Disabling evacuation.\n",
583  reinterpret_cast<void*>(page));
584  }
585 
586  // TODO(gc) If all evacuation candidates are too popular we
587  // should stop slots recording entirely.
588  page->ClearEvacuationCandidate();
589 
590  // We were not collecting slots on this page that point
591  // to other evacuation candidates thus we have to
592  // rescan the page after evacuation to discover and update all
593  // pointers to evacuated objects.
594  if (page->owner()->identity() == OLD_DATA_SPACE) {
595  evacuation_candidates_.RemoveElement(page);
596  } else {
597  page->SetFlag(Page::RESCAN_ON_EVACUATION);
598  }
599  }
600 
601  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
602  void RecordCodeEntrySlot(Address slot, Code* target);
603  void RecordCodeTargetPatch(Address pc, Code* target);
604 
605  INLINE(void RecordSlot(
606  Object** anchor_slot, Object** slot, Object* object,
607  SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
608 
609  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
610  AllocationSpace to_old_space);
611 
612  bool TryPromoteObject(HeapObject* object, int object_size);
613 
614  void InvalidateCode(Code* code);
615 
616  void ClearMarkbits();
617 
619 
620  bool is_compacting() const { return compacting_; }
621 
623 
624  // Concurrent and parallel sweeping support. If required_freed_bytes was set
625  // to a value larger than 0, then sweeping returns after a block of at least
626  // required_freed_bytes was freed. If required_freed_bytes was set to zero
627  // then the whole given space is swept. It returns the size of the maximum
628  // contiguous freed memory chunk.
629  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
630 
631  // Sweeps a given page concurrently with the sweeper threads. It returns the
632  // size of the maximum contiguous freed memory chunk.
633  int SweepInParallel(Page* page, PagedSpace* space);
634 
635  void EnsureSweepingCompleted();
636 
637  // Returns true if the sweeper threads are not active or if they are done
638  // processing all pages. If the former ever becomes a latency issue we
639  // should be smarter here.
640  bool IsSweepingCompleted();
641 
643 
645 
646  // Checks if sweeping is in progress right now on any space.
647  bool sweeping_in_progress() { return sweeping_in_progress_; }
648 
649  void set_sequential_sweeping(bool sequential_sweeping) {
650  sequential_sweeping_ = sequential_sweeping;
651  }
652 
653  bool sequential_sweeping() const { return sequential_sweeping_; }
654 
655  // Mark the global table which maps weak objects to dependent code without
656  // marking its contents.
657  void MarkWeakObjectToCodeTable();
658 
659  // Special case for processing weak references in a full collection. We need
660  // to artificially keep AllocationSites alive for a time.
661  void MarkAllocationSite(AllocationSite* site);
662 
663  private:
664  class SweeperTask;
665 
666  explicit MarkCompactCollector(Heap* heap);
667  ~MarkCompactCollector();
668 
669  bool MarkInvalidatedCode();
670  bool WillBeDeoptimized(Code* code);
671  void RemoveDeadInvalidatedCode();
672  void ProcessInvalidatedCode(ObjectVisitor* visitor);
673 
674  void StartSweeperThreads();
675 
676 #ifdef DEBUG
677  enum CollectorState {
678  IDLE,
679  PREPARE_GC,
680  MARK_LIVE_OBJECTS,
681  SWEEP_SPACES,
682  ENCODE_FORWARDING_ADDRESSES,
683  UPDATE_POINTERS,
684  RELOCATE_OBJECTS
685  };
686 
687  // The current stage of the collector.
688  CollectorState state_;
689 #endif
690 
692 
694 
696 
697  // True if we are collecting slots to perform evacuation from evacuation
698  // candidates.
699  bool compacting_;
700 
701  bool was_marked_incrementally_;
702 
703  // True if concurrent or parallel sweeping is currently in progress.
704  bool sweeping_in_progress_;
705 
706  base::Semaphore pending_sweeper_jobs_semaphore_;
707 
708  bool sequential_sweeping_;
709 
710  SlotsBufferAllocator slots_buffer_allocator_;
711 
712  SlotsBuffer* migration_slots_buffer_;
713 
714  // Finishes GC, performs heap verification if enabled.
715  void Finish();
716 
717  // -----------------------------------------------------------------------
718  // Phase 1: Marking live objects.
719  //
720  // Before: The heap has been prepared for garbage collection by
721  // MarkCompactCollector::Prepare() and is otherwise in its
722  // normal state.
723  //
724  // After: Live objects are marked and non-live objects are unmarked.
725 
726  friend class RootMarkingVisitor;
727  friend class MarkingVisitor;
728  friend class MarkCompactMarkingVisitor;
729  friend class CodeMarkingVisitor;
730  friend class SharedFunctionInfoMarkingVisitor;
731 
732  // Mark code objects that are active on the stack to prevent them
733  // from being flushed.
734  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
735 
736  void PrepareForCodeFlushing();
737 
738  // Marking operations for objects reachable from roots.
739  void MarkLiveObjects();
740 
741  void AfterMarking();
742 
743  // Marks the object black and pushes it on the marking stack.
744  // This is for non-incremental marking only.
745  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
746 
747  // Marks the object black assuming that it is not yet marked.
748  // This is for non-incremental marking only.
749  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
750 
751  // Mark the heap roots and all objects reachable from them.
752  void MarkRoots(RootMarkingVisitor* visitor);
753 
754  // Mark the string table specially. References to internalized strings from
755  // the string table are weak.
756  void MarkStringTable(RootMarkingVisitor* visitor);
757 
758  // Mark objects in implicit references groups if their parent object
759  // is marked.
760  void MarkImplicitRefGroups();
761 
762  // Mark objects reachable (transitively) from objects in the marking stack
763  // or overflowed in the heap.
764  void ProcessMarkingDeque();
765 
766  // Mark objects reachable (transitively) from objects in the marking stack
767  // or overflowed in the heap. This respects references only considered in
768  // the final atomic marking pause including the following:
769  // - Processing of objects reachable through Harmony WeakMaps.
770  // - Objects reachable due to host application logic like object groups
771  // or implicit references' groups.
772  void ProcessEphemeralMarking(ObjectVisitor* visitor);
773 
774  // If the call-site of the top optimized code was not prepared for
775  // deoptimization, then treat the maps in the code as strong pointers,
776  // otherwise a map can die and deoptimize the code.
777  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
778 
779  // Mark objects reachable (transitively) from objects in the marking
780  // stack. This function empties the marking stack, but may leave
781  // overflowed objects in the heap, in which case the marking stack's
782  // overflow flag will be set.
783  void EmptyMarkingDeque();
784 
785  // Refill the marking stack with overflowed objects from the heap. This
786  // function either leaves the marking stack full or clears the overflow
787  // flag on the marking stack.
788  void RefillMarkingDeque();
789 
790  // After reachable maps have been marked process per context object
791  // literal map caches removing unmarked entries.
792  void ProcessMapCaches();
793 
794  // Callback function for telling whether the object *p is an unmarked
795  // heap object.
796  static bool IsUnmarkedHeapObject(Object** p);
797  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
798 
799  // Map transitions from a live map to a dead map must be killed.
800  // We replace them with a null descriptor, with the same key.
801  void ClearNonLiveReferences();
802  void ClearNonLivePrototypeTransitions(Map* map);
803  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
804  void ClearMapTransitions(Map* map);
805  bool ClearMapBackPointer(Map* map);
806  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
807  int number_of_own_descriptors);
808  void TrimEnumCache(Map* map, DescriptorArray* descriptors);
809 
810  void ClearDependentCode(DependentCode* dependent_code);
811  void ClearDependentICList(Object* head);
812  void ClearNonLiveDependentCode(DependentCode* dependent_code);
813  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
814  int start, int end, int new_start);
815 
816  // Mark all values associated with reachable keys in weak collections
817  // encountered so far. This might push new object or even new weak maps onto
818  // the marking stack.
819  void ProcessWeakCollections();
820 
821  // After all reachable objects have been marked those weak map entries
822  // with an unreachable key are removed from all encountered weak maps.
823  // The linked list of all encountered weak maps is destroyed.
824  void ClearWeakCollections();
825 
826  // We have to remove all encountered weak maps from the list of weak
827  // collections when incremental marking is aborted.
828  void AbortWeakCollections();
829 
830  // -----------------------------------------------------------------------
831  // Phase 2: Sweeping to clear mark bits and free non-live objects for
832  // a non-compacting collection.
833  //
834  // Before: Live objects are marked and non-live objects are unmarked.
835  //
836  // After: Live objects are unmarked, non-live regions have been added to
837  // their space's free list. Active eden semispace is compacted by
838  // evacuation.
839  //
840 
841  // If we are not compacting the heap, we simply sweep the spaces except
842  // for the large object space, clearing mark bits and adding unmarked
843  // regions to each space's free list.
844  void SweepSpaces();
845 
846  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
847  NewSpacePage* p);
848 
849  void EvacuateNewSpace();
850 
851  void EvacuateLiveObjectsFromPage(Page* p);
852 
853  void EvacuatePages();
854 
855  void EvacuateNewSpaceAndCandidates();
856 
857  void ReleaseEvacuationCandidates();
858 
859  // Moves the pages of the evacuation_candidates_ list to the end of their
860  // corresponding space pages list.
861  void MoveEvacuationCandidatesToEndOfPagesList();
862 
863  void SweepSpace(PagedSpace* space, SweeperType sweeper);
864 
865  // Finalizes the parallel sweeping phase. Marks all the pages that were
866  // swept in parallel.
867  void ParallelSweepSpacesComplete();
868 
869  void ParallelSweepSpaceComplete(PagedSpace* space);
870 
871  // Updates store buffer and slot buffer for a pointer in a migrating object.
872  void RecordMigratedSlot(Object* value, Address slot);
873 
874 #ifdef DEBUG
875  friend class MarkObjectVisitor;
876  static void VisitObject(HeapObject* obj);
877 
878  friend class UnmarkObjectVisitor;
879  static void UnmarkObject(HeapObject* obj);
880 #endif
881 
882  Heap* heap_;
883  MarkingDeque marking_deque_;
884  CodeFlusher* code_flusher_;
885  bool have_code_to_deoptimize_;
886 
887  List<Page*> evacuation_candidates_;
888  List<Code*> invalidated_code_;
889 
890  SmartPointer<FreeList> free_list_old_data_space_;
891  SmartPointer<FreeList> free_list_old_pointer_space_;
892 
893  friend class Heap;
894 };
895 
896 
897 class MarkBitCellIterator BASE_EMBEDDED {
898  public:
899  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
900  last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
901  chunk_->AddressToMarkbitIndex(chunk_->area_end())));
902  cell_base_ = chunk_->area_start();
903  cell_index_ = Bitmap::IndexToCell(
904  Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
905  cells_ = chunk_->markbits()->cells();
906  }
907 
908  inline bool Done() { return cell_index_ == last_cell_index_; }
909 
910  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
911 
912  inline MarkBit::CellType* CurrentCell() {
913  DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
914  chunk_->AddressToMarkbitIndex(cell_base_))));
915  return &cells_[cell_index_];
916  }
917 
918  inline Address CurrentCellBase() {
919  DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
920  chunk_->AddressToMarkbitIndex(cell_base_))));
921  return cell_base_;
922  }
923 
924  inline void Advance() {
925  cell_index_++;
926  cell_base_ += 32 * kPointerSize;
927  }
928 
929  private:
930  MemoryChunk* chunk_;
931  MarkBit::CellType* cells_;
932  unsigned int last_cell_index_;
933  unsigned int cell_index_;
934  Address cell_base_;
935 };
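A minimal usage sketch for the iterator above (assumes a valid MemoryChunk* in V8-internal code; the helper name is hypothetical). Each cell covers 32 consecutive mark bits, i.e. 32 pointer-sized words starting at the cell base.
void WalkMarkBitCells(MemoryChunk* chunk) {
  for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
    MarkBit::CellType* cell = it.CurrentCell();
    Address base = it.CurrentCellBase();
    if (*cell == 0) continue;  // no object starts in this 32-word region
    // ... bit k of *cell corresponds to the word at base + k * kPointerSize ...
  }
}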
936 
937 
938 class SequentialSweepingScope BASE_EMBEDDED {
939  public:
940  explicit SequentialSweepingScope(MarkCompactCollector* collector)
941  : collector_(collector) {
942  collector_->set_sequential_sweeping(true);
943  }
944 
945  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }
946 
947  private:
948  MarkCompactCollector* collector_;
949 };
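Typical RAII usage of the scope above (a sketch; collector is assumed to be a valid MarkCompactCollector* and the function name is hypothetical):
void SweepWithoutHelperThreads(MarkCompactCollector* collector) {
  SequentialSweepingScope scope(collector);
  // Any sweeping triggered while |scope| is alive observes
  // sequential_sweeping() == true; the flag is reset on scope exit.
}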
950 
951 
953 }
954 } // namespace v8::internal
955 
956 #endif // V8_HEAP_MARK_COMPACT_H_