mark-compact.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/code-stubs.h"
10 #include "src/compilation-cache.h"
11 #include "src/cpu-profiler.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/gdb-jit.h"
15 #include "src/global-handles.h"
16 #include "src/heap/incremental-marking.h"
17 #include "src/heap/mark-compact.h"
18 #include "src/heap/objects-visiting.h"
19 #include "src/heap/objects-visiting-inl.h"
20 #include "src/heap/spaces-inl.h"
21 #include "src/heap/sweeper-thread.h"
22 #include "src/heap-profiler.h"
23 #include "src/ic/ic.h"
24 #include "src/ic/stub-cache.h"
25 
26 namespace v8 {
27 namespace internal {
28 
29 
30 const char* Marking::kWhiteBitPattern = "00";
31 const char* Marking::kBlackBitPattern = "10";
32 const char* Marking::kGreyBitPattern = "11";
33 const char* Marking::kImpossibleBitPattern = "01";
34 
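// The two-character strings above spell out the (mark bit, second bit) pair
// kept for every object in the page marking bitmap: white objects are
// unmarked, grey objects are marked but still queued on the marking deque,
// black objects are marked with all of their fields already visited, and the
// "01" combination can never occur.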
35 
36 // -------------------------------------------------------------------------
37 // MarkCompactCollector
38 
39 MarkCompactCollector::MarkCompactCollector(Heap* heap)
40     :  // NOLINT
41 #ifdef DEBUG
42  state_(IDLE),
43 #endif
44  reduce_memory_footprint_(false),
45  abort_incremental_marking_(false),
46  marking_parity_(ODD_MARKING_PARITY),
47  compacting_(false),
48  was_marked_incrementally_(false),
49  sweeping_in_progress_(false),
50  pending_sweeper_jobs_semaphore_(0),
51  sequential_sweeping_(false),
52  migration_slots_buffer_(NULL),
53  heap_(heap),
54  code_flusher_(NULL),
55  have_code_to_deoptimize_(false) {
56 }
57 
58 #ifdef VERIFY_HEAP
59 class VerifyMarkingVisitor : public ObjectVisitor {
60  public:
61  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
62 
63  void VisitPointers(Object** start, Object** end) {
64  for (Object** current = start; current < end; current++) {
65  if ((*current)->IsHeapObject()) {
66  HeapObject* object = HeapObject::cast(*current);
67  CHECK(heap_->mark_compact_collector()->IsMarked(object));
68  }
69  }
70  }
71 
72  void VisitEmbeddedPointer(RelocInfo* rinfo) {
73  DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
74  if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
75  Object* p = rinfo->target_object();
76  VisitPointer(&p);
77  }
78  }
79 
80  void VisitCell(RelocInfo* rinfo) {
81  Code* code = rinfo->host();
82  DCHECK(rinfo->rmode() == RelocInfo::CELL);
83  if (!code->IsWeakObject(rinfo->target_cell())) {
84  ObjectVisitor::VisitCell(rinfo);
85  }
86  }
87 
88  private:
89  Heap* heap_;
90 };
91 
92 
93 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
94  VerifyMarkingVisitor visitor(heap);
95  HeapObject* object;
96  Address next_object_must_be_here_or_later = bottom;
97 
98  for (Address current = bottom; current < top; current += kPointerSize) {
99  object = HeapObject::FromAddress(current);
100  if (MarkCompactCollector::IsMarked(object)) {
101  CHECK(current >= next_object_must_be_here_or_later);
102  object->Iterate(&visitor);
103  next_object_must_be_here_or_later = current + object->Size();
104  }
105  }
106 }
107 
108 
109 static void VerifyMarking(NewSpace* space) {
110  Address end = space->top();
111  NewSpacePageIterator it(space->bottom(), end);
112  // The bottom position is at the start of its page. Allows us to use
113  // page->area_start() as start of range on all pages.
114   CHECK_EQ(space->bottom(),
115            NewSpacePage::FromAddress(space->bottom())->area_start());
116  while (it.has_next()) {
117  NewSpacePage* page = it.next();
118  Address limit = it.has_next() ? page->area_end() : end;
119  CHECK(limit == end || !page->Contains(end));
120  VerifyMarking(space->heap(), page->area_start(), limit);
121  }
122 }
123 
124 
125 static void VerifyMarking(PagedSpace* space) {
126  PageIterator it(space);
127 
128  while (it.has_next()) {
129  Page* p = it.next();
130  VerifyMarking(space->heap(), p->area_start(), p->area_end());
131  }
132 }
133 
134 
135 static void VerifyMarking(Heap* heap) {
136  VerifyMarking(heap->old_pointer_space());
137  VerifyMarking(heap->old_data_space());
138  VerifyMarking(heap->code_space());
139  VerifyMarking(heap->cell_space());
140  VerifyMarking(heap->property_cell_space());
141  VerifyMarking(heap->map_space());
142  VerifyMarking(heap->new_space());
143 
144  VerifyMarkingVisitor visitor(heap);
145 
146  LargeObjectIterator it(heap->lo_space());
147   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
148     if (MarkCompactCollector::IsMarked(obj)) {
149       obj->Iterate(&visitor);
150  }
151  }
152 
153  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
154 }
155 
156 
157 class VerifyEvacuationVisitor : public ObjectVisitor {
158  public:
159  void VisitPointers(Object** start, Object** end) {
160  for (Object** current = start; current < end; current++) {
161  if ((*current)->IsHeapObject()) {
162  HeapObject* object = HeapObject::cast(*current);
163  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
164  }
165  }
166  }
167 };
168 
169 
170 static void VerifyEvacuation(Page* page) {
171  VerifyEvacuationVisitor visitor;
172  HeapObjectIterator iterator(page, NULL);
173  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
174  heap_object = iterator.Next()) {
175  // We skip free space objects.
176  if (!heap_object->IsFiller()) {
177  heap_object->Iterate(&visitor);
178  }
179  }
180 }
181 
182 
183 static void VerifyEvacuation(NewSpace* space) {
184  NewSpacePageIterator it(space->bottom(), space->top());
185  VerifyEvacuationVisitor visitor;
186 
187  while (it.has_next()) {
188  NewSpacePage* page = it.next();
189  Address current = page->area_start();
190  Address limit = it.has_next() ? page->area_end() : space->top();
191  CHECK(limit == space->top() || !page->Contains(space->top()));
192  while (current < limit) {
193  HeapObject* object = HeapObject::FromAddress(current);
194  object->Iterate(&visitor);
195  current += object->Size();
196  }
197  }
198 }
199 
200 
201 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
202  if (FLAG_use_allocation_folding &&
203  (space == heap->old_pointer_space() || space == heap->old_data_space())) {
204  return;
205  }
206  PageIterator it(space);
207 
208  while (it.has_next()) {
209  Page* p = it.next();
210  if (p->IsEvacuationCandidate()) continue;
211  VerifyEvacuation(p);
212  }
213 }
214 
215 
216 static void VerifyEvacuation(Heap* heap) {
217  VerifyEvacuation(heap, heap->old_pointer_space());
218  VerifyEvacuation(heap, heap->old_data_space());
219  VerifyEvacuation(heap, heap->code_space());
220  VerifyEvacuation(heap, heap->cell_space());
221  VerifyEvacuation(heap, heap->property_cell_space());
222  VerifyEvacuation(heap, heap->map_space());
223  VerifyEvacuation(heap->new_space());
224 
225  VerifyEvacuationVisitor visitor;
226  heap->IterateStrongRoots(&visitor, VISIT_ALL);
227 }
228 #endif // VERIFY_HEAP
229 
230 
231 #ifdef DEBUG
232 class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
233  public:
234  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
235 
236  void VisitPointers(Object** start, Object** end) {
237  for (Object** current = start; current < end; current++) {
238  if ((*current)->IsHeapObject()) {
239  HeapObject* object = HeapObject::cast(*current);
240  if (object->IsString()) continue;
241  switch (object->map()->instance_type()) {
242  case JS_FUNCTION_TYPE:
243  CheckContext(JSFunction::cast(object)->context());
244           break;
245         case JS_GLOBAL_PROXY_TYPE:
246           CheckContext(JSGlobalProxy::cast(object)->native_context());
247           break;
248         case JS_GLOBAL_OBJECT_TYPE:
249         case JS_BUILTINS_OBJECT_TYPE:
250           CheckContext(GlobalObject::cast(object)->native_context());
251  break;
252  case JS_ARRAY_TYPE:
253  case JS_DATE_TYPE:
254  case JS_OBJECT_TYPE:
255  case JS_REGEXP_TYPE:
256  VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
257  break;
258  case MAP_TYPE:
259  VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
260  VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
261  break;
262  case FIXED_ARRAY_TYPE:
263  if (object->IsContext()) {
264  CheckContext(object);
265  } else {
266  FixedArray* array = FixedArray::cast(object);
267  int length = array->length();
268  // Set array length to zero to prevent cycles while iterating
269  // over array bodies, this is easier than intrusive marking.
270  array->set_length(0);
271  array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
272  this);
273  array->set_length(length);
274  }
275  break;
276  case CELL_TYPE:
277  case JS_PROXY_TYPE:
278         case JS_VALUE_TYPE:
279         case TYPE_FEEDBACK_INFO_TYPE:
280           object->Iterate(this);
281           break;
282         case DECLARED_ACCESSOR_INFO_TYPE:
283         case EXECUTABLE_ACCESSOR_INFO_TYPE:
284         case BYTE_ARRAY_TYPE:
285         case CALL_HANDLER_INFO_TYPE:
286         case CODE_TYPE:
287         case FIXED_DOUBLE_ARRAY_TYPE:
288         case HEAP_NUMBER_TYPE:
289         case MUTABLE_HEAP_NUMBER_TYPE:
290         case INTERCEPTOR_INFO_TYPE:
291         case ODDBALL_TYPE:
292         case SCRIPT_TYPE:
293         case SHARED_FUNCTION_INFO_TYPE:
294           break;
295  default:
296  UNREACHABLE();
297  }
298  }
299  }
300  }
301 
302  private:
303  void CheckContext(Object* context) {
304  if (!context->IsContext()) return;
305  Context* native_context = Context::cast(context)->native_context();
306  if (current_native_context_ == NULL) {
307  current_native_context_ = native_context;
308  } else {
309  CHECK_EQ(current_native_context_, native_context);
310  }
311  }
312 
313  Context* current_native_context_;
314 };
315 
316 
317 static void VerifyNativeContextSeparation(Heap* heap) {
318  HeapObjectIterator it(heap->code_space());
319 
320  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
321  VerifyNativeContextSeparationVisitor visitor;
322  Code::cast(object)->CodeIterateBody(&visitor);
323  }
324 }
325 #endif
326 
327 
328 void MarkCompactCollector::SetUp() {
329   free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
330   free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
331 }
332 
333 
334 void MarkCompactCollector::TearDown() { AbortCompaction(); }
335 
336 
337 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
338   p->MarkEvacuationCandidate();
339   evacuation_candidates_.Add(p);
340 }
341 
342 
343 static void TraceFragmentation(PagedSpace* space) {
344   int number_of_pages = space->CountTotalPages();
345  intptr_t reserved = (number_of_pages * space->AreaSize());
346  intptr_t free = reserved - space->SizeOfObjects();
347  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
348  AllocationSpaceName(space->identity()), number_of_pages,
349  static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
350 }
351 
352 
353 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
354   if (!compacting_) {
355  DCHECK(evacuation_candidates_.length() == 0);
356 
357 #ifdef ENABLE_GDB_JIT_INTERFACE
358  // If GDBJIT interface is active disable compaction.
359  if (FLAG_gdbjit) return false;
360 #endif
361 
362  CollectEvacuationCandidates(heap()->old_pointer_space());
363  CollectEvacuationCandidates(heap()->old_data_space());
364 
365  if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
366  FLAG_incremental_code_compaction)) {
367  CollectEvacuationCandidates(heap()->code_space());
368  } else if (FLAG_trace_fragmentation) {
369  TraceFragmentation(heap()->code_space());
370  }
371 
372  if (FLAG_trace_fragmentation) {
373  TraceFragmentation(heap()->map_space());
374  TraceFragmentation(heap()->cell_space());
375  TraceFragmentation(heap()->property_cell_space());
376  }
377 
378     heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
379     heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
380     heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
381 
382  compacting_ = evacuation_candidates_.length() > 0;
383  }
384 
385  return compacting_;
386 }
387 
388 
389 void MarkCompactCollector::CollectGarbage() {
390   // Make sure that Prepare() has been called. The individual steps below will
391  // update the state as they proceed.
392  DCHECK(state_ == PREPARE_GC);
393 
394   MarkLiveObjects();
395   DCHECK(heap_->incremental_marking()->IsStopped());
396 
397  if (FLAG_collect_maps) ClearNonLiveReferences();
398 
399   ClearWeakCollections();
400 
401 #ifdef VERIFY_HEAP
402  if (FLAG_verify_heap) {
403  VerifyMarking(heap_);
404  }
405 #endif
406 
407  SweepSpaces();
408 
409 #ifdef DEBUG
410  if (FLAG_verify_native_context_separation) {
411  VerifyNativeContextSeparation(heap_);
412  }
413 #endif
414 
415 #ifdef VERIFY_HEAP
416  if (heap()->weak_embedded_objects_verification_enabled()) {
417  VerifyWeakEmbeddedObjectsInCode();
418  }
419  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
420  VerifyOmittedMapChecks();
421  }
422 #endif
423 
424  Finish();
425 
426   if (marking_parity_ == EVEN_MARKING_PARITY) {
427     marking_parity_ = ODD_MARKING_PARITY;
428   } else {
429     DCHECK(marking_parity_ == ODD_MARKING_PARITY);
430     marking_parity_ = EVEN_MARKING_PARITY;
431   }
432 }
433 
434 
435 #ifdef VERIFY_HEAP
436 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
437  PageIterator it(space);
438 
439  while (it.has_next()) {
440  Page* p = it.next();
441  CHECK(p->markbits()->IsClean());
442  CHECK_EQ(0, p->LiveBytes());
443  }
444 }
445 
446 
447 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
448  NewSpacePageIterator it(space->bottom(), space->top());
449 
450  while (it.has_next()) {
451  NewSpacePage* p = it.next();
452  CHECK(p->markbits()->IsClean());
453  CHECK_EQ(0, p->LiveBytes());
454  }
455 }
456 
457 
458 void MarkCompactCollector::VerifyMarkbitsAreClean() {
459  VerifyMarkbitsAreClean(heap_->old_pointer_space());
460  VerifyMarkbitsAreClean(heap_->old_data_space());
461  VerifyMarkbitsAreClean(heap_->code_space());
462  VerifyMarkbitsAreClean(heap_->cell_space());
463  VerifyMarkbitsAreClean(heap_->property_cell_space());
464  VerifyMarkbitsAreClean(heap_->map_space());
465  VerifyMarkbitsAreClean(heap_->new_space());
466 
467  LargeObjectIterator it(heap_->lo_space());
468  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
469  MarkBit mark_bit = Marking::MarkBitFrom(obj);
470  CHECK(Marking::IsWhite(mark_bit));
471  CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
472  }
473 }
474 
475 
476 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
477  HeapObjectIterator code_iterator(heap()->code_space());
478  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
479  obj = code_iterator.Next()) {
480  Code* code = Code::cast(obj);
481  if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
482  if (WillBeDeoptimized(code)) continue;
483  code->VerifyEmbeddedObjectsDependency();
484  }
485 }
486 
487 
488 void MarkCompactCollector::VerifyOmittedMapChecks() {
489  HeapObjectIterator iterator(heap()->map_space());
490  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
491  Map* map = Map::cast(obj);
492  map->VerifyOmittedMapChecks();
493  }
494 }
495 #endif // VERIFY_HEAP
496 
497 
498 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
499   PageIterator it(space);
500 
501  while (it.has_next()) {
502  Bitmap::Clear(it.next());
503  }
504 }
505 
506 
507 static void ClearMarkbitsInNewSpace(NewSpace* space) {
508   NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
509 
510  while (it.has_next()) {
511  Bitmap::Clear(it.next());
512  }
513 }
514 
515 
516 void MarkCompactCollector::ClearMarkbits() {
517   ClearMarkbitsInPagedSpace(heap_->code_space());
518   ClearMarkbitsInPagedSpace(heap_->map_space());
519   ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
520   ClearMarkbitsInPagedSpace(heap_->old_data_space());
521   ClearMarkbitsInPagedSpace(heap_->cell_space());
522   ClearMarkbitsInPagedSpace(heap_->property_cell_space());
523   ClearMarkbitsInNewSpace(heap_->new_space());
524 
525   LargeObjectIterator it(heap_->lo_space());
526   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
527  MarkBit mark_bit = Marking::MarkBitFrom(obj);
528  mark_bit.Clear();
529  mark_bit.Next().Clear();
530  Page::FromAddress(obj->address())->ResetProgressBar();
531  Page::FromAddress(obj->address())->ResetLiveBytes();
532  }
533 }
534 
535 
536 class MarkCompactCollector::SweeperTask : public v8::Task {
537  public:
538   SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
539 
540   virtual ~SweeperTask() {}
541 
542  private:
543   // v8::Task overrides.
544   virtual void Run() OVERRIDE {
545     heap_->mark_compact_collector()->SweepInParallel(space_, 0);
546     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
547   }
548 
549   Heap* heap_;
550   PagedSpace* space_;
551 
552   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
553 };
554 
555 
556 void MarkCompactCollector::StartSweeperThreads() {
557   DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
558   DCHECK(free_list_old_data_space_.get()->IsEmpty());
559   sweeping_in_progress_ = true;
560   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
561     isolate()->sweeper_threads()[i]->StartSweeping();
562   }
563   if (FLAG_job_based_sweeping) {
564     V8::GetCurrentPlatform()->CallOnBackgroundThread(
565         new SweeperTask(heap(), heap()->old_data_space()),
566         v8::Platform::kShortRunningTask);
567     V8::GetCurrentPlatform()->CallOnBackgroundThread(
568         new SweeperTask(heap(), heap()->old_pointer_space()),
569         v8::Platform::kShortRunningTask);
570   }
571 }
572 
573 
574 void MarkCompactCollector::EnsureSweepingCompleted() {
575   DCHECK(sweeping_in_progress_ == true);
576 
577  // If sweeping is not completed, we try to complete it here. If we do not
578  // have sweeper threads we have to complete since we do not have a good
579  // indicator for a swept space in that case.
580   if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
581     SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
582  SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
583  }
584 
585   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
586     isolate()->sweeper_threads()[i]->WaitForSweeperThread();
587   }
588  if (FLAG_job_based_sweeping) {
589     // Wait twice for both jobs.
590     pending_sweeper_jobs_semaphore_.Wait();
591     pending_sweeper_jobs_semaphore_.Wait();
592   }
593   ParallelSweepSpacesComplete();
594   sweeping_in_progress_ = false;
595  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
596   RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
597   heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
598   heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
599 
600 #ifdef VERIFY_HEAP
601  if (FLAG_verify_heap) {
602  VerifyEvacuation(heap_);
603  }
604 #endif
605 }
606 
607 
608 bool MarkCompactCollector::IsSweepingCompleted() {
609   for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
610  if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
611  return false;
612  }
613  }
614 
615  if (FLAG_job_based_sweeping) {
616  if (!pending_sweeper_jobs_semaphore_.WaitFor(
617  base::TimeDelta::FromSeconds(0))) {
618  return false;
619  }
620     pending_sweeper_jobs_semaphore_.Signal();
621   }
622 
623  return true;
624 }
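// The semaphore protocol: each SweeperTask::Run() signals
// pending_sweeper_jobs_semaphore_ exactly once. IsSweepingCompleted() probes
// the semaphore with a zero timeout and, on success, immediately signals it
// back, so the two blocking Wait() calls in EnsureSweepingCompleted() still
// observe both task completions.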
625 
626 
627 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
628   FreeList* free_list;
629 
630  if (space == heap()->old_pointer_space()) {
631  free_list = free_list_old_pointer_space_.get();
632  } else if (space == heap()->old_data_space()) {
633  free_list = free_list_old_data_space_.get();
634  } else {
635  // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
636  // to only refill them for old data and pointer spaces.
637  return;
638  }
639 
640  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
641  space->AddToAccountingStats(freed_bytes);
642  space->DecrementUnsweptFreeBytes(freed_bytes);
643 }
644 
645 
646 bool MarkCompactCollector::AreSweeperThreadsActivated() {
647   return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
648 }
649 
650 
651 void Marking::TransferMark(Address old_start, Address new_start) {
652  // This is only used when resizing an object.
653  DCHECK(MemoryChunk::FromAddress(old_start) ==
654  MemoryChunk::FromAddress(new_start));
655 
656  if (!heap_->incremental_marking()->IsMarking()) return;
657 
658  // If the mark doesn't move, we don't check the color of the object.
659  // It doesn't matter whether the object is black, since it hasn't changed
660  // size, so the adjustment to the live data count will be zero anyway.
661  if (old_start == new_start) return;
662 
663  MarkBit new_mark_bit = MarkBitFrom(new_start);
664  MarkBit old_mark_bit = MarkBitFrom(old_start);
665 
666 #ifdef DEBUG
667  ObjectColor old_color = Color(old_mark_bit);
668 #endif
669 
670  if (Marking::IsBlack(old_mark_bit)) {
671  old_mark_bit.Clear();
672  DCHECK(IsWhite(old_mark_bit));
673  Marking::MarkBlack(new_mark_bit);
674  return;
675  } else if (Marking::IsGrey(old_mark_bit)) {
676  old_mark_bit.Clear();
677  old_mark_bit.Next().Clear();
678  DCHECK(IsWhite(old_mark_bit));
679     heap_->incremental_marking()->WhiteToGreyAndPush(
680         HeapObject::FromAddress(new_start), new_mark_bit);
681     heap_->incremental_marking()->RestartIfNotMarking();
682   }
683 
684 #ifdef DEBUG
685  ObjectColor new_color = Color(new_mark_bit);
686  DCHECK(new_color == old_color);
687 #endif
688 }
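// In short: when an object is resized in place while incremental marking is
// active, its colour travels with it. A black object stays black at the new
// start address, and a grey object is pushed again for the incremental marker
// so that its fields are still visited.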
689 
690 
691 const char* AllocationSpaceName(AllocationSpace space) {
692   switch (space) {
693  case NEW_SPACE:
694  return "NEW_SPACE";
695  case OLD_POINTER_SPACE:
696  return "OLD_POINTER_SPACE";
697  case OLD_DATA_SPACE:
698  return "OLD_DATA_SPACE";
699  case CODE_SPACE:
700  return "CODE_SPACE";
701  case MAP_SPACE:
702  return "MAP_SPACE";
703  case CELL_SPACE:
704  return "CELL_SPACE";
705  case PROPERTY_CELL_SPACE:
706  return "PROPERTY_CELL_SPACE";
707  case LO_SPACE:
708  return "LO_SPACE";
709  default:
710  UNREACHABLE();
711  }
712 
713  return NULL;
714 }
715 
716 
717 // Returns zero for pages that have so little fragmentation that it is not
718 // worth defragmenting them. Otherwise a positive integer that gives an
719 // estimate of fragmentation on an arbitrary scale.
720 static int FreeListFragmentation(PagedSpace* space, Page* p) {
721   // If page was not swept then there are no free list items on it.
722  if (!p->WasSwept()) {
723  if (FLAG_trace_fragmentation) {
724  PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
725  AllocationSpaceName(space->identity()), p->LiveBytes());
726  }
727  return 0;
728  }
729 
730  PagedSpace::SizeStats sizes;
731  space->ObtainFreeListStatistics(p, &sizes);
732 
733  intptr_t ratio;
734  intptr_t ratio_threshold;
735  intptr_t area_size = space->AreaSize();
736  if (space->identity() == CODE_SPACE) {
737  ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
738  ratio_threshold = 10;
739  } else {
740  ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
741  ratio_threshold = 15;
742  }
743 
744  if (FLAG_trace_fragmentation) {
745  PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
746  reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
747  static_cast<int>(sizes.small_size_),
748  static_cast<double>(sizes.small_size_ * 100) / area_size,
749  static_cast<int>(sizes.medium_size_),
750  static_cast<double>(sizes.medium_size_ * 100) / area_size,
751  static_cast<int>(sizes.large_size_),
752  static_cast<double>(sizes.large_size_ * 100) / area_size,
753  static_cast<int>(sizes.huge_size_),
754  static_cast<double>(sizes.huge_size_ * 100) / area_size,
755  (ratio > ratio_threshold) ? "[fragmented]" : "");
756  }
757 
758  if (FLAG_always_compact && sizes.Total() != area_size) {
759  return 1;
760  }
761 
762  if (ratio <= ratio_threshold) return 0; // Not fragmented.
763 
764  return static_cast<int>(ratio - ratio_threshold);
765 }
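// Worked example for the ratio above, assuming a page area of 1 MB: a
// code-space page with 40 KB of medium and 20 KB of large free-list entries
// scores (40960 * 10 + 20480 * 2) * 100 / 1048576 == 42, which exceeds the
// threshold of 10, so the page reports a fragmentation value of 42 - 10 = 32.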
766 
767 
768 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
769   DCHECK(space->identity() == OLD_POINTER_SPACE ||
770  space->identity() == OLD_DATA_SPACE ||
771  space->identity() == CODE_SPACE);
772 
773  static const int kMaxMaxEvacuationCandidates = 1000;
774  int number_of_pages = space->CountTotalPages();
775  int max_evacuation_candidates =
776  static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
777 
778  if (FLAG_stress_compaction || FLAG_always_compact) {
779  max_evacuation_candidates = kMaxMaxEvacuationCandidates;
780  }
781 
782  class Candidate {
783  public:
784  Candidate() : fragmentation_(0), page_(NULL) {}
785  Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
786 
787  int fragmentation() { return fragmentation_; }
788  Page* page() { return page_; }
789 
790  private:
791  int fragmentation_;
792  Page* page_;
793  };
794 
795  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
796 
797  CompactionMode mode = COMPACT_FREE_LISTS;
798 
799  intptr_t reserved = number_of_pages * space->AreaSize();
800  intptr_t over_reserved = reserved - space->SizeOfObjects();
801  static const intptr_t kFreenessThreshold = 50;
802 
803  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
804  // If reduction of memory footprint was requested, we are aggressive
805  // about choosing pages to free. We expect that half-empty pages
806  // are easier to compact so slightly bump the limit.
807  mode = REDUCE_MEMORY_FOOTPRINT;
808  max_evacuation_candidates += 2;
809  }
810 
811 
812  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
813  // If over-usage is very high (more than a third of the space), we
814  // try to free all mostly empty pages. We expect that almost empty
815  // pages are even easier to compact so bump the limit even more.
816  mode = REDUCE_MEMORY_FOOTPRINT;
817  max_evacuation_candidates *= 2;
818  }
819 
820  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
821  PrintF(
822  "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
823  "evacuation candidate limit: %d\n",
824  static_cast<double>(over_reserved) / MB,
825  static_cast<double>(reserved) / MB,
826  static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
827  }
828 
829  intptr_t estimated_release = 0;
830 
831  Candidate candidates[kMaxMaxEvacuationCandidates];
832 
833  max_evacuation_candidates =
834  Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
835 
836  int count = 0;
837  int fragmentation = 0;
838  Candidate* least = NULL;
839 
840  PageIterator it(space);
841  if (it.has_next()) it.next(); // Never compact the first page.
842 
843  while (it.has_next()) {
844  Page* p = it.next();
846 
847  if (FLAG_stress_compaction) {
848  unsigned int counter = space->heap()->ms_count();
849  uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
850  if ((counter & 1) == (page_number & 1)) fragmentation = 1;
851  } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
852  // Don't try to release too many pages.
853  if (estimated_release >= over_reserved) {
854  continue;
855  }
856 
857  intptr_t free_bytes = 0;
858 
859  if (!p->WasSwept()) {
860  free_bytes = (p->area_size() - p->LiveBytes());
861  } else {
862  PagedSpace::SizeStats sizes;
863  space->ObtainFreeListStatistics(p, &sizes);
864  free_bytes = sizes.Total();
865  }
866 
867  int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
868 
869  if (free_pct >= kFreenessThreshold) {
870  estimated_release += free_bytes;
871  fragmentation = free_pct;
872  } else {
873  fragmentation = 0;
874  }
875 
876  if (FLAG_trace_fragmentation) {
877  PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
878  AllocationSpaceName(space->identity()),
879  static_cast<int>(free_bytes),
880  static_cast<double>(free_bytes * 100) / p->area_size(),
881  (fragmentation > 0) ? "[fragmented]" : "");
882  }
883  } else {
884  fragmentation = FreeListFragmentation(space, p);
885  }
886 
887  if (fragmentation != 0) {
888  if (count < max_evacuation_candidates) {
889  candidates[count++] = Candidate(fragmentation, p);
890  } else {
891  if (least == NULL) {
892  for (int i = 0; i < max_evacuation_candidates; i++) {
893  if (least == NULL ||
894  candidates[i].fragmentation() < least->fragmentation()) {
895  least = candidates + i;
896  }
897  }
898  }
899  if (least->fragmentation() < fragmentation) {
900  *least = Candidate(fragmentation, p);
901  least = NULL;
902  }
903  }
904  }
905  }
906 
907  for (int i = 0; i < count; i++) {
908  AddEvacuationCandidate(candidates[i].page());
909  }
910 
911  if (count > 0 && FLAG_trace_fragmentation) {
912  PrintF("Collected %d evacuation candidates for space %s\n", count,
913  AllocationSpaceName(space->identity()));
914  }
915 }
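// Summary of the selection policy: the candidate limit grows roughly with
// sqrt(number_of_pages), pages are scored either by free-list fragmentation
// (COMPACT_FREE_LISTS) or by their free percentage against kFreenessThreshold
// (REDUCE_MEMORY_FOOTPRINT), and once the candidate array is full a new page
// only replaces the currently least fragmented entry.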
916 
917 
918 void MarkCompactCollector::AbortCompaction() {
919   if (compacting_) {
920     int npages = evacuation_candidates_.length();
921     for (int i = 0; i < npages; i++) {
922       Page* p = evacuation_candidates_[i];
923       slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
924       p->ClearEvacuationCandidate();
925       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
926     }
927  compacting_ = false;
928  evacuation_candidates_.Rewind(0);
929  invalidated_code_.Rewind(0);
930  }
931  DCHECK_EQ(0, evacuation_candidates_.length());
932 }
933 
934 
935 void MarkCompactCollector::Prepare() {
936   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
937 
938 #ifdef DEBUG
939  DCHECK(state_ == IDLE);
940  state_ = PREPARE_GC;
941 #endif
942 
943  DCHECK(!FLAG_never_compact || !FLAG_always_compact);
944 
945  if (sweeping_in_progress()) {
946  // Instead of waiting we could also abort the sweeper threads here.
947     EnsureSweepingCompleted();
948   }
949 
950   // Clear marking bits if incremental marking is aborted.
951   if (was_marked_incrementally_ && abort_incremental_marking_) {
952     heap()->incremental_marking()->Abort();
953     ClearMarkbits();
954     AbortWeakCollections();
955     AbortCompaction();
956     was_marked_incrementally_ = false;
957   }
958 
959  // Don't start compaction if we are in the middle of incremental
960  // marking cycle. We did not collect any slots.
961  if (!FLAG_never_compact && !was_marked_incrementally_) {
962     StartCompaction(NON_INCREMENTAL_COMPACTION);
963   }
964 
965  PagedSpaces spaces(heap());
966  for (PagedSpace* space = spaces.next(); space != NULL;
967  space = spaces.next()) {
968  space->PrepareForMarkCompact();
969  }
970 
971 #ifdef VERIFY_HEAP
972  if (!was_marked_incrementally_ && FLAG_verify_heap) {
973  VerifyMarkbitsAreClean();
974  }
975 #endif
976 }
977 
978 
979 void MarkCompactCollector::Finish() {
980 #ifdef DEBUG
981  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
982  state_ = IDLE;
983 #endif
984  // The stub cache is not traversed during GC; clear the cache to
985  // force lazy re-initialization of it. This must be done after the
986  // GC, because it relies on the new address of certain old space
987  // objects (empty string, illegal builtin).
988  isolate()->stub_cache()->Clear();
989 
990   if (have_code_to_deoptimize_) {
991     // Some code objects were marked for deoptimization during the GC.
992     Deoptimizer::DeoptimizeMarkedCode(isolate());
993     have_code_to_deoptimize_ = false;
994  }
995 }
996 
997 
998 // -------------------------------------------------------------------------
999 // Phase 1: tracing and marking live objects.
1000 // before: all objects are in normal state.
1001 // after: a live object's map pointer is marked as '00'.
1002 
1003 // Marking all live objects in the heap as part of mark-sweep or mark-compact
1004 // collection. Before marking, all objects are in their normal state. After
1005 // marking, live objects' map pointers are marked indicating that the object
1006 // has been found reachable.
1007 //
1008 // The marking algorithm is a (mostly) depth-first (because of possible stack
1009 // overflow) traversal of the graph of objects reachable from the roots. It
1010 // uses an explicit stack of pointers rather than recursion. The young
1011 // generation's inactive ('from') space is used as a marking stack. The
1012 // objects in the marking stack are the ones that have been reached and marked
1013 // but their children have not yet been visited.
1014 //
1015 // The marking stack can overflow during traversal. In that case, we set an
1016 // overflow flag. When the overflow flag is set, we continue marking objects
1017 // reachable from the objects on the marking stack, but no longer push them on
1018 // the marking stack. Instead, we mark them as both marked and overflowed.
1019 // When the stack is in the overflowed state, objects marked as overflowed
1020 // have been reached and marked but their children have not been visited yet.
1021 // After emptying the marking stack, we clear the overflow flag and traverse
1022 // the heap looking for objects marked as overflowed, push them on the stack,
1023 // and continue with marking. This process repeats until all reachable
1024 // objects have been marked.
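// A rough sketch of the resulting driver loop, in terms of the helpers that
// appear later in this file (EmptyMarkingDeque / RefillMarkingDeque):
//
//   EmptyMarkingDeque();
//   while (marking_deque_.overflowed()) {
//     RefillMarkingDeque();   // rediscover objects marked as overflowed
//     EmptyMarkingDeque();    // drain again; may overflow once more
//   }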
1025 
1026 void CodeFlusher::ProcessJSFunctionCandidates() {
1027   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
1028  Object* undefined = isolate_->heap()->undefined_value();
1029 
1030   JSFunction* candidate = jsfunction_candidates_head_;
1031   JSFunction* next_candidate;
1032  while (candidate != NULL) {
1033  next_candidate = GetNextCandidate(candidate);
1034  ClearNextCandidate(candidate, undefined);
1035 
1036  SharedFunctionInfo* shared = candidate->shared();
1037 
1038  Code* code = shared->code();
1039  MarkBit code_mark = Marking::MarkBitFrom(code);
1040  if (!code_mark.Get()) {
1041  if (FLAG_trace_code_flushing && shared->is_compiled()) {
1042  PrintF("[code-flushing clears: ");
1043  shared->ShortPrint();
1044  PrintF(" - age: %d]\n", code->GetAge());
1045  }
1046  shared->set_code(lazy_compile);
1047  candidate->set_code(lazy_compile);
1048  } else {
1049  candidate->set_code(code);
1050  }
1051 
1052  // We are in the middle of a GC cycle so the write barrier in the code
1053  // setter did not record the slot update and we have to do that manually.
1054  Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1055  Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1056     isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
1057                                                                     target);
1058 
1059     Object** shared_code_slot =
1060         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1061  isolate_->heap()->mark_compact_collector()->RecordSlot(
1062  shared_code_slot, shared_code_slot, *shared_code_slot);
1063 
1064  candidate = next_candidate;
1065  }
1066 
1067   jsfunction_candidates_head_ = NULL;
1068 }
1069 
1070 
1071 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1072   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
1073 
1074   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1075   SharedFunctionInfo* next_candidate;
1076  while (candidate != NULL) {
1077  next_candidate = GetNextCandidate(candidate);
1078  ClearNextCandidate(candidate);
1079 
1080  Code* code = candidate->code();
1081  MarkBit code_mark = Marking::MarkBitFrom(code);
1082  if (!code_mark.Get()) {
1083  if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1084  PrintF("[code-flushing clears: ");
1085  candidate->ShortPrint();
1086  PrintF(" - age: %d]\n", code->GetAge());
1087  }
1088  candidate->set_code(lazy_compile);
1089  }
1090 
1091     Object** code_slot =
1092         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1093  isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
1094  *code_slot);
1095 
1096  candidate = next_candidate;
1097  }
1098 
1099   shared_function_info_candidates_head_ = NULL;
1100 }
1101 
1102 
1103 void CodeFlusher::ProcessOptimizedCodeMaps() {
1104   STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1105 
1106   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1107   SharedFunctionInfo* next_holder;
1108 
1109  while (holder != NULL) {
1110  next_holder = GetNextCodeMap(holder);
1111  ClearNextCodeMap(holder);
1112 
1113  FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1114  int new_length = SharedFunctionInfo::kEntriesStart;
1115  int old_length = code_map->length();
1116     for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
1117          i += SharedFunctionInfo::kEntryLength) {
1118  Code* code =
1119  Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1120  if (!Marking::MarkBitFrom(code).Get()) continue;
1121 
1122  // Move every slot in the entry.
1123  for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1124  int dst_index = new_length++;
1125  Object** slot = code_map->RawFieldOfElementAt(dst_index);
1126  Object* object = code_map->get(i + j);
1127  code_map->set(dst_index, object);
1128         if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1129           DCHECK(object->IsSmi());
1130  } else {
1131  DCHECK(
1132  Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
1133  isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
1134  *slot);
1135  }
1136  }
1137  }
1138 
1139  // Trim the optimized code map if entries have been removed.
1140  if (new_length < old_length) {
1141  holder->TrimOptimizedCodeMap(old_length - new_length);
1142  }
1143 
1144  holder = next_holder;
1145  }
1146 
1147   optimized_code_map_holder_head_ = NULL;
1148 }
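// The net effect is a compaction of the optimized code map: entries whose
// cached Code object is no longer marked are dropped by sliding the surviving
// kEntryLength-sized groups towards the front and trimming the array's tail.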
1149 
1150 
1151 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1152   // Make sure previous flushing decisions are revisited.
1153  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1154 
1155  if (FLAG_trace_code_flushing) {
1156  PrintF("[code-flushing abandons function-info: ");
1157  shared_info->ShortPrint();
1158  PrintF("]\n");
1159  }
1160 
1161   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1162   SharedFunctionInfo* next_candidate;
1163  if (candidate == shared_info) {
1164  next_candidate = GetNextCandidate(shared_info);
1165  shared_function_info_candidates_head_ = next_candidate;
1166  ClearNextCandidate(shared_info);
1167  } else {
1168  while (candidate != NULL) {
1169  next_candidate = GetNextCandidate(candidate);
1170 
1171  if (next_candidate == shared_info) {
1172  next_candidate = GetNextCandidate(shared_info);
1173  SetNextCandidate(candidate, next_candidate);
1174  ClearNextCandidate(shared_info);
1175  break;
1176  }
1177 
1178  candidate = next_candidate;
1179  }
1180  }
1181 }
1182 
1183 
1184 void CodeFlusher::EvictCandidate(JSFunction* function) {
1185   DCHECK(!function->next_function_link()->IsUndefined());
1186  Object* undefined = isolate_->heap()->undefined_value();
1187 
1188  // Make sure previous flushing decisions are revisited.
1189  isolate_->heap()->incremental_marking()->RecordWrites(function);
1190  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1191 
1192  if (FLAG_trace_code_flushing) {
1193  PrintF("[code-flushing abandons closure: ");
1194  function->shared()->ShortPrint();
1195  PrintF("]\n");
1196  }
1197 
1198   JSFunction* candidate = jsfunction_candidates_head_;
1199   JSFunction* next_candidate;
1200  if (candidate == function) {
1201  next_candidate = GetNextCandidate(function);
1202  jsfunction_candidates_head_ = next_candidate;
1203  ClearNextCandidate(function, undefined);
1204  } else {
1205  while (candidate != NULL) {
1206  next_candidate = GetNextCandidate(candidate);
1207 
1208  if (next_candidate == function) {
1209  next_candidate = GetNextCandidate(function);
1210  SetNextCandidate(candidate, next_candidate);
1211  ClearNextCandidate(function, undefined);
1212  break;
1213  }
1214 
1215  candidate = next_candidate;
1216  }
1217  }
1218 }
1219 
1220 
1221 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1222   DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
1223               ->get(SharedFunctionInfo::kNextMapIndex)
1224               ->IsUndefined());
1225 
1226  // Make sure previous flushing decisions are revisited.
1227  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1228 
1229  if (FLAG_trace_code_flushing) {
1230  PrintF("[code-flushing abandons code-map: ");
1231  code_map_holder->ShortPrint();
1232  PrintF("]\n");
1233  }
1234 
1235   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1236   SharedFunctionInfo* next_holder;
1237  if (holder == code_map_holder) {
1238  next_holder = GetNextCodeMap(code_map_holder);
1239  optimized_code_map_holder_head_ = next_holder;
1240  ClearNextCodeMap(code_map_holder);
1241  } else {
1242  while (holder != NULL) {
1243  next_holder = GetNextCodeMap(holder);
1244 
1245  if (next_holder == code_map_holder) {
1246  next_holder = GetNextCodeMap(code_map_holder);
1247  SetNextCodeMap(holder, next_holder);
1248  ClearNextCodeMap(code_map_holder);
1249  break;
1250  }
1251 
1252  holder = next_holder;
1253  }
1254  }
1255 }
1256 
1257 
1258 void CodeFlusher::EvictJSFunctionCandidates() {
1259   JSFunction* candidate = jsfunction_candidates_head_;
1260   JSFunction* next_candidate;
1261  while (candidate != NULL) {
1262  next_candidate = GetNextCandidate(candidate);
1263  EvictCandidate(candidate);
1264  candidate = next_candidate;
1265  }
1266   DCHECK(jsfunction_candidates_head_ == NULL);
1267 }
1268 
1269 
1270 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1271   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1272   SharedFunctionInfo* next_candidate;
1273  while (candidate != NULL) {
1274  next_candidate = GetNextCandidate(candidate);
1275  EvictCandidate(candidate);
1276  candidate = next_candidate;
1277  }
1278   DCHECK(shared_function_info_candidates_head_ == NULL);
1279 }
1280 
1281 
1282 void CodeFlusher::EvictOptimizedCodeMaps() {
1283   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1284   SharedFunctionInfo* next_holder;
1285  while (holder != NULL) {
1286  next_holder = GetNextCodeMap(holder);
1287  EvictOptimizedCodeMap(holder);
1288  holder = next_holder;
1289  }
1290   DCHECK(optimized_code_map_holder_head_ == NULL);
1291 }
1292 
1293 
1294 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1295   Heap* heap = isolate_->heap();
1296 
1297   JSFunction** slot = &jsfunction_candidates_head_;
1298   JSFunction* candidate = jsfunction_candidates_head_;
1299   while (candidate != NULL) {
1300  if (heap->InFromSpace(candidate)) {
1301  v->VisitPointer(reinterpret_cast<Object**>(slot));
1302  }
1303  candidate = GetNextCandidate(*slot);
1304  slot = GetNextCandidateSlot(*slot);
1305  }
1306 }
1307 
1308 
1309 MarkCompactCollector::~MarkCompactCollector() {
1310   if (code_flusher_ != NULL) {
1311  delete code_flusher_;
1312  code_flusher_ = NULL;
1313  }
1314 }
1315 
1316 
1317 static inline HeapObject* ShortCircuitConsString(Object** p) {
1318   // Optimization: If the heap object pointed to by p is a non-internalized
1319  // cons string whose right substring is HEAP->empty_string, update
1320  // it in place to its left substring. Return the updated value.
1321  //
1322  // Here we assume that if we change *p, we replace it with a heap object
1323  // (i.e., the left substring of a cons string is always a heap object).
1324  //
1325  // The check performed is:
1326  // object->IsConsString() && !object->IsInternalizedString() &&
1327  // (ConsString::cast(object)->second() == HEAP->empty_string())
1328  // except the maps for the object and its possible substrings might be
1329  // marked.
1330  HeapObject* object = HeapObject::cast(*p);
1331  if (!FLAG_clever_optimizations) return object;
1332  Map* map = object->map();
1333  InstanceType type = map->instance_type();
1334  if (!IsShortcutCandidate(type)) return object;
1335 
1336  Object* second = reinterpret_cast<ConsString*>(object)->second();
1337  Heap* heap = map->GetHeap();
1338  if (second != heap->empty_string()) {
1339  return object;
1340  }
1341 
1342  // Since we don't have the object's start, it is impossible to update the
1343  // page dirty marks. Therefore, we only replace the string with its left
1344  // substring when page dirty marks do not change.
1345  Object* first = reinterpret_cast<ConsString*>(object)->first();
1346  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1347 
1348  *p = first;
1349  return HeapObject::cast(first);
1350 }
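// Example: a ConsString produced by "foo" + "" has the empty string as its
// second part, so the slot is rewritten to point directly at the "foo" part
// and the marker (and everything after it) no longer sees the cons wrapper.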
1351 
1352 
1353 class MarkCompactMarkingVisitor
1354     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1355  public:
1356   static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
1357                                    HeapObject* obj);
1358 
1359  static void ObjectStatsCountFixedArray(
1360  FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1361  FixedArraySubInstanceType dictionary_type);
1362 
1363   template <MarkCompactMarkingVisitor::VisitorId id>
1364   class ObjectStatsTracker {
1365    public:
1366  static inline void Visit(Map* map, HeapObject* obj);
1367  };
1368 
1369  static void Initialize();
1370 
1371  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1372  MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1373  }
1374 
1375  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1376  // Mark all objects pointed to in [start, end).
1377  const int kMinRangeForMarkingRecursion = 64;
1378  if (end - start >= kMinRangeForMarkingRecursion) {
1379  if (VisitUnmarkedObjects(heap, start, end)) return;
1380  // We are close to a stack overflow, so just mark the objects.
1381  }
1382  MarkCompactCollector* collector = heap->mark_compact_collector();
1383  for (Object** p = start; p < end; p++) {
1384  MarkObjectByPointer(collector, start, p);
1385  }
1386  }
1387 
1388  // Marks the object black and pushes it on the marking stack.
1389  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1390  MarkBit mark = Marking::MarkBitFrom(object);
1391  heap->mark_compact_collector()->MarkObject(object, mark);
1392  }
1393 
1394  // Marks the object black without pushing it on the marking stack.
1395  // Returns true if object needed marking and false otherwise.
1396  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1397  MarkBit mark_bit = Marking::MarkBitFrom(object);
1398  if (!mark_bit.Get()) {
1399  heap->mark_compact_collector()->SetMark(object, mark_bit);
1400  return true;
1401  }
1402  return false;
1403  }
1404 
1405  // Mark object pointed to by p.
1406  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1407  Object** anchor_slot, Object** p)) {
1408  if (!(*p)->IsHeapObject()) return;
1409  HeapObject* object = ShortCircuitConsString(p);
1410  collector->RecordSlot(anchor_slot, p, object);
1411  MarkBit mark = Marking::MarkBitFrom(object);
1412  collector->MarkObject(object, mark);
1413  }
1414 
1415 
1416  // Visit an unmarked object.
1417  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1418  HeapObject* obj)) {
1419 #ifdef DEBUG
1420  DCHECK(collector->heap()->Contains(obj));
1421  DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1422 #endif
1423  Map* map = obj->map();
1424  Heap* heap = obj->GetHeap();
1425  MarkBit mark = Marking::MarkBitFrom(obj);
1426  heap->mark_compact_collector()->SetMark(obj, mark);
1427  // Mark the map pointer and the body.
1428  MarkBit map_mark = Marking::MarkBitFrom(map);
1429  heap->mark_compact_collector()->MarkObject(map, map_mark);
1430  IterateBody(map, obj);
1431  }
1432 
1433  // Visit all unmarked objects pointed to by [start, end).
1434  // Returns false if the operation fails (lack of stack space).
1435  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
1436  Object** end)) {
1437     // Return false if we are close to the stack limit.
1438  StackLimitCheck check(heap->isolate());
1439  if (check.HasOverflowed()) return false;
1440 
1441  MarkCompactCollector* collector = heap->mark_compact_collector();
1442  // Visit the unmarked objects.
1443  for (Object** p = start; p < end; p++) {
1444  Object* o = *p;
1445  if (!o->IsHeapObject()) continue;
1446  collector->RecordSlot(start, p, o);
1447  HeapObject* obj = HeapObject::cast(o);
1448  MarkBit mark = Marking::MarkBitFrom(obj);
1449  if (mark.Get()) continue;
1450  VisitUnmarkedObject(collector, obj);
1451  }
1452  return true;
1453  }
1454 
1455  private:
1456  template <int id>
1457  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1458 
1459  // Code flushing support.
1460 
1461  static const int kRegExpCodeThreshold = 5;
1462 
1463   static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1464                                           bool is_one_byte) {
1465  // Make sure that the fixed array is in fact initialized on the RegExp.
1466  // We could potentially trigger a GC when initializing the RegExp.
1467     if (HeapObject::cast(re->data())->map()->instance_type() !=
1468         FIXED_ARRAY_TYPE)
1469       return;
1470 
1471  // Make sure this is a RegExp that actually contains code.
1472  if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1473 
1474  Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1475  if (!code->IsSmi() &&
1476  HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1477  // Save a copy that can be reinstated if we need the code again.
1478  re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1479 
1480  // Saving a copy might create a pointer into compaction candidate
1481  // that was not observed by marker. This might happen if JSRegExp data
1482  // was marked through the compilation cache before marker reached JSRegExp
1483  // object.
1484  FixedArray* data = FixedArray::cast(re->data());
1485  Object** slot =
1486  data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1487  heap->mark_compact_collector()->RecordSlot(slot, slot, code);
1488 
1489  // Set a number in the 0-255 range to guarantee no smi overflow.
1490  re->SetDataAt(JSRegExp::code_index(is_one_byte),
1491  Smi::FromInt(heap->sweep_generation() & 0xff));
1492  } else if (code->IsSmi()) {
1493  int value = Smi::cast(code)->value();
1494  // The regexp has not been compiled yet or there was a compilation error.
1495  if (value == JSRegExp::kUninitializedValue ||
1496           value == JSRegExp::kCompilationErrorValue) {
1497         return;
1498  }
1499 
1500  // Check if we should flush now.
1501  if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1502         re->SetDataAt(JSRegExp::code_index(is_one_byte),
1503                       Smi::FromInt(JSRegExp::kUninitializedValue));
1504         re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1505                       Smi::FromInt(JSRegExp::kUninitializedValue));
1506       }
1507  }
1508  }
1509 
1510 
1511  // Works by setting the current sweep_generation (as a smi) in the
1512  // code object place in the data array of the RegExp and keeps a copy
1513  // around that can be reinstated if we reuse the RegExp before flushing.
1514  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1515  // we flush the code.
1516  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1517  Heap* heap = map->GetHeap();
1518  MarkCompactCollector* collector = heap->mark_compact_collector();
1519  if (!collector->is_code_flushing_enabled()) {
1520  VisitJSRegExp(map, object);
1521  return;
1522  }
1523  JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1524  // Flush code or set age on both one byte and two byte code.
1525  UpdateRegExpCodeAgeAndFlush(heap, re, true);
1526  UpdateRegExpCodeAgeAndFlush(heap, re, false);
1527  // Visit the fields of the RegExp, including the updated FixedArray.
1528  VisitJSRegExp(map, object);
1529  }
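// Taken together, the aging scheme works like this: on every full GC the
// compiled code slot in the RegExp's data array is replaced by a small smi
// derived from heap->sweep_generation(), while the real Code object is parked
// in the saved-code slot. If kRegExpCodeThreshold collections pass without the
// RegExp being executed again, the saved copy is dropped as well and the
// RegExp reverts to the uninitialized state.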
1530 
1531   static VisitorDispatchTable<Callback> non_count_table_;
1532 };
1533 
1534 
1535 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1536     FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1537  FixedArraySubInstanceType dictionary_type) {
1538  Heap* heap = fixed_array->map()->GetHeap();
1539  if (fixed_array->map() != heap->fixed_cow_array_map() &&
1540  fixed_array->map() != heap->fixed_double_array_map() &&
1541  fixed_array != heap->empty_fixed_array()) {
1542  if (fixed_array->IsDictionary()) {
1543  heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
1544  } else {
1545  heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
1546  }
1547  }
1548 }
1549 
1550 
1551 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1552     StaticVisitorBase::VisitorId id, Map* map, HeapObject* obj) {
1553   Heap* heap = map->GetHeap();
1554  int object_size = obj->Size();
1555  heap->RecordObjectStats(map->instance_type(), object_size);
1556  non_count_table_.GetVisitorById(id)(map, obj);
1557  if (obj->IsJSObject()) {
1558  JSObject* object = JSObject::cast(obj);
1559  ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
1560  FAST_ELEMENTS_SUB_TYPE);
1561  ObjectStatsCountFixedArray(object->properties(),
1562  DICTIONARY_PROPERTIES_SUB_TYPE,
1563  FAST_PROPERTIES_SUB_TYPE);
1564  }
1565 }
1566 
1567 
1568 template <MarkCompactMarkingVisitor::VisitorId id>
1569 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
1570                                                               HeapObject* obj) {
1571  ObjectStatsVisitBase(id, map, obj);
1572 }
1573 
1574 
1575 template <>
1576 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1577     MarkCompactMarkingVisitor::kVisitMap> {
1578  public:
1579  static inline void Visit(Map* map, HeapObject* obj) {
1580  Heap* heap = map->GetHeap();
1581  Map* map_obj = Map::cast(obj);
1582  DCHECK(map->instance_type() == MAP_TYPE);
1583  DescriptorArray* array = map_obj->instance_descriptors();
1584  if (map_obj->owns_descriptors() &&
1585  array != heap->empty_descriptor_array()) {
1586  int fixed_array_size = array->Size();
1587  heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1588  fixed_array_size);
1589  }
1590  if (map_obj->HasTransitionArray()) {
1591  int fixed_array_size = map_obj->transitions()->Size();
1592  heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1593  fixed_array_size);
1594  }
1595  if (map_obj->has_code_cache()) {
1596  CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1597  heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1598  cache->default_cache()->Size());
1599  if (!cache->normal_type_cache()->IsUndefined()) {
1600         heap->RecordFixedArraySubTypeStats(
1601             MAP_CODE_CACHE_SUB_TYPE,
1602  FixedArray::cast(cache->normal_type_cache())->Size());
1603  }
1604  }
1605  ObjectStatsVisitBase(kVisitMap, map, obj);
1606  }
1607 };
1608 
1609 
1610 template <>
1611 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1612     MarkCompactMarkingVisitor::kVisitCode> {
1613  public:
1614  static inline void Visit(Map* map, HeapObject* obj) {
1615  Heap* heap = map->GetHeap();
1616  int object_size = obj->Size();
1617  DCHECK(map->instance_type() == CODE_TYPE);
1618  Code* code_obj = Code::cast(obj);
1619  heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1620  object_size);
1621  ObjectStatsVisitBase(kVisitCode, map, obj);
1622  }
1623 };
1624 
1625 
1626 template <>
1627 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1628     MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1629  public:
1630  static inline void Visit(Map* map, HeapObject* obj) {
1631  Heap* heap = map->GetHeap();
1632  SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1633  if (sfi->scope_info() != heap->empty_fixed_array()) {
1634       heap->RecordFixedArraySubTypeStats(
1635           SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
1636  }
1637  ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1638  }
1639 };
1640 
1641 
1642 template <>
1643 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1644     MarkCompactMarkingVisitor::kVisitFixedArray> {
1645  public:
1646  static inline void Visit(Map* map, HeapObject* obj) {
1647  Heap* heap = map->GetHeap();
1648  FixedArray* fixed_array = FixedArray::cast(obj);
1649  if (fixed_array == heap->string_table()) {
1650  heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
1651  fixed_array->Size());
1652  }
1653  ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1654  }
1655 };
1656 
1657 
1658 void MarkCompactMarkingVisitor::Initialize() {
1659   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1660 
1661  table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1662 
1663  if (FLAG_track_gc_object_stats) {
1664  // Copy the visitor table to make call-through possible.
1665  non_count_table_.CopyFrom(&table_);
1666 #define VISITOR_ID_COUNT_FUNCTION(id) \
1667  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1668     VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1669 #undef VISITOR_ID_COUNT_FUNCTION
1670  }
1671 }
1672 
1673 
1674 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1675     MarkCompactMarkingVisitor::non_count_table_;
1676 
1677 
1678 class CodeMarkingVisitor : public ThreadVisitor {
1679  public:
1680   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1681       : collector_(collector) {}
1682 
1683   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1684     collector_->PrepareThreadForCodeFlushing(isolate, top);
1685   }
1686 
1687  private:
1688   MarkCompactCollector* collector_;
1689 };
1690 
1691 
1692 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1693  public:
1694   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1695       : collector_(collector) {}
1696 
1697  void VisitPointers(Object** start, Object** end) {
1698  for (Object** p = start; p < end; p++) VisitPointer(p);
1699  }
1700 
1701  void VisitPointer(Object** slot) {
1702  Object* obj = *slot;
1703  if (obj->IsSharedFunctionInfo()) {
1704  SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1705  MarkBit shared_mark = Marking::MarkBitFrom(shared);
1706  MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1707  collector_->MarkObject(shared->code(), code_mark);
1708  collector_->MarkObject(shared, shared_mark);
1709  }
1710  }
1711 
1712  private:
1713   MarkCompactCollector* collector_;
1714 };
1715 
1716 
1717 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1718                                                         ThreadLocalTop* top) {
1719  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1720  // Note: for the frame that has a pending lazy deoptimization
1721  // StackFrame::unchecked_code will return a non-optimized code object for
1722  // the outermost function and StackFrame::LookupCode will return
1723  // actual optimized code object.
1724  StackFrame* frame = it.frame();
1725  Code* code = frame->unchecked_code();
1726  MarkBit code_mark = Marking::MarkBitFrom(code);
1727  MarkObject(code, code_mark);
1728  if (frame->is_optimized()) {
1729       MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1730                                                           frame->LookupCode());
1731  }
1732  }
1733 }
1734 
1735 
1736 void MarkCompactCollector::PrepareForCodeFlushing() {
1737   // Enable code flushing for non-incremental cycles.
1738  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1739     EnableCodeFlushing(true);
1740   }
1741 
1742  // If code flushing is disabled, there is no need to prepare for it.
1743  if (!is_code_flushing_enabled()) return;
1744 
1745  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1746  // relies on it being marked before any other descriptor array.
1747  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1748  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1749  MarkObject(descriptor_array, descriptor_array_mark);
1750 
1751  // Make sure we are not referencing the code from the stack.
1752  DCHECK(this == heap()->mark_compact_collector());
1753   PrepareThreadForCodeFlushing(heap()->isolate(),
1754                                heap()->isolate()->thread_local_top());
1755 
1756  // Iterate the archived stacks in all threads to check if
1757  // the code is referenced.
1758  CodeMarkingVisitor code_marking_visitor(this);
1759   heap()->isolate()->thread_manager()->IterateArchivedThreads(
1760       &code_marking_visitor);
1761 
1762  SharedFunctionInfoMarkingVisitor visitor(this);
1763  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1764  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1765 
1766   ProcessMarkingDeque();
1767 }
1768 
1769 
1770 // Visitor class for marking heap roots.
1771 class RootMarkingVisitor : public ObjectVisitor {
1772  public:
1773  explicit RootMarkingVisitor(Heap* heap)
1774  : collector_(heap->mark_compact_collector()) {}
1775 
1776   void VisitPointer(Object** p) { MarkObjectByPointer(p); }
1777 
1778  void VisitPointers(Object** start, Object** end) {
1779  for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1780  }
1781 
1782  // Skip the weak next code link in a code object, which is visited in
1783  // ProcessTopOptimizedFrame.
1784   void VisitNextCodeLink(Object** p) {}
1785 
1786  private:
1787   void MarkObjectByPointer(Object** p) {
1788     if (!(*p)->IsHeapObject()) return;
1789 
1790  // Replace flat cons strings in place.
1791  HeapObject* object = ShortCircuitConsString(p);
1792  MarkBit mark_bit = Marking::MarkBitFrom(object);
1793  if (mark_bit.Get()) return;
1794 
1795  Map* map = object->map();
1796  // Mark the object.
1797  collector_->SetMark(object, mark_bit);
1798 
1799  // Mark the map pointer and body, and push them on the marking stack.
1800  MarkBit map_mark = Marking::MarkBitFrom(map);
1801  collector_->MarkObject(map, map_mark);
1802  MarkCompactMarkingVisitor::IterateBody(map, object);
1803 
1804  // Mark all the objects reachable from the map and body. May leave
1805  // overflowed objects in the heap.
1806     collector_->EmptyMarkingDeque();
1807   }
1808 
1809   MarkCompactCollector* collector_;
1810 };
1811 
1812 
1813 // Helper class for pruning the string table.
1814 template <bool finalize_external_strings>
1815 class StringTableCleaner : public ObjectVisitor {
1816  public:
1817  explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1818 
1819  virtual void VisitPointers(Object** start, Object** end) {
1820  // Visit all HeapObject pointers in [start, end).
1821  for (Object** p = start; p < end; p++) {
1822  Object* o = *p;
1823  if (o->IsHeapObject() &&
1824  !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1825  if (finalize_external_strings) {
1826  DCHECK(o->IsExternalString());
1827  heap_->FinalizeExternalString(String::cast(*p));
1828  } else {
1829           pointers_removed_++;
1830         }
1831  // Set the entry to the_hole_value (as deleted).
1832  *p = heap_->the_hole_value();
1833  }
1834  }
1835  }
1836 
1837   int PointersRemoved() {
1838     DCHECK(!finalize_external_strings);
1839  return pointers_removed_;
1840  }
1841 
1842  private:
1843   Heap* heap_;
1844   int pointers_removed_;
1845 };
1846 
1847 
1848 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1849 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1850 
1851 
1852 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1853 // are retained.
1854 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1855  public:
1856  virtual Object* RetainAs(Object* object) {
1857  if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1858  return object;
1859  } else if (object->IsAllocationSite() &&
1860  !(AllocationSite::cast(object)->IsZombie())) {
1861  // "dead" AllocationSites need to live long enough for a traversal of new
1862  // space. These sites get a one-time reprieve.
1863  AllocationSite* site = AllocationSite::cast(object);
1864  site->MarkZombie();
1865       site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1866       return object;
1867  } else {
1868  return NULL;
1869  }
1870  }
1871 };
1872 
1873 
1874 // Fill the marking stack with overflowed objects returned by the given
1875 // iterator. Stop when the marking stack is filled or the end of the space
1876 // is reached, whichever comes first.
1877 template <class T>
1878 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1879                                             MarkingDeque* marking_deque,
1880  T* it) {
1881  // The caller should ensure that the marking stack is initially not full,
1882  // so that we don't waste effort pointlessly scanning for objects.
1883  DCHECK(!marking_deque->IsFull());
1884 
1885  Map* filler_map = heap->one_pointer_filler_map();
1886  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1887  MarkBit markbit = Marking::MarkBitFrom(object);
1888  if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1889  Marking::GreyToBlack(markbit);
1890  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1891  marking_deque->PushBlack(object);
1892  if (marking_deque->IsFull()) return;
1893  }
1894  }
1895 }
1896 
1897 
1898 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1899 
1900 
1901 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1902  MemoryChunk* p) {
1903  DCHECK(!marking_deque->IsFull());
1904  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1905  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1906  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1907  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1908 
1909  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1910  Address cell_base = it.CurrentCellBase();
1911  MarkBit::CellType* cell = it.CurrentCell();
1912 
1913  const MarkBit::CellType current_cell = *cell;
1914  if (current_cell == 0) continue;
1915 
1916  MarkBit::CellType grey_objects;
1917  if (it.HasNext()) {
1918  const MarkBit::CellType next_cell = *(cell + 1);
1919  grey_objects = current_cell & ((current_cell >> 1) |
1920  (next_cell << (Bitmap::kBitsPerCell - 1)));
1921  } else {
1922  grey_objects = current_cell & (current_cell >> 1);
1923  }
1924 
1925  int offset = 0;
1926  while (grey_objects != 0) {
1927  int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
1928  grey_objects >>= trailing_zeros;
1929  offset += trailing_zeros;
1930  MarkBit markbit(cell, 1 << offset, false);
1931  DCHECK(Marking::IsGrey(markbit));
1932  Marking::GreyToBlack(markbit);
1933  Address addr = cell_base + offset * kPointerSize;
1934  HeapObject* object = HeapObject::FromAddress(addr);
1935  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1936  marking_deque->PushBlack(object);
1937  if (marking_deque->IsFull()) return;
1938  offset += 2;
1939  grey_objects >>= 2;
1940  }
1941 
1942  grey_objects >>= (Bitmap::kBitsPerCell - 1);
1943  }
1944 }
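// Illustration (not part of the original source): with the bit patterns
// checked above, a grey object contributes the pair "11" to the bitmap, so
// ANDing a cell with itself shifted right by one leaves exactly one set bit
// at each grey object's start. For a hypothetical cell holding a black
// object at word offset 1 and a grey object at word offset 3:
//   current_cell        = 0b0011010
//   current_cell >> 1   = 0b0001101
//   grey_objects (AND)  = 0b0001000   // one bit, at the grey object's start
// The bits shifted in from next_cell cover a grey pair that straddles a cell
// boundary, and the loop advances offset by two because both bits of the
// "11" pair have been consumed.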
1945 
1946 
1947 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1948  NewSpace* new_space, NewSpacePage* p) {
1949  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1950  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1951  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1952  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1953 
1954  MarkBit::CellType* cells = p->markbits()->cells();
1955  int survivors_size = 0;
1956 
1957  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1958  Address cell_base = it.CurrentCellBase();
1959  MarkBit::CellType* cell = it.CurrentCell();
1960 
1961  MarkBit::CellType current_cell = *cell;
1962  if (current_cell == 0) continue;
1963 
1964  int offset = 0;
1965  while (current_cell != 0) {
1966  int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
1967  current_cell >>= trailing_zeros;
1968  offset += trailing_zeros;
1969  Address address = cell_base + offset * kPointerSize;
1970  HeapObject* object = HeapObject::FromAddress(address);
1971 
1972  int size = object->Size();
1973  survivors_size += size;
1974 
1976 
1977  offset++;
1978  current_cell >>= 1;
1979 
1980  // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1981  if (heap()->ShouldBePromoted(object->address(), size) &&
1982  TryPromoteObject(object, size)) {
1983  continue;
1984  }
1985 
1986  AllocationResult allocation = new_space->AllocateRaw(size);
1987  if (allocation.IsRetry()) {
1988  if (!new_space->AddFreshPage()) {
1989  // Shouldn't happen. We are sweeping linearly, and to-space
1990  // has the same number of pages as from-space, so there is
1991  // always room.
1992  UNREACHABLE();
1993  }
1994  allocation = new_space->AllocateRaw(size);
1995  DCHECK(!allocation.IsRetry());
1996  }
1997  Object* target = allocation.ToObjectChecked();
1998 
1999  MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
2001  }
2002  *cells = 0;
2003  }
2004  return survivors_size;
2005 }
2006 
2007 
2008 static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
2009  PagedSpace* space) {
2010  PageIterator it(space);
2011  while (it.has_next()) {
2012  Page* p = it.next();
2013  DiscoverGreyObjectsOnPage(marking_deque, p);
2014  if (marking_deque->IsFull()) return;
2015  }
2016 }
2017 
2018 
2019 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2020  MarkingDeque* marking_deque) {
2021  NewSpace* space = heap->new_space();
2022  NewSpacePageIterator it(space->bottom(), space->top());
2023  while (it.has_next()) {
2024  NewSpacePage* page = it.next();
2025  DiscoverGreyObjectsOnPage(marking_deque, page);
2026  if (marking_deque->IsFull()) return;
2027  }
2028 }
2029 
2030 
2032  Object* o = *p;
2033  if (!o->IsHeapObject()) return false;
2034  HeapObject* heap_object = HeapObject::cast(o);
2035  MarkBit mark = Marking::MarkBitFrom(heap_object);
2036  return !mark.Get();
2037 }
2038 
2039 
2040 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2041  Object** p) {
2042  Object* o = *p;
2043  DCHECK(o->IsHeapObject());
2044  HeapObject* heap_object = HeapObject::cast(o);
2045  MarkBit mark = Marking::MarkBitFrom(heap_object);
2046  return !mark.Get();
2047 }
2048 
2049 
2050 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2051  StringTable* string_table = heap()->string_table();
2052  // Mark the string table itself.
2053  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2054  if (!string_table_mark.Get()) {
2055  // String table could have already been marked by visiting the handles list.
2056  SetMark(string_table, string_table_mark);
2057  }
2058  // Explicitly mark the prefix.
2059  string_table->IteratePrefix(visitor);
2061 }
2062 
2063 
2064 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2065  MarkBit mark_bit = Marking::MarkBitFrom(site);
2066  SetMark(site, mark_bit);
2067 }
2068 
2069 
2070 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2071  // Mark the heap roots including global variables, stack variables,
2072  // etc., and all objects reachable from them.
2074 
2075  // Handle the string table specially.
2076  MarkStringTable(visitor);
2077 
2079 
2080  // There may be overflowed objects in the heap. Visit them now.
2081  while (marking_deque_.overflowed()) {
2084  }
2085 }
2086 
2087 
2089  List<ImplicitRefGroup*>* ref_groups =
2091 
2092  int last = 0;
2093  for (int i = 0; i < ref_groups->length(); i++) {
2094  ImplicitRefGroup* entry = ref_groups->at(i);
2095  DCHECK(entry != NULL);
2096 
2097  if (!IsMarked(*entry->parent)) {
2098  (*ref_groups)[last++] = entry;
2099  continue;
2100  }
2101 
2102  Object*** children = entry->children;
2103  // A parent object is marked, so mark all child heap objects.
2104  for (size_t j = 0; j < entry->length; ++j) {
2105  if ((*children[j])->IsHeapObject()) {
2106  HeapObject* child = HeapObject::cast(*children[j]);
2107  MarkBit mark = Marking::MarkBitFrom(child);
2108  MarkObject(child, mark);
2109  }
2110  }
2111 
2112  // Once the entire group has been marked, dispose it because it's
2113  // not needed anymore.
2114  delete entry;
2115  }
2116  ref_groups->Rewind(last);
2117 }
2118 
2119 
2121  HeapObject* weak_object_to_code_table =
2122  HeapObject::cast(heap()->weak_object_to_code_table());
2123  if (!IsMarked(weak_object_to_code_table)) {
2124  MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2125  SetMark(weak_object_to_code_table, mark);
2126  }
2127 }
2128 
2129 
2130 // Mark all objects reachable from the objects on the marking stack.
2131 // Before: the marking stack contains zero or more heap object pointers.
2132 // After: the marking stack is empty, and all objects reachable from the
2133 // marking stack have been marked, or are overflowed in the heap.
2135  while (!marking_deque_.IsEmpty()) {
2136  HeapObject* object = marking_deque_.Pop();
2137  DCHECK(object->IsHeapObject());
2138  DCHECK(heap()->Contains(object));
2139  DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2140 
2141  Map* map = object->map();
2142  MarkBit map_mark = Marking::MarkBitFrom(map);
2143  MarkObject(map, map_mark);
2144 
2145  MarkCompactMarkingVisitor::IterateBody(map, object);
2146  }
2147 }
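// Editorial sketch (not part of the original source): this loop is the
// standard tri-color transitive closure. Each popped object is already
// black; visiting its body via MarkCompactMarkingVisitor::IterateBody marks
// any unmarked (white) referents and pushes them, so they are conceptually
// grey until their own bodies have been visited. Marking is complete once
// the deque is empty and no overflowed objects remain in the heap.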
2148 
2149 
2150 // Sweep the heap for overflowed objects, clear their overflow bits, and
2151 // push them on the marking stack. Stop early if the marking stack fills
2152 // before sweeping completes. If sweeping completes, there are no remaining
2153 // overflowed objects in the heap so the overflow flag on the marking stack
2154 // is cleared.
2157 
2159  if (marking_deque_.IsFull()) return;
2160 
2162  heap()->old_pointer_space());
2163  if (marking_deque_.IsFull()) return;
2164 
2165  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
2166  if (marking_deque_.IsFull()) return;
2167 
2168  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
2169  if (marking_deque_.IsFull()) return;
2170 
2171  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
2172  if (marking_deque_.IsFull()) return;
2173 
2174  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
2175  if (marking_deque_.IsFull()) return;
2176 
2178  heap()->property_cell_space());
2179  if (marking_deque_.IsFull()) return;
2180 
2181  LargeObjectIterator lo_it(heap()->lo_space());
2183  if (marking_deque_.IsFull()) return;
2184 
2186 }
2187 
2188 
2189 // Mark all objects reachable (transitively) from objects on the marking
2190 // stack. Before: the marking stack contains zero or more heap object
2191 // pointers. After: the marking stack is empty and there are no overflowed
2192 // objects in the heap.
2195  while (marking_deque_.overflowed()) {
2198  }
2199 }
2200 
2201 
2202 // Mark all objects reachable (transitively) from objects on the marking
2203 // stack including references only considered in the atomic marking pause.
2205  bool work_to_do = true;
2207  while (work_to_do) {
2209  visitor, &IsUnmarkedHeapObjectWithHeap);
2212  work_to_do = !marking_deque_.IsEmpty();
2214  }
2215 }
2216 
2217 
2219  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2220  !it.done(); it.Advance()) {
2221  if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2222  return;
2223  }
2224  if (it.frame()->type() == StackFrame::OPTIMIZED) {
2225  Code* code = it.frame()->LookupCode();
2226  if (!code->CanDeoptAt(it.frame()->pc())) {
2227  code->CodeIterateBody(visitor);
2228  }
2230  return;
2231  }
2232  }
2233 }
2234 
2235 
2237  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2238  double start_time = 0.0;
2239  if (FLAG_print_cumulative_gc_stat) {
2240  start_time = base::OS::TimeCurrentMillis();
2241  }
2242  // The recursive GC marker detects when it is nearing stack overflow,
2243  // and switches to a different marking system. JS interrupts interfere
2244  // with the C stack limit check.
2245  PostponeInterruptsScope postpone(isolate());
2246 
2247  bool incremental_marking_overflowed = false;
2248  IncrementalMarking* incremental_marking = heap_->incremental_marking();
2250  // Finalize the incremental marking and check whether we had an overflow.
2251  // Both markers use grey color to mark overflowed objects so
2252  // the non-incremental marker can deal with them as if the overflow
2253  // occurred during normal marking.
2254  // But incremental marker uses a separate marking deque
2255  // so we have to explicitly copy its overflow state.
2256  incremental_marking->Finalize();
2257  incremental_marking_overflowed =
2258  incremental_marking->marking_deque()->overflowed();
2259  incremental_marking->marking_deque()->ClearOverflowed();
2260  } else {
2261  // Abort any pending incremental activities e.g. incremental sweeping.
2262  incremental_marking->Abort();
2263  }
2264 
2265 #ifdef DEBUG
2266  DCHECK(state_ == PREPARE_GC);
2267  state_ = MARK_LIVE_OBJECTS;
2268 #endif
2269  // The to space contains live objects; a page in from space is used as a
2270  // marking stack.
2271  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2272  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2273  if (FLAG_force_marking_deque_overflows) {
2274  marking_deque_end = marking_deque_start + 64 * kPointerSize;
2275  }
2276  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
2278 
2279  if (incremental_marking_overflowed) {
2280  // There are overflowed objects left in the heap after incremental marking.
2282  }
2283 
2285 
2287  // There is no write barrier on cells so we have to scan them now at the end
2288  // of the incremental marking.
2289  {
2290  HeapObjectIterator cell_iterator(heap()->cell_space());
2291  HeapObject* cell;
2292  while ((cell = cell_iterator.Next()) != NULL) {
2293  DCHECK(cell->IsCell());
2294  if (IsMarked(cell)) {
2295  int offset = Cell::kValueOffset;
2296  MarkCompactMarkingVisitor::VisitPointer(
2297  heap(), reinterpret_cast<Object**>(cell->address() + offset));
2298  }
2299  }
2300  }
2301  {
2302  HeapObjectIterator js_global_property_cell_iterator(
2303  heap()->property_cell_space());
2304  HeapObject* cell;
2305  while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2306  DCHECK(cell->IsPropertyCell());
2307  if (IsMarked(cell)) {
2308  MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2309  }
2310  }
2311  }
2312  }
2313 
2314  RootMarkingVisitor root_visitor(heap());
2315  MarkRoots(&root_visitor);
2316 
2317  ProcessTopOptimizedFrame(&root_visitor);
2318 
2319  // The objects reachable from the roots are marked, yet unreachable
2320  // objects are unmarked. Mark objects reachable due to host
2321  // application specific logic or through Harmony weak maps.
2322  ProcessEphemeralMarking(&root_visitor);
2323 
2324  // The objects reachable from the roots, weak maps or object groups
2325  // are marked, yet unreachable objects are unmarked. Mark objects
2326  // reachable only from weak global handles.
2327  //
2328  // First we identify nonlive weak handles and mark them as pending
2329  // destruction.
2332  // Then we mark the objects and process the transitive closure.
2333  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2334  while (marking_deque_.overflowed()) {
2337  }
2338 
2339  // Repeat host application specific and Harmony weak maps marking to
2340  // mark unmarked objects reachable from the weak roots.
2341  ProcessEphemeralMarking(&root_visitor);
2342 
2343  AfterMarking();
2344 
2345  if (FLAG_print_cumulative_gc_stat) {
2347  }
2348 }
2349 
2350 
2352  // Object literal map caches reference strings (cache keys) and maps
2353  // (cache values). At this point, still-useful maps have already been
2354  // marked. Mark the keys for the live values before we process the
2355  // string table.
2356  ProcessMapCaches();
2357 
2358  // Prune the string table removing all strings only pointed to by the
2359  // string table. Cannot use string_table() here because the string
2360  // table is marked.
2361  StringTable* string_table = heap()->string_table();
2362  InternalizedStringTableCleaner internalized_visitor(heap());
2363  string_table->IterateElements(&internalized_visitor);
2364  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2365 
2366  ExternalStringTableCleaner external_visitor(heap());
2367  heap()->external_string_table_.Iterate(&external_visitor);
2369 
2370  // Process the weak references.
2371  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2372  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2373 
2374  // Remove object groups after marking phase.
2377 
2378  // Flush code from collected candidates.
2379  if (is_code_flushing_enabled()) {
2381  // If incremental marker does not support code flushing, we need to
2382  // disable it before incremental marking steps for next cycle.
2383  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2384  EnableCodeFlushing(false);
2385  }
2386  }
2387 
2388  if (FLAG_track_gc_object_stats) {
2390  }
2391 }
2392 
2393 
2394 void MarkCompactCollector::ProcessMapCaches() {
2395  Object* raw_context = heap()->native_contexts_list();
2396  while (raw_context != heap()->undefined_value()) {
2397  Context* context = reinterpret_cast<Context*>(raw_context);
2398  if (IsMarked(context)) {
2399  HeapObject* raw_map_cache =
2400  HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2401  // A map cache may be reachable from the stack. In this case
2402  // it's already transitively marked and it's too late to clean
2403  // up its parts.
2404  if (!IsMarked(raw_map_cache) &&
2405  raw_map_cache != heap()->undefined_value()) {
2406  MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2407  int existing_elements = map_cache->NumberOfElements();
2408  int used_elements = 0;
2409  for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
2410  i += MapCache::kEntrySize) {
2411  Object* raw_key = map_cache->get(i);
2412  if (raw_key == heap()->undefined_value() ||
2413  raw_key == heap()->the_hole_value())
2414  continue;
2416  Object* raw_map = map_cache->get(i + 1);
2417  if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2418  ++used_elements;
2419  } else {
2420  // Delete useless entries with unmarked maps.
2421  DCHECK(raw_map->IsMap());
2422  map_cache->set_the_hole(i);
2423  map_cache->set_the_hole(i + 1);
2424  }
2425  }
2426  if (used_elements == 0) {
2427  context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2428  } else {
2429  // Note: we don't actually shrink the cache here to avoid
2430  // extra complexity during GC. We rely on subsequent cache
2431  // usages (EnsureCapacity) to do this.
2432  map_cache->ElementsRemoved(existing_elements - used_elements);
2433  MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2434  MarkObject(map_cache, map_cache_markbit);
2435  }
2436  }
2437  }
2438  // Move to next element in the list.
2439  raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2440  }
2442 }
2443 
2444 
2446  // Iterate over the map space, setting map transitions that go from
2447  // a marked map to an unmarked map to null transitions. This action
2448  // is carried out only on maps of JSObjects and related subtypes.
2449  HeapObjectIterator map_iterator(heap()->map_space());
2450  for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2451  obj = map_iterator.Next()) {
2452  Map* map = Map::cast(obj);
2453 
2454  if (!map->CanTransition()) continue;
2455 
2456  MarkBit map_mark = Marking::MarkBitFrom(map);
2458  ClearNonLiveMapTransitions(map, map_mark);
2459 
2460  if (map_mark.Get()) {
2461  ClearNonLiveDependentCode(map->dependent_code());
2462  } else {
2463  ClearDependentCode(map->dependent_code());
2464  map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2465  }
2466  }
2467 
2468  // Iterate over property cell space, removing dependent code that is not
2469  // otherwise kept alive by strong references.
2470  HeapObjectIterator cell_iterator(heap_->property_cell_space());
2471  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
2472  cell = cell_iterator.Next()) {
2473  if (IsMarked(cell)) {
2474  ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2475  }
2476  }
2477 
2478  // Iterate over allocation sites, removing dependent code that is not
2479  // otherwise kept alive by strong references.
2480  Object* undefined = heap()->undefined_value();
2481  for (Object* site = heap()->allocation_sites_list(); site != undefined;
2482  site = AllocationSite::cast(site)->weak_next()) {
2483  if (IsMarked(site)) {
2484  ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2485  }
2486  }
2487 
2488  if (heap_->weak_object_to_code_table()->IsHashTable()) {
2489  WeakHashTable* table =
2490  WeakHashTable::cast(heap_->weak_object_to_code_table());
2491  uint32_t capacity = table->Capacity();
2492  for (uint32_t i = 0; i < capacity; i++) {
2493  uint32_t key_index = table->EntryToIndex(i);
2494  Object* key = table->get(key_index);
2495  if (!table->IsKey(key)) continue;
2496  uint32_t value_index = table->EntryToValueIndex(i);
2497  Object* value = table->get(value_index);
2498  if (key->IsCell() && !IsMarked(key)) {
2499  Cell* cell = Cell::cast(key);
2500  Object* object = cell->value();
2501  if (IsMarked(object)) {
2502  MarkBit mark = Marking::MarkBitFrom(cell);
2503  SetMark(cell, mark);
2504  Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2505  RecordSlot(value_slot, value_slot, *value_slot);
2506  }
2507  }
2508  if (IsMarked(key)) {
2509  if (!IsMarked(value)) {
2510  HeapObject* obj = HeapObject::cast(value);
2511  MarkBit mark = Marking::MarkBitFrom(obj);
2512  SetMark(obj, mark);
2513  }
2514  ClearNonLiveDependentCode(DependentCode::cast(value));
2515  } else {
2516  ClearDependentCode(DependentCode::cast(value));
2517  table->set(key_index, heap_->the_hole_value());
2518  table->set(value_index, heap_->the_hole_value());
2519  table->ElementRemoved();
2520  }
2521  }
2522  }
2523 }
2524 
2525 
2527  int number_of_transitions = map->NumberOfProtoTransitions();
2528  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2529 
2530  int new_number_of_transitions = 0;
2531  const int header = Map::kProtoTransitionHeaderSize;
2532  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2533  const int map_offset = header + Map::kProtoTransitionMapOffset;
2534  const int step = Map::kProtoTransitionElementsPerEntry;
2535  for (int i = 0; i < number_of_transitions; i++) {
2536  Object* prototype = prototype_transitions->get(proto_offset + i * step);
2537  Object* cached_map = prototype_transitions->get(map_offset + i * step);
2538  if (IsMarked(prototype) && IsMarked(cached_map)) {
2539  DCHECK(!prototype->IsUndefined());
2540  int proto_index = proto_offset + new_number_of_transitions * step;
2541  int map_index = map_offset + new_number_of_transitions * step;
2542  if (new_number_of_transitions != i) {
2543  prototype_transitions->set(proto_index, prototype,
2545  prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
2546  }
2547  Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2548  RecordSlot(slot, slot, prototype);
2549  new_number_of_transitions++;
2550  }
2551  }
2552 
2553  if (new_number_of_transitions != number_of_transitions) {
2554  map->SetNumberOfProtoTransitions(new_number_of_transitions);
2555  }
2556 
2557  // Fill slots that became free with undefined value.
2558  for (int i = new_number_of_transitions * step;
2559  i < number_of_transitions * step; i++) {
2560  prototype_transitions->set_undefined(header + i);
2561  }
2562 }
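// Worked example (illustration only): if a map caches three prototype
// transitions and only entries 0 and 2 have both their prototype and their
// cached map marked, the loop above copies entry 2's (prototype, map) pair
// down into slot 1, the transition count is lowered from 3 to 2, and the
// freed tail slots are overwritten with undefined so that no stale pointers
// survive this GC.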
2563 
2564 
2565 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2566  MarkBit map_mark) {
2567  Object* potential_parent = map->GetBackPointer();
2568  if (!potential_parent->IsMap()) return;
2569  Map* parent = Map::cast(potential_parent);
2570 
2571  // Follow back pointer, check whether we are dealing with a map transition
2572  // from a live map to a dead path and, if so, clear the parent's transitions.
2573  bool current_is_alive = map_mark.Get();
2574  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2575  if (!current_is_alive && parent_is_alive) {
2576  ClearMapTransitions(parent);
2577  }
2578 }
2579 
2580 
2581 // Clear a possible back pointer in case the transition leads to a dead map.
2582 // Returns true if a back pointer has been cleared and false otherwise.
2583 bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
2584  if (Marking::MarkBitFrom(target).Get()) return false;
2585  target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
2586  return true;
2587 }
2588 
2589 
2590 void MarkCompactCollector::ClearMapTransitions(Map* map) {
2591  // If there are no transitions to be cleared, return.
2592  // TODO(verwaest) Should be an assert, otherwise back pointers are not
2593  // properly cleared.
2594  if (!map->HasTransitionArray()) return;
2595 
2596  TransitionArray* t = map->transitions();
2597 
2598  int transition_index = 0;
2599 
2600  DescriptorArray* descriptors = map->instance_descriptors();
2601  bool descriptors_owner_died = false;
2602 
2603  // Compact all live transitions to the left.
2604  for (int i = 0; i < t->number_of_transitions(); ++i) {
2605  Map* target = t->GetTarget(i);
2606  if (ClearMapBackPointer(target)) {
2607  if (target->instance_descriptors() == descriptors) {
2608  descriptors_owner_died = true;
2609  }
2610  } else {
2611  if (i != transition_index) {
2612  Name* key = t->GetKey(i);
2613  t->SetKey(transition_index, key);
2614  Object** key_slot = t->GetKeySlot(transition_index);
2615  RecordSlot(key_slot, key_slot, key);
2616  // Target slots do not need to be recorded since maps are not compacted.
2617  t->SetTarget(transition_index, t->GetTarget(i));
2618  }
2619  transition_index++;
2620  }
2621  }
2622 
2623  // If there are no transitions to be cleared, return.
2624  // TODO(verwaest) Should be an assert, otherwise back pointers are not
2625  // properly cleared.
2626  if (transition_index == t->number_of_transitions()) return;
2627 
2628  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2629 
2630  if (descriptors_owner_died) {
2631  if (number_of_own_descriptors > 0) {
2632  TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
2633  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2634  map->set_owns_descriptors(true);
2635  } else {
2636  DCHECK(descriptors == heap_->empty_descriptor_array());
2637  }
2638  }
2639 
2640  // Note that we never eliminate a transition array, though we might right-trim
2641  // such that number_of_transitions() == 0. If this assumption changes,
2642  // TransitionArray::CopyInsert() will need to deal with the case that a
2643  // transition array disappeared during GC.
2644  int trim = t->number_of_transitions() - transition_index;
2645  if (trim > 0) {
2647  t, t->IsSimpleTransition() ? trim
2649  }
2650  DCHECK(map->HasTransitionArray());
2651 }
2652 
2653 
2655  DescriptorArray* descriptors,
2656  int number_of_own_descriptors) {
2657  int number_of_descriptors = descriptors->number_of_descriptors_storage();
2658  int to_trim = number_of_descriptors - number_of_own_descriptors;
2659  if (to_trim == 0) return;
2660 
2662  descriptors, to_trim * DescriptorArray::kDescriptorSize);
2663  descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2664 
2665  if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2666  descriptors->Sort();
2667 }
2668 
2669 
2671  DescriptorArray* descriptors) {
2672  int live_enum = map->EnumLength();
2673  if (live_enum == kInvalidEnumCacheSentinel) {
2674  live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
2675  }
2676  if (live_enum == 0) return descriptors->ClearEnumCache();
2677 
2678  FixedArray* enum_cache = descriptors->GetEnumCache();
2679 
2680  int to_trim = enum_cache->length() - live_enum;
2681  if (to_trim <= 0) return;
2683  to_trim);
2684 
2685  if (!descriptors->HasEnumIndicesCache()) return;
2686  FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2687  heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
2688 }
2689 
2690 
2692  Object* current = head;
2693  Object* undefined = heap()->undefined_value();
2694  while (current != undefined) {
2695  Code* code = Code::cast(current);
2696  if (IsMarked(code)) {
2697  DCHECK(code->is_weak_stub());
2698  IC::InvalidateMaps(code);
2699  }
2700  current = code->next_code_link();
2701  code->set_next_code_link(undefined);
2702  }
2703 }
2704 
2705 
2707  DisallowHeapAllocation no_allocation;
2708  DependentCode::GroupStartIndexes starts(entries);
2709  int number_of_entries = starts.number_of_entries();
2710  if (number_of_entries == 0) return;
2712  if (starts.at(g) != starts.at(g + 1)) {
2713  int i = starts.at(g);
2714  DCHECK(i + 1 == starts.at(g + 1));
2715  Object* head = entries->object_at(i);
2716  ClearDependentICList(head);
2717  }
2719  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2720  // If the entry is compilation info then the map must be alive,
2721  // and ClearDependentCode shouldn't be called.
2722  DCHECK(entries->is_code_at(i));
2723  Code* code = entries->code_at(i);
2724  if (IsMarked(code) && !code->marked_for_deoptimization()) {
2726  code, static_cast<DependentCode::DependencyGroup>(g));
2727  code->InvalidateEmbeddedObjects();
2728  have_code_to_deoptimize_ = true;
2729  }
2730  }
2731  for (int i = 0; i < number_of_entries; i++) {
2732  entries->clear_at(i);
2733  }
2734 }
2735 
2736 
2738  DependentCode* entries, int group, int start, int end, int new_start) {
2739  int survived = 0;
2740  if (group == DependentCode::kWeakICGroup) {
2741  // Dependent weak IC stubs form a linked list and only the head is stored
2742  // in the dependent code array.
2743  if (start != end) {
2744  DCHECK(start + 1 == end);
2745  Object* old_head = entries->object_at(start);
2747  Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
2748  entries->set_object_at(new_start, head);
2749  Object** slot = entries->slot_at(new_start);
2750  RecordSlot(slot, slot, head);
2751  // We do not compact this group even if the head is undefined, since
2752  // more dependent ICs are likely to be added later.
2753  survived = 1;
2754  }
2755  } else {
2756  for (int i = start; i < end; i++) {
2757  Object* obj = entries->object_at(i);
2758  DCHECK(obj->IsCode() || IsMarked(obj));
2759  if (IsMarked(obj) &&
2760  (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2761  if (new_start + survived != i) {
2762  entries->set_object_at(new_start + survived, obj);
2763  }
2764  Object** slot = entries->slot_at(new_start + survived);
2765  RecordSlot(slot, slot, obj);
2766  survived++;
2767  }
2768  }
2769  }
2770  entries->set_number_of_entries(
2771  static_cast<DependentCode::DependencyGroup>(group), survived);
2772  return survived;
2773 }
2774 
2775 
2777  DisallowHeapAllocation no_allocation;
2778  DependentCode::GroupStartIndexes starts(entries);
2779  int number_of_entries = starts.number_of_entries();
2780  if (number_of_entries == 0) return;
2781  int new_number_of_entries = 0;
2782  // Go through all groups, remove dead codes and compact.
2783  for (int g = 0; g < DependentCode::kGroupCount; g++) {
2784  int survived = ClearNonLiveDependentCodeInGroup(
2785  entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
2786  new_number_of_entries += survived;
2787  }
2788  for (int i = new_number_of_entries; i < number_of_entries; i++) {
2789  entries->clear_at(i);
2790  }
2791 }
2792 
2793 
2795  GCTracer::Scope gc_scope(heap()->tracer(),
2797  Object* weak_collection_obj = heap()->encountered_weak_collections();
2798  while (weak_collection_obj != Smi::FromInt(0)) {
2799  JSWeakCollection* weak_collection =
2800  reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2801  DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2802  if (weak_collection->table()->IsHashTable()) {
2803  ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2804  Object** anchor = reinterpret_cast<Object**>(table->address());
2805  for (int i = 0; i < table->Capacity(); i++) {
2806  if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2807  Object** key_slot =
2809  RecordSlot(anchor, key_slot, *key_slot);
2810  Object** value_slot =
2812  MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
2813  value_slot);
2814  }
2815  }
2816  }
2817  weak_collection_obj = weak_collection->next();
2818  }
2819 }
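// Editorial note (not part of the original source): the weak collection
// passes implement ephemeron-style semantics for JSWeakCollections. The
// routine above marks the value of every entry whose key is already marked,
// which may discover new objects to mark; after marking is complete, the
// routine below walks the same list and removes every entry whose key never
// became reachable, while the last routine merely unlinks the encountered
// list without touching the tables.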
2820 
2821 
2823  GCTracer::Scope gc_scope(heap()->tracer(),
2825  Object* weak_collection_obj = heap()->encountered_weak_collections();
2826  while (weak_collection_obj != Smi::FromInt(0)) {
2827  JSWeakCollection* weak_collection =
2828  reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2829  DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2830  if (weak_collection->table()->IsHashTable()) {
2831  ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2832  for (int i = 0; i < table->Capacity(); i++) {
2833  HeapObject* key = HeapObject::cast(table->KeyAt(i));
2834  if (!MarkCompactCollector::IsMarked(key)) {
2835  table->RemoveEntry(i);
2836  }
2837  }
2838  }
2839  weak_collection_obj = weak_collection->next();
2840  weak_collection->set_next(heap()->undefined_value());
2841  }
2843 }
2844 
2845 
2847  GCTracer::Scope gc_scope(heap()->tracer(),
2849  Object* weak_collection_obj = heap()->encountered_weak_collections();
2850  while (weak_collection_obj != Smi::FromInt(0)) {
2851  JSWeakCollection* weak_collection =
2852  reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2853  weak_collection_obj = weak_collection->next();
2854  weak_collection->set_next(heap()->undefined_value());
2855  }
2857 }
2858 
2859 
2861  if (heap_->InNewSpace(value)) {
2862  heap_->store_buffer()->Mark(slot);
2863  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2865  reinterpret_cast<Object**>(slot),
2867  }
2868 }
2869 
2870 
2871 // We scavenge new space simultaneously with sweeping. This is done in two
2872 // passes.
2873 //
2874 // The first pass migrates all alive objects from one semispace to another or
2875 // promotes them to old space. The forwarding address is written directly
2876 // into the first word of the object without any encoding. If the object is
2877 // dead we write NULL as the forwarding address.
2878 //
2879 // The second pass updates pointers to new space in all spaces. It is possible
2880 // to encounter pointers to dead new space objects during traversal of pointers
2881 // to new space. We should clear them to avoid encountering them during the next
2882 // pointer iteration. This is an issue if the store buffer overflows and we
2883 // have to scan the entire old space, including dead objects, looking for
2884 // pointers to new space.
2886  int size, AllocationSpace dest) {
2887  Address dst_addr = dst->address();
2888  Address src_addr = src->address();
2889  DCHECK(heap()->AllowedToBeMigrated(src, dest));
2891  if (dest == OLD_POINTER_SPACE) {
2892  Address src_slot = src_addr;
2893  Address dst_slot = dst_addr;
2895 
2896  for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2897  Object* value = Memory::Object_at(src_slot);
2898 
2899  Memory::Object_at(dst_slot) = value;
2900 
2901  if (!src->MayContainRawValues()) {
2902  RecordMigratedSlot(value, dst_slot);
2903  }
2904 
2905  src_slot += kPointerSize;
2906  dst_slot += kPointerSize;
2907  }
2908 
2909  if (compacting_ && dst->IsJSFunction()) {
2910  Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2911  Address code_entry = Memory::Address_at(code_entry_slot);
2912 
2913  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2915  SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2917  }
2918  } else if (dst->IsConstantPoolArray()) {
2919  // We special-case ConstantPoolArrays since they could contain integer
2920  // value entries which look like tagged pointers.
2921  // TODO(mstarzinger): restructure this code to avoid this special-casing.
2922  ConstantPoolArray* array = ConstantPoolArray::cast(dst);
2923  ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
2924  while (!code_iter.is_finished()) {
2925  Address code_entry_slot =
2926  dst_addr + array->OffsetOfElementAt(code_iter.next_index());
2927  Address code_entry = Memory::Address_at(code_entry_slot);
2928 
2929  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2931  SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2933  }
2934  }
2935  ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
2936  while (!heap_iter.is_finished()) {
2937  Address heap_slot =
2938  dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
2939  Object* value = Memory::Object_at(heap_slot);
2940  RecordMigratedSlot(value, heap_slot);
2941  }
2942  }
2943  } else if (dest == CODE_SPACE) {
2944  PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2945  heap()->MoveBlock(dst_addr, src_addr, size);
2949  Code::cast(dst)->Relocate(dst_addr - src_addr);
2950  } else {
2951  DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2952  heap()->MoveBlock(dst_addr, src_addr, size);
2953  }
2954  heap()->OnMoveEvent(dst, src, size);
2955  Memory::Address_at(src_addr) = dst_addr;
2956 }
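// Sketch (not part of the original source) of the forwarding handshake set
// up by the final store above: once
//   Memory::Address_at(src_addr) = dst_addr;
// has been executed, the first word of the dead copy acts as a forwarding
// pointer, so a later pass can repair any slot that still refers to the old
// location, e.g.
//   MapWord map_word = heap_obj->map_word();
//   if (map_word.IsForwardingAddress()) *slot = map_word.ToForwardingAddress();
// which is exactly what PointersUpdatingVisitor::UpdateSlot below does.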
2957 
2958 
2959 // Visitor for updating pointers from live objects in old spaces to new space.
2960 // It does not expect to encounter pointers to dead objects.
2962  public:
2963  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2964 
2966 
2967  void VisitPointers(Object** start, Object** end) {
2968  for (Object** p = start; p < end; p++) UpdatePointer(p);
2969  }
2970 
2973  Object* target = rinfo->target_object();
2974  Object* old_target = target;
2975  VisitPointer(&target);
2976  // Avoid unnecessary changes that might unnecessarily flush the instruction
2977  // cache.
2978  if (target != old_target) {
2979  rinfo->set_target_object(target);
2980  }
2981  }
2982 
2985  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2986  Object* old_target = target;
2987  VisitPointer(&target);
2988  if (target != old_target) {
2989  rinfo->set_target_address(Code::cast(target)->instruction_start());
2990  }
2991  }
2992 
2995  Object* stub = rinfo->code_age_stub();
2996  DCHECK(stub != NULL);
2997  VisitPointer(&stub);
2998  if (stub != rinfo->code_age_stub()) {
2999  rinfo->set_code_age_stub(Code::cast(stub));
3000  }
3001  }
3002 
3004  DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
3005  rinfo->IsPatchedReturnSequence()) ||
3006  (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
3007  rinfo->IsPatchedDebugBreakSlotSequence()));
3008  Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
3009  VisitPointer(&target);
3010  rinfo->set_call_address(Code::cast(target)->instruction_start());
3011  }
3012 
3013  static inline void UpdateSlot(Heap* heap, Object** slot) {
3014  Object* obj = *slot;
3015 
3016  if (!obj->IsHeapObject()) return;
3017 
3018  HeapObject* heap_obj = HeapObject::cast(obj);
3019 
3020  MapWord map_word = heap_obj->map_word();
3021  if (map_word.IsForwardingAddress()) {
3022  DCHECK(heap->InFromSpace(heap_obj) ||
3023  MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
3024  HeapObject* target = map_word.ToForwardingAddress();
3025  *slot = target;
3026  DCHECK(!heap->InFromSpace(target) &&
3027  !MarkCompactCollector::IsOnEvacuationCandidate(target));
3028  }
3029  }
3030 
3031  private:
3032  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
3033 
3035 };
3036 
3037 
3038 static void UpdatePointer(HeapObject** address, HeapObject* object) {
3039  Address new_addr = Memory::Address_at(object->address());
3040 
3041  // The new space sweep will overwrite the map word of dead objects
3042  // with NULL. In this case we do not need to transfer this entry to
3043  // the store buffer which we are rebuilding.
3044  // We perform the pointer update with a no barrier compare-and-swap. The
3045  // compare and swap may fail in the case where the pointer update tries to
3046  // update garbage memory which was concurrently accessed by the sweeper.
3047  if (new_addr != NULL) {
3049  reinterpret_cast<base::AtomicWord*>(address),
3050  reinterpret_cast<base::AtomicWord>(object),
3051  reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
3052  }
3053 }
3054 
3055 
3057  Object** p) {
3058  MapWord map_word = HeapObject::cast(*p)->map_word();
3059 
3060  if (map_word.IsForwardingAddress()) {
3061  return String::cast(map_word.ToForwardingAddress());
3062  }
3063 
3064  return String::cast(*p);
3065 }
3066 
3067 
3069  int object_size) {
3070  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3071 
3072  OldSpace* target_space = heap()->TargetSpace(object);
3073 
3074  DCHECK(target_space == heap()->old_pointer_space() ||
3075  target_space == heap()->old_data_space());
3076  HeapObject* target;
3077  AllocationResult allocation = target_space->AllocateRaw(object_size);
3078  if (allocation.To(&target)) {
3079  MigrateObject(target, object, object_size, target_space->identity());
3080  heap()->IncrementPromotedObjectsSize(object_size);
3081  return true;
3082  }
3083 
3084  return false;
3085 }
3086 
3087 
3089  // There are soft limits in the allocation code, designed to trigger a mark-
3090  // sweep collection by failing allocations. But since we are already in
3091  // a mark-sweep allocation, there is no sense in trying to trigger one.
3092  AlwaysAllocateScope scope(isolate());
3093 
3094  NewSpace* new_space = heap()->new_space();
3095 
3096  // Store allocation range before flipping semispaces.
3097  Address from_bottom = new_space->bottom();
3098  Address from_top = new_space->top();
3099 
3100  // Flip the semispaces. After flipping, to space is empty, from space has
3101  // live objects.
3102  new_space->Flip();
3103  new_space->ResetAllocationInfo();
3104 
3105  int survivors_size = 0;
3106 
3107  // First pass: traverse all objects in inactive semispace, remove marks,
3108  // migrate live objects and write forwarding addresses. This stage puts
3109  // new entries in the store buffer and may cause some pages to be marked
3110  // scan-on-scavenge.
3111  NewSpacePageIterator it(from_bottom, from_top);
3112  while (it.has_next()) {
3113  NewSpacePage* p = it.next();
3114  survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
3115  }
3116 
3117  heap_->IncrementYoungSurvivorsCounter(survivors_size);
3118  new_space->set_age_mark(new_space->top());
3119 }
3120 
3121 
3123  AlwaysAllocateScope always_allocate(isolate());
3124  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3125  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3126  p->SetWasSwept();
3127 
3128  int offsets[16];
3129 
3130  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3131  Address cell_base = it.CurrentCellBase();
3132  MarkBit::CellType* cell = it.CurrentCell();
3133 
3134  if (*cell == 0) continue;
3135 
3136  int live_objects = MarkWordToObjectStarts(*cell, offsets);
3137  for (int i = 0; i < live_objects; i++) {
3138  Address object_addr = cell_base + offsets[i] * kPointerSize;
3139  HeapObject* object = HeapObject::FromAddress(object_addr);
3140  DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3141 
3142  int size = object->Size();
3143 
3144  HeapObject* target_object;
3145  AllocationResult allocation = space->AllocateRaw(size);
3146  if (!allocation.To(&target_object)) {
3147  // If allocation failed, use emergency memory and re-try allocation.
3148  CHECK(space->HasEmergencyMemory());
3149  space->UseEmergencyMemory();
3150  allocation = space->AllocateRaw(size);
3151  }
3152  if (!allocation.To(&target_object)) {
3153  // OS refused to give us memory.
3154  V8::FatalProcessOutOfMemory("Evacuation");
3155  return;
3156  }
3157 
3158  MigrateObject(target_object, object, size, space->identity());
3159  DCHECK(object->map_word().IsForwardingAddress());
3160  }
3161 
3162  // Clear marking bits for current cell.
3163  *cell = 0;
3164  }
3165  p->ResetLiveBytes();
3166 }
3167 
3168 
3170  int npages = evacuation_candidates_.length();
3171  for (int i = 0; i < npages; i++) {
3175  DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3177  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3178  // Allocate emergency memory for the case when compaction fails due to
3179  // running out of memory.
3180  if (!space->HasEmergencyMemory()) {
3181  space->CreateEmergencyMemory();
3182  }
3183  if (p->IsEvacuationCandidate()) {
3184  // During compaction we might have to request a new page. Check that we
3185  // have an emergency page and the space still has room for that.
3186  if (space->HasEmergencyMemory() && space->CanExpand()) {
3188  } else {
3189  // Without room for expansion evacuation is not guaranteed to succeed.
3190  // Pessimistically abandon unevacuated pages.
3191  for (int j = i; j < npages; j++) {
3192  Page* page = evacuation_candidates_[j];
3194  page->ClearEvacuationCandidate();
3196  }
3197  break;
3198  }
3199  }
3200  }
3201  if (npages > 0) {
3202  // Release emergency memory.
3203  PagedSpaces spaces(heap());
3204  for (PagedSpace* space = spaces.next(); space != NULL;
3205  space = spaces.next()) {
3206  if (space->HasEmergencyMemory()) {
3207  space->FreeEmergencyMemory();
3208  }
3209  }
3210  }
3211 }
3212 
3213 
3215  public:
3216  virtual Object* RetainAs(Object* object) {
3217  if (object->IsHeapObject()) {
3218  HeapObject* heap_object = HeapObject::cast(object);
3219  MapWord map_word = heap_object->map_word();
3220  if (map_word.IsForwardingAddress()) {
3221  return map_word.ToForwardingAddress();
3222  }
3223  }
3224  return object;
3225  }
3226 };
3227 
3228 
3229 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
3230  SlotsBuffer::SlotType slot_type, Address addr) {
3231  switch (slot_type) {
3233  RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3234  rinfo.Visit(isolate, v);
3235  break;
3236  }
3238  v->VisitCodeEntry(addr);
3239  break;
3240  }
3242  HeapObject* obj = HeapObject::FromAddress(addr);
3243  Code::cast(obj)->CodeIterateBody(v);
3244  break;
3245  }
3247  RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3248  if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3249  break;
3250  }
3252  RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3253  if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3254  break;
3255  }
3257  RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3258  rinfo.Visit(isolate, v);
3259  break;
3260  }
3261  default:
3262  UNREACHABLE();
3263  break;
3264  }
3265 }
3266 
3267 
3268 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3269 
3270 
3271 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3272 
3273 
3274 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3275 
3276 
3277 template <MarkCompactCollector::SweepingParallelism mode>
3278 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3279  int size) {
3280  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3281  DCHECK(free_list == NULL);
3282  return space->Free(start, size);
3283  } else {
3284  // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3285  return size - free_list->Free(start, size);
3286  }
3287 }
3288 
3289 
3290 // Sweeps a page. After sweeping the page can be iterated.
3291 // Slots in live objects pointing into evacuation candidates are updated
3292 // if requested.
3293 // Returns the size of the biggest contiguous freed memory chunk in bytes.
3294 template <SweepingMode sweeping_mode,
3295  MarkCompactCollector::SweepingParallelism parallelism,
3296  SkipListRebuildingMode skip_list_mode,
3297  FreeSpaceTreatmentMode free_space_mode>
3298 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3299  ObjectVisitor* v) {
3300  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3301  DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3302  space->identity() == CODE_SPACE);
3303  DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3305  sweeping_mode == SWEEP_ONLY);
3306 
3307  Address free_start = p->area_start();
3308  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3309  int offsets[16];
3310 
3311  SkipList* skip_list = p->skip_list();
3312  int curr_region = -1;
3313  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3314  skip_list->Clear();
3315  }
3316 
3317  intptr_t freed_bytes = 0;
3318  intptr_t max_freed_bytes = 0;
3319 
3320  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3321  Address cell_base = it.CurrentCellBase();
3322  MarkBit::CellType* cell = it.CurrentCell();
3323  int live_objects = MarkWordToObjectStarts(*cell, offsets);
3324  int live_index = 0;
3325  for (; live_objects != 0; live_objects--) {
3326  Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3327  if (free_end != free_start) {
3328  int size = static_cast<int>(free_end - free_start);
3329  if (free_space_mode == ZAP_FREE_SPACE) {
3330  memset(free_start, 0xcc, size);
3331  }
3332  freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3333  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3334 #ifdef ENABLE_GDB_JIT_INTERFACE
3335  if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3336  GDBJITInterface::RemoveCodeRange(free_start, free_end);
3337  }
3338 #endif
3339  }
3340  HeapObject* live_object = HeapObject::FromAddress(free_end);
3341  DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3342  Map* map = live_object->map();
3343  int size = live_object->SizeFromMap(map);
3344  if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3345  live_object->IterateBody(map->instance_type(), size, v);
3346  }
3347  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3348  int new_region_start = SkipList::RegionNumber(free_end);
3349  int new_region_end =
3351  if (new_region_start != curr_region || new_region_end != curr_region) {
3352  skip_list->AddObject(free_end, size);
3353  curr_region = new_region_end;
3354  }
3355  }
3356  free_start = free_end + size;
3357  }
3358  // Clear marking bits for current cell.
3359  *cell = 0;
3360  }
3361  if (free_start != p->area_end()) {
3362  int size = static_cast<int>(p->area_end() - free_start);
3363  if (free_space_mode == ZAP_FREE_SPACE) {
3364  memset(free_start, 0xcc, size);
3365  }
3366  freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3367  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3368 #ifdef ENABLE_GDB_JIT_INTERFACE
3369  if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3370  GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3371  }
3372 #endif
3373  }
3374  p->ResetLiveBytes();
3375 
3376  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3377  // When concurrent sweeping is active, the page will be marked after
3378  // sweeping by the main thread.
3380  } else {
3381  p->SetWasSwept();
3382  }
3383  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3384 }
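// Usage sketch (illustration; the exact template arguments of the elided
// call sites are assumed): the callers below select the sweeping flavor at
// compile time, e.g. a main-thread sweep of a code-space page that also
// updates pointers and zaps freed memory would be instantiated roughly as
//   Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, MarkCompactCollector::SWEEP_ON_MAIN_THREAD,
//         REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, &updating_visitor);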
3385 
3386 
3387 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3388  Page* p = Page::FromAddress(code->address());
3389 
3391  return false;
3392  }
3393 
3394  Address code_start = code->address();
3395  Address code_end = code_start + code->Size();
3396 
3397  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3398  uint32_t end_index =
3400 
3401  Bitmap* b = p->markbits();
3402 
3403  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3404  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3405 
3406  MarkBit::CellType* start_cell = start_mark_bit.cell();
3407  MarkBit::CellType* end_cell = end_mark_bit.cell();
3408 
3409  if (value) {
3410  MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3411  MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3412 
3413  if (start_cell == end_cell) {
3414  *start_cell |= start_mask & end_mask;
3415  } else {
3416  *start_cell |= start_mask;
3417  for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3418  *cell = ~0;
3419  }
3420  *end_cell |= end_mask;
3421  }
3422  } else {
3423  for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
3424  *cell = 0;
3425  }
3426  }
3427 
3428  return true;
3429 }
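// Worked example (illustration only): if the first mark bit of the code
// object is bit 3 of its cell, start_mark_bit.mask() == 0b1000 and
// ~(mask - 1) == ...11111000, i.e. every bit from the start bit upwards.
// If the last mark bit is bit 5, (mask << 1) - 1 == 0b111111, i.e. every bit
// up to and including the end bit. When both ends fall in the same cell the
// two masks are ANDed to select exactly the object's range; otherwise the
// cells strictly in between are filled with ~0 wholesale.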
3430 
3431 
3433  // We did not record any slots in large objects thus
3434  // we can safely go to the page from the slot address.
3435  Page* p = Page::FromAddress(addr);
3436 
3437  // First check owner's identity because old pointer and old data spaces
3438  // are swept lazily and might still have non-zero mark-bits on some
3439  // pages.
3440  if (p->owner()->identity() != CODE_SPACE) return false;
3441 
3442  // In code space, only mark-bits on evacuation candidates (though we don't
3443  // record any slots on them) and under invalidated code objects are non-zero.
3444  MarkBit mark_bit =
3446 
3447  return mark_bit.Get();
3448 }
3449 
3450 
3453  !ShouldSkipEvacuationSlotRecording(code)) {
3455 
3456  // If the object is white then no slots were recorded on it yet.
3457  MarkBit mark_bit = Marking::MarkBitFrom(code);
3458  if (Marking::IsWhite(mark_bit)) return;
3459 
3460  invalidated_code_.Add(code);
3461  }
3462 }
3463 
3464 
3465 // Return true if the given code is deoptimized or will be deoptimized.
3467  return code->is_optimized_code() && code->marked_for_deoptimization();
3468 }
3469 
3470 
3472  bool code_marked = false;
3473 
3474  int length = invalidated_code_.length();
3475  for (int i = 0; i < length; i++) {
3476  Code* code = invalidated_code_[i];
3477 
3478  if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3479  code_marked = true;
3480  }
3481  }
3482 
3483  return code_marked;
3484 }
3485 
3486 
3488  int length = invalidated_code_.length();
3489  for (int i = 0; i < length; i++) {
3491  }
3492 }
3493 
3494 
3496  int length = invalidated_code_.length();
3497  for (int i = 0; i < length; i++) {
3498  Code* code = invalidated_code_[i];
3499  if (code != NULL) {
3500  code->Iterate(visitor);
3501  SetMarkBitsUnderInvalidatedCode(code, false);
3502  }
3503  }
3504  invalidated_code_.Rewind(0);
3505 }
3506 
3507 
3509  Heap::RelocationLock relocation_lock(heap());
3510 
3511  bool code_slots_filtering_required;
3512  {
3513  GCTracer::Scope gc_scope(heap()->tracer(),
3515  code_slots_filtering_required = MarkInvalidatedCode();
3516  EvacuateNewSpace();
3517  }
3518 
3519  {
3520  GCTracer::Scope gc_scope(heap()->tracer(),
3522  EvacuatePages();
3523  }
3524 
3525  // Second pass: find pointers to new space and update them.
3526  PointersUpdatingVisitor updating_visitor(heap());
3527 
3528  {
3529  GCTracer::Scope gc_scope(heap()->tracer(),
3531  // Update pointers in to space.
3532  SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3533  heap()->new_space()->top());
3534  for (HeapObject* object = to_it.Next(); object != NULL;
3535  object = to_it.Next()) {
3536  Map* map = object->map();
3537  object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3538  &updating_visitor);
3539  }
3540  }
3541 
3542  {
3543  GCTracer::Scope gc_scope(heap()->tracer(),
3545  // Update roots.
3546  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3547  }
3548 
3549  {
3550  GCTracer::Scope gc_scope(heap()->tracer(),
3555  &UpdatePointer);
3556  }
3557 
3558  {
3559  GCTracer::Scope gc_scope(heap()->tracer(),
3562  code_slots_filtering_required);
3563  if (FLAG_trace_fragmentation) {
3564  PrintF(" migration slots buffer: %d\n",
3566  }
3567 
3569  // It's difficult to filter out slots recorded for large objects.
3571  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3572  // LargeObjectSpace is not swept yet thus we have to skip
3573  // dead objects explicitly.
3574  if (!IsMarked(obj)) continue;
3575 
3576  Page* p = Page::FromAddress(obj->address());
3578  obj->Iterate(&updating_visitor);
3580  }
3581  }
3582  }
3583  }
3584 
3585  int npages = evacuation_candidates_.length();
3586  {
3587  GCTracer::Scope gc_scope(
3588  heap()->tracer(),
3590  for (int i = 0; i < npages; i++) {
3594 
3595  if (p->IsEvacuationCandidate()) {
3597  code_slots_filtering_required);
3598  if (FLAG_trace_fragmentation) {
3599  PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3601  }
3602 
3603  // Important: the skip list should be cleared only after roots were updated
3604  // because root iteration traverses the stack and might have to find
3605  // code objects from a non-updated pc pointing into an evacuation candidate.
3606  SkipList* list = p->skip_list();
3607  if (list != NULL) list->Clear();
3608  } else {
3609  if (FLAG_gc_verbose) {
3610  PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3611  reinterpret_cast<intptr_t>(p));
3612  }
3613  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3615 
3616  switch (space->identity()) {
3617  case OLD_DATA_SPACE:
3620  &updating_visitor);
3621  break;
3622  case OLD_POINTER_SPACE:
3625  &updating_visitor);
3626  break;
3627  case CODE_SPACE:
3628  if (FLAG_zap_code_space) {
3631  &updating_visitor);
3632  } else {
3635  &updating_visitor);
3636  }
3637  break;
3638  default:
3639  UNREACHABLE();
3640  break;
3641  }
3642  }
3643  }
3644  }
3645 
3646  GCTracer::Scope gc_scope(heap()->tracer(),
3648 
3649  // Update pointers from cells.
3650  HeapObjectIterator cell_iterator(heap_->cell_space());
3651  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
3652  cell = cell_iterator.Next()) {
3653  if (cell->IsCell()) {
3654  Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3655  }
3656  }
3657 
3658  HeapObjectIterator js_global_property_cell_iterator(
3660  for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
3661  cell = js_global_property_cell_iterator.Next()) {
3662  if (cell->IsPropertyCell()) {
3663  PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3664  }
3665  }
3666 
3667  heap_->string_table()->Iterate(&updating_visitor);
3669  if (heap_->weak_object_to_code_table()->IsHashTable()) {
3670  WeakHashTable* table =
3671  WeakHashTable::cast(heap_->weak_object_to_code_table());
3672  table->Iterate(&updating_visitor);
3673  table->Rehash(heap_->isolate()->factory()->undefined_value());
3674  }
3675 
3676  // Update pointers from external string table.
3679 
3680  EvacuationWeakObjectRetainer evacuation_object_retainer;
3681  heap()->ProcessWeakReferences(&evacuation_object_retainer);
3682 
3683  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3684  // under it.
3685  ProcessInvalidatedCode(&updating_visitor);
3686 
3688 
3691 }
3692 
3693 
3695  int npages = evacuation_candidates_.length();
3696  for (int i = 0; i < npages; i++) {
3698  if (!p->IsEvacuationCandidate()) continue;
3699  p->Unlink();
3700  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3701  p->InsertAfter(space->LastPage());
3702  }
3703 }
3704 
3705 
3707  int npages = evacuation_candidates_.length();
3708  for (int i = 0; i < npages; i++) {
3710  if (!p->IsEvacuationCandidate()) continue;
3711  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3712  space->Free(p->area_start(), p->area_size());
3713  p->set_scan_on_scavenge(false);
3715  p->ResetLiveBytes();
3716  space->ReleasePage(p);
3717  }
3718  evacuation_candidates_.Rewind(0);
3719  compacting_ = false;
3720  heap()->FreeQueuedChunks();
3721 }
3722 
3723 
3724 static const int kStartTableEntriesPerLine = 5;
3725 static const int kStartTableLines = 171;
3726 static const int kStartTableInvalidLine = 127;
3727 static const int kStartTableUnusedEntry = 126;
3728 
3729 #define _ kStartTableUnusedEntry
3730 #define X kStartTableInvalidLine
3731 // Mark-bit to object start offset table.
3732 //
3733 // The line is indexed by the mark bits in a byte. The first number on the
3734 // line is the number of live object starts for that line, and the other
3735 // numbers on the line are the offsets (in words) of those object starts.
3736 //
3737 // Since objects are at least 2 words long, there are no entries for two
3738 // consecutive 1 bits. All byte values above 170 have at least 2 consecutive
3739 // set bits, so the table ends at line 170.
3740 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3741  0, _, _,
3742  _, _, // 0
3743  1, 0, _,
3744  _, _, // 1
3745  1, 1, _,
3746  _, _, // 2
3747  X, _, _,
3748  _, _, // 3
3749  1, 2, _,
3750  _, _, // 4
3751  2, 0, 2,
3752  _, _, // 5
3753  X, _, _,
3754  _, _, // 6
3755  X, _, _,
3756  _, _, // 7
3757  1, 3, _,
3758  _, _, // 8
3759  2, 0, 3,
3760  _, _, // 9
3761  2, 1, 3,
3762  _, _, // 10
3763  X, _, _,
3764  _, _, // 11
3765  X, _, _,
3766  _, _, // 12
3767  X, _, _,
3768  _, _, // 13
3769  X, _, _,
3770  _, _, // 14
3771  X, _, _,
3772  _, _, // 15
3773  1, 4, _,
3774  _, _, // 16
3775  2, 0, 4,
3776  _, _, // 17
3777  2, 1, 4,
3778  _, _, // 18
3779  X, _, _,
3780  _, _, // 19
3781  2, 2, 4,
3782  _, _, // 20
3783  3, 0, 2,
3784  4, _, // 21
3785  X, _, _,
3786  _, _, // 22
3787  X, _, _,
3788  _, _, // 23
3789  X, _, _,
3790  _, _, // 24
3791  X, _, _,
3792  _, _, // 25
3793  X, _, _,
3794  _, _, // 26
3795  X, _, _,
3796  _, _, // 27
3797  X, _, _,
3798  _, _, // 28
3799  X, _, _,
3800  _, _, // 29
3801  X, _, _,
3802  _, _, // 30
3803  X, _, _,
3804  _, _, // 31
3805  1, 5, _,
3806  _, _, // 32
3807  2, 0, 5,
3808  _, _, // 33
3809  2, 1, 5,
3810  _, _, // 34
3811  X, _, _,
3812  _, _, // 35
3813  2, 2, 5,
3814  _, _, // 36
3815  3, 0, 2,
3816  5, _, // 37
3817  X, _, _,
3818  _, _, // 38
3819  X, _, _,
3820  _, _, // 39
3821  2, 3, 5,
3822  _, _, // 40
3823  3, 0, 3,
3824  5, _, // 41
3825  3, 1, 3,
3826  5, _, // 42
3827  X, _, _,
3828  _, _, // 43
3829  X, _, _,
3830  _, _, // 44
3831  X, _, _,
3832  _, _, // 45
3833  X, _, _,
3834  _, _, // 46
3835  X, _, _,
3836  _, _, // 47
3837  X, _, _,
3838  _, _, // 48
3839  X, _, _,
3840  _, _, // 49
3841  X, _, _,
3842  _, _, // 50
3843  X, _, _,
3844  _, _, // 51
3845  X, _, _,
3846  _, _, // 52
3847  X, _, _,
3848  _, _, // 53
3849  X, _, _,
3850  _, _, // 54
3851  X, _, _,
3852  _, _, // 55
3853  X, _, _,
3854  _, _, // 56
3855  X, _, _,
3856  _, _, // 57
3857  X, _, _,
3858  _, _, // 58
3859  X, _, _,
3860  _, _, // 59
3861  X, _, _,
3862  _, _, // 60
3863  X, _, _,
3864  _, _, // 61
3865  X, _, _,
3866  _, _, // 62
3867  X, _, _,
3868  _, _, // 63
3869  1, 6, _,
3870  _, _, // 64
3871  2, 0, 6,
3872  _, _, // 65
3873  2, 1, 6,
3874  _, _, // 66
3875  X, _, _,
3876  _, _, // 67
3877  2, 2, 6,
3878  _, _, // 68
3879  3, 0, 2,
3880  6, _, // 69
3881  X, _, _,
3882  _, _, // 70
3883  X, _, _,
3884  _, _, // 71
3885  2, 3, 6,
3886  _, _, // 72
3887  3, 0, 3,
3888  6, _, // 73
3889  3, 1, 3,
3890  6, _, // 74
3891  X, _, _,
3892  _, _, // 75
3893  X, _, _,
3894  _, _, // 76
3895  X, _, _,
3896  _, _, // 77
3897  X, _, _,
3898  _, _, // 78
3899  X, _, _,
3900  _, _, // 79
3901  2, 4, 6,
3902  _, _, // 80
3903  3, 0, 4,
3904  6, _, // 81
3905  3, 1, 4,
3906  6, _, // 82
3907  X, _, _,
3908  _, _, // 83
3909  3, 2, 4,
3910  6, _, // 84
3911  4, 0, 2,
3912  4, 6, // 85
3913  X, _, _,
3914  _, _, // 86
3915  X, _, _,
3916  _, _, // 87
3917  X, _, _,
3918  _, _, // 88
3919  X, _, _,
3920  _, _, // 89
3921  X, _, _,
3922  _, _, // 90
3923  X, _, _,
3924  _, _, // 91
3925  X, _, _,
3926  _, _, // 92
3927  X, _, _,
3928  _, _, // 93
3929  X, _, _,
3930  _, _, // 94
3931  X, _, _,
3932  _, _, // 95
3933  X, _, _,
3934  _, _, // 96
3935  X, _, _,
3936  _, _, // 97
3937  X, _, _,
3938  _, _, // 98
3939  X, _, _,
3940  _, _, // 99
3941  X, _, _,
3942  _, _, // 100
3943  X, _, _,
3944  _, _, // 101
3945  X, _, _,
3946  _, _, // 102
3947  X, _, _,
3948  _, _, // 103
3949  X, _, _,
3950  _, _, // 104
3951  X, _, _,
3952  _, _, // 105
3953  X, _, _,
3954  _, _, // 106
3955  X, _, _,
3956  _, _, // 107
3957  X, _, _,
3958  _, _, // 108
3959  X, _, _,
3960  _, _, // 109
3961  X, _, _,
3962  _, _, // 110
3963  X, _, _,
3964  _, _, // 111
3965  X, _, _,
3966  _, _, // 112
3967  X, _, _,
3968  _, _, // 113
3969  X, _, _,
3970  _, _, // 114
3971  X, _, _,
3972  _, _, // 115
3973  X, _, _,
3974  _, _, // 116
3975  X, _, _,
3976  _, _, // 117
3977  X, _, _,
3978  _, _, // 118
3979  X, _, _,
3980  _, _, // 119
3981  X, _, _,
3982  _, _, // 120
3983  X, _, _,
3984  _, _, // 121
3985  X, _, _,
3986  _, _, // 122
3987  X, _, _,
3988  _, _, // 123
3989  X, _, _,
3990  _, _, // 124
3991  X, _, _,
3992  _, _, // 125
3993  X, _, _,
3994  _, _, // 126
3995  X, _, _,
3996  _, _, // 127
3997  1, 7, _,
3998  _, _, // 128
3999  2, 0, 7,
4000  _, _, // 129
4001  2, 1, 7,
4002  _, _, // 130
4003  X, _, _,
4004  _, _, // 131
4005  2, 2, 7,
4006  _, _, // 132
4007  3, 0, 2,
4008  7, _, // 133
4009  X, _, _,
4010  _, _, // 134
4011  X, _, _,
4012  _, _, // 135
4013  2, 3, 7,
4014  _, _, // 136
4015  3, 0, 3,
4016  7, _, // 137
4017  3, 1, 3,
4018  7, _, // 138
4019  X, _, _,
4020  _, _, // 139
4021  X, _, _,
4022  _, _, // 140
4023  X, _, _,
4024  _, _, // 141
4025  X, _, _,
4026  _, _, // 142
4027  X, _, _,
4028  _, _, // 143
4029  2, 4, 7,
4030  _, _, // 144
4031  3, 0, 4,
4032  7, _, // 145
4033  3, 1, 4,
4034  7, _, // 146
4035  X, _, _,
4036  _, _, // 147
4037  3, 2, 4,
4038  7, _, // 148
4039  4, 0, 2,
4040  4, 7, // 149
4041  X, _, _,
4042  _, _, // 150
4043  X, _, _,
4044  _, _, // 151
4045  X, _, _,
4046  _, _, // 152
4047  X, _, _,
4048  _, _, // 153
4049  X, _, _,
4050  _, _, // 154
4051  X, _, _,
4052  _, _, // 155
4053  X, _, _,
4054  _, _, // 156
4055  X, _, _,
4056  _, _, // 157
4057  X, _, _,
4058  _, _, // 158
4059  X, _, _,
4060  _, _, // 159
4061  2, 5, 7,
4062  _, _, // 160
4063  3, 0, 5,
4064  7, _, // 161
4065  3, 1, 5,
4066  7, _, // 162
4067  X, _, _,
4068  _, _, // 163
4069  3, 2, 5,
4070  7, _, // 164
4071  4, 0, 2,
4072  5, 7, // 165
4073  X, _, _,
4074  _, _, // 166
4075  X, _, _,
4076  _, _, // 167
4077  3, 3, 5,
4078  7, _, // 168
4079  4, 0, 3,
4080  5, 7, // 169
4081  4, 1, 3,
4082  5, 7 // 170
4083 };
4084 #undef _
4085 #undef X
4086 
4087 
4088 // Takes a word of mark bits. Returns the number of objects that start in the
4089 // range. Puts the offsets of the words in the supplied array.
4090 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
4091  int objects = 0;
4092  int offset = 0;
4093 
4094  // No consecutive 1 bits.
4095  DCHECK((mark_bits & 0x180) != 0x180);
4096  DCHECK((mark_bits & 0x18000) != 0x18000);
4097  DCHECK((mark_bits & 0x1800000) != 0x1800000);
4098 
4099  while (mark_bits != 0) {
4100  int byte = (mark_bits & 0xff);
4101  mark_bits >>= 8;
4102  if (byte != 0) {
4103  DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
4104  char* table = kStartTable + byte * kStartTableEntriesPerLine;
4105  int objects_in_these_8_words = table[0];
4106  DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
4107  DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
4108  for (int i = 0; i < objects_in_these_8_words; i++) {
4109  starts[objects++] = offset + table[1 + i];
4110  }
4111  }
4112  offset += 8;
4113  }
4114  return objects;
4115 }
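
A worked illustration of how the table above is consumed (a standalone sketch, not V8 code): decoding one byte of mark bits the slow way, by scanning its set bits, yields exactly the offsets a table line stores. For byte 37 (binary 00100101) the table line reads "3, 0, 2, 5", i.e. three object starts at word offsets 0, 2 and 5.

// Standalone sketch: the slow equivalent of one kStartTable line lookup.
// Valid inputs never have two adjacent set bits (objects are >= 2 words).
#include <cassert>
#include <cstdint>

static int DecodeByteSlow(uint8_t mark_byte, int* starts) {
  int count = 0;
  for (int bit = 0; bit < 8; ++bit) {
    if (mark_byte & (1u << bit)) starts[count++] = bit;  // an object starts at this word
  }
  return count;
}

int main() {
  int starts[8];
  // Byte 37 == 0b00100101: bits 0, 2 and 5 are set, so three objects start
  // at word offsets 0, 2 and 5 -- matching table line 37 above.
  int n = DecodeByteSlow(37, starts);
  assert(n == 3 && starts[0] == 0 && starts[1] == 2 && starts[2] == 5);
  return 0;
}
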
4116 
4117 
4118 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4119  int required_freed_bytes) {
4120  int max_freed = 0;
4121  int max_freed_overall = 0;
4122  PageIterator it(space);
4123  while (it.has_next()) {
4124  Page* p = it.next();
4125  max_freed = SweepInParallel(p, space);
4126  DCHECK(max_freed >= 0);
4127  if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4128  return max_freed;
4129  }
4130  max_freed_overall = Max(max_freed, max_freed_overall);
4131  if (p == space->end_of_unswept_pages()) break;
4132  }
4133  return max_freed_overall;
4134 }
4135 
4136 
4137 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
4138  int max_freed = 0;
4139  if (page->TryParallelSweeping()) {
4140  FreeList* free_list = space == heap()->old_pointer_space()
4141  ? free_list_old_pointer_space_.get()
4142  : free_list_old_data_space_.get();
4143  FreeList private_free_list(space);
4145  IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
4146  free_list->Concatenate(&private_free_list);
4147  }
4148  return max_freed;
4149 }
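
SweepInParallel above sweeps each page into a page-local FreeList and merges the result into the space's shared free list with a single Concatenate call, so sweeper threads do not contend on every freed block. A minimal standalone sketch of that pattern, with invented types standing in for FreeList and the sweeper (not V8 API):

#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlock { void* start; std::size_t size; };

class SharedFreeList {
 public:
  // One lock acquisition per swept page instead of one per freed block.
  void Concatenate(std::vector<FreeBlock>* private_list) {
    std::lock_guard<std::mutex> guard(mutex_);
    blocks_.insert(blocks_.end(), private_list->begin(), private_list->end());
    private_list->clear();
  }

 private:
  std::mutex mutex_;
  std::vector<FreeBlock> blocks_;
};

// Hypothetical sweeper task: collect dead ranges into a thread-private list,
// then publish them to the shared list in one step.
void SweepPageInParallel(SharedFreeList* shared,
                         const std::vector<FreeBlock>& dead_ranges) {
  std::vector<FreeBlock> private_list(dead_ranges);
  shared->Concatenate(&private_list);
}
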
4150 
4151 
4152 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4153  space->ClearStats();
4154 
4155  // We defensively initialize end_of_unswept_pages_ here with the first page
4156  // of the pages list.
4157  space->set_end_of_unswept_pages(space->FirstPage());
4158 
4159  PageIterator it(space);
4160 
4161  int pages_swept = 0;
4162  bool unused_page_present = false;
4163  bool parallel_sweeping_active = false;
4164 
4165  while (it.has_next()) {
4166  Page* p = it.next();
4168 
4169  // Clear sweeping flags indicating that marking bits are still intact.
4170  p->ClearWasSwept();
4171 
4173  p->IsEvacuationCandidate()) {
4174  // Will be processed in EvacuateNewSpaceAndCandidates.
4175  DCHECK(evacuation_candidates_.length() > 0);
4176  continue;
4177  }
4178 
4179  // One unused page is kept; any further unused pages are released instead of being swept.
4180  if (p->LiveBytes() == 0) {
4181  if (unused_page_present) {
4182  if (FLAG_gc_verbose) {
4183  PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4184  reinterpret_cast<intptr_t>(p));
4185  }
4186  // Adjust unswept free bytes because releasing a page expects said
4187  // counter to be accurate for unswept pages.
4188  space->IncreaseUnsweptFreeBytes(p);
4189  space->ReleasePage(p);
4190  continue;
4191  }
4192  unused_page_present = true;
4193  }
4194 
4195  switch (sweeper) {
4196  case CONCURRENT_SWEEPING:
4197  case PARALLEL_SWEEPING:
4198  if (!parallel_sweeping_active) {
4199  if (FLAG_gc_verbose) {
4200  PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
4201  reinterpret_cast<intptr_t>(p));
4202  }
4205  pages_swept++;
4206  parallel_sweeping_active = true;
4207  } else {
4208  if (FLAG_gc_verbose) {
4209  PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
4210  reinterpret_cast<intptr_t>(p));
4211  }
4213  space->IncreaseUnsweptFreeBytes(p);
4214  }
4215  space->set_end_of_unswept_pages(p);
4216  break;
4217  case SEQUENTIAL_SWEEPING: {
4218  if (FLAG_gc_verbose) {
4219  PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
4220  }
4221  if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4223  ZAP_FREE_SPACE>(space, NULL, p, NULL);
4224  } else if (space->identity() == CODE_SPACE) {
4227  } else {
4230  }
4231  pages_swept++;
4232  break;
4233  }
4234  default: { UNREACHABLE(); }
4235  }
4236  }
4237 
4238  if (FLAG_gc_verbose) {
4239  PrintF("SweepSpace: %s (%d pages swept)\n",
4240  AllocationSpaceName(space->identity()), pages_swept);
4241  }
4242 
4243  // Give pages that are queued to be freed back to the OS.
4244  heap()->FreeQueuedChunks();
4245 }
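
The LiveBytes() == 0 branch above keeps the first completely empty page it encounters as allocation slack and releases every later empty page instead of sweeping it. A minimal standalone sketch of that policy, using a hypothetical FakePage type (not V8 API):

#include <vector>

struct FakePage {
  int live_bytes = 0;
  bool released = false;
};

// Keep exactly one empty page as allocation slack; give the rest back.
void ReleaseAllButOneEmptyPage(std::vector<FakePage>* pages) {
  bool unused_page_present = false;
  for (FakePage& p : *pages) {
    if (p.live_bytes != 0) continue;  // pages with live objects are swept normally
    if (unused_page_present) {
      p.released = true;              // every further empty page is released
    } else {
      unused_page_present = true;     // the first empty page is kept
    }
  }
}
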
4246 
4247 
4248 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
4249  return type == MarkCompactCollector::PARALLEL_SWEEPING ||
4250  type == MarkCompactCollector::CONCURRENT_SWEEPING;
4251 }
4252 
4253 
4257 }
4258 
4259 
4260 void MarkCompactCollector::SweepSpaces() {
4261  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4262  double start_time = 0.0;
4263  if (FLAG_print_cumulative_gc_stat) {
4264  start_time = base::OS::TimeCurrentMillis();
4265  }
4266 
4267 #ifdef DEBUG
4268  state_ = SWEEP_SPACES;
4269 #endif
4270  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
4271  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
4272  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;
4273 
4275 
4276  // Noncompacting collections simply sweep the spaces to clear the mark
4277  // bits and free the nonlive blocks (for old and map spaces). We sweep
4278  // the map space last because freeing non-live maps overwrites them and
4279  // the other spaces rely on possibly non-live maps to get the sizes for
4280  // non-live objects.
4281  {
4282  GCTracer::Scope sweep_scope(heap()->tracer(),
4284  {
4285  SequentialSweepingScope scope(this);
4286  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4287  SweepSpace(heap()->old_data_space(), how_to_sweep);
4288  }
4289 
4290  if (ShouldStartSweeperThreads(how_to_sweep)) {
4292  }
4293 
4294  if (ShouldWaitForSweeperThreads(how_to_sweep)) {
4296  }
4297  }
4299 
4300  {
4301  GCTracer::Scope sweep_scope(heap()->tracer(),
4303  SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
4304  }
4305 
4306  {
4307  GCTracer::Scope sweep_scope(heap()->tracer(),
4309  SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
4310  SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
4311  }
4312 
4314 
4315  // ClearNonLiveTransitions depends on precise sweeping of the map space to
4316  // detect whether an unmarked map became dead in this collection or in one
4317  // of the previous ones.
4318  {
4319  GCTracer::Scope sweep_scope(heap()->tracer(),
4321  SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
4322  }
4323 
4324  // Deallocate unmarked objects and clear marked bits for marked objects.
4326 
4327  // Deallocate evacuated candidate pages.
4329 
4330  if (FLAG_print_cumulative_gc_stat) {
4332  start_time);
4333  }
4334 }
4335 
4336 
4337 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4338  PageIterator it(space);
4339  while (it.has_next()) {
4340  Page* p = it.next();
4343  p->SetWasSwept();
4344  }
4346  }
4347 }
4348 
4349 
4351  ParallelSweepSpaceComplete(heap()->old_pointer_space());
4352  ParallelSweepSpaceComplete(heap()->old_data_space());
4353 }
4354 
4355 
4357  if (isolate()->debug()->is_loaded() ||
4358  isolate()->debug()->has_break_points()) {
4359  enable = false;
4360  }
4361 
4362  if (enable) {
4363  if (code_flusher_ != NULL) return;
4365  } else {
4366  if (code_flusher_ == NULL) return;
4368  delete code_flusher_;
4369  code_flusher_ = NULL;
4370  }
4371 
4372  if (FLAG_trace_code_flushing) {
4373  PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4374  }
4375 }
4376 
4377 
4378 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4379 // Our profiling tools do not expect intersections between
4380 // code objects. We should either reenable it or change our tools.
4381 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4382  Isolate* isolate) {
4383  if (obj->IsCode()) {
4384  PROFILE(isolate, CodeDeleteEvent(obj->address()));
4385  }
4386 }
4387 
4388 
4390 
4391 
4395 }
4396 
4397 
4398 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4399  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4400 }
4401 
4402 
4403 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4404  SlotsBuffer** buffer_address, SlotType type,
4405  Address addr, AdditionMode mode) {
4406  SlotsBuffer* buffer = *buffer_address;
4407  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4408  if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4409  allocator->DeallocateChain(buffer_address);
4410  return false;
4411  }
4412  buffer = allocator->AllocateBuffer(buffer);
4413  *buffer_address = buffer;
4414  }
4415  DCHECK(buffer->HasSpaceForTypedSlot());
4416  buffer->Add(reinterpret_cast<ObjectSlot>(type));
4417  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4418  return true;
4419 }
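
AddTo above appends a typed slot as two consecutive buffer entries: the SlotType value, then the address. Because slot-type values are small integers that can never be valid pointers, IsTypedSlot can distinguish a type tag from an ordinary object slot when the buffer is replayed (see UpdateSlots below). A standalone sketch of that encoding with invented names (not the V8 types):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

enum SketchSlotType : std::uintptr_t {
  SKETCH_CODE_TARGET_SLOT,
  SKETCH_EMBEDDED_OBJECT_SLOT,
  SKETCH_NUMBER_OF_SLOT_TYPES
};

using Entry = std::uintptr_t;  // either a type tag or an address

static bool IsTypedEntry(Entry e) { return e < SKETCH_NUMBER_OF_SLOT_TYPES; }

int main() {
  std::vector<Entry> buffer;
  const std::uintptr_t some_address = 0x100000;  // hypothetical slot address

  buffer.push_back(SKETCH_CODE_TARGET_SLOT);  // tag entry
  buffer.push_back(some_address);             // paired address entry

  for (std::size_t i = 0; i < buffer.size(); ++i) {
    if (IsTypedEntry(buffer[i])) {
      SketchSlotType type = static_cast<SketchSlotType>(buffer[i]);
      Entry addr = buffer[++i];  // consume the address that follows the tag
      assert(type == SKETCH_CODE_TARGET_SLOT && addr == some_address);
    }
  }
  return 0;
}
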
4420 
4421 
4423  if (RelocInfo::IsCodeTarget(rmode)) {
4425  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4427  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4429  } else if (RelocInfo::IsJSReturn(rmode)) {
4431  }
4432  UNREACHABLE();
4434 }
4435 
4436 
4437 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4438  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4439  RelocInfo::Mode rmode = rinfo->rmode();
4440  if (target_page->IsEvacuationCandidate() &&
4441  (rinfo->host() == NULL ||
4442  !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4443  bool success;
4444  if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4445  // This doesn't need to be typed since it is just a normal heap pointer.
4446  Object** target_pointer =
4447  reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4448  success = SlotsBuffer::AddTo(
4450  target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
4451  } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4452  success = SlotsBuffer::AddTo(
4454  SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
4456  } else {
4457  success = SlotsBuffer::AddTo(
4460  }
4461  if (!success) {
4462  EvictEvacuationCandidate(target_page);
4463  }
4464  }
4465 }
4466 
4467 
4468 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4469  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4470  if (target_page->IsEvacuationCandidate() &&
4471  !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4473  target_page->slots_buffer_address(),
4476  EvictEvacuationCandidate(target_page);
4477  }
4478  }
4479 }
4480 
4481 
4482 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4483  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4484  if (is_compacting()) {
4485  Code* host =
4487  pc);
4488  MarkBit mark_bit = Marking::MarkBitFrom(host);
4489  if (Marking::IsBlack(mark_bit)) {
4490  RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4491  RecordRelocSlot(&rinfo, target);
4492  }
4493  }
4494 }
4495 
4496 
4497 static SlotsBuffer::SlotType DecodeSlotType(
4498  SlotsBuffer::ObjectSlot slot) {
4499  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4500 }
4501 
4502 
4503 void SlotsBuffer::UpdateSlots(Heap* heap) {
4504  PointersUpdatingVisitor v(heap);
4505 
4506  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4507  ObjectSlot slot = slots_[slot_idx];
4508  if (!IsTypedSlot(slot)) {
4510  } else {
4511  ++slot_idx;
4512  DCHECK(slot_idx < idx_);
4513  UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4514  reinterpret_cast<Address>(slots_[slot_idx]));
4515  }
4516  }
4517 }
4518 
4519 
4520 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4521  PointersUpdatingVisitor v(heap);
4522 
4523  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4524  ObjectSlot slot = slots_[slot_idx];
4525  if (!IsTypedSlot(slot)) {
4526  if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4528  }
4529  } else {
4530  ++slot_idx;
4531  DCHECK(slot_idx < idx_);
4532  Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4533  if (!IsOnInvalidatedCodeObject(pc)) {
4534  UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4535  reinterpret_cast<Address>(slots_[slot_idx]));
4536  }
4537  }
4538  }
4539 }
4540 
4541 
4542 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4543  return new SlotsBuffer(next_buffer);
4544 }
4545 
4546 
4547 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4548  delete buffer;
4549 }
4550 
4551 
4552 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4553  SlotsBuffer* buffer = *buffer_address;
4554  while (buffer != NULL) {
4555  SlotsBuffer* next_buffer = buffer->next();
4556  DeallocateBuffer(buffer);
4557  buffer = next_buffer;
4558  }
4559  *buffer_address = NULL;
4560 }
4561 }
4562 } // namespace v8::internal