heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
22 #include "src/heap/mark-compact.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/snapshot.h"
32 #include "src/utils.h"
33 #include "src/v8threads.h"
34 #include "src/vm-state-inl.h"
35 
36 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
37 #include "src/regexp-macro-assembler.h" // NOLINT
38 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
39 #endif
40 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
41 #include "src/regexp-macro-assembler.h" // NOLINT
42 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
43 #endif
44 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
47 #endif
48 
49 namespace v8 {
50 namespace internal {
51 
52 
54  : amount_of_external_allocated_memory_(0),
55  amount_of_external_allocated_memory_at_last_global_gc_(0),
56  isolate_(NULL),
57  code_range_size_(0),
58  // semispace_size_ should be a power of 2 and old_generation_size_ should
59  // be a multiple of Page::kPageSize.
60  reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61  max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62  initial_semispace_size_(Page::kPageSize),
63  max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
64  max_executable_size_(256ul * (kPointerSize / 4) * MB),
65  // Variables set based on semispace_size_ and old_generation_size_ in
66  // ConfigureHeap.
67  // Will be 4 * reserved_semispace_size_ to ensure that young
68  // generation can be aligned to its size.
69  maximum_committed_(0),
70  survived_since_last_expansion_(0),
71  sweep_generation_(0),
72  always_allocate_scope_depth_(0),
73  contexts_disposed_(0),
74  global_ic_age_(0),
75  flush_monomorphic_ics_(false),
76  scan_on_scavenge_pages_(0),
77  new_space_(this),
78  old_pointer_space_(NULL),
79  old_data_space_(NULL),
80  code_space_(NULL),
81  map_space_(NULL),
82  cell_space_(NULL),
83  property_cell_space_(NULL),
84  lo_space_(NULL),
85  gc_state_(NOT_IN_GC),
86  gc_post_processing_depth_(0),
87  allocations_count_(0),
88  raw_allocations_hash_(0),
89  dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
90  ms_count_(0),
91  gc_count_(0),
92  remembered_unmapped_pages_index_(0),
93  unflattened_strings_length_(0),
94 #ifdef DEBUG
95  allocation_timeout_(0),
96 #endif // DEBUG
97  old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
98  old_gen_exhausted_(false),
99  inline_allocation_disabled_(false),
100  store_buffer_rebuilder_(store_buffer()),
101  hidden_string_(NULL),
102  gc_safe_size_of_old_object_(NULL),
103  total_regexp_code_generated_(0),
104  tracer_(this),
105  high_survival_rate_period_length_(0),
106  promoted_objects_size_(0),
107  promotion_rate_(0),
108  semi_space_copied_object_size_(0),
109  semi_space_copied_rate_(0),
110  nodes_died_in_new_space_(0),
111  nodes_copied_in_new_space_(0),
112  nodes_promoted_(0),
113  maximum_size_scavenges_(0),
114  max_gc_pause_(0.0),
115  total_gc_time_ms_(0.0),
116  max_alive_after_gc_(0),
117  min_in_mutator_(kMaxInt),
118  marking_time_(0.0),
119  sweeping_time_(0.0),
120  mark_compact_collector_(this),
121  store_buffer_(this),
122  marking_(this),
123  incremental_marking_(this),
124  gc_count_at_last_idle_gc_(0),
125  full_codegen_bytes_generated_(0),
126  crankshaft_codegen_bytes_generated_(0),
127  gcs_since_last_deopt_(0),
128 #ifdef VERIFY_HEAP
129  no_weak_object_verification_scope_depth_(0),
130 #endif
131  allocation_sites_scratchpad_length_(0),
132  promotion_queue_(this),
133  configured_(false),
134  external_string_table_(this),
135  chunks_queued_for_free_(NULL),
136  gc_callbacks_depth_(0) {
137 // Allow build-time customization of the max semispace size. Building
138 // V8 with snapshots and a non-default max semispace size is much
139 // easier if you can define it as part of the build environment.
140 #if defined(V8_MAX_SEMISPACE_SIZE)
141  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
142 #endif
143 
144  // Ensure old_generation_size_ is a multiple of kPageSize.
146 
147  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
152  // Put a dummy entry in the remembered pages so we can find the list in
153  // the minidump even if there are no real unmapped pages.
154  RememberUnmappedPage(NULL, false);
155 
156  ClearObjectStats(true);
157 }
158 
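// NOTE: Illustrative sketch (not part of the original heap.cc). It only
// models the (kPointerSize / 4) scaling used by the constructor above, so
// that 64-bit builds get twice the 32-bit defaults; the constant names are
// local to this example.
namespace heap_size_example {
const int kExampleMB = 1024 * 1024;
const int kExamplePointerSize = sizeof(void*);  // 4 on 32-bit, 8 on 64-bit.

// 8 MB semispaces and a 700 MB old generation on 32-bit targets; both are
// doubled on 64-bit targets because kPointerSize / 4 == 2 there.
const unsigned long kExampleMaxSemiSpaceSize =
    8ul * (kExamplePointerSize / 4) * kExampleMB;    // 8 MB or 16 MB.
const unsigned long kExampleMaxOldGenerationSize =
    700ul * (kExamplePointerSize / 4) * kExampleMB;  // 700 MB or 1400 MB.
const unsigned long kExampleMaxExecutableSize =
    256ul * (kExamplePointerSize / 4) * kExampleMB;  // 256 MB or 512 MB.
}  // namespace heap_size_example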
159 
160 intptr_t Heap::Capacity() {
161  if (!HasBeenSetUp()) return 0;
162 
167 }
168 
169 
171  if (!HasBeenSetUp()) return 0;
172 
177 }
178 
179 
181  if (!HasBeenSetUp()) return 0;
182 
191 }
192 
193 
195  if (!HasBeenSetUp()) return 0;
196 
197  return isolate()->memory_allocator()->SizeExecutable();
198 }
199 
200 
202  if (!HasBeenSetUp()) return;
203 
204  intptr_t current_committed_memory = CommittedMemory();
205  if (current_committed_memory > maximum_committed_) {
206  maximum_committed_ = current_committed_memory;
207  }
208 }
209 
210 
211 intptr_t Heap::Available() {
212  if (!HasBeenSetUp()) return 0;
213 
218 }
219 
220 
222  return old_pointer_space_ != NULL && old_data_space_ != NULL &&
223  code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
225 }
226 
227 
229  if (IntrusiveMarking::IsMarked(object)) {
231  }
232  return object->SizeFromMap(object->map());
233 }
234 
235 
237  const char** reason) {
238  // Is global GC requested?
239  if (space != NEW_SPACE) {
240  isolate_->counters()->gc_compactor_caused_by_request()->Increment();
241  *reason = "GC in old space requested";
242  return MARK_COMPACTOR;
243  }
244 
245  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
246  *reason = "GC in old space forced by flags";
247  return MARK_COMPACTOR;
248  }
249 
250  // Is enough data promoted to justify a global GC?
252  isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
253  *reason = "promotion limit reached";
254  return MARK_COMPACTOR;
255  }
256 
257  // Have allocation in OLD and LO failed?
258  if (old_gen_exhausted_) {
259  isolate_->counters()
260  ->gc_compactor_caused_by_oldspace_exhaustion()
261  ->Increment();
262  *reason = "old generations exhausted";
263  return MARK_COMPACTOR;
264  }
265 
266  // Is there enough space left in OLD to guarantee that a scavenge can
267  // succeed?
268  //
269  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
270  // for object promotion. It counts only the bytes that the memory
271  // allocator has not yet allocated from the OS and assigned to any space,
272  // and does not count available bytes already in the old space or code
273  // space. Undercounting is safe---we may get an unrequested full GC when
274  // a scavenge would have succeeded.
276  isolate_->counters()
277  ->gc_compactor_caused_by_oldspace_exhaustion()
278  ->Increment();
279  *reason = "scavenge might not succeed";
280  return MARK_COMPACTOR;
281  }
282 
283  // Default
284  *reason = NULL;
285  return SCAVENGER;
286 }
287 
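// NOTE: Illustrative sketch (not part of the original heap.cc). A standalone
// model of the decision order implemented by SelectGarbageCollector above;
// all names below are local to this example.
namespace collector_selection_example {
enum ExampleCollector { EXAMPLE_SCAVENGER, EXAMPLE_MARK_COMPACTOR };

struct ExampleHeapState {
  bool old_space_request;        // GC was requested in an old space.
  bool forced_by_flags;          // --gc-global or stress compaction.
  bool promotion_limit_reached;  // Enough data promoted to justify a full GC.
  bool old_gen_exhausted;        // Allocation in OLD/LO spaces failed.
  bool scavenge_might_fail;      // Not enough old-space room for promotion.
};

// The checks mirror the order above: any old-generation pressure escalates
// the scavenge to a full mark-compact collection.
ExampleCollector SelectCollector(const ExampleHeapState& s) {
  if (s.old_space_request) return EXAMPLE_MARK_COMPACTOR;
  if (s.forced_by_flags) return EXAMPLE_MARK_COMPACTOR;
  if (s.promotion_limit_reached) return EXAMPLE_MARK_COMPACTOR;
  if (s.old_gen_exhausted) return EXAMPLE_MARK_COMPACTOR;
  if (s.scavenge_might_fail) return EXAMPLE_MARK_COMPACTOR;
  return EXAMPLE_SCAVENGER;  // Default: collect the young generation only.
}
}  // namespace collector_selection_example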
288 
289 // TODO(1238405): Combine the infrastructure for --heap-stats and
290 // --log-gc to avoid the complicated preprocessor and flag testing.
292 // Heap::ReportHeapStatistics will also log NewSpace statistics when
293 // compiled with --log-gc set. The following logic is used to avoid
294 // double logging.
295 #ifdef DEBUG
296  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
297  if (FLAG_heap_stats) {
298  ReportHeapStatistics("Before GC");
299  } else if (FLAG_log_gc) {
301  }
302  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
303 #else
304  if (FLAG_log_gc) {
308  }
309 #endif // DEBUG
310 }
311 
312 
314  if (!FLAG_trace_gc_verbose) return;
315  PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
316  "d KB"
317  ", available: %6" V8_PTR_PREFIX "d KB\n",
320  PrintPID("New space, used: %6" V8_PTR_PREFIX
321  "d KB"
322  ", available: %6" V8_PTR_PREFIX
323  "d KB"
324  ", committed: %6" V8_PTR_PREFIX "d KB\n",
327  PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
328  "d KB"
329  ", available: %6" V8_PTR_PREFIX
330  "d KB"
331  ", committed: %6" V8_PTR_PREFIX "d KB\n",
335  PrintPID("Old data space, used: %6" V8_PTR_PREFIX
336  "d KB"
337  ", available: %6" V8_PTR_PREFIX
338  "d KB"
339  ", committed: %6" V8_PTR_PREFIX "d KB\n",
343  PrintPID("Code space, used: %6" V8_PTR_PREFIX
344  "d KB"
345  ", available: %6" V8_PTR_PREFIX
346  "d KB"
347  ", committed: %6" V8_PTR_PREFIX "d KB\n",
350  PrintPID("Map space, used: %6" V8_PTR_PREFIX
351  "d KB"
352  ", available: %6" V8_PTR_PREFIX
353  "d KB"
354  ", committed: %6" V8_PTR_PREFIX "d KB\n",
357  PrintPID("Cell space, used: %6" V8_PTR_PREFIX
358  "d KB"
359  ", available: %6" V8_PTR_PREFIX
360  "d KB"
361  ", committed: %6" V8_PTR_PREFIX "d KB\n",
364  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
365  "d KB"
366  ", available: %6" V8_PTR_PREFIX
367  "d KB"
368  ", committed: %6" V8_PTR_PREFIX "d KB\n",
372  PrintPID("Large object space, used: %6" V8_PTR_PREFIX
373  "d KB"
374  ", available: %6" V8_PTR_PREFIX
375  "d KB"
376  ", committed: %6" V8_PTR_PREFIX "d KB\n",
379  PrintPID("All spaces, used: %6" V8_PTR_PREFIX
380  "d KB"
381  ", available: %6" V8_PTR_PREFIX
382  "d KB"
383  ", committed: %6" V8_PTR_PREFIX "d KB\n",
384  this->SizeOfObjects() / KB, this->Available() / KB,
385  this->CommittedMemory() / KB);
386  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
387  static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
388  PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
389 }
390 
391 
392 // TODO(1238405): Combine the infrastructure for --heap-stats and
393 // --log-gc to avoid the complicated preprocessor and flag testing.
395 // Similarly to the pre-GC case, we use some complicated logic to ensure that
396 // NewSpace statistics are logged exactly once when --log-gc is turned on.
397 #if defined(DEBUG)
398  if (FLAG_heap_stats) {
400  ReportHeapStatistics("After GC");
401  } else if (FLAG_log_gc) {
403  }
404 #else
405  if (FLAG_log_gc) new_space_.ReportStatistics();
406 #endif // DEBUG
407 }
408 
409 
411  {
412  AllowHeapAllocation for_the_first_part_of_prologue;
414  gc_count_++;
416 
417  if (FLAG_flush_code && FLAG_flush_code_incrementally) {
419  }
420 
421 #ifdef VERIFY_HEAP
422  if (FLAG_verify_heap) {
423  Verify();
424  }
425 #endif
426  }
427 
428  // Reset GC statistics.
433  nodes_promoted_ = 0;
434 
436 
437 #ifdef DEBUG
438  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
439 
440  if (FLAG_gc_verbose) Print();
441 
443 #endif // DEBUG
444 
446 
447  if (isolate()->concurrent_osr_enabled()) {
449  }
450 
453  } else {
455  }
457 }
458 
459 
461  intptr_t total = 0;
462  AllSpaces spaces(this);
463  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
464  total += space->SizeOfObjects();
465  }
466  return total;
467 }
468 
469 
472 
473  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
474  Code* code = Code::cast(object);
475  Code::Kind current_kind = code->kind();
476  if (current_kind == Code::FUNCTION ||
477  current_kind == Code::OPTIMIZED_FUNCTION) {
478  code->ClearInlineCaches(kind);
479  }
480  }
481 }
482 
483 
485  PagedSpaces spaces(this);
486  for (PagedSpace* space = spaces.next(); space != NULL;
487  space = spaces.next()) {
488  space->RepairFreeListsAfterBoot();
489  }
490 }
491 
492 
494  if (FLAG_allocation_site_pretenuring) {
495  int tenure_decisions = 0;
496  int dont_tenure_decisions = 0;
497  int allocation_mementos_found = 0;
498  int allocation_sites = 0;
499  int active_allocation_sites = 0;
500 
501  // If the scratchpad overflowed, we have to iterate over the allocation
502  // sites list.
503  // TODO(hpayer): We iterate over the whole list of allocation sites when
504  // we have grown to the maximum semi-space size, to deopt maybe-tenured
505  // allocation sites. We could hold the maybe-tenured allocation sites
506  // in a separate data structure if this is a performance problem.
507  bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
508  bool use_scratchpad =
510  !deopt_maybe_tenured;
511 
512  int i = 0;
513  Object* list_element = allocation_sites_list();
514  bool trigger_deoptimization = false;
515  bool maximum_size_scavenge = MaximumSizeScavenge();
516  while (use_scratchpad ? i < allocation_sites_scratchpad_length_
517  : list_element->IsAllocationSite()) {
518  AllocationSite* site =
519  use_scratchpad
520  ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
521  : AllocationSite::cast(list_element);
522  allocation_mementos_found += site->memento_found_count();
523  if (site->memento_found_count() > 0) {
524  active_allocation_sites++;
525  if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
526  trigger_deoptimization = true;
527  }
528  if (site->GetPretenureMode() == TENURED) {
529  tenure_decisions++;
530  } else {
531  dont_tenure_decisions++;
532  }
533  allocation_sites++;
534  }
535 
536  if (deopt_maybe_tenured && site->IsMaybeTenure()) {
537  site->set_deopt_dependent_code(true);
538  trigger_deoptimization = true;
539  }
540 
541  if (use_scratchpad) {
542  i++;
543  } else {
544  list_element = site->weak_next();
545  }
546  }
547 
548  if (trigger_deoptimization) {
549  isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
550  }
551 
553 
554  if (FLAG_trace_pretenuring_statistics &&
555  (allocation_mementos_found > 0 || tenure_decisions > 0 ||
556  dont_tenure_decisions > 0)) {
557  PrintF(
558  "GC: (mode, #visited allocation sites, #active allocation sites, "
559  "#mementos, #tenure decisions, #donttenure decisions) "
560  "(%s, %d, %d, %d, %d, %d)\n",
561  use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
562  active_allocation_sites, allocation_mementos_found, tenure_decisions,
563  dont_tenure_decisions);
564  }
565  }
566 }
567 
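// NOTE: Illustrative sketch (not part of the original heap.cc). It models the
// bookkeeping done by ProcessPretenuringFeedback above: walk the sites, count
// mementos, and tally tenure vs. don't-tenure decisions. The 85% threshold
// below is a placeholder for this example, not V8's actual decision rule.
namespace pretenuring_example {
struct ExampleSite {
  int memento_found_count;   // Mementos observed behind surviving objects.
  int memento_create_count;  // Mementos written at allocation time.
  bool tenured;
};

struct ExampleStats {
  int active_sites;
  int tenure_decisions;
  int dont_tenure_decisions;
  int mementos_found;
};

ExampleStats DigestFeedback(ExampleSite* sites, int count) {
  ExampleStats stats = {0, 0, 0, 0};
  for (int i = 0; i < count; i++) {
    ExampleSite* site = &sites[i];
    stats.mementos_found += site->memento_found_count;
    if (site->memento_found_count == 0) continue;  // Site saw no feedback.
    stats.active_sites++;
    // Placeholder rule: tenure when most allocations survived the scavenge.
    double ratio = site->memento_create_count == 0
                       ? 0.0
                       : static_cast<double>(site->memento_found_count) /
                             site->memento_create_count;
    site->tenured = ratio >= 0.85;
    if (site->tenured) {
      stats.tenure_decisions++;
    } else {
      stats.dont_tenure_decisions++;
    }
  }
  return stats;
}
}  // namespace pretenuring_example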
568 
570  // TODO(hpayer): If iterating over the allocation sites list becomes a
571  // performance issue, use a cache heap data structure instead (similar to the
572  // allocation sites scratchpad).
573  Object* list_element = allocation_sites_list();
574  while (list_element->IsAllocationSite()) {
575  AllocationSite* site = AllocationSite::cast(list_element);
576  if (site->deopt_dependent_code()) {
577  site->dependent_code()->MarkCodeForDeoptimization(
579  site->set_deopt_dependent_code(false);
580  }
581  list_element = site->weak_next();
582  }
584 }
585 
586 
589 
590  // In release mode, we only zap the from space under heap verification.
591  if (Heap::ShouldZapGarbage()) {
592  ZapFromSpace();
593  }
594 
595  // Process pretenuring feedback and update allocation sites.
597 
598 #ifdef VERIFY_HEAP
599  if (FLAG_verify_heap) {
600  Verify();
601  }
602 #endif
603 
604  AllowHeapAllocation for_the_rest_of_the_epilogue;
605 
606 #ifdef DEBUG
607  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
608  if (FLAG_print_handles) PrintHandles();
609  if (FLAG_gc_verbose) Print();
610  if (FLAG_code_stats) ReportCodeStatistics("After GC");
611 #endif
612  if (FLAG_deopt_every_n_garbage_collections > 0) {
613  // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
614  // the topmost optimized frame can be deoptimized safely, because it
615  // might not have a lazy bailout point right after its current PC.
616  if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
619  }
620  }
621 
623 
624  isolate_->counters()->alive_after_last_gc()->Set(
625  static_cast<int>(SizeOfObjects()));
626 
627  isolate_->counters()->string_table_capacity()->Set(
628  string_table()->Capacity());
629  isolate_->counters()->number_of_symbols()->Set(
630  string_table()->NumberOfElements());
631 
633  isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
634  static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
637  }
638 
639  if (CommittedMemory() > 0) {
640  isolate_->counters()->external_fragmentation_total()->AddSample(
641  static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
642 
643  isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
644  (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
645  isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
646  static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
647  CommittedMemory()));
648  isolate_->counters()->heap_fraction_old_data_space()->AddSample(
649  static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
650  CommittedMemory()));
651  isolate_->counters()->heap_fraction_code_space()->AddSample(
652  static_cast<int>((code_space()->CommittedMemory() * 100.0) /
653  CommittedMemory()));
654  isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
655  (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
656  isolate_->counters()->heap_fraction_cell_space()->AddSample(
657  static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
658  CommittedMemory()));
659  isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
660  static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
661  CommittedMemory()));
662  isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
663  (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
664 
665  isolate_->counters()->heap_sample_total_committed()->AddSample(
666  static_cast<int>(CommittedMemory() / KB));
667  isolate_->counters()->heap_sample_total_used()->AddSample(
668  static_cast<int>(SizeOfObjects() / KB));
669  isolate_->counters()->heap_sample_map_space_committed()->AddSample(
670  static_cast<int>(map_space()->CommittedMemory() / KB));
671  isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
672  static_cast<int>(cell_space()->CommittedMemory() / KB));
673  isolate_->counters()
674  ->heap_sample_property_cell_space_committed()
675  ->AddSample(
676  static_cast<int>(property_cell_space()->CommittedMemory() / KB));
677  isolate_->counters()->heap_sample_code_space_committed()->AddSample(
678  static_cast<int>(code_space()->CommittedMemory() / KB));
679 
680  isolate_->counters()->heap_sample_maximum_committed()->AddSample(
681  static_cast<int>(MaximumCommittedMemory() / KB));
682  }
683 
684 #define UPDATE_COUNTERS_FOR_SPACE(space) \
685  isolate_->counters()->space##_bytes_available()->Set( \
686  static_cast<int>(space()->Available())); \
687  isolate_->counters()->space##_bytes_committed()->Set( \
688  static_cast<int>(space()->CommittedMemory())); \
689  isolate_->counters()->space##_bytes_used()->Set( \
690  static_cast<int>(space()->SizeOfObjects()));
691 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
692  if (space()->CommittedMemory() > 0) { \
693  isolate_->counters()->external_fragmentation_##space()->AddSample( \
694  static_cast<int>(100 - \
695  (space()->SizeOfObjects() * 100.0) / \
696  space()->CommittedMemory())); \
697  }
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699  UPDATE_COUNTERS_FOR_SPACE(space) \
700  UPDATE_FRAGMENTATION_FOR_SPACE(space)
701 
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
713 
714 #ifdef DEBUG
716 #endif // DEBUG
717 
718  // Remember the last top pointer so that we can later find out
719  // whether we allocated in new space since the last GC.
721 }
722 
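// NOTE: Illustrative sketch (not part of the original heap.cc). It reproduces
// the fragmentation formula used by the UPDATE_FRAGMENTATION_FOR_SPACE macro
// above: the percentage of committed memory not occupied by live objects.
namespace fragmentation_example {
// E.g. 6 MB of objects in 8 MB of committed memory -> 25% fragmentation.
int ExternalFragmentationPercent(long size_of_objects, long committed_memory) {
  if (committed_memory <= 0) return 0;
  return static_cast<int>(100 - (size_of_objects * 100.0) / committed_memory);
}
}  // namespace fragmentation_example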
723 
724 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
725  const v8::GCCallbackFlags gc_callback_flags) {
726  // Since we are ignoring the return value, the exact choice of space does
727  // not matter, so long as we do not specify NEW_SPACE, which would not
728  // cause a full GC.
730  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
732 }
733 
734 
735 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
736  // Since we are ignoring the return value, the exact choice of space does
737  // not matter, so long as we do not specify NEW_SPACE, which would not
738  // cause a full GC.
739  // Major GC would invoke weak handle callbacks on weakly reachable
740  // handles, but won't collect weakly reachable objects until next
741  // major GC. Therefore if we collect aggressively and weak handle callback
742  // has been invoked, we rerun major GC to release objects which become
743  // garbage.
744  // Note: as weak callbacks can execute arbitrary code, we cannot
745  // hope that eventually there will be no weak callback invocations.
746  // Therefore stop recollecting after several attempts.
747  if (isolate()->concurrent_recompilation_enabled()) {
748  // The optimizing compiler may be unnecessarily holding on to memory.
749  DisallowHeapAllocation no_recursive_gc;
751  }
755  const int kMaxNumberOfAttempts = 7;
756  const int kMinNumberOfAttempts = 2;
757  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
758  if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
759  attempt + 1 >= kMinNumberOfAttempts) {
760  break;
761  }
762  }
764  new_space_.Shrink();
767 }
768 
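// NOTE: Illustrative sketch (not part of the original heap.cc). It models the
// bounded retry loop used by CollectAllAvailableGarbage above: keep running
// full GCs while they report that more might be collectable, with a minimum
// of two attempts and a hard cap. The function pointer type is hypothetical.
namespace aggressive_gc_example {
typedef bool (*ExampleCollectFn)();  // Returns true if another GC may help.

void CollectAllAvailable(ExampleCollectFn collect_full_gc) {
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    // Stop early once a GC frees nothing more, but only after the minimum
    // number of attempts, since weak callbacks may still produce garbage.
    if (!collect_full_gc() && attempt + 1 >= kMinNumberOfAttempts) break;
  }
}
}  // namespace aggressive_gc_example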
769 
771  // There may be an allocation memento behind every object in new space.
772  // If we evacuate a new space that is not full, or if we are on the last page of
773  // the new space, then there may be uninitialized memory behind the top
774  // pointer of the new space page. We store a filler object there to
775  // identify the unused space.
776  Address from_top = new_space_.top();
777  Address from_limit = new_space_.limit();
778  if (from_top < from_limit) {
779  int remaining_in_page = static_cast<int>(from_limit - from_top);
780  CreateFillerObjectAt(from_top, remaining_in_page);
781  }
782 }
783 
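// NOTE: Illustrative sketch (not part of the original heap.cc). It shows only
// the size computation performed by EnsureFillerObjectAtTop above: the number
// of unused bytes between the allocation top and the page limit that must be
// covered by a filler object so heap iterators can skip it.
namespace filler_example {
int RemainingInPage(char* from_top, char* from_limit) {
  // A positive result means [from_top, from_limit) is uninitialized memory.
  return from_top < from_limit ? static_cast<int>(from_limit - from_top) : 0;
}
}  // namespace filler_example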
784 
785 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
786  const char* collector_reason,
787  const v8::GCCallbackFlags gc_callback_flags) {
788  // The VM is in the GC state until exiting this function.
789  VMState<GC> state(isolate_);
790 
791 #ifdef DEBUG
792  // Reset the allocation timeout to the GC interval, but make sure to
793  // allow at least a few allocations after a collection. The reason
794  // for this is that we have a lot of allocation sequences and we
795  // assume that a garbage collection will allow the subsequent
796  // allocation attempts to go through.
797  allocation_timeout_ = Max(6, FLAG_gc_interval);
798 #endif
799 
801 
802  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
803  if (FLAG_trace_incremental_marking) {
804  PrintF("[IncrementalMarking] Scavenge during marking.\n");
805  }
806  }
807 
808  if (collector == MARK_COMPACTOR &&
809  !mark_compact_collector()->abort_incremental_marking() &&
810  !incremental_marking()->IsStopped() &&
811  !incremental_marking()->should_hurry() &&
812  FLAG_incremental_marking_steps) {
813  // Make progress in incremental marking.
814  const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
815  incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
817  if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
818  if (FLAG_trace_incremental_marking) {
819  PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
820  }
821  collector = SCAVENGER;
822  collector_reason = "incremental marking delaying mark-sweep";
823  }
824  }
825 
826  bool next_gc_likely_to_collect_more = false;
827 
828  {
829  tracer()->Start(collector, gc_reason, collector_reason);
830  DCHECK(AllowHeapAllocation::IsAllowed());
831  DisallowHeapAllocation no_allocation_during_gc;
833 
834  {
835  HistogramTimerScope histogram_timer_scope(
836  (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
837  : isolate_->counters()->gc_compactor());
838  next_gc_likely_to_collect_more =
839  PerformGarbageCollection(collector, gc_callback_flags);
840  }
841 
843  tracer()->Stop();
844  }
845 
846  // Start incremental marking for the next cycle. The heap snapshot
847  // generator needs incremental marking to stay off after it aborted.
848  if (!mark_compact_collector()->abort_incremental_marking() &&
851  }
852 
853  return next_gc_likely_to_collect_more;
854 }
855 
856 
858  if (isolate()->concurrent_recompilation_enabled()) {
859  // Flush the queued recompilation tasks.
861  }
862  flush_monomorphic_ics_ = true;
863  AgeInlineCaches();
864  return ++contexts_disposed_;
865 }
866 
867 
868 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
869  int len) {
870  if (len == 0) return;
871 
872  DCHECK(array->map() != fixed_cow_array_map());
873  Object** dst_objects = array->data_start() + dst_index;
874  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
875  if (!InNewSpace(array)) {
876  for (int i = 0; i < len; i++) {
877  // TODO(hpayer): check store buffer for entries
878  if (InNewSpace(dst_objects[i])) {
879  RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
880  }
881  }
882  }
884 }
885 
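// NOTE: Illustrative sketch (not part of the original heap.cc). It models the
// pattern in MoveElements above: move the element slots with memmove
// semantics, then run a write barrier over the destination range when the
// array itself lives outside new space. The predicate and barrier function
// pointer types are hypothetical stand-ins for InNewSpace and RecordWrite.
namespace move_elements_example {
typedef bool (*ExampleInNewSpaceFn)(void* object);
typedef void (*ExampleRecordWriteFn)(void** slot);

void MoveSlots(void** data_start, int dst_index, int src_index, int len,
               bool array_in_new_space, ExampleInNewSpaceFn in_new_space,
               ExampleRecordWriteFn record_write) {
  if (len == 0) return;
  void** dst = data_start + dst_index;
  void** src = data_start + src_index;
  // Ranges may overlap, so copy in the direction that preserves the data.
  if (dst < src) {
    for (int i = 0; i < len; i++) dst[i] = src[i];
  } else {
    for (int i = len - 1; i >= 0; i--) dst[i] = src[i];
  }
  // An array in new space needs no remembered-set entries; only old-to-new
  // pointers have to be recorded.
  if (array_in_new_space) return;
  for (int i = 0; i < len; i++) {
    if (in_new_space(dst[i])) record_write(&dst[i]);
  }
}
}  // namespace move_elements_example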
886 
887 #ifdef VERIFY_HEAP
888 // Helper class for verifying the string table.
889 class StringTableVerifier : public ObjectVisitor {
890  public:
891  void VisitPointers(Object** start, Object** end) {
892  // Visit all HeapObject pointers in [start, end).
893  for (Object** p = start; p < end; p++) {
894  if ((*p)->IsHeapObject()) {
895  // Check that the string is actually internalized.
896  CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
897  (*p)->IsInternalizedString());
898  }
899  }
900  }
901 };
902 
903 
904 static void VerifyStringTable(Heap* heap) {
905  StringTableVerifier verifier;
906  heap->string_table()->IterateElements(&verifier);
907 }
908 #endif // VERIFY_HEAP
909 
910 
912  Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
914  bool result = heap->CollectGarbage(space, gc_reason);
916  return result;
917 }
918 
919 
920 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
921  bool gc_performed = true;
922  int counter = 0;
923  static const int kThreshold = 20;
924  while (gc_performed && counter++ < kThreshold) {
925  gc_performed = false;
927  if (sizes[space] == 0) continue;
928  bool perform_gc = false;
929  if (space == LO_SPACE) {
930  perform_gc = !lo_space()->CanAllocateSize(sizes[space]);
931  } else {
932  AllocationResult allocation;
933  if (space == NEW_SPACE) {
934  allocation = new_space()->AllocateRaw(sizes[space]);
935  } else {
936  allocation = paged_space(space)->AllocateRaw(sizes[space]);
937  }
938  FreeListNode* node;
939  if (allocation.To(&node)) {
940  // Mark with a free list node, in case we have a GC before
941  // deserializing.
942  node->set_size(this, sizes[space]);
944  locations_out[space] = node->address();
945  } else {
946  perform_gc = true;
947  }
948  }
949  if (perform_gc) {
950  if (space == NEW_SPACE) {
952  "failed to reserve space in the new space");
953  } else {
955  this, static_cast<AllocationSpace>(space),
956  "failed to reserve space in paged or large object space");
957  }
958  gc_performed = true;
959  break; // Abort for-loop over spaces and retry.
960  }
961  }
962  }
963 
964  if (gc_performed) {
965  // Failed to reserve the space after several attempts.
966  V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
967  }
968 }
969 
970 
972  if (new_space_.CommitFromSpaceIfNeeded()) return;
973 
974  // Committing memory to from space failed.
975  // Memory is exhausted and we will die.
976  V8::FatalProcessOutOfMemory("Committing semi space failed.");
977 }
978 
979 
981  if (isolate_->bootstrapper()->IsActive()) return;
982 
983  Object* context = native_contexts_list();
984  while (!context->IsUndefined()) {
985  // Get the caches for this context. GC can happen when the context
986  // is not fully initialized, so the caches can be undefined.
987  Object* caches_or_undefined =
989  if (!caches_or_undefined->IsUndefined()) {
990  FixedArray* caches = FixedArray::cast(caches_or_undefined);
991  // Clear the caches:
992  int length = caches->length();
993  for (int i = 0; i < length; i++) {
994  JSFunctionResultCache::cast(caches->get(i))->Clear();
995  }
996  }
997  // Get the next context:
998  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
999  }
1000 }
1001 
1002 
1004  if (isolate_->bootstrapper()->IsActive() &&
1005  !incremental_marking()->IsMarking()) {
1006  return;
1007  }
1008 
1009  Object* context = native_contexts_list();
1010  while (!context->IsUndefined()) {
1011  // GC can happen when the context is not fully initialized,
1012  // so the cache can be undefined.
1013  Object* cache =
1015  if (!cache->IsUndefined()) {
1016  NormalizedMapCache::cast(cache)->Clear();
1017  }
1018  context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1019  }
1020 }
1021 
1022 
1023 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1024  if (start_new_space_size == 0) return;
1025 
1026  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1027  static_cast<double>(start_new_space_size) * 100);
1028 
1030  (static_cast<double>(semi_space_copied_object_size_) /
1031  static_cast<double>(start_new_space_size) * 100);
1032 
1033  double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1034 
1035  if (survival_rate > kYoungSurvivalRateHighThreshold) {
1037  } else {
1039  }
1040 }
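
// NOTE: Illustrative sketch (not part of the original heap.cc). It reproduces
// the two percentages computed by UpdateSurvivalStatistics above. For
// example, with a 4 MB new space of which 1 MB was promoted and 0.5 MB was
// copied within the semispaces, the rates are 25% and 12.5%, for a survival
// rate of 37.5%.
namespace survival_example {
struct ExampleSurvival {
  double promotion_rate;
  double semi_space_copied_rate;
  double survival_rate;
};

ExampleSurvival ComputeSurvival(double promoted_objects_size,
                                double semi_space_copied_object_size,
                                double start_new_space_size) {
  ExampleSurvival result = {0, 0, 0};
  if (start_new_space_size == 0) return result;
  result.promotion_rate = promoted_objects_size / start_new_space_size * 100;
  result.semi_space_copied_rate =
      semi_space_copied_object_size / start_new_space_size * 100;
  result.survival_rate =
      result.promotion_rate + result.semi_space_copied_rate;
  return result;
}
}  // namespace survival_example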
1041 
1043  GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1044  int freed_global_handles = 0;
1045 
1046  if (collector != SCAVENGER) {
1047  PROFILE(isolate_, CodeMovingGCEvent());
1048  }
1049 
1050 #ifdef VERIFY_HEAP
1051  if (FLAG_verify_heap) {
1052  VerifyStringTable(this);
1053  }
1054 #endif
1055 
1056  GCType gc_type =
1058 
1059  {
1060  GCCallbacksScope scope(this);
1061  if (scope.CheckReenter()) {
1062  AllowHeapAllocation allow_allocation;
1064  VMState<EXTERNAL> state(isolate_);
1065  HandleScope handle_scope(isolate_);
1067  }
1068  }
1069 
1071 
1072  int start_new_space_size = Heap::new_space()->SizeAsInt();
1073 
1074  if (IsHighSurvivalRate()) {
1075  // We speed up the incremental marker if it is running so that it
1076  // does not fall behind the rate of promotion, which would cause a
1077  // constantly growing old space.
1079  }
1080 
1081  if (collector == MARK_COMPACTOR) {
1082  // Perform mark-sweep with optional compaction.
1083  MarkCompact();
1085  // Temporarily set the limit for case when PostGarbageCollectionProcessing
1086  // allocates and triggers GC. The real limit is set at after
1087  // PostGarbageCollectionProcessing.
1090  old_gen_exhausted_ = false;
1091  } else {
1092  Scavenge();
1093  }
1094 
1095  UpdateSurvivalStatistics(start_new_space_size);
1096 
1097  isolate_->counters()->objs_since_last_young()->Set(0);
1098 
1099  // Callbacks that fire after this point might trigger nested GCs and
1100  // restart incremental marking, the assertion can't be moved down.
1101  DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1102 
1104  {
1105  AllowHeapAllocation allow_allocation;
1107  freed_global_handles =
1109  }
1111 
1113 
1114  // Update relocatables.
1115  Relocatable::PostGarbageCollectionProcessing(isolate_);
1116 
1117  if (collector == MARK_COMPACTOR) {
1118  // Register the amount of external allocated memory.
1122  PromotedSpaceSizeOfObjects(), freed_global_handles);
1123  }
1124 
1125  {
1126  GCCallbacksScope scope(this);
1127  if (scope.CheckReenter()) {
1128  AllowHeapAllocation allow_allocation;
1130  VMState<EXTERNAL> state(isolate_);
1131  HandleScope handle_scope(isolate_);
1132  CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1133  }
1134  }
1135 
1136 #ifdef VERIFY_HEAP
1137  if (FLAG_verify_heap) {
1138  VerifyStringTable(this);
1139  }
1140 #endif
1141 
1142  return freed_global_handles > 0;
1143 }
1144 
1145 
1147  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1148  if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1149  if (!gc_prologue_callbacks_[i].pass_isolate_) {
1150  v8::GCPrologueCallback callback =
1151  reinterpret_cast<v8::GCPrologueCallback>(
1152  gc_prologue_callbacks_[i].callback);
1153  callback(gc_type, flags);
1154  } else {
1155  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1156  gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1157  }
1158  }
1159  }
1160 }
1161 
1162 
1164  GCCallbackFlags gc_callback_flags) {
1165  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1166  if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1167  if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1168  v8::GCPrologueCallback callback =
1169  reinterpret_cast<v8::GCPrologueCallback>(
1170  gc_epilogue_callbacks_[i].callback);
1171  callback(gc_type, gc_callback_flags);
1172  } else {
1173  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1174  gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1175  }
1176  }
1177  }
1178 }
1179 
1180 
1183  LOG(isolate_, ResourceEvent("markcompact", "begin"));
1184 
1185  uint64_t size_of_objects_before_gc = SizeOfObjects();
1186 
1188 
1189  ms_count_++;
1190 
1192 
1194 
1195  LOG(isolate_, ResourceEvent("markcompact", "end"));
1196 
1197  gc_state_ = NOT_IN_GC;
1198 
1199  isolate_->counters()->objs_since_last_full()->Set(0);
1200 
1201  flush_monomorphic_ics_ = false;
1202 
1203  if (FLAG_allocation_site_pretenuring) {
1204  EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1205  }
1206 }
1207 
1208 
1210  // At any old-space GC, clear the keyed lookup cache to enable collection
1211  // of unused maps.
1215  RegExpResultsCache::Clear(string_split_cache());
1216  RegExpResultsCache::Clear(regexp_multiple_cache());
1217 
1219 
1221 
1223  if (FLAG_cleanup_code_caches_at_gc) {
1224  polymorphic_code_cache()->set_cache(undefined_value());
1225  }
1226 
1228 }
1229 
1230 
1231 // Helper class for copying HeapObjects
1233  public:
1234  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1235 
1237 
1238  void VisitPointers(Object** start, Object** end) {
1239  // Copy all HeapObject pointers in [start, end)
1240  for (Object** p = start; p < end; p++) ScavengePointer(p);
1241  }
1242 
1243  private:
1245  Object* object = *p;
1246  if (!heap_->InNewSpace(object)) return;
1247  Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1248  reinterpret_cast<HeapObject*>(object));
1249  }
1250 
1252 };
1253 
1254 
1255 #ifdef VERIFY_HEAP
1256 // Visitor class to verify pointers in code or data space do not point into
1257 // new space.
1258 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1259  public:
1260  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1261  void VisitPointers(Object** start, Object** end) {
1262  for (Object** current = start; current < end; current++) {
1263  if ((*current)->IsHeapObject()) {
1264  CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1265  }
1266  }
1267  }
1268 
1269  private:
1270  Heap* heap_;
1271 };
1272 
1273 
1274 static void VerifyNonPointerSpacePointers(Heap* heap) {
1275  // Verify that there are no pointers to new space in spaces where we
1276  // do not expect them.
1277  VerifyNonPointerSpacePointersVisitor v(heap);
1278  HeapObjectIterator code_it(heap->code_space());
1279  for (HeapObject* object = code_it.Next(); object != NULL;
1280  object = code_it.Next())
1281  object->Iterate(&v);
1282 
1283  HeapObjectIterator data_it(heap->old_data_space());
1284  for (HeapObject* object = data_it.Next(); object != NULL;
1285  object = data_it.Next())
1286  object->Iterate(&v);
1287 }
1288 #endif // VERIFY_HEAP
1289 
1290 
1294  // Grow the size of new space if there is room to grow, enough data
1295  // has survived scavenge since the last expansion, and we are not in
1296  // high promotion mode.
1297  new_space_.Grow();
1299  }
1300 }
1301 
1302 
1303 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1304  return heap->InNewSpace(*p) &&
1305  !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1306 }
1307 
1308 
1310  StoreBufferEvent event) {
1311  heap->store_buffer_rebuilder_.Callback(page, event);
1312 }
1313 
1314 
1316  if (event == kStoreBufferStartScanningPagesEvent) {
1318  current_page_ = NULL;
1319  } else if (event == kStoreBufferScanningPageEvent) {
1320  if (current_page_ != NULL) {
1321  // If this page already overflowed the store buffer during this iteration,
1323  // then we should wipe out the entries that have been added for it.
1325  } else if (store_buffer_->Top() - start_of_current_page_ >=
1326  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1327  // Did we find too many pointers in the previous page? The heuristic is
1328  // that no page can take more than 1/5 of the remaining slots in the store
1329  // buffer.
1332  } else {
1333  // In this case the page we scanned took a reasonable number of slots in
1334  // the store buffer. It has now been rehabilitated and is no longer
1335  // marked scan_on_scavenge.
1337  }
1338  }
1340  current_page_ = page;
1341  } else if (event == kStoreBufferFullEvent) {
1342  // The current page overflowed the store buffer again. Wipe out its entries
1343  // in the store buffer and mark it scan-on-scavenge again. This may happen
1344  // several times while scanning.
1345  if (current_page_ == NULL) {
1346  // Store Buffer overflowed while scanning promoted objects. These are not
1347  // in any particular page, though they are likely to be clustered by the
1348  // allocation routines.
1350  } else {
1351  // Store Buffer overflowed while scanning a particular old space page for
1352  // pointers to new space.
1353  DCHECK(current_page_ == page);
1354  DCHECK(page != NULL);
1358  }
1359  } else {
1360  UNREACHABLE();
1361  }
1362 }
1363 
1364 
1366  // Assumes that a NewSpacePage exactly fits a number of promotion queue
1367  // entries (where each is a pair of intptr_t). This allows us to simplify
1368  // the test for when to switch pages.
1370  0);
1371  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1372  front_ = rear_ =
1373  reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1375 }
1376 
1377 
1380 
1381  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1382  intptr_t* head_start = rear_;
1383  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1384 
1385  int entries_count =
1386  static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1387 
1388  emergency_stack_ = new List<Entry>(2 * entries_count);
1389 
1390  while (head_start != head_end) {
1391  int size = static_cast<int>(*(head_start++));
1392  HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1393  emergency_stack_->Add(Entry(obj, size));
1394  }
1395  rear_ = head_end;
1396 }
1397 
1398 
1400  public:
1401  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1402 
1403  virtual Object* RetainAs(Object* object) {
1404  if (!heap_->InFromSpace(object)) {
1405  return object;
1406  }
1407 
1408  MapWord map_word = HeapObject::cast(object)->map_word();
1409  if (map_word.IsForwardingAddress()) {
1410  return map_word.ToForwardingAddress();
1411  }
1412  return NULL;
1413  }
1414 
1415  private:
1417 };
1418 
1419 
1421  RelocationLock relocation_lock(this);
1422 
1423 #ifdef VERIFY_HEAP
1424  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1425 #endif
1426 
1427  gc_state_ = SCAVENGE;
1428 
1429  // Implements Cheney's copying algorithm
1430  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1431 
1432  // Clear descriptor cache.
1434 
1435  // Used for updating survived_since_last_expansion_ at function end.
1436  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1437 
1439 
1441 
1442  // Flip the semispaces. After flipping, to space is empty, from space has
1443  // live objects.
1444  new_space_.Flip();
1446 
1447  // We need to sweep newly copied objects which can be either in the
1448  // to space or promoted to the old generation. For to-space
1449  // objects, we treat the bottom of the to space as a queue. Newly
1450  // copied and unswept objects lie between a 'front' mark and the
1451  // allocation pointer.
1452  //
1453  // Promoted objects can go into various old-generation spaces, and
1454  // can be allocated internally in the spaces (from the free list).
1455  // We treat the top of the to space as a queue of addresses of
1456  // promoted objects. The addresses of newly promoted and unswept
1457  // objects lie between a 'front' mark and a 'rear' mark that is
1458  // updated as a side effect of promoting an object.
1459  //
1460  // There is guaranteed to be enough room at the top of the to space
1461  // for the addresses of promoted objects: every object promoted
1462  // frees up its size in bytes from the top of the new space, and
1463  // objects are at least one pointer in size.
1464  Address new_space_front = new_space_.ToSpaceStart();
1466 
1467 #ifdef DEBUG
1468  store_buffer()->Clean();
1469 #endif
1470 
1471  ScavengeVisitor scavenge_visitor(this);
1472  // Copy roots.
1473  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1474 
1475  // Copy objects reachable from the old generation.
1476  {
1477  StoreBufferRebuildScope scope(this, store_buffer(),
1480  }
1481 
1482  // Copy objects reachable from simple cells by scavenging cell values
1483  // directly.
1484  HeapObjectIterator cell_iterator(cell_space_);
1485  for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1486  heap_object = cell_iterator.Next()) {
1487  if (heap_object->IsCell()) {
1488  Cell* cell = Cell::cast(heap_object);
1489  Address value_address = cell->ValueAddress();
1490  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1491  }
1492  }
1493 
1494  // Copy objects reachable from global property cells by scavenging global
1495  // property cell values directly.
1496  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1497  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1498  heap_object != NULL;
1499  heap_object = js_global_property_cell_iterator.Next()) {
1500  if (heap_object->IsPropertyCell()) {
1501  PropertyCell* cell = PropertyCell::cast(heap_object);
1502  Address value_address = cell->ValueAddress();
1503  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1504  Address type_address = cell->TypeAddress();
1505  scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1506  }
1507  }
1508 
1509  // Copy objects reachable from the encountered weak collections list.
1510  scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1511 
1512  // Copy objects reachable from the code flushing candidates list.
1514  if (collector->is_code_flushing_enabled()) {
1515  collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1516  }
1517 
1518  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1519 
1520  while (isolate()->global_handles()->IterateObjectGroups(
1521  &scavenge_visitor, &IsUnscavengedHeapObject)) {
1522  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1523  }
1526 
1530  &scavenge_visitor);
1531  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1532 
1535 
1537 
1539 
1540  ScavengeWeakObjectRetainer weak_object_retainer(this);
1541  ProcessWeakReferences(&weak_object_retainer);
1542 
1543  DCHECK(new_space_front == new_space_.top());
1544 
1545  // Set age mark.
1547 
1550 
1551  // Update how much has survived scavenge.
1552  IncrementYoungSurvivorsCounter(static_cast<int>(
1553  (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1554 
1555  LOG(isolate_, ResourceEvent("scavenge", "end"));
1556 
1557  gc_state_ = NOT_IN_GC;
1558 }
1559 
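// NOTE: Illustrative sketch (not part of the original heap.cc). A toy model
// of Cheney's copying collection, which Scavenge above implements over real
// semispaces: copy the roots into to-space, then scan the copied objects
// breadth-first, copying everything they reference and recording forwarding
// information in from-space. The index-based object layout is purely
// illustrative.
#include <vector>

namespace cheney_example {
struct ToyObject {
  std::vector<int> children;  // Indices of referenced objects.
  int forwarding_index;       // -1 while the object has not been copied yet.
};

// Copies |index| out of |from| into |to| exactly once and returns its
// to-space index (the forwarding address in this toy model).
int CopyToyObject(std::vector<ToyObject>& from, std::vector<ToyObject>& to,
                  int index) {
  if (from[index].forwarding_index < 0) {
    from[index].forwarding_index = static_cast<int>(to.size());
    to.push_back(from[index]);
  }
  return from[index].forwarding_index;
}

// Evacuates everything reachable from |roots| and returns the new to-space.
// Root entries are rewritten to to-space indices, playing the role of the
// root iteration done by IterateRoots(&scavenge_visitor, ...) above.
std::vector<ToyObject> ToyScavenge(std::vector<ToyObject>& from_space,
                                   std::vector<int>& roots) {
  std::vector<ToyObject> to_space;
  for (int i = 0; i < static_cast<int>(roots.size()); i++) {
    roots[i] = CopyToyObject(from_space, to_space, roots[i]);
  }
  // The region between |front| and to_space.size() is the queue of copied
  // but unscanned objects, analogous to new_space_front in DoScavenge.
  for (int front = 0; front < static_cast<int>(to_space.size()); front++) {
    for (int c = 0; c < static_cast<int>(to_space[front].children.size());
         c++) {
      int forwarded =
          CopyToyObject(from_space, to_space, to_space[front].children[c]);
      to_space[front].children[c] = forwarded;
    }
  }
  return to_space;
}
}  // namespace cheney_example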
1560 
1562  Object** p) {
1563  MapWord first_word = HeapObject::cast(*p)->map_word();
1564 
1565  if (!first_word.IsForwardingAddress()) {
1566  // Unreachable external string can be finalized.
1567  heap->FinalizeExternalString(String::cast(*p));
1568  return NULL;
1569  }
1570 
1571  // String is still reachable.
1572  return String::cast(first_word.ToForwardingAddress());
1573 }
1574 
1575 
1577  ExternalStringTableUpdaterCallback updater_func) {
1578 #ifdef VERIFY_HEAP
1579  if (FLAG_verify_heap) {
1581  }
1582 #endif
1583 
1584  if (external_string_table_.new_space_strings_.is_empty()) return;
1585 
1587  Object** end = start + external_string_table_.new_space_strings_.length();
1588  Object** last = start;
1589 
1590  for (Object** p = start; p < end; ++p) {
1591  DCHECK(InFromSpace(*p));
1592  String* target = updater_func(this, p);
1593 
1594  if (target == NULL) continue;
1595 
1596  DCHECK(target->IsExternalString());
1597 
1598  if (InNewSpace(target)) {
1599  // String is still in new space. Update the table entry.
1600  *last = target;
1601  ++last;
1602  } else {
1603  // String got promoted. Move it to the old string list.
1605  }
1606  }
1607 
1608  DCHECK(last <= end);
1609  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1610 }
1611 
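// NOTE: Illustrative sketch (not part of the original heap.cc). It shows the
// in-place compaction idiom used by the function above: run an updater over
// every entry, drop entries whose updater result is null, keep survivors
// packed at the front, and shrink the list to the surviving prefix. The
// updater signature is hypothetical; the real code additionally moves
// promoted strings onto a separate old-space list.
namespace compaction_example {
typedef void* (*ExampleUpdaterFn)(void* entry);

// Returns the new length of |entries| after compaction.
int CompactInPlace(void** entries, int length, ExampleUpdaterFn update) {
  int last = 0;
  for (int i = 0; i < length; i++) {
    void* target = update(entries[i]);
    if (target == 0) continue;  // Entry died; drop it.
    entries[last++] = target;   // Keep the survivor, possibly relocated.
  }
  return last;                  // Analogous to ShrinkNewStrings() above.
}
}  // namespace compaction_example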
1612 
1614  ExternalStringTableUpdaterCallback updater_func) {
1615  // Update old space string references.
1616  if (external_string_table_.old_space_strings_.length() > 0) {
1618  Object** end = start + external_string_table_.old_space_strings_.length();
1619  for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1620  }
1621 
1623 }
1624 
1625 
1627  ProcessArrayBuffers(retainer);
1628  ProcessNativeContexts(retainer);
1629  // TODO(mvstanton): AllocationSites only need to be processed during
1630  // MARK_COMPACT, as they live in old space. Verify and address.
1631  ProcessAllocationSites(retainer);
1632 }
1633 
1634 
1636  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1637  // Update the head of the list of contexts.
1639 }
1640 
1641 
1643  Object* array_buffer_obj =
1645  set_array_buffers_list(array_buffer_obj);
1646 }
1647 
1648 
1650  Object* undefined = undefined_value();
1651  for (Object* o = array_buffers_list(); o != undefined;) {
1652  JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1653  Runtime::FreeArrayBuffer(isolate(), buffer);
1654  o = buffer->weak_next();
1655  }
1656  set_array_buffers_list(undefined);
1657 }
1658 
1659 
1661  Object* allocation_site_obj =
1663  set_allocation_sites_list(allocation_site_obj);
1664 }
1665 
1666 
1668  DisallowHeapAllocation no_allocation_scope;
1669  Object* cur = allocation_sites_list();
1670  bool marked = false;
1671  while (cur->IsAllocationSite()) {
1672  AllocationSite* casted = AllocationSite::cast(cur);
1673  if (casted->GetPretenureMode() == flag) {
1674  casted->ResetPretenureDecision();
1675  casted->set_deopt_dependent_code(true);
1676  marked = true;
1677  }
1678  cur = casted->weak_next();
1679  }
1680  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1681 }
1682 
1683 
1685  uint64_t size_of_objects_before_gc) {
1686  uint64_t size_of_objects_after_gc = SizeOfObjects();
1687  double old_generation_survival_rate =
1688  (static_cast<double>(size_of_objects_after_gc) * 100) /
1689  static_cast<double>(size_of_objects_before_gc);
1690 
1691  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1692  // Too many objects died in the old generation; pretenuring of the wrong
1693  // allocation sites may be the cause. We have to deopt all
1694  // dependent code registered in the allocation sites to re-evaluate
1695  // our pretenuring decisions.
1697  if (FLAG_trace_pretenuring) {
1698  PrintF(
1699  "Deopt all allocation sites dependent code due to low survival "
1700  "rate in the old generation %f\n",
1701  old_generation_survival_rate);
1702  }
1703  }
1704 }
1705 
1706 
1708  DisallowHeapAllocation no_allocation;
1709  // All external strings are listed in the external string table.
1710 
1711  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1712  public:
1713  explicit ExternalStringTableVisitorAdapter(
1714  v8::ExternalResourceVisitor* visitor)
1715  : visitor_(visitor) {}
1716  virtual void VisitPointers(Object** start, Object** end) {
1717  for (Object** p = start; p < end; p++) {
1718  DCHECK((*p)->IsExternalString());
1719  visitor_->VisitExternalString(
1720  Utils::ToLocal(Handle<String>(String::cast(*p))));
1721  }
1722  }
1723 
1724  private:
1725  v8::ExternalResourceVisitor* visitor_;
1726  } external_string_table_visitor(visitor);
1727 
1728  external_string_table_.Iterate(&external_string_table_visitor);
1729 }
1730 
1731 
1732 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1733  public:
1734  static inline void VisitPointer(Heap* heap, Object** p) {
1735  Object* object = *p;
1736  if (!heap->InNewSpace(object)) return;
1737  Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1738  reinterpret_cast<HeapObject*>(object));
1739  }
1740 };
1741 
1742 
1744  Address new_space_front) {
1745  do {
1746  SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1747  // The addresses new_space_front and new_space_.top() define a
1748  // queue of unprocessed copied objects. Process them until the
1749  // queue is empty.
1750  while (new_space_front != new_space_.top()) {
1751  if (!NewSpacePage::IsAtEnd(new_space_front)) {
1752  HeapObject* object = HeapObject::FromAddress(new_space_front);
1753  new_space_front +=
1754  NewSpaceScavenger::IterateBody(object->map(), object);
1755  } else {
1756  new_space_front =
1757  NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1758  }
1759  }
1760 
1761  // Promote and process all the to-be-promoted objects.
1762  {
1763  StoreBufferRebuildScope scope(this, store_buffer(),
1765  while (!promotion_queue()->is_empty()) {
1766  HeapObject* target;
1767  int size;
1768  promotion_queue()->remove(&target, &size);
1769 
1770  // A promoted object might already have been partially visited
1771  // during old space pointer iteration. Thus we search specifically
1772  // for pointers into the from-semispace instead of looking for pointers
1773  // to new space.
1774  DCHECK(!target->IsMap());
1776  target->address(), target->address() + size, &ScavengeObject);
1777  }
1778  }
1779 
1780  // Take another spin if there are now unswept objects in new space
1781  // (there are currently no more unswept promoted objects).
1782  } while (new_space_front != new_space_.top());
1783 
1784  return new_space_front;
1785 }
1786 
1787 
1789  0); // NOLINT
1791  0); // NOLINT
1793  kDoubleAlignmentMask) == 0); // NOLINT
1794 
1795 
1797  int size));
1798 
1800  int size) {
1801  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1802  heap->CreateFillerObjectAt(object->address(), kPointerSize);
1803  return HeapObject::FromAddress(object->address() + kPointerSize);
1804  } else {
1805  heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1806  kPointerSize);
1807  return object;
1808  }
1809 }
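
// NOTE: Illustrative sketch (not part of the original heap.cc). It models the
// address arithmetic behind EnsureDoubleAligned above: the caller allocates
// kPointerSize extra bytes, and the filler word goes either before the object
// (to push it onto an 8-byte boundary) or after it (to consume the spare
// word). The constants describe a 32-bit layout where pointer-aligned
// addresses may be only 4-byte aligned.
namespace double_align_example {
const unsigned long kExamplePointerSize = 4;
const unsigned long kExampleDoubleAlignmentMask = 8 - 1;

// Returns the aligned object start within [address, address + size), where
// |size| already includes the extra pointer-size word; |*filler_at| receives
// the address of the one-word filler.
unsigned long AlignForDoubles(unsigned long address, unsigned long size,
                              unsigned long* filler_at) {
  if ((address & kExampleDoubleAlignmentMask) != 0) {
    *filler_at = address;                            // Filler before the object.
    return address + kExamplePointerSize;
  }
  *filler_at = address + size - kExamplePointerSize; // Filler after the object.
  return address;
}
}  // namespace double_align_example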
1810 
1811 
1815 };
1816 
1817 
1819 
1820 
1821 template <MarksHandling marks_handling,
1822  LoggingAndProfiling logging_and_profiling_mode>
1824  public:
1825  static void Initialize() {
1826  table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1827  table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1828  table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1829  table_.Register(kVisitByteArray, &EvacuateByteArray);
1830  table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1831  table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1832  table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1833  table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1834 
1835  table_.Register(
1836  kVisitNativeContext,
1838  Context::kSize>);
1839 
1840  table_.Register(
1841  kVisitConsString,
1844 
1845  table_.Register(
1846  kVisitSlicedString,
1849 
1850  table_.Register(
1851  kVisitSymbol,
1853  Symbol::kSize>);
1854 
1855  table_.Register(
1856  kVisitSharedFunctionInfo,
1859 
1860  table_.Register(kVisitJSWeakCollection,
1862 
1863  table_.Register(kVisitJSArrayBuffer,
1865 
1866  table_.Register(kVisitJSTypedArray,
1868 
1869  table_.Register(kVisitJSDataView,
1871 
1872  table_.Register(kVisitJSRegExp,
1874 
1875  if (marks_handling == IGNORE_MARKS) {
1876  table_.Register(
1877  kVisitJSFunction,
1880  } else {
1881  table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1882  }
1883 
1885  kVisitDataObject, kVisitDataObjectGeneric>();
1886 
1888  kVisitJSObject, kVisitJSObjectGeneric>();
1889 
1891  kVisitStruct, kVisitStructGeneric>();
1892  }
1893 
1895  return &table_;
1896  }
1897 
1898  private:
1900 
1901  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1902  bool should_record = false;
1903 #ifdef DEBUG
1904  should_record = FLAG_heap_stats;
1905 #endif
1906  should_record = should_record || FLAG_log_gc;
1907  if (should_record) {
1908  if (heap->new_space()->Contains(obj)) {
1909  heap->new_space()->RecordAllocation(obj);
1910  } else {
1911  heap->new_space()->RecordPromotion(obj);
1912  }
1913  }
1914  }
1915 
1916  // Helper function used by CopyObject to copy a source object to an
1917  // allocated target object and update the forwarding pointer in the source
1918  // object. Returns the target object.
1919  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1920  HeapObject* target, int size)) {
1921  // If we migrate into to-space, then the to-space top pointer should be
1922  // right after the target object. Incorporate double alignment
1923  // over-allocation.
1924  DCHECK(!heap->InToSpace(target) ||
1925  target->address() + size == heap->new_space()->top() ||
1926  target->address() + size + kPointerSize == heap->new_space()->top());
1927 
1928  // Make sure that we do not overwrite the promotion queue which is at
1929  // the end of to-space.
1930  DCHECK(!heap->InToSpace(target) ||
1932  heap->new_space()->top()));
1933 
1934  // Copy the content of source to target.
1935  heap->CopyBlock(target->address(), source->address(), size);
1936 
1937  // Set the forwarding address.
1938  source->set_map_word(MapWord::FromForwardingAddress(target));
1939 
1940  if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1941  // Update NewSpace stats if necessary.
1942  RecordCopiedObject(heap, target);
1943  heap->OnMoveEvent(target, source, size);
1944  }
1945 
1946  if (marks_handling == TRANSFER_MARKS) {
1947  if (Marking::TransferColor(source, target)) {
1949  }
1950  }
1951  }
1952 
1953  template <int alignment>
1954  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1955  HeapObject* object, int object_size) {
1956  Heap* heap = map->GetHeap();
1957 
1958  int allocation_size = object_size;
1959  if (alignment != kObjectAlignment) {
1960  DCHECK(alignment == kDoubleAlignment);
1961  allocation_size += kPointerSize;
1962  }
1963 
1964  DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1965  AllocationResult allocation =
1966  heap->new_space()->AllocateRaw(allocation_size);
1967 
1968  HeapObject* target = NULL; // Initialization to please compiler.
1969  if (allocation.To(&target)) {
1970  // Order is important here: Set the promotion limit before storing a
1971  // filler for double alignment or migrating the object. Otherwise we
1972  // may end up overwriting promotion queue entries when we migrate the
1973  // object.
1974  heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1975 
1976  if (alignment != kObjectAlignment) {
1977  target = EnsureDoubleAligned(heap, target, allocation_size);
1978  }
1979 
1980  // Order is important: slot might be inside of the target if target
1981  // was allocated over a dead object and slot comes from the store
1982  // buffer.
1983  *slot = target;
1984  MigrateObject(heap, object, target, object_size);
1985 
1986  heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1987  return true;
1988  }
1989  return false;
1990  }
1991 
1992 
1993  template <ObjectContents object_contents, int alignment>
1994  static inline bool PromoteObject(Map* map, HeapObject** slot,
1995  HeapObject* object, int object_size) {
1996  Heap* heap = map->GetHeap();
1997 
1998  int allocation_size = object_size;
1999  if (alignment != kObjectAlignment) {
2000  DCHECK(alignment == kDoubleAlignment);
2001  allocation_size += kPointerSize;
2002  }
2003 
2004  AllocationResult allocation;
2005  if (object_contents == DATA_OBJECT) {
2006  DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2007  allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2008  } else {
2009  DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
 2010  allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2011  }
2012 
2013  HeapObject* target = NULL; // Initialization to please compiler.
2014  if (allocation.To(&target)) {
2015  if (alignment != kObjectAlignment) {
2016  target = EnsureDoubleAligned(heap, target, allocation_size);
2017  }
2018 
2019  // Order is important: slot might be inside of the target if target
2020  // was allocated over a dead object and slot comes from the store
2021  // buffer.
2022  *slot = target;
2023  MigrateObject(heap, object, target, object_size);
2024 
2025  if (object_contents == POINTER_OBJECT) {
2026  if (map->instance_type() == JS_FUNCTION_TYPE) {
2027  heap->promotion_queue()->insert(target,
2029  } else {
2030  heap->promotion_queue()->insert(target, object_size);
2031  }
2032  }
2033  heap->IncrementPromotedObjectsSize(object_size);
2034  return true;
2035  }
2036  return false;
2037  }
2038 
2039 
2040  template <ObjectContents object_contents, int alignment>
2041  static inline void EvacuateObject(Map* map, HeapObject** slot,
2042  HeapObject* object, int object_size) {
2044  SLOW_DCHECK(object->Size() == object_size);
2045  Heap* heap = map->GetHeap();
2046 
2047  if (!heap->ShouldBePromoted(object->address(), object_size)) {
2048  // A semi-space copy may fail due to fragmentation. In that case, we
2049  // try to promote the object.
2050  if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2051  return;
2052  }
2053  }
2054 
2055  if (PromoteObject<object_contents, alignment>(map, slot, object,
2056  object_size)) {
2057  return;
2058  }
2059 
2060  // If promotion failed, we try to copy the object to the other semi-space.
2061  if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2062 
2063  UNREACHABLE();
2064  }
2065 
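// Illustrative sketch (plain booleans instead of the real allocation calls):
// the fallback order implemented by EvacuateObject above. Objects that have
// not yet survived long enough are first copied within new space; if that
// fails, or if the object should be promoted, an old-space allocation is
// attempted; a failed promotion falls back to the semi-space copy once more
// before the collector gives up.
#include <cassert>

enum class EvacuationResult { kCopiedInNewSpace, kPromoted };

EvacuationResult Evacuate(bool should_be_promoted, bool semi_space_copy_ok,
                          bool promotion_ok) {
  if (!should_be_promoted && semi_space_copy_ok)
    return EvacuationResult::kCopiedInNewSpace;
  if (promotion_ok) return EvacuationResult::kPromoted;
  // Promotion can fail when old space is fragmented; the copy into the other
  // semi-space is then expected to succeed.
  assert(semi_space_copy_ok);
  return EvacuationResult::kCopiedInNewSpace;
}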
2066 
2067  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2068  HeapObject* object) {
2070  JSFunction::kSize>(map, slot, object);
2071 
2072  MapWord map_word = object->map_word();
2073  DCHECK(map_word.IsForwardingAddress());
2074  HeapObject* target = map_word.ToForwardingAddress();
2075 
2076  MarkBit mark_bit = Marking::MarkBitFrom(target);
2077  if (Marking::IsBlack(mark_bit)) {
2078  // This object is black and it might not be rescanned by marker.
2079  // We should explicitly record code entry slot for compaction because
2080  // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2081  // miss it as it is not HeapObject-tagged.
2082  Address code_entry_slot =
2084  Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2085  map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2086  code_entry_slot, code);
2087  }
2088  }
2089 
2090 
2091  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2092  HeapObject* object) {
2093  int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2094  EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2095  object_size);
2096  }
2097 
2098 
2099  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2100  HeapObject* object) {
2101  int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2102  int object_size = FixedDoubleArray::SizeFor(length);
2103  EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2104  object_size);
2105  }
2106 
2107 
2108  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2109  HeapObject* object) {
2110  int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2111  EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2112  object_size);
2113  }
2114 
2115 
2116  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2117  HeapObject* object) {
2118  int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2119  EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2120  object_size);
2121  }
2122 
2123 
2124  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2125  HeapObject* object) {
2126  int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2127  EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2128  object_size);
2129  }
2130 
2131 
2132  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2133  HeapObject* object) {
2134  int object_size = SeqOneByteString::cast(object)
2135  ->SeqOneByteStringSize(map->instance_type());
2136  EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2137  object_size);
2138  }
2139 
2140 
2141  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2142  HeapObject* object) {
2143  int object_size = SeqTwoByteString::cast(object)
2144  ->SeqTwoByteStringSize(map->instance_type());
2145  EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2146  object_size);
2147  }
2148 
2149 
2150  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2151  HeapObject* object) {
2152  DCHECK(IsShortcutCandidate(map->instance_type()));
2153 
2154  Heap* heap = map->GetHeap();
2155 
2156  if (marks_handling == IGNORE_MARKS &&
2157  ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2158  HeapObject* first =
2159  HeapObject::cast(ConsString::cast(object)->unchecked_first());
2160 
2161  *slot = first;
2162 
2163  if (!heap->InNewSpace(first)) {
2164  object->set_map_word(MapWord::FromForwardingAddress(first));
2165  return;
2166  }
2167 
2168  MapWord first_word = first->map_word();
2169  if (first_word.IsForwardingAddress()) {
2170  HeapObject* target = first_word.ToForwardingAddress();
2171 
2172  *slot = target;
2173  object->set_map_word(MapWord::FromForwardingAddress(target));
2174  return;
2175  }
2176 
2177  heap->DoScavengeObject(first->map(), slot, first);
2178  object->set_map_word(MapWord::FromForwardingAddress(*slot));
2179  return;
2180  }
2181 
2182  int object_size = ConsString::kSize;
2183  EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2184  object_size);
2185  }
2186 
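// Illustrative sketch (hypothetical types, not the real String hierarchy):
// the short-circuit applied by EvacuateShortcutCandidate above. A cons string
// whose right-hand side is the shared empty string stands for its left-hand
// side, so the referencing slot can be rewritten to the first component and
// the cons cell itself never needs to be evacuated.
#include <string>

struct MockConsString {
  const std::string* first;
  const std::string* second;
};

// Returns the string the slot should end up pointing at when the shortcut
// applies, or null when the whole cons must be evacuated as-is.
const std::string* Shortcut(const MockConsString& cons,
                            const std::string* empty_string) {
  return cons.second == empty_string ? cons.first : nullptr;
}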
2187  template <ObjectContents object_contents>
2189  public:
2190  template <int object_size>
2191  static inline void VisitSpecialized(Map* map, HeapObject** slot,
2192  HeapObject* object) {
2193  EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2194  object_size);
2195  }
2196 
2197  static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2198  int object_size = map->instance_size();
2199  EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2200  object_size);
2201  }
2202  };
2203 
2205 };
2206 
2207 
2208 template <MarksHandling marks_handling,
2209  LoggingAndProfiling logging_and_profiling_mode>
2212 
2213 
2216  LOGGING_AND_PROFILING_DISABLED>::Initialize();
2219  LOGGING_AND_PROFILING_ENABLED>::Initialize();
2221 }
2222 
2223 
2225  bool logging_and_profiling =
2226  FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2227  isolate()->cpu_profiler()->is_profiling() ||
2228  (isolate()->heap_profiler() != NULL &&
2230 
2231  if (!incremental_marking()->IsMarking()) {
2232  if (!logging_and_profiling) {
2235  } else {
2238  }
2239  } else {
2240  if (!logging_and_profiling) {
2243  } else {
2246  }
2247 
2248  if (incremental_marking()->IsCompacting()) {
2249  // When compacting forbid short-circuiting of cons-strings.
2250  // Scavenging code relies on the fact that new space object
2251  // can't be evacuated into evacuation candidate but
2252  // short-circuiting violates this assumption.
2254  StaticVisitorBase::kVisitShortcutCandidate,
2256  StaticVisitorBase::kVisitConsString));
2257  }
2258  }
2259 }
2260 
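// Illustrative sketch of the selection performed above: the scavenging
// visitor table is chosen along two independent axes, whether incremental
// marking is active (marks must be transferred to copied objects) and whether
// logging or profiling is enabled (object moves must be reported). The enum
// and function names here are placeholders, not V8 API.
enum class MarksHandlingChoice { kIgnore, kTransfer };
enum class LoggingChoice { kDisabled, kEnabled };

struct VisitorTableChoice {
  MarksHandlingChoice marks;
  LoggingChoice logging;
};

VisitorTableChoice SelectTable(bool incremental_marking_active,
                               bool logging_or_profiling) {
  return {incremental_marking_active ? MarksHandlingChoice::kTransfer
                                     : MarksHandlingChoice::kIgnore,
          logging_or_profiling ? LoggingChoice::kEnabled
                               : LoggingChoice::kDisabled};
}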
2261 
2263  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2264  MapWord first_word = object->map_word();
2265  SLOW_DCHECK(!first_word.IsForwardingAddress());
2266  Map* map = first_word.ToMap();
2267  map->GetHeap()->DoScavengeObject(map, p, object);
2268 }
2269 
2270 
2272  int instance_size) {
2273  Object* result;
2275  if (!allocation.To(&result)) return allocation;
2276 
2277  // Map::cast cannot be used due to uninitialized map field.
2278  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2279  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2280  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2281  reinterpret_cast<Map*>(result)->set_visitor_id(
2282  StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2283  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2284  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2285  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2286  reinterpret_cast<Map*>(result)->set_bit_field(0);
2287  reinterpret_cast<Map*>(result)->set_bit_field2(0);
2290  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2291  return result;
2292 }
2293 
2294 
2296  int instance_size,
2297  ElementsKind elements_kind) {
2298  HeapObject* result;
2300  if (!allocation.To(&result)) return allocation;
2301 
2302  result->set_map_no_write_barrier(meta_map());
2303  Map* map = Map::cast(result);
2304  map->set_instance_type(instance_type);
2305  map->set_visitor_id(
2306  StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2307  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2308  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2309  map->set_instance_size(instance_size);
2310  map->set_inobject_properties(0);
2311  map->set_pre_allocated_property_fields(0);
2312  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2313  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2315  map->init_back_pointer(undefined_value());
2316  map->set_unused_property_fields(0);
2317  map->set_instance_descriptors(empty_descriptor_array());
2318  map->set_bit_field(0);
2319  map->set_bit_field2(1 << Map::kIsExtensible);
2322  map->set_bit_field3(bit_field3);
2323  map->set_elements_kind(elements_kind);
2324 
2325  return map;
2326 }
2327 
2328 
2331  HeapObject* obj;
2332  {
2333  AllocationResult allocation = AllocateRaw(size, space, space);
2334  if (!allocation.To(&obj)) return allocation;
2335  }
2336 #ifdef DEBUG
2337  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2338  DCHECK(chunk->owner()->identity() == space);
2339 #endif
2341  return obj;
2342 }
2343 
2344 
2346 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2347  { type, size, k##camel_name##MapRootIndex } \
2348  ,
2350 #undef STRING_TYPE_ELEMENT
2351 };
2352 
2353 
2354 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2355 #define CONSTANT_STRING_ELEMENT(name, contents) \
2356  { contents, k##name##RootIndex } \
2357  ,
2359 #undef CONSTANT_STRING_ELEMENT
2360 };
2361 
2362 
2363 const Heap::StructTable Heap::struct_table[] = {
2364 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2365  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2366  ,
2368 #undef STRUCT_TABLE_ELEMENT
2369 };
2370 
2371 
2373  HeapObject* obj;
2374  {
2376  if (!allocation.To(&obj)) return false;
2377  }
2378  // Map::cast cannot be used due to uninitialized map field.
2379  Map* new_meta_map = reinterpret_cast<Map*>(obj);
2380  set_meta_map(new_meta_map);
2381  new_meta_map->set_map(new_meta_map);
2382 
2383  { // Partial map allocation
2384 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2385  { \
2386  Map* map; \
2387  if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2388  set_##field_name##_map(map); \
2389  }
2390 
2395  constant_pool_array);
2396 
2397 #undef ALLOCATE_PARTIAL_MAP
2398  }
2399 
2400  // Allocate the empty array.
2401  {
2403  if (!allocation.To(&obj)) return false;
2404  }
2405  set_empty_fixed_array(FixedArray::cast(obj));
2406 
2407  {
2408  AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2409  if (!allocation.To(&obj)) return false;
2410  }
2411  set_null_value(Oddball::cast(obj));
2412  Oddball::cast(obj)->set_kind(Oddball::kNull);
2413 
2414  {
2415  AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2416  if (!allocation.To(&obj)) return false;
2417  }
2418  set_undefined_value(Oddball::cast(obj));
2419  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2420  DCHECK(!InNewSpace(undefined_value()));
2421 
2422  // Set preliminary exception sentinel value before actually initializing it.
2423  set_exception(null_value());
2424 
2425  // Allocate the empty descriptor array.
2426  {
2428  if (!allocation.To(&obj)) return false;
2429  }
2430  set_empty_descriptor_array(DescriptorArray::cast(obj));
2431 
2432  // Allocate the constant pool array.
2433  {
2435  if (!allocation.To(&obj)) return false;
2436  }
2437  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2438 
2439  // Fix the instance_descriptors for the existing maps.
2440  meta_map()->set_code_cache(empty_fixed_array());
2441  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2442  meta_map()->init_back_pointer(undefined_value());
2443  meta_map()->set_instance_descriptors(empty_descriptor_array());
2444 
2445  fixed_array_map()->set_code_cache(empty_fixed_array());
2446  fixed_array_map()->set_dependent_code(
2447  DependentCode::cast(empty_fixed_array()));
2448  fixed_array_map()->init_back_pointer(undefined_value());
2449  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2450 
2451  undefined_map()->set_code_cache(empty_fixed_array());
2452  undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453  undefined_map()->init_back_pointer(undefined_value());
2454  undefined_map()->set_instance_descriptors(empty_descriptor_array());
2455 
2456  null_map()->set_code_cache(empty_fixed_array());
2457  null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2458  null_map()->init_back_pointer(undefined_value());
2459  null_map()->set_instance_descriptors(empty_descriptor_array());
2460 
2461  constant_pool_array_map()->set_code_cache(empty_fixed_array());
2462  constant_pool_array_map()->set_dependent_code(
2463  DependentCode::cast(empty_fixed_array()));
2464  constant_pool_array_map()->init_back_pointer(undefined_value());
2465  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2466 
2467  // Fix prototype object for existing maps.
2468  meta_map()->set_prototype(null_value());
2469  meta_map()->set_constructor(null_value());
2470 
2471  fixed_array_map()->set_prototype(null_value());
2472  fixed_array_map()->set_constructor(null_value());
2473 
2474  undefined_map()->set_prototype(null_value());
2475  undefined_map()->set_constructor(null_value());
2476 
2477  null_map()->set_prototype(null_value());
2478  null_map()->set_constructor(null_value());
2479 
2480  constant_pool_array_map()->set_prototype(null_value());
2481  constant_pool_array_map()->set_constructor(null_value());
2482 
2483  { // Map allocation
2484 #define ALLOCATE_MAP(instance_type, size, field_name) \
2485  { \
2486  Map* map; \
2487  if (!AllocateMap((instance_type), size).To(&map)) return false; \
2488  set_##field_name##_map(map); \
2489  }
2490 
2491 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2492  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2493 
2494  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2495  DCHECK(fixed_array_map() != fixed_cow_array_map());
2496 
2500  mutable_heap_number)
2503 
2506  ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2507  ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2508  ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2510  ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2511 
2512  for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2513  const StringTypeTable& entry = string_type_table[i];
2514  {
2515  AllocationResult allocation = AllocateMap(entry.type, entry.size);
2516  if (!allocation.To(&obj)) return false;
2517  }
2518  // Mark cons string maps as unstable, because their objects can change
2519  // maps during GC.
2520  Map* map = Map::cast(obj);
2521  if (StringShape(entry.type).IsCons()) map->mark_unstable();
2522  roots_[entry.index] = map;
2523  }
2524 
2525  ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2526  undetectable_string_map()->set_is_undetectable();
2527 
2528  ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
2529  undetectable_one_byte_string_map()->set_is_undetectable();
2530 
2531  ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2534 
2535 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2536  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2537  external_##type##_array)
2538 
2540 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2541 
2542 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2543  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2544 
2546 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2547 
2548  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2549 
2551 
2553  ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2554  ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2555  ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2556 
2557 
2558  for (unsigned i = 0; i < arraysize(struct_table); i++) {
2559  const StructTable& entry = struct_table[i];
2560  Map* map;
2561  if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2562  roots_[entry.index] = map;
2563  }
2564 
2566  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2567 
2568  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2569  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2570  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2571  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2572  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2573  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2574 
2575  ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2576  native_context_map()->set_dictionary_map(true);
2577  native_context_map()->set_visitor_id(
2578  StaticVisitorBase::kVisitNativeContext);
2579 
2581  shared_function_info)
2582 
2585  external_map()->set_is_extensible(false);
2586 #undef ALLOCATE_VARSIZE_MAP
2587 #undef ALLOCATE_MAP
2588  }
2589 
2590  { // Empty arrays
2591  {
2592  ByteArray* byte_array;
2593  if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2594  set_empty_byte_array(byte_array);
2595  }
2596 
2597 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
2598  { \
2599  ExternalArray* obj; \
2600  if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2601  return false; \
2602  set_empty_external_##type##_array(obj); \
2603  }
2604 
2606 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2607 
2608 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2609  { \
2610  FixedTypedArrayBase* obj; \
2611  if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2612  return false; \
2613  set_empty_fixed_##type##_array(obj); \
2614  }
2615 
2617 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2618  }
2619  DCHECK(!InNewSpace(empty_fixed_array()));
2620  return true;
2621 }
2622 
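// Illustrative sketch of the ALLOCATE_MAP / ALLOCATE_PARTIAL_MAP pattern used
// in CreateInitialMaps above: a block-shaped macro that bails out of the
// enclosing bool-returning function when an allocation fails and otherwise
// installs the result through a setter whose name is pasted from the field
// name. Everything below (DemoHeap, TryAllocateMap, set_demo_map) is a
// placeholder, not V8 API.
struct DemoHeap {
  int demo_map = 0;

  bool TryAllocateMap(int size, int* out) {
    *out = size;  // Stand-in for a real allocation.
    return true;
  }
  void set_demo_map(int map) { demo_map = map; }

#define DEMO_ALLOCATE_MAP(size, field_name)          \
  {                                                  \
    int map;                                         \
    if (!TryAllocateMap((size), &map)) return false; \
    set_##field_name##_map(map);                     \
  }

  bool CreateMaps() {
    DEMO_ALLOCATE_MAP(16, demo)  // Expands to an allocate-or-return-false block.
    return true;
  }
#undef DEMO_ALLOCATE_MAP
};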
2623 
2625  PretenureFlag pretenure) {
2626  // Statically ensure that it is safe to allocate heap numbers in paged
2627  // spaces.
2628  int size = HeapNumber::kSize;
2630 
2632 
2633  HeapObject* result;
2634  {
2636  if (!allocation.To(&result)) return allocation;
2637  }
2638 
2639  Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2640  HeapObject::cast(result)->set_map_no_write_barrier(map);
2641  HeapNumber::cast(result)->set_value(value);
2642  return result;
2643 }
2644 
2645 
2647  int size = Cell::kSize;
2649 
2650  HeapObject* result;
2651  {
2653  if (!allocation.To(&result)) return allocation;
2654  }
2655  result->set_map_no_write_barrier(cell_map());
2656  Cell::cast(result)->set_value(value);
2657  return result;
2658 }
2659 
2660 
2662  int size = PropertyCell::kSize;
2664 
2665  HeapObject* result;
2666  AllocationResult allocation =
2668  if (!allocation.To(&result)) return allocation;
2669 
2670  result->set_map_no_write_barrier(global_property_cell_map());
2671  PropertyCell* cell = PropertyCell::cast(result);
2672  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2674  cell->set_value(the_hole_value());
2675  cell->set_type(HeapType::None());
2676  return result;
2677 }
2678 
2679 
2681  HandleScope scope(isolate());
2682  Factory* factory = isolate()->factory();
2683  Handle<Map> new_neander_map =
2684  factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2685 
2686  // Don't use Smi-only elements optimizations for objects with the neander
2687  // map. There are too many cases where element values are set directly, with
 2688  // no bottleneck at which to trap the Smi-only -> fast elements transition,
 2689  // and there appears to be no benefit in optimizing this case.
2690  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2691  set_neander_map(*new_neander_map);
2692 
2693  Handle<JSObject> listeners = factory->NewNeanderObject();
2694  Handle<FixedArray> elements = factory->NewFixedArray(2);
2695  elements->set(0, Smi::FromInt(0));
2696  listeners->set_elements(*elements);
2697  set_message_listeners(*listeners);
2698 }
2699 
2700 
2701 void Heap::CreateJSEntryStub() {
2702  JSEntryStub stub(isolate(), StackFrame::ENTRY);
2703  set_js_entry_code(*stub.GetCode());
2704 }
2705 
2706 
2707 void Heap::CreateJSConstructEntryStub() {
2708  JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2709  set_js_construct_entry_code(*stub.GetCode());
2710 }
2711 
2712 
2714  // Here we create roots for fixed stubs. They are needed at GC
2715  // for cooking and uncooking (check out frames.cc).
2716  // This eliminates the need for a dictionary lookup in the
2717  // stub cache for these stubs.
2718  HandleScope scope(isolate());
2719 
2720  // Create stubs that should be there, so we don't unexpectedly have to
2721  // create them if we need them during the creation of another stub.
2722  // Stub creation mixes raw pointers and handles in an unsafe manner so
2723  // we cannot create stubs while we are creating stubs.
2724  CodeStub::GenerateStubsAheadOfTime(isolate());
2725 
2726  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2727  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2728  // is created.
2729 
2730  // gcc-4.4 has a problem generating correct code for the following snippet:
2731  // { JSEntryStub stub;
2732  // js_entry_code_ = *stub.GetCode();
2733  // }
2734  // { JSConstructEntryStub stub;
2735  // js_construct_entry_code_ = *stub.GetCode();
2736  // }
2737  // To work around the problem, make separate functions without inlining.
2738  Heap::CreateJSEntryStub();
2739  Heap::CreateJSConstructEntryStub();
2740 }
2741 
2742 
2744  HandleScope scope(isolate());
2745  Factory* factory = isolate()->factory();
2746 
2747  // The -0 value must be set before NewNumber works.
2748  set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2749  DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2750 
2751  set_nan_value(
2752  *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2753  set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2754 
2755  // The hole has not been created yet, but we want to put something
2756  // predictable in the gaps in the string table, so let's make that Smi zero.
2757  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2758 
2759  // Allocate initial string table.
2760  set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2761 
2762  // Finish initializing oddballs after creating the string table.
2763  Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2764  factory->nan_value(), Oddball::kUndefined);
2765 
2766  // Initialize the null_value.
2767  Oddball::Initialize(isolate(), factory->null_value(), "null",
2769 
2770  set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2771  handle(Smi::FromInt(1), isolate()),
2772  Oddball::kTrue));
2773 
2774  set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2775  handle(Smi::FromInt(0), isolate()),
2776  Oddball::kFalse));
2777 
2778  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2779  handle(Smi::FromInt(-1), isolate()),
2781 
2782  set_uninitialized_value(*factory->NewOddball(
2783  factory->uninitialized_map(), "uninitialized",
2785 
2786  set_arguments_marker(*factory->NewOddball(
2787  factory->arguments_marker_map(), "arguments_marker",
2789 
2790  set_no_interceptor_result_sentinel(*factory->NewOddball(
2791  factory->no_interceptor_result_sentinel_map(),
2792  "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2793  Oddball::kOther));
2794 
2795  set_termination_exception(*factory->NewOddball(
2796  factory->termination_exception_map(), "termination_exception",
2798 
2799  set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2800  handle(Smi::FromInt(-5), isolate()),
2802 
2803  for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2804  Handle<String> str =
2805  factory->InternalizeUtf8String(constant_string_table[i].contents);
2807  }
2808 
2809  // Allocate the hidden string which is used to identify the hidden properties
2810  // in JSObjects. The hash code has a special value so that it will not match
2811  // the empty string when searching for the property. It cannot be part of the
2812  // loop above because it needs to be allocated manually with the special
2813  // hash code in place. The hash code for the hidden_string is zero to ensure
2814  // that it will always be at the first entry in property descriptors.
2815  hidden_string_ = *factory->NewOneByteInternalizedString(
2817 
2818  // Create the code_stubs dictionary. The initial size is set to avoid
2819  // expanding the dictionary during bootstrapping.
2820  set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2821 
2822  // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2823  // is set to avoid expanding the dictionary during bootstrapping.
2824  set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2825 
2826  set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2827  *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2828 
2829  set_instanceof_cache_function(Smi::FromInt(0));
2830  set_instanceof_cache_map(Smi::FromInt(0));
2831  set_instanceof_cache_answer(Smi::FromInt(0));
2832 
2833  CreateFixedStubs();
2834 
2835  // Allocate the dictionary of intrinsic function names.
2836  Handle<NameDictionary> intrinsic_names =
2837  NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2839  set_intrinsic_function_names(*intrinsic_names);
2840 
2841  set_number_string_cache(
2842  *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2843 
2844  // Allocate cache for single character one byte strings.
2845  set_single_character_string_cache(
2846  *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2847 
2848  // Allocate cache for string split and regexp-multiple.
2849  set_string_split_cache(*factory->NewFixedArray(
2851  set_regexp_multiple_cache(*factory->NewFixedArray(
2853 
2854  // Allocate cache for external strings pointing to native source code.
2855  set_natives_source_cache(
2856  *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2857 
2858  set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2859 
2860  // The symbol registry is initialized lazily.
2861  set_symbol_registry(undefined_value());
2862 
2863  // Allocate object to hold object observation state.
2864  set_observation_state(*factory->NewJSObjectFromMap(
2865  factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2866 
2867  // Microtask queue uses the empty fixed array as a sentinel for "empty".
2868  // Number of queued microtasks stored in Isolate::pending_microtask_count().
2869  set_microtask_queue(empty_fixed_array());
2870 
2871  set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2872  set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
2873  set_frozen_symbol(*factory->NewPrivateOwnSymbol());
2874  set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
2875  set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
2876  set_generic_symbol(*factory->NewPrivateOwnSymbol());
2877  set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
2878  set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
2879  set_observed_symbol(*factory->NewPrivateOwnSymbol());
2880  set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2881  set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
2882  set_home_object_symbol(*factory->NewPrivateOwnSymbol());
2883 
2884  Handle<SeededNumberDictionary> slow_element_dictionary =
2886  slow_element_dictionary->set_requires_slow_elements();
2887  set_empty_slow_element_dictionary(*slow_element_dictionary);
2888 
2889  set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2890 
2891  // Handling of script id generation is in Factory::NewScript.
2892  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2893 
2894  set_allocation_sites_scratchpad(
2895  *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2897 
2898  // Initialize keyed lookup cache.
2900 
2901  // Initialize context slot cache.
2903 
2904  // Initialize descriptor cache.
2906 
2907  // Initialize compilation cache.
2909 }
2910 
2911 
2913  RootListIndex writable_roots[] = {
2914  kStoreBufferTopRootIndex,
2915  kStackLimitRootIndex,
2916  kNumberStringCacheRootIndex,
2917  kInstanceofCacheFunctionRootIndex,
2918  kInstanceofCacheMapRootIndex,
2919  kInstanceofCacheAnswerRootIndex,
2920  kCodeStubsRootIndex,
2921  kNonMonomorphicCacheRootIndex,
2922  kPolymorphicCodeCacheRootIndex,
2923  kLastScriptIdRootIndex,
2924  kEmptyScriptRootIndex,
2925  kRealStackLimitRootIndex,
2926  kArgumentsAdaptorDeoptPCOffsetRootIndex,
2927  kConstructStubDeoptPCOffsetRootIndex,
2928  kGetterStubDeoptPCOffsetRootIndex,
2929  kSetterStubDeoptPCOffsetRootIndex,
2931  };
2932 
2933  for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
2934  if (root_index == writable_roots[i]) return true;
2935  }
2936  return false;
2937 }
2938 
2939 
2941  return !RootCanBeWrittenAfterInitialization(root_index) &&
2942  !InNewSpace(roots_array_start()[root_index]);
2943 }
2944 
2945 
2947  Object* key_pattern, ResultsCacheType type) {
2948  FixedArray* cache;
2949  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2950  if (type == STRING_SPLIT_SUBSTRINGS) {
2951  DCHECK(key_pattern->IsString());
2952  if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2953  cache = heap->string_split_cache();
2954  } else {
2956  DCHECK(key_pattern->IsFixedArray());
2957  cache = heap->regexp_multiple_cache();
2958  }
2959 
2960  uint32_t hash = key_string->Hash();
2961  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2963  if (cache->get(index + kStringOffset) == key_string &&
2964  cache->get(index + kPatternOffset) == key_pattern) {
2965  return cache->get(index + kArrayOffset);
2966  }
2967  index =
2969  if (cache->get(index + kStringOffset) == key_string &&
2970  cache->get(index + kPatternOffset) == key_pattern) {
2971  return cache->get(index + kArrayOffset);
2972  }
2973  return Smi::FromInt(0);
2974 }
2975 
2976 
2978  Handle<Object> key_pattern,
2979  Handle<FixedArray> value_array,
2980  ResultsCacheType type) {
2981  Factory* factory = isolate->factory();
2982  Handle<FixedArray> cache;
2983  if (!key_string->IsInternalizedString()) return;
2984  if (type == STRING_SPLIT_SUBSTRINGS) {
2985  DCHECK(key_pattern->IsString());
2986  if (!key_pattern->IsInternalizedString()) return;
2987  cache = factory->string_split_cache();
2988  } else {
2990  DCHECK(key_pattern->IsFixedArray());
2991  cache = factory->regexp_multiple_cache();
2992  }
2993 
2994  uint32_t hash = key_string->Hash();
2995  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
2997  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
2998  cache->set(index + kStringOffset, *key_string);
2999  cache->set(index + kPatternOffset, *key_pattern);
3000  cache->set(index + kArrayOffset, *value_array);
3001  } else {
3002  uint32_t index2 =
3004  if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3005  cache->set(index2 + kStringOffset, *key_string);
3006  cache->set(index2 + kPatternOffset, *key_pattern);
3007  cache->set(index2 + kArrayOffset, *value_array);
3008  } else {
3009  cache->set(index2 + kStringOffset, Smi::FromInt(0));
3010  cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3011  cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3012  cache->set(index + kStringOffset, *key_string);
3013  cache->set(index + kPatternOffset, *key_pattern);
3014  cache->set(index + kArrayOffset, *value_array);
3015  }
3016  }
3017  // If the array is a reasonably short list of substrings, convert it into a
3018  // list of internalized strings.
3019  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3020  for (int i = 0; i < value_array->length(); i++) {
3021  Handle<String> str(String::cast(value_array->get(i)), isolate);
3022  Handle<String> internalized_str = factory->InternalizeString(str);
3023  value_array->set(i, *internalized_str);
3024  }
3025  }
3026  // Convert backing store to a copy-on-write array.
3027  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3028 }
3029 
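// Illustrative sketch of the two-probe, direct-mapped layout used by
// RegExpResultsCache above (the exact index expressions are elided in this
// listing, so the constants and masking below are assumptions): each logical
// entry occupies a group of consecutive slots, a lookup checks the primary
// group and one secondary group, and an insert that finds both occupied
// clears the secondary group before reusing the primary one.
#include <cstdint>

constexpr uint32_t kCacheSize = 256;     // Backing array length, power of two.
constexpr uint32_t kSlotsPerEntry = 4;   // e.g. string, pattern, result, spare.

uint32_t PrimaryIndex(uint32_t hash) {
  return (hash & (kCacheSize - 1)) & ~(kSlotsPerEntry - 1);
}

uint32_t SecondaryIndex(uint32_t primary_index) {
  return (primary_index + kSlotsPerEntry) & (kCacheSize - 1);
}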
3030 
3032  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3033  cache->set(i, Smi::FromInt(0));
3034  }
3035 }
3036 
3037 
3039  // Compute the size of the number string cache based on the max newspace size.
3040  // The number string cache has a minimum size based on twice the initial cache
3041  // size to ensure that it is bigger after being made 'full size'.
3042  int number_string_cache_size = max_semi_space_size_ / 512;
3043  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3044  Min(0x4000, number_string_cache_size));
3045  // There is a string and a number per entry so the length is twice the number
3046  // of entries.
3047  return number_string_cache_size * 2;
3048 }
3049 
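// Worked example of the sizing rule above, assuming a 16 MB max semi-space
// and an initial cache size of 128 entries: 16 MB / 512 = 32768 candidate
// entries, clamped to Min(0x4000, 32768) = 16384, then Max(256, 16384) =
// 16384 entries, giving a backing array of 32768 slots (a number and a
// string per entry).
#include <algorithm>
#include <cstdio>

int FullSizeCacheLength(int max_semi_space_bytes, int initial_cache_entries) {
  int entries = max_semi_space_bytes / 512;
  entries = std::max(initial_cache_entries * 2, std::min(0x4000, entries));
  return entries * 2;
}

int main() {
  std::printf("%d\n", FullSizeCacheLength(16 * 1024 * 1024, 128));  // 32768
  return 0;
}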
3050 
3052  // Flush the number to string cache.
3053  int len = number_string_cache()->length();
3054  for (int i = 0; i < len; i++) {
3055  number_string_cache()->set_undefined(i);
3056  }
3057 }
3058 
3059 
3061  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3062  allocation_sites_scratchpad()->set_undefined(i);
3063  }
3065 }
3066 
3067 
3069  DCHECK(allocation_sites_scratchpad()->length() ==
3071  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3072  allocation_sites_scratchpad()->set_undefined(i);
3073  }
3074 }
3075 
3076 
3080  // We cannot use the normal write-barrier because slots need to be
3081  // recorded with non-incremental marking as well. We have to explicitly
3082  // record the slot to take evacuation candidates into account.
3083  allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3084  site, SKIP_WRITE_BARRIER);
3085  Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3087 
3088  if (mode == RECORD_SCRATCHPAD_SLOT) {
3089  // We need to allow slots buffer overflow here since the evacuation
3090  // candidates are not part of the global list of old space pages and
3091  // releasing an evacuation candidate due to a slots buffer overflow
3092  // results in lost pages.
3093  mark_compact_collector()->RecordSlot(slot, slot, *slot,
3095  }
3097  }
3098 }
3099 
3100 
3102  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3103 }
3104 
3105 
3107  ExternalArrayType array_type) {
3108  switch (array_type) {
3109 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3110  case kExternal##Type##Array: \
3111  return kExternal##Type##ArrayMapRootIndex;
3112 
3114 #undef ARRAY_TYPE_TO_ROOT_INDEX
3115 
3116  default:
3117  UNREACHABLE();
3118  return kUndefinedValueRootIndex;
3119  }
3120 }
3121 
3122 
3124  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3125 }
3126 
3127 
3129  ExternalArrayType array_type) {
3130  switch (array_type) {
3131 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3132  case kExternal##Type##Array: \
3133  return kFixed##Type##ArrayMapRootIndex;
3134 
3136 #undef ARRAY_TYPE_TO_ROOT_INDEX
3137 
3138  default:
3139  UNREACHABLE();
3140  return kUndefinedValueRootIndex;
3141  }
3142 }
3143 
3144 
3146  ElementsKind elementsKind) {
3147  switch (elementsKind) {
3148 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3149  case EXTERNAL_##TYPE##_ELEMENTS: \
3150  return kEmptyExternal##Type##ArrayRootIndex;
3151 
3153 #undef ELEMENT_KIND_TO_ROOT_INDEX
3154 
3155  default:
3156  UNREACHABLE();
3157  return kUndefinedValueRootIndex;
3158  }
3159 }
3160 
3161 
3163  ElementsKind elementsKind) {
3164  switch (elementsKind) {
3165 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3166  case TYPE##_ELEMENTS: \
3167  return kEmptyFixed##Type##ArrayRootIndex;
3168 
3170 #undef ELEMENT_KIND_TO_ROOT_INDEX
3171  default:
3172  UNREACHABLE();
3173  return kUndefinedValueRootIndex;
3174  }
3175 }
3176 
3177 
3179  return ExternalArray::cast(
3180  roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3181 }
3182 
3183 
3185  return FixedTypedArrayBase::cast(
3186  roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3187 }
3188 
3189 
3191  PretenureFlag pretenure) {
3192  // Statically ensure that it is safe to allocate foreigns in paged spaces.
3194  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3195  Foreign* result;
3196  AllocationResult allocation = Allocate(foreign_map(), space);
3197  if (!allocation.To(&result)) return allocation;
3198  result->set_foreign_address(address);
3199  return result;
3200 }
3201 
3202 
3204  if (length < 0 || length > ByteArray::kMaxLength) {
3205  v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3206  }
3207  int size = ByteArray::SizeFor(length);
3209  HeapObject* result;
3210  {
3212  if (!allocation.To(&result)) return allocation;
3213  }
3214 
3215  result->set_map_no_write_barrier(byte_array_map());
3216  ByteArray::cast(result)->set_length(length);
3217  return result;
3218 }
3219 
3220 
3222  if (size == 0) return;
3223  HeapObject* filler = HeapObject::FromAddress(addr);
3224  if (size == kPointerSize) {
3225  filler->set_map_no_write_barrier(one_pointer_filler_map());
3226  } else if (size == 2 * kPointerSize) {
3227  filler->set_map_no_write_barrier(two_pointer_filler_map());
3228  } else {
3229  filler->set_map_no_write_barrier(free_space_map());
3230  FreeSpace::cast(filler)->set_size(size);
3231  }
3232 }
3233 
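// Illustrative sketch of the size-based filler choice above, assuming a
// 64-bit build where kPointerSize == 8: a gap of exactly one word gets the
// one-pointer filler map, a gap of two words the two-pointer filler map, and
// any larger gap becomes a FreeSpace object that records its own size.
#include <cstdio>

const char* FillerKindFor(int size_in_bytes, int pointer_size = 8) {
  if (size_in_bytes == 0) return "no filler";
  if (size_in_bytes == pointer_size) return "one-pointer filler";
  if (size_in_bytes == 2 * pointer_size) return "two-pointer filler";
  return "free space (size stored in the filler itself)";
}

int main() {
  std::printf("24-byte gap: %s\n", FillerKindFor(24));
  return 0;
}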
3234 
3236  Address address = object->address();
3237  bool is_in_old_pointer_space = InOldPointerSpace(address);
3238  bool is_in_old_data_space = InOldDataSpace(address);
3239 
3240  if (lo_space()->Contains(object)) return false;
3241 
3242  Page* page = Page::FromAddress(address);
3243  // We can move the object start if:
3244  // (1) the object is not in old pointer or old data space,
3245  // (2) the page of the object was already swept,
3246  // (3) the page was already concurrently swept. This case is an optimization
3247  // for concurrent sweeping. The WasSwept predicate for concurrently swept
3248  // pages is set after sweeping all pages.
3249  return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3250  page->WasSwept() || page->SweepingCompleted();
3251 }
3252 
3253 
3255  if (incremental_marking()->IsMarking() &&
3256  Marking::IsBlack(Marking::MarkBitFrom(address))) {
3257  if (mode == FROM_GC) {
3259  } else {
3261  }
3262  }
3263 }
3264 
3265 
3267  int elements_to_trim) {
3268  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3269  const int bytes_to_trim = elements_to_trim * element_size;
3270  Map* map = object->map();
3271 
3272  // For now this trick is only applied to objects in new and paged space.
3273  // In large object space the object's start must coincide with the chunk
 3274  // start, and thus the trick is simply not applicable.
3275  DCHECK(!lo_space()->Contains(object));
3276  DCHECK(object->map() != fixed_cow_array_map());
3277 
3281 
3282  const int len = object->length();
3283  DCHECK(elements_to_trim <= len);
3284 
3285  // Calculate location of new array start.
3286  Address new_start = object->address() + bytes_to_trim;
3287 
3288  // Technically in new space this write might be omitted (except for
3289  // debug mode, which iterates through the heap), but to play it safe
3290  // we still do it.
3291  CreateFillerObjectAt(object->address(), bytes_to_trim);
3292 
3293  // Initialize header of the trimmed array. Since left trimming is only
3294  // performed on pages which are not concurrently swept creating a filler
3295  // object does not require synchronization.
3296  DCHECK(CanMoveObjectStart(object));
3297  Object** former_start = HeapObject::RawField(object, 0);
3298  int new_start_index = elements_to_trim * (element_size / kPointerSize);
3299  former_start[new_start_index] = map;
3300  former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3301  FixedArrayBase* new_object =
3302  FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3303 
3304  // Maintain consistency of live bytes during incremental marking
3305  marking()->TransferMark(object->address(), new_start);
3306  AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3307 
3308  // Notify the heap profiler of change in object layout.
3309  OnMoveEvent(new_object, object, new_object->Size());
3310  return new_object;
3311 }
3312 
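// Worked example of the left-trim bookkeeping above, assuming a FixedArray
// (pointer-sized elements) of length 10 on a 64-bit build, trimmed by 3
// elements: 24 bytes are covered by a filler at the old start, and the new
// header (map, then the length 7) is written 24 bytes further in, where the
// trimmed array now begins.
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 8;
  const int length = 10;
  const int elements_to_trim = 3;
  const int bytes_to_trim = elements_to_trim * kPointerSize;
  const uintptr_t old_start = 0x10000;  // Hypothetical object address.
  const uintptr_t new_start = old_start + bytes_to_trim;
  std::printf("filler at %#zx covering %d bytes; new array at %#zx, length %d\n",
              (size_t)old_start, bytes_to_trim, (size_t)new_start,
              length - elements_to_trim);
  return 0;
}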
3313 
3314 // Force instantiation of templatized method.
3315 template
3316 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3317 template
3318 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3319 
3320 
3321 template<Heap::InvocationMode mode>
3322 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3323  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3324  const int bytes_to_trim = elements_to_trim * element_size;
3325 
3326  // For now this trick is only applied to objects in new and paged space.
3327  DCHECK(object->map() != fixed_cow_array_map());
3328 
3329  const int len = object->length();
3330  DCHECK(elements_to_trim < len);
3331 
3332  // Calculate location of new array end.
3333  Address new_end = object->address() + object->Size() - bytes_to_trim;
3334 
3335  // Technically in new space this write might be omitted (except for
3336  // debug mode, which iterates through the heap), but to play it safe
3337  // we still do it.
3338  // We do not create a filler for objects in large object space.
3339  // TODO(hpayer): We should shrink the large object page if the size
3340  // of the object changed significantly.
3341  if (!lo_space()->Contains(object)) {
3342  CreateFillerObjectAt(new_end, bytes_to_trim);
3343  }
3344 
3345  // Initialize header of the trimmed array. We are storing the new length
3346  // using release store after creating a filler for the left-over space to
3347  // avoid races with the sweeper thread.
3348  object->synchronized_set_length(len - elements_to_trim);
3349 
3350  // Maintain consistency of live bytes during incremental marking
3351  AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3352 
3353  // Notify the heap profiler of change in object layout. The array may not be
3354  // moved during GC, and size has to be adjusted nevertheless.
3356  if (profiler->is_tracking_allocations()) {
3357  profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3358  }
3359 }
3360 
3361 
3363  ExternalArrayType array_type,
3364  void* external_pointer,
3365  PretenureFlag pretenure) {
3368  HeapObject* result;
3369  {
3371  if (!allocation.To(&result)) return allocation;
3372  }
3373 
3375  ExternalArray::cast(result)->set_length(length);
3376  ExternalArray::cast(result)->set_external_pointer(external_pointer);
3377  return result;
3378 }
3379 
3380 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3381  ElementsKind* element_kind) {
3382  switch (array_type) {
3383 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3384  case kExternal##Type##Array: \
3385  *element_size = size; \
3386  *element_kind = TYPE##_ELEMENTS; \
3387  return;
3388 
3390 #undef TYPED_ARRAY_CASE
3391 
3392  default:
3393  *element_size = 0; // Bogus
3394  *element_kind = UINT8_ELEMENTS; // Bogus
3395  UNREACHABLE();
3396  }
3397 }
3398 
3399 
3401  ExternalArrayType array_type,
3402  PretenureFlag pretenure) {
3403  int element_size;
3404  ElementsKind elements_kind;
3405  ForFixedTypedArray(array_type, &element_size, &elements_kind);
3406  int size = OBJECT_POINTER_ALIGN(length * element_size +
3408 #ifndef V8_HOST_ARCH_64_BIT
3409  if (array_type == kExternalFloat64Array) {
3410  size += kPointerSize;
3411  }
3412 #endif
3414 
3415  HeapObject* object;
3417  if (!allocation.To(&object)) return allocation;
3418 
3419  if (array_type == kExternalFloat64Array) {
3420  object = EnsureDoubleAligned(this, object, size);
3421  }
3422 
3423  object->set_map(MapForFixedTypedArray(array_type));
3424  FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3425  elements->set_length(length);
3426  memset(elements->DataPtr(), 0, elements->DataSize());
3427  return elements;
3428 }
3429 
3430 
3431 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3432  DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3433  AllocationResult allocation =
3434  AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3435 
3436  HeapObject* result;
3437  if (!allocation.To(&result)) return allocation;
3438 
3439  if (immovable) {
3440  Address address = result->address();
3441  // Code objects which should stay at a fixed address are allocated either
3442  // in the first page of code space (objects on the first page of each space
3443  // are never moved) or in large object space.
3444  if (!code_space_->FirstPage()->Contains(address) &&
3445  MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3446  // Discard the first code allocation, which was on a page where it could
3447  // be moved.
3448  CreateFillerObjectAt(result->address(), object_size);
3449  allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3450  if (!allocation.To(&result)) return allocation;
3451  OnAllocationEvent(result, object_size);
3452  }
3453  }
3454 
3455  result->set_map_no_write_barrier(code_map());
3456  Code* code = Code::cast(result);
3458  isolate_->code_range()->contains(code->address()));
3459  code->set_gc_metadata(Smi::FromInt(0));
3460  code->set_ic_age(global_ic_age_);
3461  return code;
3462 }
3463 
3464 
3466  AllocationResult allocation;
3467  HeapObject* new_constant_pool;
3468  if (FLAG_enable_ool_constant_pool &&
3469  code->constant_pool() != empty_constant_pool_array()) {
3470  // Copy the constant pool, since edits to the copied code may modify
3471  // the constant pool.
3472  allocation = CopyConstantPoolArray(code->constant_pool());
3473  if (!allocation.To(&new_constant_pool)) return allocation;
3474  } else {
3475  new_constant_pool = empty_constant_pool_array();
3476  }
3477 
3478  HeapObject* result;
3479  // Allocate an object the same size as the code object.
3480  int obj_size = code->Size();
3481  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3482  if (!allocation.To(&result)) return allocation;
3483 
3484  // Copy code object.
3485  Address old_addr = code->address();
3486  Address new_addr = result->address();
3487  CopyBlock(new_addr, old_addr, obj_size);
3488  Code* new_code = Code::cast(result);
3489 
3490  // Update the constant pool.
3491  new_code->set_constant_pool(new_constant_pool);
3492 
3493  // Relocate the copy.
3495  isolate_->code_range()->contains(code->address()));
3496  new_code->Relocate(new_addr - old_addr);
3497  return new_code;
3498 }
3499 
3500 
3502  // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3503  // do not risk leaving an uninitialized Code object (and breaking the heap).
3504  ByteArray* reloc_info_array;
3505  {
3506  AllocationResult allocation =
3507  AllocateByteArray(reloc_info.length(), TENURED);
3508  if (!allocation.To(&reloc_info_array)) return allocation;
3509  }
3510  HeapObject* new_constant_pool;
3511  if (FLAG_enable_ool_constant_pool &&
3512  code->constant_pool() != empty_constant_pool_array()) {
3513  // Copy the constant pool, since edits to the copied code may modify
3514  // the constant pool.
3516  if (!allocation.To(&new_constant_pool)) return allocation;
3517  } else {
3518  new_constant_pool = empty_constant_pool_array();
3519  }
3520 
3521  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3522 
3523  int new_obj_size = Code::SizeFor(new_body_size);
3524 
3525  Address old_addr = code->address();
3526 
3527  size_t relocation_offset =
3528  static_cast<size_t>(code->instruction_end() - old_addr);
3529 
3530  HeapObject* result;
3531  AllocationResult allocation =
3532  AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3533  if (!allocation.To(&result)) return allocation;
3534 
3535  // Copy code object.
3536  Address new_addr = result->address();
3537 
3538  // Copy header and instructions.
3539  CopyBytes(new_addr, old_addr, relocation_offset);
3540 
3541  Code* new_code = Code::cast(result);
3542  new_code->set_relocation_info(reloc_info_array);
3543 
3544  // Update constant pool.
3545  new_code->set_constant_pool(new_constant_pool);
3546 
3547  // Copy patched rinfo.
3548  CopyBytes(new_code->relocation_start(), reloc_info.start(),
3549  static_cast<size_t>(reloc_info.length()));
3550 
3551  // Relocate the copy.
3553  isolate_->code_range()->contains(code->address()));
3554  new_code->Relocate(new_addr - old_addr);
3555 
3556 #ifdef VERIFY_HEAP
3557  if (FLAG_verify_heap) code->ObjectVerify();
3558 #endif
3559  return new_code;
3560 }
3561 
3562 
3564  AllocationSite* allocation_site) {
3565  memento->set_map_no_write_barrier(allocation_memento_map());
3566  DCHECK(allocation_site->map() == allocation_site_map());
3567  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3568  if (FLAG_allocation_site_pretenuring) {
3569  allocation_site->IncrementMementoCreateCount();
3570  }
3571 }
3572 
3573 
3575  AllocationSite* allocation_site) {
3577  DCHECK(map->instance_type() != MAP_TYPE);
3578  // If allocation failures are disallowed, we may allocate in a different
3579  // space when new space is full and the object is not a large object.
3580  AllocationSpace retry_space =
3581  (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3582  int size = map->instance_size();
3583  if (allocation_site != NULL) {
3585  }
3586  HeapObject* result;
3587  AllocationResult allocation = AllocateRaw(size, space, retry_space);
3588  if (!allocation.To(&result)) return allocation;
3589  // No need for write barrier since object is white and map is in old space.
3590  result->set_map_no_write_barrier(map);
3591  if (allocation_site != NULL) {
3592  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3593  reinterpret_cast<Address>(result) + map->instance_size());
3594  InitializeAllocationMemento(alloc_memento, allocation_site);
3595  }
3596  return result;
3597 }
3598 
3599 
3601  Map* map) {
3602  obj->set_properties(properties);
3603  obj->initialize_elements();
3604  // TODO(1240798): Initialize the object's body using valid initial values
3605  // according to the object's initial map. For example, if the map's
3606  // instance type is JS_ARRAY_TYPE, the length field should be initialized
3607  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3608  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3609  // verification code has to cope with (temporarily) invalid objects. See
3610  // for example, JSArray::JSArrayVerify.
3611  Object* filler;
3612  // We cannot always fill with one_pointer_filler_map because objects
3613  // created from API functions expect their internal fields to be initialized
3614  // with undefined_value.
3615  // Pre-allocated fields need to be initialized with undefined_value as well
3616  // so that object accesses before the constructor completes (e.g. in the
3617  // debugger) will not cause a crash.
3618  if (map->constructor()->IsJSFunction() &&
3619  JSFunction::cast(map->constructor())
3620  ->IsInobjectSlackTrackingInProgress()) {
3621  // We might want to shrink the object later.
3622  DCHECK(obj->GetInternalFieldCount() == 0);
3623  filler = Heap::one_pointer_filler_map();
3624  } else {
3625  filler = Heap::undefined_value();
3626  }
3627  obj->InitializeBody(map, Heap::undefined_value(), filler);
3628 }
3629 
3630 
3632  Map* map, PretenureFlag pretenure, bool allocate_properties,
3633  AllocationSite* allocation_site) {
3634  // JSFunctions should be allocated using AllocateFunction to be
3635  // properly initialized.
3636  DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3637 
3638  // Both types of global objects should be allocated using
3639  // AllocateGlobalObject to be properly initialized.
3640  DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3641  DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3642 
3643  // Allocate the backing storage for the properties.
3644  FixedArray* properties;
3645  if (allocate_properties) {
3646  int prop_size = map->InitialPropertiesLength();
3647  DCHECK(prop_size >= 0);
3648  {
3649  AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3650  if (!allocation.To(&properties)) return allocation;
3651  }
3652  } else {
3653  properties = empty_fixed_array();
3654  }
3655 
3656  // Allocate the JSObject.
3657  int size = map->instance_size();
3659  JSObject* js_obj;
3660  AllocationResult allocation = Allocate(map, space, allocation_site);
3661  if (!allocation.To(&js_obj)) return allocation;
3662 
3663  // Initialize the JSObject.
3664  InitializeJSObjectFromMap(js_obj, properties, map);
3665  DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3666  js_obj->HasFixedTypedArrayElements());
3667  return js_obj;
3668 }
3669 
3670 
3672  PretenureFlag pretenure,
3673  AllocationSite* allocation_site) {
3674  DCHECK(constructor->has_initial_map());
3675 
3676  // Allocate the object based on the constructor's initial map.
3678  constructor->initial_map(), pretenure, true, allocation_site);
3679 #ifdef DEBUG
3680  // Make sure result is NOT a global object if valid.
3681  HeapObject* obj;
3682  DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3683 #endif
3684  return allocation;
3685 }
3686 
3687 
3689  // Never used to copy functions. If functions need to be copied we
3690  // have to be careful to clear the literals array.
3691  SLOW_DCHECK(!source->IsJSFunction());
3692 
3693  // Make the clone.
3694  Map* map = source->map();
3695  int object_size = map->instance_size();
3696  HeapObject* clone;
3697 
3698  DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3699 
3701 
3702  // If we're forced to always allocate, we use the general allocation
3703  // functions which may leave us with an object in old space.
3704  if (always_allocate()) {
3705  {
3706  AllocationResult allocation =
3707  AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3708  if (!allocation.To(&clone)) return allocation;
3709  }
3710  Address clone_address = clone->address();
3711  CopyBlock(clone_address, source->address(), object_size);
3712  // Update write barrier for all fields that lie beyond the header.
3713  RecordWrites(clone_address, JSObject::kHeaderSize,
3714  (object_size - JSObject::kHeaderSize) / kPointerSize);
3715  } else {
3716  wb_mode = SKIP_WRITE_BARRIER;
3717 
3718  {
3719  int adjusted_object_size =
3720  site != NULL ? object_size + AllocationMemento::kSize : object_size;
3721  AllocationResult allocation =
3722  AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3723  if (!allocation.To(&clone)) return allocation;
3724  }
3725  SLOW_DCHECK(InNewSpace(clone));
3726  // Since we know the clone is allocated in new space, we can copy
3727  // the contents without worrying about updating the write barrier.
3728  CopyBlock(clone->address(), source->address(), object_size);
3729 
3730  if (site != NULL) {
3731  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3732  reinterpret_cast<Address>(clone) + object_size);
3733  InitializeAllocationMemento(alloc_memento, site);
3734  }
3735  }
3736 
3737  SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3738  source->GetElementsKind());
3739  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3740  FixedArray* properties = FixedArray::cast(source->properties());
3741  // Update elements if necessary.
3742  if (elements->length() > 0) {
3743  FixedArrayBase* elem;
3744  {
3745  AllocationResult allocation;
3746  if (elements->map() == fixed_cow_array_map()) {
3747  allocation = FixedArray::cast(elements);
3748  } else if (source->HasFastDoubleElements()) {
3749  allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3750  } else {
3751  allocation = CopyFixedArray(FixedArray::cast(elements));
3752  }
3753  if (!allocation.To(&elem)) return allocation;
3754  }
3755  JSObject::cast(clone)->set_elements(elem, wb_mode);
3756  }
3757  // Update properties if necessary.
3758  if (properties->length() > 0) {
3759  FixedArray* prop;
3760  {
3761  AllocationResult allocation = CopyFixedArray(properties);
3762  if (!allocation.To(&prop)) return allocation;
3763  }
3764  JSObject::cast(clone)->set_properties(prop, wb_mode);
3765  }
3766  // Return the new clone.
3767  return clone;
3768 }
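// Illustrative sketch of the write-barrier decision made in CopyJSObject
// above: a clone placed in new space cannot create an old-to-new pointer the
// store buffer needs to know about, so it is copied barrier-free, while a
// clone forced into old space must record every pointer-sized slot past the
// header. The helper below is a simplified, hypothetical model of that
// second case; record_slot stands in for the remembered-set update.
static inline void SketchRecordClonedSlots(void (*record_slot)(char*),
                                           char* clone_address,
                                           int header_size, int object_size,
                                           int pointer_size) {
  // Mirrors RecordWrites(clone_address, kHeaderSize, field_count).
  for (char* slot = clone_address + header_size;
       slot < clone_address + object_size; slot += pointer_size) {
    record_slot(slot);  // old-space clone: remember the slot for the GC
  }
}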
3769 
3770 
3771 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3772  int len) {
3773  // Only works for one byte strings.
3774  DCHECK(vector.length() == len);
3775  MemCopy(chars, vector.start(), len);
3776 }
3777 
3778 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3779  int len) {
3780  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3781  unsigned stream_length = vector.length();
3782  while (stream_length != 0) {
3783  unsigned consumed = 0;
3784  uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3786  DCHECK(consumed <= stream_length);
3787  stream_length -= consumed;
3788  stream += consumed;
3789  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3790  len -= 2;
3791  if (len < 0) break;
3792  *chars++ = unibrow::Utf16::LeadSurrogate(c);
3793  *chars++ = unibrow::Utf16::TrailSurrogate(c);
3794  } else {
3795  len -= 1;
3796  if (len < 0) break;
3797  *chars++ = c;
3798  }
3799  }
3800  DCHECK(stream_length == 0);
3801  DCHECK(len == 0);
3802 }
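// Illustrative sketch of the surrogate-pair arithmetic that
// unibrow::Utf16::LeadSurrogate/TrailSurrogate perform above for code points
// outside the BMP (c > 0xFFFF). Standalone helpers; the Sketch* names are
// hypothetical and simply restate the UTF-16 encoding rules.
#include <stdint.h>
static inline uint16_t SketchLeadSurrogate(uint32_t c) {
  // 0xD800..0xDBFF encodes the top 10 bits of (c - 0x10000).
  return static_cast<uint16_t>(0xD800 + (((c - 0x10000) >> 10) & 0x3FF));
}
static inline uint16_t SketchTrailSurrogate(uint32_t c) {
  // 0xDC00..0xDFFF encodes the low 10 bits of (c - 0x10000).
  return static_cast<uint16_t>(0xDC00 + ((c - 0x10000) & 0x3FF));
}
// Example: U+1F600 yields the pair 0xD83D, 0xDE00, which is why len is
// decremented by 2 in the loop above.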
3803 
3804 
3805 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3806  DCHECK(s->length() == len);
3807  String::WriteToFlat(s, chars, 0, len);
3808 }
3809 
3810 
3811 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3812  DCHECK(s->length() == len);
3813  String::WriteToFlat(s, chars, 0, len);
3814 }
3815 
3816 
3817 template <bool is_one_byte, typename T>
3818 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3819  uint32_t hash_field) {
3820  DCHECK(chars >= 0);
3821  // Compute map and object size.
3822  int size;
3823  Map* map;
3824 
3825  DCHECK_LE(0, chars);
3826  DCHECK_GE(String::kMaxLength, chars);
3827  if (is_one_byte) {
3828  map = one_byte_internalized_string_map();
3829  size = SeqOneByteString::SizeFor(chars);
3830  } else {
3831  map = internalized_string_map();
3832  size = SeqTwoByteString::SizeFor(chars);
3833  }
3834  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3835 
3836  // Allocate string.
3837  HeapObject* result;
3838  {
3839  AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3840  if (!allocation.To(&result)) return allocation;
3841  }
3842 
3843  result->set_map_no_write_barrier(map);
3844  // Set length and hash fields of the allocated string.
3845  String* answer = String::cast(result);
3846  answer->set_length(chars);
3847  answer->set_hash_field(hash_field);
3848 
3849  DCHECK_EQ(size, answer->Size());
3850 
3851  if (is_one_byte) {
3852  WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3853  } else {
3854  WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3855  }
3856  return answer;
3857 }
3858 
3859 
3860 // Need explicit instantiations.
3861 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3862  int,
3863  uint32_t);
3864 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3865  int,
3866  uint32_t);
3867 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3868  Vector<const char>, int, uint32_t);
3869 
3870 
3871 AllocationResult Heap::AllocateRawOneByteString(int length,
3872  PretenureFlag pretenure) {
3873  DCHECK_LE(0, length);
3874  DCHECK_GE(String::kMaxLength, length);
3875  int size = SeqOneByteString::SizeFor(length);
3878 
3879  HeapObject* result;
3880  {
3882  if (!allocation.To(&result)) return allocation;
3883  }
3884 
3885  // Partially initialize the object.
3886  result->set_map_no_write_barrier(one_byte_string_map());
3887  String::cast(result)->set_length(length);
3888  String::cast(result)->set_hash_field(String::kEmptyHashField);
3889  DCHECK_EQ(size, HeapObject::cast(result)->Size());
3890 
3891  return result;
3892 }
3893 
3894 
3895 AllocationResult Heap::AllocateRawTwoByteString(int length,
3896  PretenureFlag pretenure) {
3897  DCHECK_LE(0, length);
3898  DCHECK_GE(String::kMaxLength, length);
3899  int size = SeqTwoByteString::SizeFor(length);
3902 
3903  HeapObject* result;
3904  {
3906  if (!allocation.To(&result)) return allocation;
3907  }
3908 
3909  // Partially initialize the object.
3910  result->set_map_no_write_barrier(string_map());
3911  String::cast(result)->set_length(length);
3912  String::cast(result)->set_hash_field(String::kEmptyHashField);
3913  DCHECK_EQ(size, HeapObject::cast(result)->Size());
3914  return result;
3915 }
3916 
3917 
3918 AllocationResult Heap::AllocateEmptyFixedArray() {
3919  int size = FixedArray::SizeFor(0);
3920  HeapObject* result;
3921  {
3922  AllocationResult allocation =
3924  if (!allocation.To(&result)) return allocation;
3925  }
3926  // Initialize the object.
3927  result->set_map_no_write_barrier(fixed_array_map());
3928  FixedArray::cast(result)->set_length(0);
3929  return result;
3930 }
3931 
3932 
3933 AllocationResult Heap::AllocateEmptyExternalArray(
3934  ExternalArrayType array_type) {
3935  return AllocateExternalArray(0, array_type, NULL, TENURED);
3936 }
3937 
3938 
3939 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3940  if (!InNewSpace(src)) {
3941  return src;
3942  }
3943 
3944  int len = src->length();
3945  HeapObject* obj;
3946  {
3947  AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3948  if (!allocation.To(&obj)) return allocation;
3949  }
3950  obj->set_map_no_write_barrier(fixed_array_map());
3951  FixedArray* result = FixedArray::cast(obj);
3952  result->set_length(len);
3953 
3954  // Copy the content
3955  DisallowHeapAllocation no_gc;
3956  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3957  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3958 
3959  // TODO(mvstanton): The map is set twice because of protection against calling
3960  // set() on a COW FixedArray. Issue v8:3221 created to track this, and
3961  // we might then be able to remove this whole method.
3962  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3963  return result;
3964 }
3965 
3966 
3967 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3968  ExternalArrayType array_type) {
3969  return AllocateFixedTypedArray(0, array_type, TENURED);
3970 }
3971 
3972 
3973 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3974  int len = src->length();
3975  HeapObject* obj;
3976  {
3978  if (!allocation.To(&obj)) return allocation;
3979  }
3980  if (InNewSpace(obj)) {
3982  CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3984  return obj;
3985  }
3987  FixedArray* result = FixedArray::cast(obj);
3988  result->set_length(len);
3989 
3990  // Copy the content
3991  DisallowHeapAllocation no_gc;
3992  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3993  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3994  return result;
3995 }
3996 
3997 
3998 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3999  Map* map) {
4000  int len = src->length();
4001  HeapObject* obj;
4002  {
4004  if (!allocation.To(&obj)) return allocation;
4005  }
4010  return obj;
4011 }
4012 
4013 
4014 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4015  Map* map) {
4016  HeapObject* obj;
4017  if (src->is_extended_layout()) {
4018  ConstantPoolArray::NumberOfEntries small(src,
4020  ConstantPoolArray::NumberOfEntries extended(
4022  AllocationResult allocation =
4023  AllocateExtendedConstantPoolArray(small, extended);
4024  if (!allocation.To(&obj)) return allocation;
4025  } else {
4026  ConstantPoolArray::NumberOfEntries small(src,
4028  AllocationResult allocation = AllocateConstantPoolArray(small);
4029  if (!allocation.To(&obj)) return allocation;
4030  }
4035  return obj;
4036 }
4037 
4038 
4039 AllocationResult Heap::AllocateRawFixedArray(int length,
4040  PretenureFlag pretenure) {
4041  if (length < 0 || length > FixedArray::kMaxLength) {
4042  v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4043  }
4044  int size = FixedArray::SizeFor(length);
4046 
4048 }
4049 
4050 
4051 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4052  PretenureFlag pretenure,
4053  Object* filler) {
4054  DCHECK(length >= 0);
4055  DCHECK(empty_fixed_array()->IsFixedArray());
4056  if (length == 0) return empty_fixed_array();
4057 
4058  DCHECK(!InNewSpace(filler));
4059  HeapObject* result;
4060  {
4061  AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4062  if (!allocation.To(&result)) return allocation;
4063  }
4064 
4065  result->set_map_no_write_barrier(fixed_array_map());
4066  FixedArray* array = FixedArray::cast(result);
4067  array->set_length(length);
4068  MemsetPointer(array->data_start(), filler, length);
4069  return array;
4070 }
4071 
4072 
4073 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4074  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4075 }
4076 
4077 
4078 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4079  if (length == 0) return empty_fixed_array();
4080 
4081  HeapObject* obj;
4082  {
4083  AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4084  if (!allocation.To(&obj)) return allocation;
4085  }
4086 
4087  obj->set_map_no_write_barrier(fixed_array_map());
4088  FixedArray::cast(obj)->set_length(length);
4089  return obj;
4090 }
4091 
4092 
4093 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4094  int length, PretenureFlag pretenure) {
4095  if (length == 0) return empty_fixed_array();
4096 
4097  HeapObject* elements;
4098  AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4099  if (!allocation.To(&elements)) return allocation;
4100 
4101  elements->set_map_no_write_barrier(fixed_double_array_map());
4102  FixedDoubleArray::cast(elements)->set_length(length);
4103  return elements;
4104 }
4105 
4106 
4107 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4108  PretenureFlag pretenure) {
4109  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4110  v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4111  }
4112  int size = FixedDoubleArray::SizeFor(length);
4113 #ifndef V8_HOST_ARCH_64_BIT
4114  size += kPointerSize;
4115 #endif
4117 
4118  HeapObject* object;
4119  {
4121  if (!allocation.To(&object)) return allocation;
4122  }
4123 
4124  return EnsureDoubleAligned(this, object, size);
4125 }
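// Illustrative sketch of why the raw double-array allocators above add
// kPointerSize of slack on 32-bit targets: with 4-byte pointers the
// allocation top is only guaranteed 4-byte aligned, so one extra word lets
// EnsureDoubleAligned shift the payload up to an 8-byte boundary and plug
// the hole with a one-word filler. Standalone arithmetic only; AlignUp8 is a
// hypothetical name, not a V8 API.
#include <stdint.h>
static inline uintptr_t SketchAlignUp8(uintptr_t address) {
  return (address + 7) & ~static_cast<uintptr_t>(7);
}
// Example: an address ending in ...4 is bumped to ...8, consuming the slack
// word added to the requested size; an already-aligned address leaves the
// slack as a filler word at the end instead.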
4126 
4127 
4128 AllocationResult Heap::AllocateConstantPoolArray(
4129  const ConstantPoolArray::NumberOfEntries& small) {
4130  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4131  int size = ConstantPoolArray::SizeFor(small);
4132 #ifndef V8_HOST_ARCH_64_BIT
4133  size += kPointerSize;
4134 #endif
4136 
4137  HeapObject* object;
4138  {
4140  if (!allocation.To(&object)) return allocation;
4141  }
4142  object = EnsureDoubleAligned(this, object, size);
4143  object->set_map_no_write_barrier(constant_pool_array_map());
4144 
4145  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4146  constant_pool->Init(small);
4147  constant_pool->ClearPtrEntries(isolate());
4148  return constant_pool;
4149 }
4150 
4151 
4152 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4153  const ConstantPoolArray::NumberOfEntries& small,
4154  const ConstantPoolArray::NumberOfEntries& extended) {
4155  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4156  CHECK(extended.are_in_range(0, kMaxInt));
4157  int size = ConstantPoolArray::SizeForExtended(small, extended);
4158 #ifndef V8_HOST_ARCH_64_BIT
4159  size += kPointerSize;
4160 #endif
4162 
4163  HeapObject* object;
4164  {
4166  if (!allocation.To(&object)) return allocation;
4167  }
4168  object = EnsureDoubleAligned(this, object, size);
4169  object->set_map_no_write_barrier(constant_pool_array_map());
4170 
4171  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4172  constant_pool->InitExtended(small, extended);
4173  constant_pool->ClearPtrEntries(isolate());
4174  return constant_pool;
4175 }
4176 
4177 
4178 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4179  ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4180  int size = ConstantPoolArray::SizeFor(small);
4181  HeapObject* result;
4182  {
4183  AllocationResult allocation =
4185  if (!allocation.To(&result)) return allocation;
4186  }
4187  result->set_map_no_write_barrier(constant_pool_array_map());
4188  ConstantPoolArray::cast(result)->Init(small);
4189  return result;
4190 }
4191 
4192 
4193 AllocationResult Heap::AllocateSymbol() {
4194  // Statically ensure that it is safe to allocate symbols in paged spaces.
4196 
4197  HeapObject* result;
4198  AllocationResult allocation =
4200  if (!allocation.To(&result)) return allocation;
4201 
4202  result->set_map_no_write_barrier(symbol_map());
4203 
4204  // Generate a random hash value.
4205  int hash;
4206  int attempts = 0;
4207  do {
4208  hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4209  attempts++;
4210  } while (hash == 0 && attempts < 30);
4211  if (hash == 0) hash = 1; // never return 0
4212 
4213  Symbol::cast(result)
4214  ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4215  Symbol::cast(result)->set_name(undefined_value());
4216  Symbol::cast(result)->set_flags(Smi::FromInt(0));
4217 
4218  DCHECK(!Symbol::cast(result)->is_private());
4219  return result;
4220 }
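// Illustrative sketch of the "random but never zero" hash loop used above,
// in isolation: a bounded number of retries avoids spinning on a degenerate
// RNG, and 0 is reserved as the "hash not yet computed" sentinel, so it is
// remapped to 1. next_random and hash_mask are hypothetical stand-ins for
// the RNG and Name::kHashBitMask.
static inline int SketchNonZeroHash(int (*next_random)(), int hash_mask) {
  int hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; attempts++) {
    hash = next_random() & hash_mask;
  }
  if (hash == 0) hash = 1;  // never return 0
  return hash;
}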
4221 
4222 
4223 AllocationResult Heap::AllocateStruct(InstanceType type) {
4224  Map* map;
4225  switch (type) {
4226 #define MAKE_CASE(NAME, Name, name) \
4227  case NAME##_TYPE: \
4228  map = name##_map(); \
4229  break;
4231 #undef MAKE_CASE
4232  default:
4233  UNREACHABLE();
4234  return exception();
4235  }
4236  int size = map->instance_size();
4238  Struct* result;
4239  {
4240  AllocationResult allocation = Allocate(map, space);
4241  if (!allocation.To(&result)) return allocation;
4242  }
4243  result->InitializeBody(size);
4244  return result;
4245 }
4246 
4247 
4248 bool Heap::IsHeapIterable() {
4249  // TODO(hpayer): This function is not correct. Allocation folding in old
4250  // space breaks the iterability.
4251  return new_space_top_after_last_gc_ == new_space()->top();
4252 }
4253 
4254 
4255 void Heap::MakeHeapIterable() {
4256  DCHECK(AllowHeapAllocation::IsAllowed());
4257  if (!IsHeapIterable()) {
4258  CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4259  }
4260  if (mark_compact_collector()->sweeping_in_progress()) {
4261  mark_compact_collector()->EnsureSweepingCompleted();
4262  }
4263  DCHECK(IsHeapIterable());
4264 }
4265 
4266 
4267 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4268  incremental_marking()->Step(step_size,
4270 
4271  if (incremental_marking()->IsComplete()) {
4272  bool uncommit = false;
4274  // No GC since the last full GC, the mutator is probably not active.
4276  uncommit = true;
4277  }
4279  "idle notification: finalize incremental");
4282  if (uncommit) {
4283  new_space_.Shrink();
4285  }
4286  }
4287 }
4288 
4289 
4291  return incremental_marking()->IsStopped() &&
4293 }
4294 
4295 
4296 bool Heap::IdleNotification(int idle_time_in_ms) {
4297  // If incremental marking is off, we do not perform idle notification.
4298  if (!FLAG_incremental_marking) return true;
4299  base::ElapsedTimer timer;
4300  timer.Start();
4301  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4302  idle_time_in_ms);
4303  HistogramTimerScope idle_notification_scope(
4304  isolate_->counters()->gc_idle_notification());
4305 
4306  GCIdleTimeHandler::HeapState heap_state;
4308  heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4310  // TODO(ulan): Start incremental marking only for large heaps.
4311  heap_state.can_start_incremental_marking =
4313  heap_state.sweeping_in_progress =
4316  static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4317  heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4319  heap_state.scavenge_speed_in_bytes_per_ms =
4320  static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4322  heap_state.new_space_capacity = new_space_.Capacity();
4324  static_cast<size_t>(
4326 
4327  GCIdleTimeAction action =
4328  gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4329 
4330  bool result = false;
4331  switch (action.type) {
4332  case DONE:
4333  result = true;
4334  break;
4335  case DO_INCREMENTAL_MARKING:
4336  if (incremental_marking()->IsStopped()) {
4338  }
4340  break;
4341  case DO_FULL_GC: {
4342  HistogramTimerScope scope(isolate_->counters()->gc_context());
4343  const char* message = contexts_disposed_
4344  ? "idle notification: contexts disposed"
4345  : "idle notification: finalize idle round";
4348  break;
4349  }
4350  case DO_SCAVENGE:
4351  CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4352  break;
4353  case DO_FINALIZE_SWEEPING:
4355  break;
4356  case DO_NOTHING:
4357  break;
4358  }
4359 
4360  int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
4361  if (actual_time_ms <= idle_time_in_ms) {
4362  if (action.type != DONE && action.type != DO_NOTHING) {
4363  isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4364  idle_time_in_ms - actual_time_ms);
4365  }
4366  } else {
4367  isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4368  actual_time_ms - idle_time_in_ms);
4369  }
4370 
4371  if (FLAG_trace_idle_notification) {
4372  PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
4373  idle_time_in_ms, actual_time_ms);
4374  action.Print();
4375  PrintF("]\n");
4376  }
4377 
4378  contexts_disposed_ = 0;
4379  return result;
4380 }
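// Illustrative sketch of how an idle-time driver would typically consume the
// bool returned by Heap::IdleNotification above. The loop, the 10 ms budget
// and the has_idle_time callback are hypothetical; the only assumption taken
// from the code above is that a true return value means "no more useful idle
// work for now".
static inline void SketchDriveIdleWork(Heap* heap, bool (*has_idle_time)()) {
  const int kIdleBudgetMs = 10;
  while (has_idle_time()) {
    if (heap->IdleNotification(kIdleBudgetMs)) break;  // done for this round
  }
}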
4381 
4382 
4383 #ifdef DEBUG
4384 
4385 void Heap::Print() {
4386  if (!HasBeenSetUp()) return;
4387  isolate()->PrintStack(stdout);
4388  AllSpaces spaces(this);
4389  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4390  space->Print();
4391  }
4392 }
4393 
4394 
4395 void Heap::ReportCodeStatistics(const char* title) {
4396  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4397  PagedSpace::ResetCodeStatistics(isolate());
4398  // We do not look for code in new space, map space, or old space. If code
4399  // somehow ends up in those spaces, we would miss it here.
4400  code_space_->CollectCodeStatistics();
4401  lo_space_->CollectCodeStatistics();
4402  PagedSpace::ReportCodeStatistics(isolate());
4403 }
4404 
4405 
4406 // This function expects that NewSpace's allocated objects histogram is
4407 // populated (via a call to CollectStatistics or else as a side effect of a
4408 // just-completed scavenge collection).
4409 void Heap::ReportHeapStatistics(const char* title) {
4410  USE(title);
4411  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4412  gc_count_);
4413  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4415 
4416  PrintF("\n");
4417  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4418  isolate_->global_handles()->PrintStats();
4419  PrintF("\n");
4420 
4421  PrintF("Heap statistics : ");
4422  isolate_->memory_allocator()->ReportStatistics();
4423  PrintF("To space : ");
4425  PrintF("Old pointer space : ");
4426  old_pointer_space_->ReportStatistics();
4427  PrintF("Old data space : ");
4428  old_data_space_->ReportStatistics();
4429  PrintF("Code space : ");
4430  code_space_->ReportStatistics();
4431  PrintF("Map space : ");
4432  map_space_->ReportStatistics();
4433  PrintF("Cell space : ");
4434  cell_space_->ReportStatistics();
4435  PrintF("PropertyCell space : ");
4436  property_cell_space_->ReportStatistics();
4437  PrintF("Large object space : ");
4438  lo_space_->ReportStatistics();
4439  PrintF(">>>>>> ========================================= >>>>>>\n");
4440 }
4441 
4442 #endif // DEBUG
4443 
4444 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4445 
4446 
4448  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4449  return HasBeenSetUp() &&
4450  (new_space_.ToSpaceContains(addr) ||
4451  old_pointer_space_->Contains(addr) ||
4452  old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4453  map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4454  property_cell_space_->Contains(addr) ||
4455  lo_space_->SlowContains(addr));
4456 }
4457 
4458 
4460  return InSpace(value->address(), space);
4461 }
4462 
4463 
4465  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4466  if (!HasBeenSetUp()) return false;
4467 
4468  switch (space) {
4469  case NEW_SPACE:
4470  return new_space_.ToSpaceContains(addr);
4471  case OLD_POINTER_SPACE:
4472  return old_pointer_space_->Contains(addr);
4473  case OLD_DATA_SPACE:
4474  return old_data_space_->Contains(addr);
4475  case CODE_SPACE:
4476  return code_space_->Contains(addr);
4477  case MAP_SPACE:
4478  return map_space_->Contains(addr);
4479  case CELL_SPACE:
4480  return cell_space_->Contains(addr);
4481  case PROPERTY_CELL_SPACE:
4482  return property_cell_space_->Contains(addr);
4483  case LO_SPACE:
4484  return lo_space_->SlowContains(addr);
4485  case INVALID_SPACE:
4486  break;
4487  }
4488  UNREACHABLE();
4489  return false;
4490 }
4491 
4492 
4493 #ifdef VERIFY_HEAP
4494 void Heap::Verify() {
4495  CHECK(HasBeenSetUp());
4496  HandleScope scope(isolate());
4497 
4498  store_buffer()->Verify();
4499 
4500  if (mark_compact_collector()->sweeping_in_progress()) {
4501  // We have to wait here for the sweeper threads to have an iterable heap.
4503  }
4504 
4505  VerifyPointersVisitor visitor;
4506  IterateRoots(&visitor, VISIT_ONLY_STRONG);
4507 
4508  VerifySmisVisitor smis_visitor;
4509  IterateSmiRoots(&smis_visitor);
4510 
4511  new_space_.Verify();
4512 
4513  old_pointer_space_->Verify(&visitor);
4514  map_space_->Verify(&visitor);
4515 
4516  VerifyPointersVisitor no_dirty_regions_visitor;
4517  old_data_space_->Verify(&no_dirty_regions_visitor);
4518  code_space_->Verify(&no_dirty_regions_visitor);
4519  cell_space_->Verify(&no_dirty_regions_visitor);
4520  property_cell_space_->Verify(&no_dirty_regions_visitor);
4521 
4522  lo_space_->Verify();
4523 }
4524 #endif
4525 
4526 
4528  NewSpacePageIterator it(new_space_.FromSpaceStart(),
4530  while (it.has_next()) {
4531  NewSpacePage* page = it.next();
4532  for (Address cursor = page->area_start(), limit = page->area_end();
4533  cursor < limit; cursor += kPointerSize) {
4535  }
4536  }
4537 }
4538 
4539 
4541  ObjectSlotCallback callback) {
4542  Address slot_address = start;
4543 
4544  // We are not collecting slots on new space objects during mutation
4545  // thus we have to scan for pointers to evacuation candidates when we
4546  // promote objects. But we should not record any slots in non-black
4547  // objects. Grey object's slots would be rescanned.
4548  // A white object might not survive until the end of the collection, so
4549  // it would be a violation of the invariant to record its slots.
4550  bool record_slots = false;
4551  if (incremental_marking()->IsCompacting()) {
4552  MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4553  record_slots = Marking::IsBlack(mark_bit);
4554  }
4555 
4556  while (slot_address < end) {
4557  Object** slot = reinterpret_cast<Object**>(slot_address);
4558  Object* object = *slot;
4559  // If the store buffer becomes overfull we mark pages as being exempt from
4560  // the store buffer. These pages are scanned to find pointers that point
4561  // to the new space. In that case we may hit newly promoted objects and
4562  // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4563  if (object->IsHeapObject()) {
4564  if (Heap::InFromSpace(object)) {
4565  callback(reinterpret_cast<HeapObject**>(slot),
4566  HeapObject::cast(object));
4567  Object* new_object = *slot;
4568  if (InNewSpace(new_object)) {
4569  SLOW_DCHECK(Heap::InToSpace(new_object));
4570  SLOW_DCHECK(new_object->IsHeapObject());
4572  reinterpret_cast<Address>(slot));
4573  }
4574  SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4575  } else if (record_slots &&
4576  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4577  mark_compact_collector()->RecordSlot(slot, slot, object);
4578  }
4579  }
4580  slot_address += kPointerSize;
4581  }
4582 }
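// Illustrative sketch of the invariant the scan above maintains: whenever a
// promoted (old-space) object still points into new space after its
// referents were updated, that slot must be re-registered so the next
// scavenge can find it. The model below is hypothetical and standalone;
// in_new_space and enter_slot stand in for Heap::InNewSpace and the store
// buffer's slot-recording entry point.
static inline void SketchMaintainStoreBuffer(void** slot,
                                             bool (*in_new_space)(void*),
                                             void (*enter_slot)(void**)) {
  if (*slot != 0 && in_new_space(*slot)) {
    enter_slot(slot);  // old-to-new pointer: remember it for the scavenger
  }
}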
4583 
4584 
4585 #ifdef DEBUG
4586 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4587 
4588 
4589 bool IsAMapPointerAddress(Object** addr) {
4590  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4591  int mod = a % Map::kSize;
4592  return mod >= Map::kPointerFieldsBeginOffset &&
4594 }
4595 
4596 
4597 bool EverythingsAPointer(Object** addr) { return true; }
4598 
4599 
4600 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4601  Object**** store_buffer_position,
4602  Object*** store_buffer_top,
4603  CheckStoreBufferFilter filter,
4604  Address special_garbage_start,
4605  Address special_garbage_end) {
4606  Map* free_space_map = heap->free_space_map();
4607  for (; current < limit; current++) {
4608  Object* o = *current;
4609  Address current_address = reinterpret_cast<Address>(current);
4610  // Skip free space.
4611  if (o == free_space_map) {
4612  Address current_address = reinterpret_cast<Address>(current);
4613  FreeSpace* free_space =
4614  FreeSpace::cast(HeapObject::FromAddress(current_address));
4615  int skip = free_space->Size();
4616  DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4617  DCHECK(skip > 0);
4618  current_address += skip - kPointerSize;
4619  current = reinterpret_cast<Object**>(current_address);
4620  continue;
4621  }
4622  // Skip the current linear allocation space between top and limit which is
4623  // unmarked with the free space map, but can contain junk.
4624  if (current_address == special_garbage_start &&
4625  special_garbage_end != special_garbage_start) {
4626  current_address = special_garbage_end - kPointerSize;
4627  current = reinterpret_cast<Object**>(current_address);
4628  continue;
4629  }
4630  if (!(*filter)(current)) continue;
4631  DCHECK(current_address < special_garbage_start ||
4632  current_address >= special_garbage_end);
4633  DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4634  // We have to check that the pointer does not point into new space
4635  // without trying to cast it to a heap object since the hash field of
4636  // a string can contain values like 1 and 3 which are tagged null
4637  // pointers.
4638  if (!heap->InNewSpace(o)) continue;
4639  while (**store_buffer_position < current &&
4640  *store_buffer_position < store_buffer_top) {
4641  (*store_buffer_position)++;
4642  }
4643  if (**store_buffer_position != current ||
4644  *store_buffer_position == store_buffer_top) {
4645  Object** obj_start = current;
4646  while (!(*obj_start)->IsMap()) obj_start--;
4647  UNREACHABLE();
4648  }
4649  }
4650 }
4651 
4652 
4653 // Check that the store buffer contains all intergenerational pointers by
4654 // scanning a page and ensuring that all pointers to young space are in the
4655 // store buffer.
4656 void Heap::OldPointerSpaceCheckStoreBuffer() {
4657  OldSpace* space = old_pointer_space();
4658  PageIterator pages(space);
4659 
4660  store_buffer()->SortUniq();
4661 
4662  while (pages.has_next()) {
4663  Page* page = pages.next();
4664  Object** current = reinterpret_cast<Object**>(page->area_start());
4665 
4666  Address end = page->area_end();
4667 
4668  Object*** store_buffer_position = store_buffer()->Start();
4669  Object*** store_buffer_top = store_buffer()->Top();
4670 
4671  Object** limit = reinterpret_cast<Object**>(end);
4672  CheckStoreBuffer(this, current, limit, &store_buffer_position,
4673  store_buffer_top, &EverythingsAPointer, space->top(),
4674  space->limit());
4675  }
4676 }
4677 
4678 
4679 void Heap::MapSpaceCheckStoreBuffer() {
4680  MapSpace* space = map_space();
4681  PageIterator pages(space);
4682 
4683  store_buffer()->SortUniq();
4684 
4685  while (pages.has_next()) {
4686  Page* page = pages.next();
4687  Object** current = reinterpret_cast<Object**>(page->area_start());
4688 
4689  Address end = page->area_end();
4690 
4691  Object*** store_buffer_position = store_buffer()->Start();
4692  Object*** store_buffer_top = store_buffer()->Top();
4693 
4694  Object** limit = reinterpret_cast<Object**>(end);
4695  CheckStoreBuffer(this, current, limit, &store_buffer_position,
4696  store_buffer_top, &IsAMapPointerAddress, space->top(),
4697  space->limit());
4698  }
4699 }
4700 
4701 
4702 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4703  LargeObjectIterator it(lo_space());
4704  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4705  // We only have code, sequential strings, or fixed arrays in large
4706  // object space, and only fixed arrays can possibly contain pointers to
4707  // the young generation.
4708  if (object->IsFixedArray()) {
4709  Object*** store_buffer_position = store_buffer()->Start();
4710  Object*** store_buffer_top = store_buffer()->Top();
4711  Object** current = reinterpret_cast<Object**>(object->address());
4712  Object** limit =
4713  reinterpret_cast<Object**>(object->address() + object->Size());
4714  CheckStoreBuffer(this, current, limit, &store_buffer_position,
4715  store_buffer_top, &EverythingsAPointer, NULL, NULL);
4716  }
4717  }
4718 }
4719 #endif
4720 
4721 
4724  IterateWeakRoots(v, mode);
4725 }
4726 
4727 
4729  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4730  v->Synchronize(VisitorSynchronization::kStringTable);
4732  // Scavenge collections have special processing for this.
4734  }
4735  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4736 }
4737 
4738 
4740  // Acquire execution access since we are going to read stack limit values.
4741  ExecutionAccess access(isolate());
4742  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4743  v->Synchronize(VisitorSynchronization::kSmiRootList);
4744 }
4745 
4746 
4748  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4749  v->Synchronize(VisitorSynchronization::kStrongRootList);
4750 
4751  v->VisitPointer(bit_cast<Object**>(&hidden_string_));
4752  v->Synchronize(VisitorSynchronization::kInternalizedString);
4753 
4754  isolate_->bootstrapper()->Iterate(v);
4755  v->Synchronize(VisitorSynchronization::kBootstrapper);
4756  isolate_->Iterate(v);
4757  v->Synchronize(VisitorSynchronization::kTop);
4758  Relocatable::Iterate(isolate_, v);
4759  v->Synchronize(VisitorSynchronization::kRelocatable);
4760 
4761  if (isolate_->deoptimizer_data() != NULL) {
4763  }
4764  v->Synchronize(VisitorSynchronization::kDebug);
4766  v->Synchronize(VisitorSynchronization::kCompilationCache);
4767 
4768  // Iterate over local handles in handle scopes.
4771  v->Synchronize(VisitorSynchronization::kHandleScope);
4772 
4773  // Iterate over the builtin code objects and code stubs in the
4774  // heap. Note that it is not necessary to iterate over code objects
4775  // on scavenge collections.
4776  if (mode != VISIT_ALL_IN_SCAVENGE) {
4778  }
4779  v->Synchronize(VisitorSynchronization::kBuiltins);
4780 
4781  // Iterate over global handles.
4782  switch (mode) {
4783  case VISIT_ONLY_STRONG:
4785  break;
4786  case VISIT_ALL_IN_SCAVENGE:
4788  break;
4790  case VISIT_ALL:
4792  break;
4793  }
4794  v->Synchronize(VisitorSynchronization::kGlobalHandles);
4795 
4796  // Iterate over eternal handles.
4797  if (mode == VISIT_ALL_IN_SCAVENGE) {
4799  } else {
4801  }
4802  v->Synchronize(VisitorSynchronization::kEternalHandles);
4803 
4804  // Iterate over pointers being held by inactive threads.
4806  v->Synchronize(VisitorSynchronization::kThreadManager);
4807 
4808  // Iterate over the pointers the Serialization/Deserialization code is
4809  // holding.
4810  // During garbage collection this keeps the partial snapshot cache alive.
4811  // During deserialization of the startup snapshot this creates the partial
4812  // snapshot cache and deserializes the objects it refers to. During
4813  // serialization this does nothing, since the partial snapshot cache is
4814  // empty. However the next thing we do is create the partial snapshot,
4815  // filling up the partial snapshot cache with objects it needs as we go.
4817  // We don't do a v->Synchronize call here, because in debug mode that will
4818  // output a flag to the snapshot. However at this point the serializer and
4819  // deserializer are deliberately a little unsynchronized (see above) so the
4820  // checking of the sync flag in the snapshot would fail.
4821 }
4822 
4823 
4824 // TODO(1236194): Since the heap size is configurable on the command line
4825 // and through the API, we should gracefully handle the case that the heap
4826 // size is not big enough to fit all the initial objects.
4827 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4828  int max_executable_size, size_t code_range_size) {
4829  if (HasBeenSetUp()) return false;
4830 
4831  // Overwrite default configuration.
4832  if (max_semi_space_size > 0) {
4833  max_semi_space_size_ = max_semi_space_size * MB;
4834  }
4835  if (max_old_space_size > 0) {
4836  max_old_generation_size_ = max_old_space_size * MB;
4837  }
4838  if (max_executable_size > 0) {
4839  max_executable_size_ = max_executable_size * MB;
4840  }
4841 
4842  // If max space size flags are specified overwrite the configuration.
4843  if (FLAG_max_semi_space_size > 0) {
4844  max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4845  }
4846  if (FLAG_max_old_space_size > 0) {
4847  max_old_generation_size_ = FLAG_max_old_space_size * MB;
4848  }
4849  if (FLAG_max_executable_size > 0) {
4850  max_executable_size_ = FLAG_max_executable_size * MB;
4851  }
4852 
4853  if (FLAG_stress_compaction) {
4854  // This will cause more frequent GCs when stressing.
4856  }
4857 
4859  // If we are using a snapshot we always reserve the default amount
4860  // of memory for each semispace because code in the snapshot has
4861  // write-barrier code that relies on the size and alignment of new
4862  // space. We therefore cannot use a larger max semispace size
4863  // than the default reserved semispace size.
4866  if (FLAG_trace_gc) {
4867  PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4868  reserved_semispace_size_ >> 10);
4869  }
4870  }
4871  } else {
4872  // If we are not using snapshots we reserve space for the actual
4873  // max semispace size.
4875  }
4876 
4877  // The max executable size must be less than or equal to the max old
4878  // generation size.
4881  }
4882 
4883  // The new space size must be a power of two to support single-bit testing
4884  // for containment.
4889 
4890  if (FLAG_min_semi_space_size > 0) {
4891  int initial_semispace_size = FLAG_min_semi_space_size * MB;
4892  if (initial_semispace_size > max_semi_space_size_) {
4894  if (FLAG_trace_gc) {
4895  PrintPID(
4896  "Min semi-space size cannot be more than the maximum "
4897  "semi-space size of %d MB\n",
4899  }
4900  } else {
4901  initial_semispace_size_ = initial_semispace_size;
4902  }
4903  }
4904 
4906 
4907  // The old generation is paged and needs at least one page for each space.
4908  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4910  Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4912 
4913  // We rely on being able to allocate new arrays in paged spaces.
4915  (JSArray::kSize +
4918 
4919  code_range_size_ = code_range_size * MB;
4920 
4921  configured_ = true;
4922  return true;
4923 }
4924 
4925 
4926 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4927 
4928 
4929 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4932  *stats->new_space_size = new_space_.SizeAsInt();
4933  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4941  *stats->map_space_capacity = map_space_->Capacity();
4946  *stats->lo_space_size = lo_space_->Size();
4947  isolate_->global_handles()->RecordStats(stats);
4949  *stats->memory_allocator_capacity =
4950  isolate()->memory_allocator()->Size() +
4952  *stats->os_error = base::OS::GetLastError();
4954  if (take_snapshot) {
4955  HeapIterator iterator(this);
4956  for (HeapObject* obj = iterator.next(); obj != NULL;
4957  obj = iterator.next()) {
4958  InstanceType type = obj->map()->instance_type();
4959  DCHECK(0 <= type && type <= LAST_TYPE);
4960  stats->objects_per_type[type]++;
4961  stats->size_per_type[type] += obj->Size();
4962  }
4963  }
4964 }
4965 
4966 
4968  return old_pointer_space_->SizeOfObjects() +
4972 }
4973 
4974 
4978  return 0;
4981 }
4982 
4983 
4984 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
4985  int freed_global_handles) {
4986  const int kMaxHandles = 1000;
4987  const int kMinHandles = 100;
4988  double min_factor = 1.1;
4989  double max_factor = 4;
4990  // We set the old generation growing factor to 2 to grow the heap slower on
4991  // memory-constrained devices.
4993  max_factor = 2;
4994  }
4995  // If there are many freed global handles, then the next full GC will
4996  // likely collect a lot of garbage. Choose the heap growing factor
4997  // depending on freed global handles.
4998  // TODO(ulan, hpayer): Take into account mutator utilization.
4999  double factor;
5000  if (freed_global_handles <= kMinHandles) {
5001  factor = max_factor;
5002  } else if (freed_global_handles >= kMaxHandles) {
5003  factor = min_factor;
5004  } else {
5005  // Compute factor using linear interpolation between points
5006  // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5007  factor = max_factor -
5008  (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5009  (kMaxHandles - kMinHandles);
5010  }
5011 
5012  if (FLAG_stress_compaction ||
5014  factor = min_factor;
5015  }
5016 
5017  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5018  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5019  limit += new_space_.Capacity();
5020  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5021  return Min(limit, halfway_to_the_max);
5022 }
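// Illustrative sketch of the linear interpolation above, in isolation, with
// a worked example. With kMinHandles = 100, kMaxHandles = 1000, min_factor =
// 1.1 and max_factor = 4, a GC that freed 550 global handles (the midpoint)
// yields factor = 4 - 450 * 2.9 / 900 = 2.55. SketchGrowingFactor is a
// hypothetical name for this self-contained computation.
static inline double SketchGrowingFactor(int freed_global_handles) {
  const int kMaxHandles = 1000;
  const int kMinHandles = 100;
  const double min_factor = 1.1;
  const double max_factor = 4;
  if (freed_global_handles <= kMinHandles) return max_factor;
  if (freed_global_handles >= kMaxHandles) return min_factor;
  return max_factor -
         (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
             (kMaxHandles - kMinHandles);
}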
5023 
5024 
5026  if (!inline_allocation_disabled_) return;
5028 
5029  // Update inline allocation limit for new space.
5031 }
5032 
5033 
5035  if (inline_allocation_disabled_) return;
5037 
5038  // Update inline allocation limit for new space.
5040 
5041  // Update inline allocation limit for old spaces.
5042  PagedSpaces spaces(this);
5043  for (PagedSpace* space = spaces.next(); space != NULL;
5044  space = spaces.next()) {
5045  space->EmptyAllocationInfo();
5046  }
5047 }
5048 
5049 
5050 V8_DECLARE_ONCE(initialize_gc_once);
5051 
5052 static void InitializeGCOnce() {
5056 }
5057 
5058 
5059 bool Heap::SetUp() {
5060 #ifdef DEBUG
5061  allocation_timeout_ = FLAG_gc_interval;
5062 #endif
5063 
5064  // Initialize heap spaces and initial maps and objects. Whenever something
5065  // goes wrong, just return false. The caller should check the results and
5066  // call Heap::TearDown() to release allocated memory.
5067  //
5068  // If the heap is not yet configured (e.g. through the API), configure it.
5069  // Configuration is based on the flags new-space-size (really the semispace
5070  // size) and old-space-size if set or the initial values of semispace_size_
5071  // and old_generation_size_ otherwise.
5072  if (!configured_) {
5073  if (!ConfigureHeapDefault()) return false;
5074  }
5075 
5076  base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5077 
5078  MarkMapPointersAsEncoded(false);
5079 
5080  // Set up memory allocator.
5082  return false;
5083 
5084  // Set up new space.
5086  return false;
5087  }
5089 
5090  // Initialize old pointer space.
5093  if (old_pointer_space_ == NULL) return false;
5094  if (!old_pointer_space_->SetUp()) return false;
5095 
5096  // Initialize old data space.
5098  NOT_EXECUTABLE);
5099  if (old_data_space_ == NULL) return false;
5100  if (!old_data_space_->SetUp()) return false;
5101 
5102  if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5103 
5104  // Initialize the code space, set its maximum capacity to the old
5105  // generation size. It needs executable memory.
5106  code_space_ =
5108  if (code_space_ == NULL) return false;
5109  if (!code_space_->SetUp()) return false;
5110 
5111  // Initialize map space.
5113  if (map_space_ == NULL) return false;
5114  if (!map_space_->SetUp()) return false;
5115 
5116  // Initialize simple cell space.
5118  if (cell_space_ == NULL) return false;
5119  if (!cell_space_->SetUp()) return false;
5120 
5121  // Initialize global property cell space.
5124  if (property_cell_space_ == NULL) return false;
5125  if (!property_cell_space_->SetUp()) return false;
5126 
5127  // The large object code space may contain code or data. We set the memory
5128  // to be non-executable here for safety, but this means we need to enable it
5129  // explicitly when allocating large code objects.
5131  if (lo_space_ == NULL) return false;
5132  if (!lo_space_->SetUp()) return false;
5133 
5134  // Set up the seed that is used to randomize the string hash function.
5135  DCHECK(hash_seed() == 0);
5136  if (FLAG_randomize_hashes) {
5137  if (FLAG_hash_seed == 0) {
5138  int rnd = isolate()->random_number_generator()->NextInt();
5139  set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5140  } else {
5141  set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5142  }
5143  }
5144 
5145  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5146  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5147 
5148  store_buffer()->SetUp();
5149 
5151 
5152  return true;
5153 }
5154 
5155 
5157  // Create initial maps.
5158  if (!CreateInitialMaps()) return false;
5159  CreateApiObjects();
5160 
5161  // Create initial objects
5163  CHECK_EQ(0, gc_count_);
5164 
5165  set_native_contexts_list(undefined_value());
5166  set_array_buffers_list(undefined_value());
5167  set_allocation_sites_list(undefined_value());
5168  weak_object_to_code_table_ = undefined_value();
5169  return true;
5170 }
5171 
5172 
5174  DCHECK(isolate_ != NULL);
5175  DCHECK(isolate_ == isolate());
5176  // On 64 bit machines, pointers are generally out of range of Smis. We write
5177  // something that looks like an out of range Smi to the GC.
5178 
5179  // Set up the special root array entries containing the stack limits.
5180  // These are actually addresses, but the tag makes the GC ignore it.
5181  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5182  (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5183  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5184  (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5185 }
5186 
5187 
5189 #ifdef VERIFY_HEAP
5190  if (FLAG_verify_heap) {
5191  Verify();
5192  }
5193 #endif
5194 
5196 
5197  if (FLAG_print_cumulative_gc_stat) {
5198  PrintF("\n");
5199  PrintF("gc_count=%d ", gc_count_);
5200  PrintF("mark_sweep_count=%d ", ms_count_);
5201  PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5202  PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5203  PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5204  PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5205  PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
5206  PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5207  PrintF("\n\n");
5208  }
5209 
5210  if (FLAG_print_max_heap_committed) {
5211  PrintF("\n");
5212  PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5214  PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5216  PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5218  PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5220  PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5222  PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5224  PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5226  PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5228  PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5230  PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5232  PrintF("\n\n");
5233  }
5234 
5235  if (FLAG_verify_predictable) {
5237  }
5238 
5240 
5242 
5244 
5246 
5247  new_space_.TearDown();
5248 
5249  if (old_pointer_space_ != NULL) {
5251  delete old_pointer_space_;
5253  }
5254 
5255  if (old_data_space_ != NULL) {
5257  delete old_data_space_;
5259  }
5260 
5261  if (code_space_ != NULL) {
5262  code_space_->TearDown();
5263  delete code_space_;
5264  code_space_ = NULL;
5265  }
5266 
5267  if (map_space_ != NULL) {
5268  map_space_->TearDown();
5269  delete map_space_;
5270  map_space_ = NULL;
5271  }
5272 
5273  if (cell_space_ != NULL) {
5274  cell_space_->TearDown();
5275  delete cell_space_;
5276  cell_space_ = NULL;
5277  }
5278 
5279  if (property_cell_space_ != NULL) {
5281  delete property_cell_space_;
5283  }
5284 
5285  if (lo_space_ != NULL) {
5286  lo_space_->TearDown();
5287  delete lo_space_;
5288  lo_space_ = NULL;
5289  }
5290 
5291  store_buffer()->TearDown();
5293 
5295 }
5296 
5297 
5299  GCType gc_type, bool pass_isolate) {
5300  DCHECK(callback != NULL);
5301  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5302  DCHECK(!gc_prologue_callbacks_.Contains(pair));
5303  return gc_prologue_callbacks_.Add(pair);
5304 }
5305 
5306 
5308  DCHECK(callback != NULL);
5309  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5310  if (gc_prologue_callbacks_[i].callback == callback) {
5311  gc_prologue_callbacks_.Remove(i);
5312  return;
5313  }
5314  }
5315  UNREACHABLE();
5316 }
5317 
5318 
5320  GCType gc_type, bool pass_isolate) {
5321  DCHECK(callback != NULL);
5322  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5323  DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5324  return gc_epilogue_callbacks_.Add(pair);
5325 }
5326 
5327 
5329  DCHECK(callback != NULL);
5330  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5331  if (gc_epilogue_callbacks_[i].callback == callback) {
5332  gc_epilogue_callbacks_.Remove(i);
5333  return;
5334  }
5335  }
5336  UNREACHABLE();
5337 }
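// Illustrative sketch of the register/unregister discipline the four
// callback functions above expect: a callback must be removed with exactly
// the pointer it was added with, otherwise the removal path hits
// UNREACHABLE(). The parameter types and kGCTypeAll are assumed from the
// definitions above and the public v8.h GCType enum; the function itself is
// a hypothetical usage example, not V8 code.
static inline void SketchRegisterGCHooks(Heap* heap,
                                         v8::Isolate::GCPrologueCallback cb) {
  // Fire for all GC types; pass_isolate = true forwards the Isolate*.
  heap->AddGCPrologueCallback(cb, v8::kGCTypeAll, true);
  // ... later, tear down with the same function pointer:
  heap->RemoveGCPrologueCallback(cb);
}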
5338 
5339 
5340 // TODO(ishell): Find a better place for this.
5342  Handle<DependentCode> dep) {
5343  DCHECK(!InNewSpace(*obj));
5344  DCHECK(!InNewSpace(*dep));
5345  // This handle scope keeps the table handle local to this function, which
5346  // allows us to safely skip write barriers in table update operations.
5347  HandleScope scope(isolate());
5348  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5349  isolate());
5350  table = WeakHashTable::Put(table, obj, dep);
5351 
5352  if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5353  WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5354  }
5356  DCHECK_EQ(*dep, table->Lookup(obj));
5357 }
5358 
5359 
5361  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5362  if (dep->IsDependentCode()) return DependentCode::cast(dep);
5363  return DependentCode::cast(empty_fixed_array());
5364 }
5365 
5366 
5368  if (!weak_object_to_code_table()->IsHashTable()) {
5371  TENURED));
5372  }
5373 }
5374 
5375 
5376 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5377  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5378 }
5379 
5380 #ifdef DEBUG
5381 
5382 class PrintHandleVisitor : public ObjectVisitor {
5383  public:
5384  void VisitPointers(Object** start, Object** end) {
5385  for (Object** p = start; p < end; p++)
5386  PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5387  reinterpret_cast<void*>(*p));
5388  }
5389 };
5390 
5391 
5392 void Heap::PrintHandles() {
5393  PrintF("Handles:\n");
5394  PrintHandleVisitor v;
5396 }
5397 
5398 #endif
5399 
5400 
5401 Space* AllSpaces::next() {
5402  switch (counter_++) {
5403  case NEW_SPACE:
5404  return heap_->new_space();
5405  case OLD_POINTER_SPACE:
5406  return heap_->old_pointer_space();
5407  case OLD_DATA_SPACE:
5408  return heap_->old_data_space();
5409  case CODE_SPACE:
5410  return heap_->code_space();
5411  case MAP_SPACE:
5412  return heap_->map_space();
5413  case CELL_SPACE:
5414  return heap_->cell_space();
5415  case PROPERTY_CELL_SPACE:
5416  return heap_->property_cell_space();
5417  case LO_SPACE:
5418  return heap_->lo_space();
5419  default:
5420  return NULL;
5421  }
5422 }
5423 
5424 
5425 PagedSpace* PagedSpaces::next() {
5426  switch (counter_++) {
5427  case OLD_POINTER_SPACE:
5428  return heap_->old_pointer_space();
5429  case OLD_DATA_SPACE:
5430  return heap_->old_data_space();
5431  case CODE_SPACE:
5432  return heap_->code_space();
5433  case MAP_SPACE:
5434  return heap_->map_space();
5435  case CELL_SPACE:
5436  return heap_->cell_space();
5437  case PROPERTY_CELL_SPACE:
5438  return heap_->property_cell_space();
5439  default:
5440  return NULL;
5441  }
5442 }
5443 
5444 
5445 OldSpace* OldSpaces::next() {
5446  switch (counter_++) {
5447  case OLD_POINTER_SPACE:
5448  return heap_->old_pointer_space();
5449  case OLD_DATA_SPACE:
5450  return heap_->old_data_space();
5451  case CODE_SPACE:
5452  return heap_->code_space();
5453  default:
5454  return NULL;
5455  }
5456 }
5457 
5458 
5460  : heap_(heap),
5461  current_space_(FIRST_SPACE),
5462  iterator_(NULL),
5463  size_func_(NULL) {}
5464 
5465 
5467  : heap_(heap),
5468  current_space_(FIRST_SPACE),
5469  iterator_(NULL),
5470  size_func_(size_func) {}
5471 
5472 
5474  // Delete active iterator if any.
5475  delete iterator_;
5476 }
5477 
5478 
5480  // Iterate until no more spaces.
5481  return current_space_ != LAST_SPACE;
5482 }
5483 
5484 
5486  if (iterator_ != NULL) {
5487  delete iterator_;
5488  iterator_ = NULL;
5489  // Move to the next space
5490  current_space_++;
5491  if (current_space_ > LAST_SPACE) {
5492  return NULL;
5493  }
5494  }
5495 
5496  // Return iterator for the new current space.
5497  return CreateIterator();
5498 }
5499 
5500 
5501 // Create an iterator for the space to iterate.
5503  DCHECK(iterator_ == NULL);
5504 
5505  switch (current_space_) {
5506  case NEW_SPACE:
5508  break;
5509  case OLD_POINTER_SPACE:
5510  iterator_ =
5512  break;
5513  case OLD_DATA_SPACE:
5515  break;
5516  case CODE_SPACE:
5518  break;
5519  case MAP_SPACE:
5521  break;
5522  case CELL_SPACE:
5524  break;
5525  case PROPERTY_CELL_SPACE:
5526  iterator_ =
5528  break;
5529  case LO_SPACE:
5531  break;
5532  }
5533 
5534  // Return the newly allocated iterator;
5535  DCHECK(iterator_ != NULL);
5536  return iterator_;
5537 }
5538 
5539 
5541  public:
5542  virtual ~HeapObjectsFilter() {}
5543  virtual bool SkipObject(HeapObject* object) = 0;
5544 };
5545 
5546 
5548  public:
5549  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5550  MarkReachableObjects();
5551  }
5552 
5554  heap_->mark_compact_collector()->ClearMarkbits();
5555  }
5556 
5557  bool SkipObject(HeapObject* object) {
5558  MarkBit mark_bit = Marking::MarkBitFrom(object);
5559  return !mark_bit.Get();
5560  }
5561 
5562  private:
5564  public:
5565  MarkingVisitor() : marking_stack_(10) {}
5566 
5567  void VisitPointers(Object** start, Object** end) {
5568  for (Object** p = start; p < end; p++) {
5569  if (!(*p)->IsHeapObject()) continue;
5570  HeapObject* obj = HeapObject::cast(*p);
5571  MarkBit mark_bit = Marking::MarkBitFrom(obj);
5572  if (!mark_bit.Get()) {
5573  mark_bit.Set();
5574  marking_stack_.Add(obj);
5575  }
5576  }
5577  }
5578 
5580  while (!marking_stack_.is_empty()) {
5581  HeapObject* obj = marking_stack_.RemoveLast();
5582  obj->Iterate(this);
5583  }
5584  }
5585 
5586  private:
5588  };
5589 
5591  MarkingVisitor visitor;
5592  heap_->IterateRoots(&visitor, VISIT_ALL);
5593  visitor.TransitiveClosure();
5594  }
5595 
5598 };
5599 
5600 
5601 HeapIterator::HeapIterator(Heap* heap)
5602  : make_heap_iterable_helper_(heap),
5603  no_heap_allocation_(),
5604  heap_(heap),
5605  filtering_(HeapIterator::kNoFiltering),
5606  filter_(NULL) {
5607  Init();
5608 }
5609 
5610 
5611 HeapIterator::HeapIterator(Heap* heap,
5612  HeapIterator::HeapObjectsFiltering filtering)
5613  : make_heap_iterable_helper_(heap),
5614  no_heap_allocation_(),
5615  heap_(heap),
5616  filtering_(filtering),
5617  filter_(NULL) {
5618  Init();
5619 }
5620 
5621 
5622 HeapIterator::~HeapIterator() { Shutdown(); }
5623 
5624 
5625 void HeapIterator::Init() {
5626  // Start the iteration.
5627  space_iterator_ = new SpaceIterator(heap_);
5628  switch (filtering_) {
5629  case kFilterUnreachable:
5630  filter_ = new UnreachableObjectsFilter(heap_);
5631  break;
5632  default:
5633  break;
5634  }
5635  object_iterator_ = space_iterator_->next();
5636 }
5637 
5638 
5639 void HeapIterator::Shutdown() {
5640 #ifdef DEBUG
5641  // Assert that in filtering mode we have iterated through all
5642  // objects. Otherwise, heap will be left in an inconsistent state.
5643  if (filtering_ != kNoFiltering) {
5644  DCHECK(object_iterator_ == NULL);
5645  }
5646 #endif
5647  // Make sure the last iterator is deallocated.
5648  delete space_iterator_;
5649  space_iterator_ = NULL;
5650  object_iterator_ = NULL;
5651  delete filter_;
5652  filter_ = NULL;
5653 }
5654 
5655 
5656 HeapObject* HeapIterator::next() {
5657  if (filter_ == NULL) return NextObject();
5658 
5659  HeapObject* obj = NextObject();
5660  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5661  return obj;
5662 }
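// Illustrative sketch of the intended way to walk live objects with the
// iterator defined above: construct it, call next() until it returns NULL,
// and allocate nothing while it is alive (it holds a DisallowHeapAllocation
// scope). Passing kFilterUnreachable additionally skips objects that are not
// reachable from the roots. SketchCountLiveObjects is a hypothetical usage
// example, not part of the heap itself.
static inline int SketchCountLiveObjects(Heap* heap) {
  int count = 0;
  HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
    count++;  // obj is a fully iterable, reachable heap object
  }
  return count;
}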
5663 
5664 
5665 HeapObject* HeapIterator::NextObject() {
5666  // No iterator means we are done.
5667  if (object_iterator_ == NULL) return NULL;
5668 
5669  if (HeapObject* obj = object_iterator_->next_object()) {
5670  // If the current iterator has more objects we are fine.
5671  return obj;
5672  } else {
5673  // Go though the spaces looking for one that has objects.
5674  while (space_iterator_->has_next()) {
5675  object_iterator_ = space_iterator_->next();
5676  if (HeapObject* obj = object_iterator_->next_object()) {
5677  return obj;
5678  }
5679  }
5680  }
5681  // Done with the last space.
5682  object_iterator_ = NULL;
5683  return NULL;
5684 }
5685 
5686 
5687 void HeapIterator::reset() {
5688  // Restart the iterator.
5689  Shutdown();
5690  Init();
5691 }
5692 
5693 
5694 #ifdef DEBUG
5695 
5696 Object* const PathTracer::kAnyGlobalObject = NULL;
5697 
5698 class PathTracer::MarkVisitor : public ObjectVisitor {
5699  public:
5700  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5701  void VisitPointers(Object** start, Object** end) {
5702  // Scan all HeapObject pointers in [start, end)
5703  for (Object** p = start; !tracer_->found() && (p < end); p++) {
5704  if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5705  }
5706  }
5707 
5708  private:
5709  PathTracer* tracer_;
5710 };
5711 
5712 
5713 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5714  public:
5715  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5716  void VisitPointers(Object** start, Object** end) {
5717  // Scan all HeapObject pointers in [start, end)
5718  for (Object** p = start; p < end; p++) {
5719  if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5720  }
5721  }
5722 
5723  private:
5724  PathTracer* tracer_;
5725 };
5726 
5727 
5728 void PathTracer::VisitPointers(Object** start, Object** end) {
5729  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5730  // Visit all HeapObject pointers in [start, end)
5731  for (Object** p = start; !done && (p < end); p++) {
5732  if ((*p)->IsHeapObject()) {
5733  TracePathFrom(p);
5734  done = ((what_to_find_ == FIND_FIRST) && found_target_);
5735  }
5736  }
5737 }
5738 
5739 
5740 void PathTracer::Reset() {
5741  found_target_ = false;
5742  object_stack_.Clear();
5743 }
5744 
5745 
5746 void PathTracer::TracePathFrom(Object** root) {
5747  DCHECK((search_target_ == kAnyGlobalObject) ||
5748  search_target_->IsHeapObject());
5749  found_target_in_trace_ = false;
5750  Reset();
5751 
5752  MarkVisitor mark_visitor(this);
5753  MarkRecursively(root, &mark_visitor);
5754 
5755  UnmarkVisitor unmark_visitor(this);
5756  UnmarkRecursively(root, &unmark_visitor);
5757 
5758  ProcessResults();
5759 }
5760 
5761 
5762 static bool SafeIsNativeContext(HeapObject* obj) {
5763  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5764 }
5765 
5766 
5767 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5768  if (!(*p)->IsHeapObject()) return;
5769 
5770  HeapObject* obj = HeapObject::cast(*p);
5771 
5772  MapWord map_word = obj->map_word();
5773  if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5774 
5775  if (found_target_in_trace_) return; // stop if target found
5776  object_stack_.Add(obj);
5777  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5778  (obj == search_target_)) {
5779  found_target_in_trace_ = true;
5780  found_target_ = true;
5781  return;
5782  }
5783 
5784  bool is_native_context = SafeIsNativeContext(obj);
5785 
5786  // not visited yet
5787  Map* map = Map::cast(map_word.ToMap());
5788 
5789  MapWord marked_map_word =
5790  MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5791  obj->set_map_word(marked_map_word);
5792 
5793  // Scan the object body.
5794  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5795  // This is specialized to scan Contexts properly.
5796  Object** start =
5797  reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5798  Object** end =
5799  reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5800  Context::FIRST_WEAK_SLOT * kPointerSize);
5801  mark_visitor->VisitPointers(start, end);
5802  } else {
5803  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5804  }
5805 
5806  // Scan the map after the body because the body is a lot more interesting
5807  // when doing leak detection.
5808  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5809 
5810  if (!found_target_in_trace_) { // don't pop if found the target
5811  object_stack_.RemoveLast();
5812  }
5813 }
5814 
5815 
5816 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5817  if (!(*p)->IsHeapObject()) return;
5818 
5819  HeapObject* obj = HeapObject::cast(*p);
5820 
5821  MapWord map_word = obj->map_word();
5822  if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5823 
5824  MapWord unmarked_map_word =
5825  MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5826  obj->set_map_word(unmarked_map_word);
5827 
5828  Map* map = Map::cast(unmarked_map_word.ToMap());
5829 
5830  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5831 
5832  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5833 }
5834 
5835 
5836 void PathTracer::ProcessResults() {
5837  if (found_target_) {
5838  OFStream os(stdout);
5839  os << "=====================================\n"
5840  << "==== Path to object ====\n"
5841  << "=====================================\n\n";
5842 
5843  DCHECK(!object_stack_.is_empty());
5844  for (int i = 0; i < object_stack_.length(); i++) {
5845  if (i > 0) os << "\n |\n |\n V\n\n";
5846  object_stack_[i]->Print(os);
5847  }
5848  os << "=====================================\n";
5849  }
5850 }
5851 
5852 
5853 // Triggers a depth-first traversal of reachable objects from one
5854 // given root object and finds a path to a specific heap object and
5855 // prints it.
5856 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5857  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5858  tracer.VisitPointer(&root);
5859 }
5860 
5861 
5862 // Triggers a depth-first traversal of reachable objects from roots
5863 // and finds a path to a specific heap object and prints it.
5864 void Heap::TracePathToObject(Object* target) {
5865  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5866  IterateRoots(&tracer, VISIT_ONLY_STRONG);
5867 }
5868 
5869 
5870 // Triggers a depth-first traversal of reachable objects from roots
5871 // and finds a path to any global object and prints it. Useful for
5872 // determining the source for leaks of global objects.
5873 void Heap::TracePathToGlobal() {
5874  PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5875  VISIT_ALL);
5876  IterateRoots(&tracer, VISIT_ONLY_STRONG);
5877 }
5878 #endif
5879 
5880 
5881 void Heap::UpdateCumulativeGCStatistics(double duration,
5882  double spent_in_mutator,
5883  double marking_time) {
5884  if (FLAG_print_cumulative_gc_stat) {
5885  total_gc_time_ms_ += duration;
5886  max_gc_pause_ = Max(max_gc_pause_, duration);
5887  max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5888  min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5889  } else if (FLAG_trace_gc_verbose) {
5890  total_gc_time_ms_ += duration;
5891  }
5892 
5893  marking_time_ += marking_time;
5894 }
5895 
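// The keyed lookup cache maps (map, name) pairs to field offsets. Hash()
// mixes the map's address with the name's hash and masks it down to a bucket
// index; Lookup() and Update() probe the kEntriesPerBucket slots of that
// bucket.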
5896 
5897 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5898  DisallowHeapAllocation no_gc;
5899  // Uses only lower 32 bits if pointers are larger.
5900  uintptr_t addr_hash =
5901  static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5902  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5903 }
5904 
5905 
5906 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5907  DisallowHeapAllocation no_gc;
5908  int index = (Hash(map, name) & kHashMask);
5909  for (int i = 0; i < kEntriesPerBucket; i++) {
5910  Key& key = keys_[index + i];
5911  if ((key.map == *map) && key.name->Equals(*name)) {
5912  return field_offsets_[index + i];
5913  }
5914  }
5915  return kNotFound;
5916 }
5917 
5918 
5919 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5920  int field_offset) {
5921  DisallowHeapAllocation no_gc;
5922  if (!name->IsUniqueName()) {
5923  if (!StringTable::InternalizeStringIfExists(
5924  name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5925  return;
5926  }
5927  }
5928  // This cache is cleared only between mark compact passes, so we expect the
5929  // cache to only contain old space names.
5930  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5931 
5932  int index = (Hash(map, name) & kHashMask);
5933  // After a GC there will be free slots, so we use them in order (this may
5934  // help to get the most frequently used one in position 0).
5935  for (int i = 0; i < kEntriesPerBucket; i++) {
5936  Key& key = keys_[index];
5937  Object* free_entry_indicator = NULL;
5938  if (key.map == free_entry_indicator) {
5939  key.map = *map;
5940  key.name = *name;
5941  field_offsets_[index + i] = field_offset;
5942  return;
5943  }
5944  }
5945  // No free entry found in this bucket, so we move them all down one and
5946  // put the new entry at position zero.
5947  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5948  Key& key = keys_[index + i];
5949  Key& key2 = keys_[index + i - 1];
5950  key = key2;
5951  field_offsets_[index + i] = field_offsets_[index + i - 1];
5952  }
5953 
5954  // Write the new first entry.
5955  Key& key = keys_[index];
5956  key.map = *map;
5957  key.name = *name;
5958  field_offsets_[index] = field_offset;
5959 }
5960 
5961 
5962 void KeyedLookupCache::Clear() {
5963  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5964 }
5965 
5966 
5967 void DescriptorLookupCache::Clear() {
5968  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
5969 }
5970 
5971 
5972 void ExternalStringTable::CleanUp() {
5973  int last = 0;
5974  for (int i = 0; i < new_space_strings_.length(); ++i) {
5975  if (new_space_strings_[i] == heap_->the_hole_value()) {
5976  continue;
5977  }
5978  DCHECK(new_space_strings_[i]->IsExternalString());
5979  if (heap_->InNewSpace(new_space_strings_[i])) {
5980  new_space_strings_[last++] = new_space_strings_[i];
5981  } else {
5982  old_space_strings_.Add(new_space_strings_[i]);
5983  }
5984  }
5985  new_space_strings_.Rewind(last);
5986  new_space_strings_.Trim();
5987 
5988  last = 0;
5989  for (int i = 0; i < old_space_strings_.length(); ++i) {
5990  if (old_space_strings_[i] == heap_->the_hole_value()) {
5991  continue;
5992  }
5993  DCHECK(old_space_strings_[i]->IsExternalString());
5994  DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
5995  old_space_strings_[last++] = old_space_strings_[i];
5996  }
5997  old_space_strings_.Rewind(last);
5998  old_space_strings_.Trim();
5999 #ifdef VERIFY_HEAP
6000  if (FLAG_verify_heap) {
6001  Verify();
6002  }
6003 #endif
6004 }
6005 
6006 
6007 void ExternalStringTable::TearDown() {
6008  for (int i = 0; i < new_space_strings_.length(); ++i) {
6009  heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6010  }
6011  new_space_strings_.Free();
6012  for (int i = 0; i < old_space_strings_.length(); ++i) {
6013  heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6014  }
6015  old_space_strings_.Free();
6016 }
6017 
6018 
6019 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6020  chunk->set_next_chunk(chunks_queued_for_free_);
6021  chunks_queued_for_free_ = chunk;
6022 }
6023 
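// Queued chunks are released lazily: the store buffer is compacted and
// filtered first so that no stale slots point into chunks that are about to
// be handed back to the memory allocator.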
6024 
6025 void Heap::FreeQueuedChunks() {
6026  if (chunks_queued_for_free_ == NULL) return;
6027  MemoryChunk* next;
6028  MemoryChunk* chunk;
6029  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6030  next = chunk->next_chunk();
6031  chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6032 
6033  if (chunk->owner()->identity() == LO_SPACE) {
6034  // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6035  // If FromAnyPointerAddress encounters a slot that belongs to a large
6036  // chunk queued for deletion it will fail to find the chunk because
6037  // it tries to perform a search in the list of pages owned by the large
6038  // object space, and queued chunks were detached from that list.
6039  // To work around this we split the large chunk into normal kPageSize aligned
6040  // pieces and initialize size, owner and flags field of every piece.
6041  // If FromAnyPointerAddress encounters a slot that belongs to one of
6042  // these smaller pieces it will treat it as a slot on a normal Page.
6043  Address chunk_end = chunk->address() + chunk->size();
6044  MemoryChunk* inner =
6045  MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6046  MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6047  while (inner <= inner_last) {
6048  // Size of a large chunk is always a multiple of
6049  // OS::AllocateAlignment() so there is always
6050  // enough space for a fake MemoryChunk header.
6051  Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6052  // Guard against overflow.
6053  if (area_end < inner->address()) area_end = chunk_end;
6054  inner->SetArea(inner->address(), area_end);
6055  inner->set_size(Page::kPageSize);
6056  inner->set_owner(lo_space());
6057  inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6058  inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6059  }
6060  }
6061  }
6062  isolate_->heap()->store_buffer()->Compact();
6063  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6064  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6065  next = chunk->next_chunk();
6066  isolate_->memory_allocator()->Free(chunk);
6067  }
6068  chunks_queued_for_free_ = NULL;
6069 }
6070 
6071 
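// Keep a small ring buffer of recently unmapped pages so that their
// addresses remain findable in a crash dump; the low bits are XOR-tagged to
// record whether the page was compacted (0xc1ead, "cleared") or simply
// unmapped (0x1d1ed, "I died").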
6072 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6073  uintptr_t p = reinterpret_cast<uintptr_t>(page);
6074  // Tag the page pointer to make it findable in the dump file.
6075  if (compacted) {
6076  p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
6077  } else {
6078  p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
6079  }
6080  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6081  reinterpret_cast<Address>(p);
6082  remembered_unmapped_pages_index_++;
6083  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6084 }
6085 
6086 
6087 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6088  memset(object_counts_, 0, sizeof(object_counts_));
6089  memset(object_sizes_, 0, sizeof(object_sizes_));
6090  if (clear_last_time_stats) {
6091  memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6092  memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6093  }
6094 }
6095 
6096 
6097 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6098 
6099 
6100 void Heap::CheckpointObjectStats() {
6101  base::LockGuard<base::Mutex> lock_guard(
6102  checkpoint_object_stats_mutex.Pointer());
6103  Counters* counters = isolate()->counters();
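// Each ADJUST_LAST_TIME_OBJECT_COUNT expansion below reports the delta since
// the previous checkpoint to the isolate's counters: the current count/size
// is added and the value recorded at the last checkpoint is subtracted. The
// current values are then copied into the *_last_time_ arrays and cleared.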
6104 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6105  counters->count_of_##name()->Increment( \
6106  static_cast<int>(object_counts_[name])); \
6107  counters->count_of_##name()->Decrement( \
6108  static_cast<int>(object_counts_last_time_[name])); \
6109  counters->size_of_##name()->Increment( \
6110  static_cast<int>(object_sizes_[name])); \
6111  counters->size_of_##name()->Decrement( \
6112  static_cast<int>(object_sizes_last_time_[name]));
6113  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6114 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6115  int index;
6116 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6117  index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
6118  counters->count_of_CODE_TYPE_##name()->Increment( \
6119  static_cast<int>(object_counts_[index])); \
6120  counters->count_of_CODE_TYPE_##name()->Decrement( \
6121  static_cast<int>(object_counts_last_time_[index])); \
6122  counters->size_of_CODE_TYPE_##name()->Increment( \
6123  static_cast<int>(object_sizes_[index])); \
6124  counters->size_of_CODE_TYPE_##name()->Decrement( \
6125  static_cast<int>(object_sizes_last_time_[index]));
6126  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6127 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6128 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6129  index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
6130  counters->count_of_FIXED_ARRAY_##name()->Increment( \
6131  static_cast<int>(object_counts_[index])); \
6132  counters->count_of_FIXED_ARRAY_##name()->Decrement( \
6133  static_cast<int>(object_counts_last_time_[index])); \
6134  counters->size_of_FIXED_ARRAY_##name()->Increment( \
6135  static_cast<int>(object_sizes_[index])); \
6136  counters->size_of_FIXED_ARRAY_##name()->Decrement( \
6137  static_cast<int>(object_sizes_last_time_[index]));
6138  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6139 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6140 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
6141  index = \
6142  FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6143  counters->count_of_CODE_AGE_##name()->Increment( \
6144  static_cast<int>(object_counts_[index])); \
6145  counters->count_of_CODE_AGE_##name()->Decrement( \
6146  static_cast<int>(object_counts_last_time_[index])); \
6147  counters->size_of_CODE_AGE_##name()->Increment( \
6148  static_cast<int>(object_sizes_[index])); \
6149  counters->size_of_CODE_AGE_##name()->Decrement( \
6150  static_cast<int>(object_sizes_last_time_[index]));
6151  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6152 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6153 
6154  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6155  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6156  ClearObjectStats();
6157 }
6158 }
6159 } // namespace v8::internal
#define CODE_AGE_LIST_COMPLETE(V)
Definition: builtins.h:30
#define SLOW_DCHECK(condition)
Definition: checks.h:30
static uint16_t LeadSurrogate(uint32_t char_code)
Definition: unicode.h:108
static const uchar kMaxNonSurrogateCharCode
Definition: unicode.h:98
static uint16_t TrailSurrogate(uint32_t char_code)
Definition: unicode.h:111
static uchar ValueOf(const byte *str, unsigned length, unsigned *cursor)
Definition: unicode-inl.h:129
static const uchar kBadChar
Definition: unicode.h:139
Interface for iterating through all external resources in the heap.
Definition: v8.h:4943
Isolate represents an isolated instance of the V8 engine.
Definition: v8.h:4356
void(* GCEpilogueCallback)(Isolate *isolate, GCType type, GCCallbackFlags flags)
Definition: v8.h:4651
void(* GCPrologueCallback)(Isolate *isolate, GCType type, GCCallbackFlags flags)
Definition: v8.h:4648
A JavaScript number value (ECMA-262, 4.3.20)
Definition: v8.h:2162
A JavaScript object (ECMA-262, 4.3.3)
Definition: v8.h:2283
static const int kNoScriptId
Definition: v8.h:977
static Local< Context > ToLocal(v8::internal::Handle< v8::internal::Context > obj)
static double nan_value()
static int GetLastError()
PretenureFlag GetPretenureMode()
Definition: objects.cc:12585
static bool CanTrack(InstanceType type)
Definition: objects-inl.h:1614
void set_deopt_dependent_code(bool deopt)
Definition: objects.h:8161
bool DigestPretenuringFeedback(bool maximum_size_scavenge)
Definition: objects-inl.h:1694
static U encode(T value)
Definition: utils.h:217
void IterateBuiltins(ObjectVisitor *v)
Definition: builtins.cc:1599
static int SizeFor(int length)
Definition: objects.h:4360
static const int kMaxLength
Definition: objects.h:4394
Address ValueAddress()
Definition: objects.h:9437
static const int kSize
Definition: objects.h:9447
void IteratePointersToFromSpace(ObjectVisitor *v)
bool contains(Address address)
Definition: spaces.h:887
bool SetUp(size_t requested_size)
Definition: spaces.cc:99
ConstantPoolArray * constant_pool()
Definition: objects-inl.h:4942
void Relocate(intptr_t delta)
Definition: objects.cc:10126
void set_constant_pool(Object *constant_pool)
Definition: objects-inl.h:4947
byte * relocation_start()
Definition: objects-inl.h:6196
int instruction_size() const
byte * instruction_end()
Definition: objects-inl.h:6181
void set_ic_age(int count)
void ClearInlineCaches()
Definition: objects.cc:10377
static Object * GetObjectFromEntryAddress(Address location_of_address)
Definition: objects-inl.h:5029
static int SizeFor(int body_size)
Definition: objects.h:5256
void Iterate(ObjectVisitor *v)
static const int kSize
Definition: objects.h:9063
static const int kExtendedFirstOffset
Definition: objects.h:2852
static const int kFirstEntryOffset
Definition: objects.h:2827
void ClearPtrEntries(Isolate *isolate)
Definition: objects.cc:9084
void InitExtended(const NumberOfEntries &small, const NumberOfEntries &extended)
Definition: objects-inl.h:2613
static const int kMaxSmallEntriesPerType
Definition: objects.h:2830
void Init(const NumberOfEntries &small)
Definition: objects-inl.h:2594
static int SizeFor(const NumberOfEntries &small)
Definition: objects.h:2755
static int SizeForExtended(const NumberOfEntries &small, const NumberOfEntries &extended)
Definition: objects.h:2764
static Context * cast(Object *context)
Definition: contexts.h:255
static const int kSize
Definition: contexts.h:572
void Iterate(ObjectVisitor *v)
Definition: deoptimizer.cc:53
static void DeoptimizeAll(Isolate *isolate)
Definition: deoptimizer.cc:437
static void DeoptimizeMarkedCode(Isolate *isolate)
Definition: deoptimizer.cc:454
static MUST_USE_RESULT Handle< UnseededNumberDictionary > New(Isolate *isolate, int at_least_space_for, PretenureFlag pretenure=NOT_TENURED)
Definition: objects.cc:14899
void PostGarbageCollectionProcessing(Heap *heap)
void IterateAllRoots(ObjectVisitor *visitor)
void IterateNewSpaceRoots(ObjectVisitor *visitor)
static const int kAlignedSize
Definition: objects.h:4474
void Iterate(ObjectVisitor *v)
Definition: heap-inl.h:654
void AddOldString(String *string)
Definition: heap-inl.h:684
List< Object * > old_space_strings_
Definition: heap.h:503
void ShrinkNewStrings(int position)
Definition: heap-inl.h:691
List< Object * > new_space_strings_
Definition: heap.h:502
static const int kLengthOffset
Definition: objects.h:2392
static const int kHeaderSize
Definition: objects.h:2393
static int SizeOf(Map *map, HeapObject *object)
Definition: objects.h:2491
static int OffsetOfElementAt(int index)
Definition: objects.h:2455
Object * get(int index)
Definition: objects-inl.h:2165
static int SizeFor(int length)
Definition: objects.h:2452
void set(int index, Object *value)
Definition: objects-inl.h:2190
static const int kMaxLength
Definition: objects.h:2469
static int SizeFor(int length)
Definition: objects.h:2531
static const int kMaxLength
Definition: objects.h:2554
static const int kDataOffset
Definition: objects.h:4716
static const int kSize
Definition: objects.h:10005
void set_foreign_address(Address value)
Definition: objects-inl.h:6060
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:1955
GCIdleTimeAction Compute(size_t idle_time_in_ms, HeapState heap_state)
double cumulative_sweeping_duration() const
Definition: gc-tracer.h:275
intptr_t MarkCompactSpeedInBytesPerMillisecond() const
Definition: gc-tracer.cc:448
intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const
Definition: gc-tracer.cc:407
intptr_t ScavengeSpeedInBytesPerMillisecond() const
Definition: gc-tracer.cc:432
void Start(GarbageCollector collector, const char *gc_reason, const char *collector_reason)
Definition: gc-tracer.cc:98
intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const
Definition: gc-tracer.cc:465
void RecordStats(HeapStats *stats)
void IterateAllRoots(ObjectVisitor *v)
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor *v)
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor *v)
int PostGarbageCollectionProcessing(GarbageCollector collector)
void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f)
void IterateStrongRoots(ObjectVisitor *v)
void Iterate(v8::internal::ObjectVisitor *v)
Definition: api.cc:7590
static int NumberOfHandles(Isolate *isolate)
Definition: handles.cc:13
static MUST_USE_RESULT Handle< StringTable > New(Isolate *isolate, int at_least_space_for, MinimumCapacity capacity_option=USE_DEFAULT_MINIMUM_CAPACITY, PretenureFlag pretenure=NOT_TENURED)
Definition: objects.cc:13756
static const int kSize
Definition: objects.h:1521
void set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1435
static const int kMapOffset
Definition: objects.h:1427
void set_map(Map *value)
Definition: objects-inl.h:1404
static Object ** RawField(HeapObject *obj, int offset)
Definition: objects-inl.h:1311
Isolate * GetIsolate() const
Definition: objects-inl.h:1387
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1464
MapWord map_word() const
Definition: objects-inl.h:1440
void set_map_word(MapWord map_word)
Definition: objects-inl.h:1446
WriteBarrierMode GetWriteBarrierMode(const DisallowHeapAllocation &promise)
Definition: objects-inl.h:2660
void Iterate(ObjectVisitor *v)
Definition: objects.cc:1529
virtual bool SkipObject(HeapObject *object)=0
bool is_tracking_object_moves() const
Definition: heap-profiler.h:61
intptr_t * code_space_size
Definition: heap.h:2054
intptr_t * map_space_size
Definition: heap.h:2056
intptr_t * map_space_capacity
Definition: heap.h:2057
static const int kStartMarker
Definition: heap.h:2044
intptr_t * lo_space_size
Definition: heap.h:2060
intptr_t * code_space_capacity
Definition: heap.h:2055
intptr_t * memory_allocator_capacity
Definition: heap.h:2067
intptr_t * old_data_space_size
Definition: heap.h:2052
intptr_t * cell_space_size
Definition: heap.h:2058
intptr_t * old_pointer_space_size
Definition: heap.h:2050
static const int kEndMarker
Definition: heap.h:2045
intptr_t * memory_allocator_size
Definition: heap.h:2066
intptr_t * old_data_space_capacity
Definition: heap.h:2053
int * new_space_capacity
Definition: heap.h:2049
intptr_t * property_cell_space_capacity
Definition: heap.h:2073
intptr_t * old_pointer_space_capacity
Definition: heap.h:2051
intptr_t * property_cell_space_size
Definition: heap.h:2072
intptr_t * cell_space_capacity
Definition: heap.h:2059
int64_t amount_of_external_allocated_memory_
Definition: heap.h:1417
size_t CommittedPhysicalMemory()
Definition: heap.cc:180
MUST_USE_RESULT AllocationResult CopyFixedArray(FixedArray *src)
Definition: heap-inl.h:148
List< GCPrologueCallbackPair > gc_prologue_callbacks_
Definition: heap.h:1580
MUST_USE_RESULT AllocationResult CopyJSObject(JSObject *source, AllocationSite *site=NULL)
Definition: heap.cc:3688
bool flush_monomorphic_ics_
Definition: heap.h:1450
MUST_USE_RESULT AllocationResult AllocateRawOneByteString(int length, PretenureFlag pretenure)
Definition: heap.cc:3871
bool UncommitFromSpace()
Definition: heap.h:1840
void AdvanceIdleIncrementalMarking(intptr_t step_size)
Definition: heap.cc:4267
ExternalStringTable external_string_table_
Definition: heap.h:2013
bool Contains(Address addr)
Definition: heap.cc:4447
intptr_t max_old_generation_size_
Definition: heap.h:1432
Address remembered_unmapped_pages_[kRememberedUnmappedPages]
Definition: heap.h:1487
void EnsureFillerObjectAtTop()
Definition: heap.cc:770
double max_gc_pause_
Definition: heap.h:1959
static const int kAllocationSiteScratchpadSize
Definition: heap.h:2000
void IterateRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:4722
int nodes_copied_in_new_space_
Definition: heap.h:1914
void IncrementSemiSpaceCopiedObjectSize(int object_size)
Definition: heap.h:1119
static int GcSafeSizeOfOldObject(HeapObject *object)
Definition: heap.cc:228
void set_array_buffers_list(Object *object)
Definition: heap.h:795
OldSpace * old_pointer_space()
Definition: heap.h:594
StoreBuffer store_buffer_
Definition: heap.h:1978
MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray(ExternalArrayType array_type)
Definition: heap.cc:3967
FixedTypedArrayBase * EmptyFixedTypedArrayForMap(Map *map)
Definition: heap.cc:3184
void VisitExternalResources(v8::ExternalResourceVisitor *visitor)
Definition: heap.cc:1707
@ kStringTableRootIndex
Definition: heap.h:1067
@ kStrongRootListLength
Definition: heap.h:1073
static void ScavengeObject(HeapObject **p, HeapObject *object)
Definition: heap-inl.h:554
void AddWeakObjectToCodeDependency(Handle< Object > obj, Handle< DependentCode > dep)
Definition: heap.cc:5341
void DeoptMarkedAllocationSites()
Definition: heap.cc:569
void CreateFixedStubs()
Definition: heap.cc:2713
void SetStackLimits()
Definition: heap.cc:5173
void CreateApiObjects()
Definition: heap.cc:2680
RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind)
Definition: heap.cc:3162
void ZapFromSpace()
Definition: heap.cc:4527
friend class HeapIterator
Definition: heap.h:2028
static const int kReduceMemoryFootprintMask
Definition: heap.h:717
MarkCompactCollector mark_compact_collector_
Definition: heap.h:1976
MUST_USE_RESULT AllocationResult CopyConstantPoolArrayWithMap(ConstantPoolArray *src, Map *map)
Definition: heap.cc:4014
static const ConstantStringTable constant_string_table[]
Definition: heap.h:1559
int sweep_generation_
Definition: heap.h:1441
bool OldGenerationAllocationLimitReached()
Definition: heap-inl.h:364
int initial_semispace_size_
Definition: heap.h:1431
static const int kInitialStringTableSize
Definition: heap.h:1948
void ReserveSpace(int *sizes, Address *addresses)
Definition: heap.cc:920
StoreBufferRebuilder store_buffer_rebuilder_
Definition: heap.h:1539
void Scavenge()
Definition: heap.cc:1420
unsigned int maximum_size_scavenges_
Definition: heap.h:1921
MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray()
Definition: heap.cc:4178
PropertyCellSpace * property_cell_space()
Definition: heap.h:599
intptr_t CommittedMemoryExecutable()
Definition: heap.cc:194
void MoveElements(FixedArray *array, int dst_index, int src_index, int len)
Definition: heap.cc:868
RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type)
Definition: heap.cc:3106
MUST_USE_RESULT AllocationResult AllocateForeign(Address address, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3190
Isolate * isolate_
Definition: heap.h:1424
void MarkCompactPrologue()
Definition: heap.cc:1209
MUST_USE_RESULT AllocationResult AllocateCell(Object *value)
Definition: heap.cc:2646
OldSpace * code_space()
Definition: heap.h:596
MUST_USE_RESULT AllocationResult AllocatePartialMap(InstanceType instance_type, int instance_size)
Definition: heap.cc:2271
void ClearObjectStats(bool clear_last_time_stats=false)
Definition: heap.cc:6087
friend class Factory
Definition: heap.h:2025
PagedSpace * paged_space(int idx)
Definition: heap.h:601
void FreeQueuedChunks()
Definition: heap.cc:6025
GCIdleTimeHandler gc_idle_time_handler_
Definition: heap.h:1984
MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray *src, Map *map)
Definition: heap.cc:3973
void ProcessPretenuringFeedback()
Definition: heap.cc:493
static const int kRememberedUnmappedPages
Definition: heap.h:1485
Address DoScavenge(ObjectVisitor *scavenge_visitor, Address new_space_front)
Definition: heap.cc:1743
void InitializeJSObjectFromMap(JSObject *obj, FixedArray *properties, Map *map)
Definition: heap.cc:3600
void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback)
Definition: heap.cc:5307
intptr_t MaximumCommittedMemory()
Definition: heap.h:572
OldSpace * code_space_
Definition: heap.h:1457
intptr_t max_executable_size_
Definition: heap.h:1433
MUST_USE_RESULT AllocationResult AllocateEmptyExternalArray(ExternalArrayType array_type)
Definition: heap.cc:3933
void GarbageCollectionEpilogue()
Definition: heap.cc:587
List< GCEpilogueCallbackPair > gc_epilogue_callbacks_
Definition: heap.h:1594
MUST_USE_RESULT AllocationResult AllocateCode(int object_size, bool immovable)
Definition: heap.cc:3431
MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length)
Definition: heap.cc:4078
Object * native_contexts_list() const
Definition: heap.h:793
double total_gc_time_ms_
Definition: heap.h:1962
void CheckNewSpaceExpansionCriteria()
Definition: heap.cc:1291
LargeObjectSpace * lo_space()
Definition: heap.h:600
void SelectScavengingVisitorsTable()
Definition: heap.cc:2224
void QueueMemoryChunkForFree(MemoryChunk *chunk)
Definition: heap.cc:6019
int gcs_since_last_deopt_
Definition: heap.h:1994
Object * allocation_sites_list()
Definition: heap.h:801
MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4093
void CheckpointObjectStats()
Definition: heap.cc:6100
int global_ic_age_
Definition: heap.h:1448
void MarkCompact()
Definition: heap.cc:1181
void ResetAllAllocationSitesDependentCode(PretenureFlag flag)
Definition: heap.cc:1667
bool InOldDataSpace(Address address)
Definition: heap-inl.h:354
static const int kInitialNumberStringCacheSize
Definition: heap.h:1950
bool old_gen_exhausted_
Definition: heap.h:1517
void DoScavengeObject(Map *map, HeapObject **slot, HeapObject *obj)
Definition: heap.h:1221
static String * UpdateNewSpaceReferenceInExternalStringTableEntry(Heap *heap, Object **pointer)
Definition: heap.cc:1561
int survived_since_last_expansion_
Definition: heap.h:1438
size_t object_counts_[OBJECT_STATS_COUNT]
Definition: heap.h:1953
bool IsHeapIterable()
Definition: heap.cc:4248
void OnAllocationEvent(HeapObject *object, int size_in_bytes)
Definition: heap-inl.h:224
void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, GCType gc_type_filter, bool pass_isolate=true)
Definition: heap.cc:5298
void CreateInitialObjects()
Definition: heap.cc:2743
PromotionQueue * promotion_queue()
Definition: heap.h:753
bool WorthActivatingIncrementalMarking()
Definition: heap.cc:4290
void ProcessWeakReferences(WeakObjectRetainer *retainer)
Definition: heap.cc:1626
@ RECORD_SCRATCHPAD_SLOT
Definition: heap.h:974
MUST_USE_RESULT AllocationResult AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field)
void RememberUnmappedPage(Address page, bool compacted)
Definition: heap.cc:6072
void ReportStatisticsAfterGC()
Definition: heap.cc:394
void ProcessNativeContexts(WeakObjectRetainer *retainer)
Definition: heap.cc:1635
static const int kMaxOldSpaceSizeMediumMemoryDevice
Definition: heap.h:1025
intptr_t old_generation_allocation_limit_
Definition: heap.h:1513
unsigned int ms_count_
Definition: heap.h:1479
void FinalizeExternalString(String *string)
Definition: heap-inl.h:307
double get_max_gc_pause()
Definition: heap.h:1189
intptr_t MaxExecutableSize()
Definition: heap.h:556
bool InNewSpace(Object *object)
Definition: heap-inl.h:322
int unflattened_strings_length_
Definition: heap.h:1490
void IterateSmiRoots(ObjectVisitor *v)
Definition: heap.cc:4739
bool PerformGarbageCollection(GarbageCollector collector, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition: heap.cc:1042
int contexts_disposed_
Definition: heap.h:1446
MUST_USE_RESULT AllocationResult AllocateByteArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:3203
void GarbageCollectionPrologue()
Definition: heap.cc:410
void TearDownArrayBuffers()
Definition: heap.cc:1649
ExternalArray * EmptyExternalArrayForMap(Map *map)
Definition: heap.cc:3178
static const int kMakeHeapIterableMask
Definition: heap.h:721
CellSpace * cell_space()
Definition: heap.h:598
size_t crankshaft_codegen_bytes_generated_
Definition: heap.h:1989
Marking * marking()
Definition: heap.h:1203
void CreateFillerObjectAt(Address addr, int size)
Definition: heap.cc:3221
friend class Page
Definition: heap.h:2036
void InitializeAllocationMemento(AllocationMemento *memento, AllocationSite *allocation_site)
Definition: heap.cc:3563
intptr_t MaxReserved()
Definition: heap.h:549
void MakeHeapIterable()
Definition: heap.cc:4255
bool AllowedToBeMigrated(HeapObject *object, AllocationSpace dest)
Definition: heap-inl.h:427
size_t object_sizes_last_time_[OBJECT_STATS_COUNT]
Definition: heap.h:1956
void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags)
Definition: heap.cc:1146
MUST_USE_RESULT AllocationResult AllocateFixedArray(int length, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:4073
static const int kAbortIncrementalMarkingMask
Definition: heap.h:718
GCTracer tracer_
Definition: heap.h:1884
MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(const ConstantPoolArray::NumberOfEntries &small)
Definition: heap.cc:4128
Object ** roots_array_start()
Definition: heap.h:896
MUST_USE_RESULT AllocationResult AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure)
Definition: heap.cc:4107
Object * weak_object_to_code_table()
Definition: heap.h:806
static const intptr_t kMinimumOldGenerationAllocationLimit
Definition: heap.h:1011
MUST_USE_RESULT AllocationResult AllocateRaw(int size_in_bytes, AllocationSpace space, AllocationSpace retry_space)
Definition: heap-inl.h:166
void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, double marking_time)
Definition: heap.cc:5881
bool IsHighSurvivalRate()
Definition: heap.h:1925
intptr_t Available()
Definition: heap.cc:211
StoreBuffer * store_buffer()
Definition: heap.h:1201
static const StringTypeTable string_type_table[]
Definition: heap.h:1558
Isolate * isolate()
Definition: heap-inl.h:589
Object * roots_[kRootListLength]
Definition: heap.h:1426
int allocation_sites_scratchpad_length_
Definition: heap.h:2001
int FullSizeNumberStringCacheLength()
Definition: heap.cc:3038
void set_allocation_sites_list(Object *object)
Definition: heap.h:798
double get_min_in_mutator()
Definition: heap.h:1195
void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, GCType gc_type_filter, bool pass_isolate=true)
Definition: heap.cc:5319
Map * MapForExternalArrayType(ExternalArrayType array_type)
Definition: heap.cc:3101
MUST_USE_RESULT AllocationResult CopyConstantPoolArray(ConstantPoolArray *src)
Definition: heap-inl.h:160
int64_t amount_of_external_allocated_memory_at_last_global_gc_
Definition: heap.h:1420
static const StructTable struct_table[]
Definition: heap.h:1560
bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, int max_executable_size, size_t code_range_size)
Definition: heap.cc:4827
bool RootCanBeTreatedAsConstant(RootListIndex root_index)
Definition: heap.cc:2940
void ClearAllICsByKind(Code::Kind kind)
Definition: heap.cc:470
void AgeInlineCaches()
Definition: heap.h:1270
MUST_USE_RESULT AllocationResult CopyFixedDoubleArray(FixedDoubleArray *src)
Definition: heap-inl.h:154
MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray *src)
Definition: heap.cc:3939
String * hidden_string_
Definition: heap.h:1564
void ClearJSFunctionResultCaches()
Definition: heap.cc:980
MUST_USE_RESULT AllocationResult Allocate(Map *map, AllocationSpace space, AllocationSite *allocation_site=NULL)
Definition: heap.cc:3574
MUST_USE_RESULT AllocationResult AllocateMap(InstanceType instance_type, int instance_size, ElementsKind elements_kind=TERMINAL_FAST_ELEMENTS_KIND)
Definition: heap.cc:2295
void IterateWeakRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:4728
MUST_USE_RESULT AllocationResult AllocateJSObjectFromMap(Map *map, PretenureFlag pretenure=NOT_TENURED, bool alloc_props=true, AllocationSite *allocation_site=NULL)
Definition: heap.cc:3631
int nodes_died_in_new_space_
Definition: heap.h:1913
MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(const ConstantPoolArray::NumberOfEntries &small, const ConstantPoolArray::NumberOfEntries &extended)
Definition: heap.cc:4152
HeapState gc_state_
Definition: heap.h:1462
int nodes_promoted_
Definition: heap.h:1915
int gc_post_processing_depth_
Definition: heap.h:1463
PropertyCellSpace * property_cell_space_
Definition: heap.h:1460
Object * encountered_weak_collections_
Definition: heap.h:1537
void UpdateNewSpaceReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1576
void DisableInlineAllocation()
Definition: heap.cc:5034
static const int kNoGCFlags
Definition: heap.h:716
int max_semi_space_size_
Definition: heap.h:1430
static void ScavengeStoreBufferCallback(Heap *heap, MemoryChunk *page, StoreBufferEvent event)
Definition: heap.cc:1309
intptr_t maximum_committed_
Definition: heap.h:1434
OldSpace * old_pointer_space_
Definition: heap.h:1455
void AddAllocationSiteToScratchpad(AllocationSite *site, ScratchpadSlotMode mode)
Definition: heap.cc:3077
static const int kYoungSurvivalRateHighThreshold
Definition: heap.h:1903
NewSpace new_space_
Definition: heap.h:1454
bool InFromSpace(Object *object)
Definition: heap-inl.h:334
void IncrementPromotedObjectsSize(int object_size)
Definition: heap.h:1114
int remembered_unmapped_pages_index_
Definition: heap.h:1486
MemoryChunk * chunks_queued_for_free_
Definition: heap.h:2017
Object * weak_object_to_code_table_
Definition: heap.h:1532
bool CollectGarbage(AllocationSpace space, const char *gc_reason=NULL, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition: heap-inl.h:581
size_t object_counts_last_time_[OBJECT_STATS_COUNT]
Definition: heap.h:1954
bool inline_allocation_disabled_
Definition: heap.h:1521
void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags)
Definition: heap.cc:1163
void ProcessAllocationSites(WeakObjectRetainer *retainer)
Definition: heap.cc:1660
void FlushAllocationSitesScratchpad()
Definition: heap.cc:3060
bool NextGCIsLikelyToBeFull()
Definition: heap.h:1135
CellSpace * cell_space_
Definition: heap.h:1459
void set_weak_object_to_code_table(Object *value)
Definition: heap.h:1935
void EnsureWeakObjectToCodeTable()
Definition: heap.cc:5367
OldSpace * old_data_space()
Definition: heap.h:595
size_t object_sizes_[OBJECT_STATS_COUNT]
Definition: heap.h:1955
bool HasBeenSetUp()
Definition: heap.cc:221
MUST_USE_RESULT AllocationResult AllocateHeapNumber(double value, MutableMode mode=IMMUTABLE, PretenureFlag pretenure=NOT_TENURED)
Definition: heap.cc:2624
void RecordStats(HeapStats *stats, bool take_snapshot=false)
Definition: heap.cc:4929
PromotionQueue promotion_queue_
Definition: heap.h:2007
bool CanMoveObjectStart(HeapObject *object)
Definition: heap.cc:3235
static AllocationSpace TargetSpaceId(InstanceType type)
Definition: heap-inl.h:399
void IterateStrongRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:4747
MUST_USE_RESULT AllocationResult AllocateRawTwoByteString(int length, PretenureFlag pretenure)
Definition: heap.cc:3895
void IterateAndMarkPointersToFromSpace(Address start, Address end, ObjectSlotCallback callback)
Definition: heap.cc:4540
bool IdleNotification(int idle_time_in_ms)
Definition: heap.cc:4296
STATIC_ASSERT(kUndefinedValueRootIndex==Internals::kUndefinedValueRootIndex)
void FlushNumberStringCache()
Definition: heap.cc:3051
int reserved_semispace_size_
Definition: heap.h:1429
void AdjustLiveBytes(Address address, int by, InvocationMode mode)
Definition: heap.cc:3254
void EnableInlineAllocation()
Definition: heap.cc:5025
IncrementalMarking * incremental_marking()
Definition: heap.h:1205
void TearDown()
Definition: heap.cc:5188
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
Definition: heap.cc:5376
Address new_space_top_after_last_gc_
Definition: heap.h:1464
void UpdateMaximumCommitted()
Definition: heap.cc:201
RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type)
Definition: heap.cc:3128
double marking_time_
Definition: heap.h:1971
void RepairFreeListsAfterBoot()
Definition: heap.cc:484
void PrintShortHeapStatistics()
Definition: heap.cc:313
MapSpace * map_space_
Definition: heap.h:1458
GarbageCollector SelectGarbageCollector(AllocationSpace space, const char **reason)
Definition: heap.cc:236
GCTracer * tracer()
Definition: heap.h:1166
VisitorDispatchTable< ScavengingCallback > scavenging_visitors_table_
Definition: heap.h:2015
MUST_USE_RESULT AllocationResult AllocateRawFixedArray(int length, PretenureFlag pretenure)
Definition: heap.cc:4039
static AllocationSpace SelectSpace(int object_size, AllocationSpace preferred_old_space, PretenureFlag pretenure)
Definition: heap.h:1649
MUST_USE_RESULT AllocationResult AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, Object *filler)
Definition: heap.cc:4051
static bool ShouldZapGarbage()
Definition: heap.h:926
bool ShouldBePromoted(Address old_address, int object_size)
Definition: heap-inl.h:370
static void CopyBlock(Address dst, Address src, int byte_size)
Definition: heap-inl.h:469
intptr_t CommittedMemory()
Definition: heap.cc:170
void OnMoveEvent(HeapObject *target, HeapObject *source, int size_in_bytes)
Definition: heap-inl.h:245
MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type)
Definition: heap.cc:4223
NewSpace * new_space()
Definition: heap.h:593
unsigned int gc_count_at_last_idle_gc_
Definition: heap.h:1985
void RightTrimFixedArray(FixedArrayBase *obj, int elements_to_trim)
Definition: heap.cc:3322
MUST_USE_RESULT AllocationResult AllocateSymbol()
Definition: heap.cc:4193
bool CreateHeapObjects()
Definition: heap.cc:5156
bool DeoptMaybeTenuredAllocationSites()
Definition: heap.h:1284
FixedArrayBase * LeftTrimFixedArray(FixedArrayBase *obj, int elements_to_trim)
Definition: heap.cc:3266
void CollectAllGarbage(int flags, const char *gc_reason=NULL, const GCCallbackFlags gc_callback_flags=kNoGCCallbackFlags)
Definition: heap.cc:724
unsigned int gc_count_
Definition: heap.h:1482
void IncrementYoungSurvivorsCounter(int survived)
Definition: heap.h:1130
Object * array_buffers_list() const
Definition: heap.h:796
MUST_USE_RESULT AllocationResult AllocateFixedTypedArray(int length, ExternalArrayType array_type, PretenureFlag pretenure)
Definition: heap.cc:3400
MUST_USE_RESULT AllocationResult AllocateJSObject(JSFunction *constructor, PretenureFlag pretenure=NOT_TENURED, AllocationSite *allocation_site=NULL)
Definition: heap.cc:3671
MUST_USE_RESULT AllocationResult CopyCode(Code *code, Vector< byte > reloc_info)
Definition: heap.cc:3501
MUST_USE_RESULT AllocationResult AllocateFillerObject(int size, bool double_align, AllocationSpace space)
Definition: heap.cc:2329
static void ScavengeObjectSlow(HeapObject **p, HeapObject *object)
Definition: heap.cc:2262
void EnsureFromSpaceIsCommitted()
Definition: heap.cc:971
MUST_USE_RESULT AllocationResult AllocatePropertyCell()
Definition: heap.cc:2661
RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind)
Definition: heap.cc:3145
void ClearNormalizedMapCaches()
Definition: heap.cc:1003
static const int kOldSurvivalRateLowThreshold
Definition: heap.h:1906
MUST_USE_RESULT AllocationResult AllocateExternalArray(int length, ExternalArrayType array_type, void *external_pointer, PretenureFlag pretenure)
Definition: heap.cc:3362
intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size, int freed_global_handles)
Definition: heap.cc:4984
void UpdateReferencesInExternalStringTable(ExternalStringTableUpdaterCallback updater_func)
Definition: heap.cc:1613
intptr_t get_max_alive_after_gc()
Definition: heap.h:1192
bool always_allocate()
Definition: heap.h:622
intptr_t Capacity()
Definition: heap.cc:160
void CompletelyClearInstanceofCache()
Definition: heap-inl.h:711
bool CreateInitialMaps()
Definition: heap.cc:2372
void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback)
Definition: heap.cc:5328
intptr_t max_alive_after_gc_
Definition: heap.h:1965
double semi_space_copied_rate_
Definition: heap.h:1912
bool ConfigureHeapDefault()
Definition: heap.cc:4926
void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc)
Definition: heap.cc:1684
void CollectAllAvailableGarbage(const char *gc_reason=NULL)
Definition: heap.cc:735
void set_native_contexts_list(Object *object)
Definition: heap.h:790
intptr_t promoted_objects_size_
Definition: heap.h:1909
double promotion_rate_
Definition: heap.h:1910
MUST_USE_RESULT AllocationResult CopyFixedDoubleArrayWithMap(FixedDoubleArray *src, Map *map)
Definition: heap.cc:3998
size_t full_codegen_bytes_generated_
Definition: heap.h:1988
int64_t PromotedExternalMemorySize()
Definition: heap.cc:4975
bool configured_
Definition: heap.h:2011
OldSpace * old_data_space_
Definition: heap.h:1456
void InitializeAllocationSitesScratchpad()
Definition: heap.cc:3068
bool InToSpace(Object *object)
Definition: heap-inl.h:339
intptr_t SizeOfObjects()
Definition: heap.cc:460
double min_in_mutator_
Definition: heap.h:1968
void PrintAlloctionsHash()
Definition: heap-inl.h:301
void ReportStatisticsBeforeGC()
Definition: heap.cc:291
void set_encountered_weak_collections(Object *weak_collection)
Definition: heap.h:808
bool InOldPointerSpace(Address address)
Definition: heap-inl.h:344
size_t code_range_size_
Definition: heap.h:1428
MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray()
Definition: heap.cc:3918
LargeObjectSpace * lo_space_
Definition: heap.h:1461
DependentCode * LookupWeakObjectToCodeDependency(Handle< Object > obj)
Definition: heap.cc:5360
Map * MapForFixedTypedArray(ExternalArrayType array_type)
Definition: heap.cc:3123
intptr_t PromotedSpaceSizeOfObjects()
Definition: heap.cc:4967
int high_survival_rate_period_length_
Definition: heap.h:1908
int NotifyContextDisposed()
Definition: heap.cc:857
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1197
MapSpace * map_space()
Definition: heap.h:597
void UpdateSurvivalStatistics(int start_new_space_size)
Definition: heap.cc:1023
void ProcessArrayBuffers(WeakObjectRetainer *retainer)
Definition: heap.cc:1642
intptr_t semi_space_copied_object_size_
Definition: heap.h:1911
bool InSpace(Address addr, AllocationSpace space)
Definition: heap.cc:4464
void MarkMapPointersAsEncoded(bool encoded)
Definition: heap.h:1601
bool MaximumSizeScavenge()
Definition: heap.h:1282
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index)
Definition: heap.cc:2912
void Step(intptr_t allocated, CompletionAction action, bool force_marking=false)
void Start(CompactionFlag flag=ALLOW_COMPACTION)
static int SizeOfMarkedObject(HeapObject *object)
Definition: heap.h:2432
static bool IsMarked(HeapObject *object)
Definition: heap.h:2411
CodeRange * code_range()
Definition: isolate.h:863
HandleScopeImplementer * handle_scope_implementer()
Definition: isolate.h:901
StackGuard * stack_guard()
Definition: isolate.h:872
void PrintStack(StringStream *accumulator)
Definition: isolate.cc:587
DeoptimizerData * deoptimizer_data()
Definition: isolate.h:877
Builtins * builtins()
Definition: isolate.h:947
HeapProfiler * heap_profiler() const
Definition: isolate.h:972
MemoryAllocator * memory_allocator()
Definition: isolate.h:883
KeyedLookupCache * keyed_lookup_cache()
Definition: isolate.h:887
void Iterate(ObjectVisitor *v)
Definition: isolate.cc:206
DescriptorLookupCache * descriptor_lookup_cache()
Definition: isolate.h:895
Counters * counters()
Definition: isolate.h:857
ContextSlotCache * context_slot_cache()
Definition: isolate.h:891
void IterateDeferredHandles(ObjectVisitor *visitor)
Definition: isolate.cc:212
OptimizingCompilerThread * optimizing_compiler_thread()
Definition: isolate.h:1059
ThreadManager * thread_manager()
Definition: isolate.h:921
base::RandomNumberGenerator * random_number_generator()
Definition: isolate-inl.h:33
CompilationCache * compilation_cache()
Definition: isolate.h:865
CpuProfiler * cpu_profiler() const
Definition: isolate.h:971
Logger * logger()
Definition: isolate.h:866
EternalHandles * eternal_handles()
Definition: isolate.h:919
Factory * factory()
Definition: isolate.h:982
GlobalHandles * global_handles()
Definition: isolate.h:917
Bootstrapper * bootstrapper()
Definition: isolate.h:856
static const int kSize
Definition: objects.h:10073
static const int kSize
Definition: objects.h:7385
static const int kNonWeakFieldsEndOffset
Definition: objects.h:7383
static const int kCodeEntryOffset
Definition: objects.h:7376
static const int kSize
Definition: objects.h:7688
void InitializeBody(Map *map, Object *pre_allocated_value, Object *filler_value)
Definition: objects-inl.h:2068
static const int kHeaderSize
Definition: objects.h:2195
static const int kInitialMaxFastElementArray
Definition: objects.h:2180
ElementsKind GetElementsKind()
Definition: objects-inl.h:6318
static const int kLength
Definition: heap.h:2252
static const int kHashMask
Definition: heap.h:2255
static const int kCapacityMask
Definition: heap.h:2253
int Lookup(Handle< Map > map, Handle< Name > name)
Definition: heap.cc:5906
static const int kMapHashShift
Definition: heap.h:2254
static const int kEntriesPerBucket
Definition: heap.h:2256
static const int kNotFound
Definition: heap.h:2260
static int Hash(Handle< Map > map, Handle< Name > name)
Definition: heap.cc:5897
void Update(Handle< Map > map, Handle< Name > name, int field_offset)
Definition: heap.cc:5919
int field_offsets_[kLength]
Definition: heap.h:2291
virtual intptr_t SizeOfObjects()
Definition: spaces.h:2742
intptr_t MaximumCommittedMemory()
Definition: spaces.h:2744
MUST_USE_RESULT AllocationResult AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2834
virtual intptr_t Size()
Definition: spaces.h:2740
bool CanAllocateSize(int size)
Definition: spaces.h:2735
bool SlowContains(Address addr)
Definition: spaces.h:2783
bool is_logging()
Definition: log.h:311
static const int kIsExtensible
Definition: objects.h:6250
static const int kPointerFieldsEndOffset
Definition: objects.h:6207
static const int kPointerFieldsBeginOffset
Definition: objects.h:6206
static const int kSize
Definition: objects.h:6202
void TransferMark(Address old_start, Address new_start)
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:262
void Free(MemoryChunk *chunk)
Definition: spaces.cc:700
bool IsOutsideAllocatedSpace(const void *address) const
Definition: spaces.h:1041
void set_owner(Space *space)
Definition: spaces.h:317
bool Contains(Address addr)
Definition: spaces.h:348
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:868
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:299
static void IncrementLiveBytesFromGC(Address address, int by)
Definition: spaces.h:517
void set_scan_on_scavenge(bool scan)
Definition: spaces-inl.h:157
static const int kBodyOffset
Definition: spaces.h:543
void SetFlag(int flag)
Definition: spaces.h:405
Space * owner() const
Definition: spaces.h:307
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:276
MemoryChunk * next_chunk() const
Definition: spaces.h:291
void set_size(size_t size)
Definition: spaces.h:556
void SetArea(Address area_start, Address area_end)
Definition: spaces.h:558
size_t size() const
Definition: spaces.h:554
static Address & Address_at(Address addr)
Definition: v8memory.h:56
static const int kHashShift
Definition: objects.h:8499
static const int kEmptyHashField
Definition: objects.h:8534
void set_hash_field(uint32_t value)
Definition: objects-inl.h:3301
static const uint32_t kHashBitMask
Definition: objects.h:8503
static const int kIsNotArrayIndexMask
Definition: objects.h:8495
bool Equals(Name *other)
Definition: objects-inl.h:3309
static bool IsAtEnd(Address addr)
Definition: spaces.h:2014
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:2031
NewSpacePage * next_page() const
Definition: spaces.h:1993
static void VisitPointer(Heap *heap, Object **p)
Definition: heap.cc:1734
intptr_t Available()
Definition: spaces.h:2400
bool IsAtMaximumCapacity()
Definition: spaces.h:2409
void LowerInlineAllocationLimit(intptr_t step)
Definition: spaces.h:2473
void RecordPromotion(HeapObject *obj)
Definition: spaces.cc:1933
Address ToSpaceEnd()
Definition: spaces.h:2489
intptr_t Capacity()
Definition: spaces.h:2371
Address FromSpaceEnd()
Definition: spaces.h:2485
intptr_t CommittedMemory()
Definition: spaces.h:2385
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1941
void set_age_mark(Address mark)
Definition: spaces.h:2441
virtual intptr_t Size()
Definition: spaces.h:2360
Address ToSpaceStart()
Definition: spaces.h:2488
void ResetAllocationInfo()
Definition: spaces.cc:1311
intptr_t MaximumCommittedMemory()
Definition: spaces.h:2391
bool SetUp(int reserved_semispace_size_, int max_semi_space_size)
Definition: spaces.cc:1175
void UpdateInlineAllocationLimit(int size_in_bytes)
Definition: spaces.cc:1323
bool CommitFromSpaceIfNeeded()
Definition: spaces.h:2534
void RecordAllocation(HeapObject *obj)
Definition: spaces.cc:1925
intptr_t TotalCapacity()
Definition: spaces.h:2379
bool ToSpaceContains(Address address)
Definition: spaces.h:2491
Address FromSpaceStart()
Definition: spaces.h:2484
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2544
bool Contains(Address a)
Definition: spaces.h:2349
static void Initialize(Isolate *isolate, Handle< Oddball > oddball, const char *to_string, Handle< Object > to_number, byte kind)
Definition: objects.cc:9596
static const byte kFalse
Definition: objects.h:9400
static const byte kUndefined
Definition: objects.h:9406
static const byte kArgumentMarker
Definition: objects.h:9405
static const byte kException
Definition: objects.h:9409
static const byte kTheHole
Definition: objects.h:9403
static const byte kTrue
Definition: objects.h:9401
static const byte kNull
Definition: objects.h:9404
static const int kSize
Definition: objects.h:9398
static const byte kUninitialized
Definition: objects.h:9407
static const byte kOther
Definition: objects.h:9408
bool WasSwept()
Definition: spaces.h:766
static const int kPageSize
Definition: spaces.h:748
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:754
bool Contains(Address a)
Definition: spaces-inl.h:150
intptr_t CommittedMemory()
Definition: spaces.h:1692
size_t CommittedPhysicalMemory()
Definition: spaces.cc:920
intptr_t MaximumCommittedMemory()
Definition: spaces.h:1695
intptr_t Available()
Definition: spaces.h:1733
virtual intptr_t SizeOfObjects()
Definition: spaces.cc:2502
MUST_USE_RESULT AllocationResult AllocateRaw(int size_in_bytes)
Definition: spaces-inl.h:248
bool IsBelowPromotionQueue(Address to_space_top)
Definition: heap.h:402
void remove(HeapObject **target, int *size)
Definition: heap.h:421
static const int kEntrySizeInWords
Definition: heap.h:449
List< Entry > * emergency_stack_
Definition: heap.h:457
void insert(HeapObject *target, int size)
Definition: heap-inl.h:24
void SetNewLimit(Address limit)
Definition: heap.h:392
void set_type(HeapType *value, WriteBarrierMode mode=UPDATE_WRITE_BARRIER)
Definition: objects.cc:16342
static const int kSize
Definition: objects.h:9496
static const int kStringOffset
Definition: heap.h:2384
static const int kPatternOffset
Definition: heap.h:2385
static const int kArrayEntriesPerCacheEntry
Definition: heap.h:2383
static Object * Lookup(Heap *heap, String *key_string, Object *key_pattern, ResultsCacheType type)
Definition: heap.cc:2946
static void Enter(Isolate *isolate, Handle< String > key_string, Handle< Object > key_pattern, Handle< FixedArray > value_array, ResultsCacheType type)
Definition: heap.cc:2977
static const int kRegExpResultsCacheSize
Definition: heap.h:2380
static const int kArrayOffset
Definition: heap.h:2386
static void Clear(FixedArray *cache)
Definition: heap.cc:3031
static void InitializeIntrinsicFunctionNames(Isolate *isolate, Handle< NameDictionary > dict)
Definition: runtime.cc:9273
static void FreeArrayBuffer(Isolate *isolate, JSArrayBuffer *phantom_array_buffer)
void VisitPointers(Object **start, Object **end)
Definition: heap.cc:1238
void VisitPointer(Object **p)
Definition: heap.cc:1236
ScavengeVisitor(Heap *heap)
Definition: heap.cc:1234
void ScavengePointer(Object **p)
Definition: heap.cc:1244
virtual Object * RetainAs(Object *object)
Definition: heap.cc:1403
static void Visit(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2197
static void VisitSpecialized(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2191
static VisitorDispatchTable< ScavengingCallback > * GetTable()
Definition: heap.cc:1894
static void EvacuateObject(Map *map, HeapObject **slot, HeapObject *object, int object_size)
Definition: heap.cc:2041
static void EvacuateFixedFloat64Array(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2116
static void EvacuateSeqOneByteString(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2132
static void RecordCopiedObject(Heap *heap, HeapObject *obj)
Definition: heap.cc:1901
static VisitorDispatchTable< ScavengingCallback > table_
Definition: heap.cc:2204
INLINE(static void MigrateObject(Heap *heap, HeapObject *source, HeapObject *target, int size))
Definition: heap.cc:1919
static bool PromoteObject(Map *map, HeapObject **slot, HeapObject *object, int object_size)
Definition: heap.cc:1994
static void EvacuateSeqTwoByteString(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2141
static void EvacuateByteArray(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2124
static void EvacuateShortcutCandidate(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2150
static bool SemiSpaceCopyObject(Map *map, HeapObject **slot, HeapObject *object, int object_size)
Definition: heap.cc:1954
static void EvacuateFixedDoubleArray(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2099
static void EvacuateFixedArray(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2091
static void EvacuateJSFunction(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2067
static void EvacuateFixedTypedArray(Map *map, HeapObject **slot, HeapObject *object)
Definition: heap.cc:2108
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:2165
static int SizeFor(int length)
Definition: objects.h:8976
static const int kMaxSize
Definition: objects.h:8981
static int SizeFor(int length)
Definition: objects.h:9015
static const int kMaxSize
Definition: objects.h:9020
static void Iterate(Isolate *isolate, ObjectVisitor *visitor)
Definition: serialize.cc:1293
static const int kNumberOfPreallocatedSpaces
Definition: serialize.h:152
static const int kAlignedSize
Definition: objects.h:6979
static const int kSize
Definition: objects.h:9106
static Smi * FromInt(int value)
Definition: objects-inl.h:1321
static bool HaveASnapshotToStartFrom()
ObjectIterator * next()
Definition: heap.cc:5485
ObjectIterator * CreateIterator()
Definition: heap.cc:5502
ObjectIterator * iterator_
Definition: heap.h:2186
HeapObjectCallback size_func_
Definition: heap.h:2187
SpaceIterator(Heap *heap)
Definition: heap.cc:5459
AllocationSpace identity()
Definition: spaces.h:829
static VisitorId GetVisitorId(int instance_type, int instance_size)
void Callback(MemoryChunk *page, StoreBufferEvent event)
Definition: heap.cc:1315
void EnterDirectlyIntoStoreBuffer(Address addr)
void IteratePointersToNewSpace(ObjectSlotCallback callback)
void EnsureSpace(intptr_t space_needed)
static const int kStoreBufferSize
Definition: store-buffer.h:68
void SetTop(Object ***top)
Definition: store-buffer.h:82
static MUST_USE_RESULT MaybeHandle< String > InternalizeStringIfExists(Isolate *isolate, Handle< String > string)
Definition: objects.cc:14663
static void WriteToFlat(String *source, sinkchar *sink, int from, int to)
Definition: objects.cc:8370
static const int kEmptyStringHash
Definition: objects.h:8817
static const int32_t kMaxOneByteCharCode
Definition: objects.h:8811
static const int kMaxLength
Definition: objects.h:8820
void set_length(int value)
void InitializeBody(int object_size)
Definition: objects-inl.h:2108
static const int kSize
Definition: objects.h:8569
void Iterate(ObjectVisitor *v)
Definition: v8threads.cc:329
void VisitPointers(Object **start, Object **end)
Definition: heap.cc:5567
bool SkipObject(HeapObject *object)
Definition: heap.cc:5557
DisallowHeapAllocation no_allocation_
Definition: heap.cc:5597
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
T * start() const
Definition: vector.h:47
int length() const
Definition: vector.h:41
Callback GetVisitorById(StaticVisitorBase::VisitorId id)
void Register(StaticVisitorBase::VisitorId id, Callback callback)
void CopyFrom(VisitorDispatchTable *other)
static MUST_USE_RESULT Handle< WeakHashTable > Put(Handle< WeakHashTable > table, Handle< Object > key, Handle< Object > value)
Definition: objects.cc:15376
#define PROFILE(IsolateGetter, Call)
Definition: cpu-profiler.h:181
DEFINE_STRING(gc_fake_mmap, ...)
DEFINE_BOOL(enable_always_align_csp, ...)
(flag definition entries; their help strings were concatenated and garbled by Doxygen's macro expansion and are omitted)
#define V8_INFINITY
Definition: globals.h:25
#define OBJECT_POINTER_ALIGN(value)
Definition: globals.h:578
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)
#define MAKE_CASE(NAME, Name, name)
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)
#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size)
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)
#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)
#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)
#define ALLOCATE_MAP(instance_type, size, field_name)
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)
#define CONSTANT_STRING_ELEMENT(name, contents)
#define UPDATE_COUNTERS_FOR_SPACE(space)
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name)
#define INTERNALIZED_STRING_LIST(V)
Definition: heap.h:262
#define LOG(isolate, Call)
Definition: log.h:69
#define UNREACHABLE()
Definition: logging.h:30
#define CHECK_EQ(expected, value)
Definition: logging.h:169
#define DCHECK_LE(v1, v2)
Definition: logging.h:210
#define CHECK(condition)
Definition: logging.h:36
#define DCHECK_GE(v1, v2)
Definition: logging.h:208
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
void USE(T)
Definition: macros.h:322
intptr_t OffsetFrom(T x)
Definition: macros.h:383
#define V8_PTR_PREFIX
Definition: macros.h:360
#define arraysize(array)
Definition: macros.h:86
#define LAZY_MUTEX_INITIALIZER
Definition: mutex.h:107
unsigned short uint16_t
Definition: unicode.cc:23
uint32_t RoundUpToPowerOfTwo32(uint32_t value)
Definition: bits.cc:12
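
RoundUpToPowerOfTwo32 is used for sizing decisions such as rounding capacities up; the standard way to compute it is the bit-smearing trick below. This is a generic re-implementation for illustration, not a copy of bits.cc.

#include <cassert>
#include <cstdint>

// Smear the highest set bit downward, then add one. Powers of two map to
// themselves because of the initial decrement.
uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
  assert(value <= 0x80000000u);  // result must fit in 32 bits
  value -= 1;
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;          // all bits below the highest set bit are now 1
  return value + 1;
}
// RoundUpToPowerOfTwo32(3) == 4, RoundUpToPowerOfTwo32(4) == 4.
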
void CallOnce(OnceType *once, NoArgFunction init_func)
Definition: once.h:82
const int kPointerSize
Definition: globals.h:129
void PrintPID(const char *format,...)
Definition: utils.cc:96
const int KB
Definition: globals.h:106
@ NOT_EXECUTABLE
Definition: globals.h:391
@ SKIP_WRITE_BARRIER
Definition: objects.h:235
@ UPDATE_WRITE_BARRIER
Definition: objects.h:235
template Object * VisitWeakList< JSArrayBuffer >(Heap *heap, Object *list, WeakObjectRetainer *retainer)
static HeapObject * EnsureDoubleAligned(Heap *heap, HeapObject *object, int size)
Definition: heap.cc:1799
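
EnsureDoubleAligned fixes up an allocation so that objects requiring 8-byte alignment (for example double arrays) start on a double boundary: the allocation is made one pointer-sized word larger, and a one-word filler is placed either in front of or behind the object depending on where the raw address landed. Below is a sketch of that logic with plain integers standing in for Heap and HeapObject; CreateFillerAt and the constant names are hypothetical placeholders, not the real V8 identifiers.

#include <cstddef>
#include <cstdint>

const uintptr_t kDoubleAlignmentBytes = 8;
const uintptr_t kDoubleAlignmentMaskBits = kDoubleAlignmentBytes - 1;
const std::size_t kWordSize = sizeof(void*);

void CreateFillerAt(uintptr_t /*address*/, std::size_t /*filler_size*/) {
  // In V8 this writes a filler object so the heap stays iterable over the gap.
}

uintptr_t EnsureDoubleAligned(uintptr_t raw_address, std::size_t size_with_slack) {
  if ((raw_address & kDoubleAlignmentMaskBits) != 0) {
    CreateFillerAt(raw_address, kWordSize);        // pad in front
    return raw_address + kWordSize;                // object now starts aligned
  }
  CreateFillerAt(raw_address + size_with_slack - kWordSize, kWordSize);  // pad behind
  return raw_address;                              // already aligned
}
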
@ IGNORE_MARKS
Definition: heap.cc:1818
@ TRANSFER_MARKS
Definition: heap.cc:1818
static LifetimePosition Min(LifetimePosition a, LifetimePosition b)
@ VISIT_ONLY_STRONG
Definition: globals.h:397
@ VISIT_ALL_IN_SWEEP_NEWSPACE
Definition: globals.h:396
@ VISIT_ALL_IN_SCAVENGE
Definition: globals.h:395
const intptr_t kCodeAlignment
Definition: globals.h:240
@ USE_DEFAULT_MINIMUM_CAPACITY
Definition: globals.h:385
const int kDoubleSize
Definition: globals.h:127
void(* ObjectSlotCallback)(HeapObject **from, HeapObject *to)
Definition: store-buffer.h:20
static base::LazyMutex checkpoint_object_stats_mutex
Definition: heap.cc:6097
void MemsetPointer(T **dest, U *value, int counter)
Definition: utils.h:1183
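
MemsetPointer fills a run of pointer slots with a single value; heap code uses this style of helper to initialize caches and tables with a sentinel. A plain-loop equivalent, with a hypothetical name so it is not mistaken for the real implementation:

#include <cstddef>

// Set counter consecutive pointer slots to the same value.
template <typename T, typename U>
void FillPointers(T** dest, U* value, std::size_t counter) {
  for (std::size_t i = 0; i < counter; ++i) dest[i] = value;
}

// Usage sketch:
//   Sentinel the_hole;
//   Sentinel* cache[4];
//   FillPointers(cache, &the_hole, 4);
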
static void InitializeScavengingVisitorsTables()
Definition: heap.cc:2214
void MemMove(void *dest, const void *src, size_t size)
Definition: utils.h:353
kSerializedDataOffset Object
Definition: objects-inl.h:5322
const int kMaxInt
Definition: globals.h:109
@ FIXED_DOUBLE_ARRAY_TYPE
Definition: objects.h:692
@ FIXED_ARRAY_TYPE
Definition: objects.h:717
@ JS_OBJECT_TYPE
Definition: objects.h:731
@ PROPERTY_CELL_TYPE
Definition: objects.h:665
@ FREE_SPACE_TYPE
Definition: objects.h:673
@ BYTE_ARRAY_TYPE
Definition: objects.h:672
@ ODDBALL_TYPE
Definition: objects.h:663
@ MUTABLE_HEAP_NUMBER_TYPE
Definition: objects.h:670
@ ONE_BYTE_STRING_TYPE
Definition: objects.h:633
@ HEAP_NUMBER_TYPE
Definition: objects.h:669
@ JS_MESSAGE_OBJECT_TYPE
Definition: objects.h:729
@ JS_FUNCTION_TYPE
Definition: objects.h:749
@ SHARED_FUNCTION_INFO_TYPE
Definition: objects.h:719
@ JS_GLOBAL_OBJECT_TYPE
Definition: objects.h:735
@ JS_BUILTINS_OBJECT_TYPE
Definition: objects.h:736
@ CONSTANT_POOL_ARRAY_TYPE
Definition: objects.h:718
@ POLYMORPHIC_CODE_CACHE_TYPE
Definition: objects.h:711
@ FOREIGN_TYPE
Definition: objects.h:671
@ TERMINAL_FAST_ELEMENTS_KIND
Definition: elements-kind.h:63
static void WriteTwoByteData(Vector< const char > vector, uint16_t *chars, int len)
Definition: heap.cc:3778
@ kStoreBufferScanningPageEvent
Definition: globals.h:496
@ kStoreBufferStartScanningPagesEvent
Definition: globals.h:495
@ kStoreBufferFullEvent
Definition: globals.h:494
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
const int kVariableSizeSentinel
Definition: objects.h:309
V8_DECLARE_ONCE(initialize_gc_once)
String *(* ExternalStringTableUpdaterCallback)(Heap *heap, Object **pointer)
Definition: heap.h:346
const intptr_t kObjectAlignment
Definition: globals.h:226
const Address kFromSpaceZapValue
Definition: globals.h:272
static bool IsShortcutCandidate(int type)
Definition: objects.h:605
static const int kInvalidEnumCacheSentinel
const uint32_t kFreeListZapValue
Definition: globals.h:275
static LifetimePosition Max(LifetimePosition a, LifetimePosition b)
INLINE(static HeapObject *EnsureDoubleAligned(Heap *heap, HeapObject *object, int size))
byte * Address
Definition: globals.h:101
void PrintF(const char *format,...)
Definition: utils.cc:80
static bool IsUnscavengedHeapObject(Heap *heap, Object **p)
Definition: heap.cc:1303
@ FIRST_PAGED_SPACE
Definition: globals.h:371
@ INVALID_SPACE
Definition: globals.h:367
@ LAST_PAGED_SPACE
Definition: globals.h:372
@ OLD_DATA_SPACE
Definition: globals.h:361
@ PROPERTY_CELL_SPACE
Definition: globals.h:365
@ OLD_POINTER_SPACE
Definition: globals.h:360
LoggingAndProfiling
Definition: heap.cc:1812
@ LOGGING_AND_PROFILING_ENABLED
Definition: heap.cc:1813
@ LOGGING_AND_PROFILING_DISABLED
Definition: heap.cc:1814
int(* HeapObjectCallback)(HeapObject *obj)
Definition: globals.h:429
static void ForFixedTypedArray(ExternalArrayType array_type, int *element_size, ElementsKind *element_kind)
Definition: heap.cc:3380
kFeedbackVectorOffset flag
Definition: objects-inl.h:5418
Vector< const uint8_t > OneByteVector(const char *data, int length)
Definition: vector.h:162
static void RoundUp(Vector< char > buffer, int *length, int *decimal_point)
Definition: fixed-dtoa.cc:171
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
const intptr_t kSmiTagMask
Definition: v8.h:5744
template Object * VisitWeakList< Context >(Heap *heap, Object *list, WeakObjectRetainer *retainer)
const int MB
Definition: globals.h:107
const int kSmiTag
Definition: v8.h:5742
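
kSmiTag, kSmiTagMask and Smi::FromInt belong to V8's pointer-tagging scheme: small integers carry a 0 tag bit while heap object pointers carry a 1, so a single mask test tells them apart. A self-contained sketch of the 32-bit layout follows (the 64-bit build keeps a 31-bit payload in the upper half of the word instead, which is omitted here); the function names are illustrative.

#include <cassert>
#include <cstdint>

// 32-bit Smi layout: value << 1 with a 0 tag bit, so
// (word & kSmiTagMask) == kSmiTag identifies a Smi.
const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;
const int kSmiTagSize = 1;

intptr_t SmiFromInt(int value) {
  return (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
}
bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }
int SmiToInt(intptr_t word) { return static_cast<int>(word >> kSmiTagSize); }

int main() {
  intptr_t tagged = SmiFromInt(42);
  assert(IsSmi(tagged) && SmiToInt(tagged) == 42);
  return 0;
}
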
static void InitializeGCOnce()
Definition: heap.cc:5052
@ MARK_COMPACTOR
Definition: globals.h:389
bool IsAligned(T value, U alignment)
Definition: utils.h:123
template Object * VisitWeakList< AllocationSite >(Heap *heap, Object *list, WeakObjectRetainer *retainer)
void MemCopy(void *dest, const void *src, size_t size)
Definition: utils.h:350
static void WriteOneByteData(Vector< const char > vector, uint8_t *chars, int len)
Definition: heap.cc:3771
const intptr_t kDoubleAlignment
Definition: globals.h:234
void CopyBytes(uint8_t *target, uint8_t *source)
const intptr_t kDoubleAlignmentMask
Definition: globals.h:235
static bool AbortIncrementalMarkingAndCollectGarbage(Heap *heap, AllocationSpace space, const char *gc_reason=NULL)
Definition: heap.cc:911
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
@ None
Definition: v8.h:2211
void(* GCPrologueCallback)(GCType type, GCCallbackFlags flags)
Definition: v8.h:4216
GCCallbackFlags
Definition: v8.h:4209
@ kNoGCCallbackFlags
Definition: v8.h:4210
ExternalArrayType
Definition: v8.h:2217
@ kExternalFloat64Array
Definition: v8.h:2225
GCType
Applications can register callback functions which will be called before and after a garbage collection.
Definition: v8.h:4203
@ kGCTypeScavenge
Definition: v8.h:4204
@ kGCTypeMarkSweepCompact
Definition: v8.h:4205
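
GCType, GCCallbackFlags and the GCPrologueCallback typedef above form the public hook for observing collections: a prologue callback runs just before a GC of the requested kind, and allocation is not allowed inside it. A short sketch of registering one, assuming the V8::AddGCPrologueCallback entry point of this API generation (the Isolate-based variant takes an extra Isolate* parameter):

#include <v8.h>
#include <cstdio>

// Matches the GCPrologueCallback typedef shown above:
// void (*)(GCType type, GCCallbackFlags flags).
static void OnGCStart(v8::GCType type, v8::GCCallbackFlags /*flags*/) {
  // No allocation or object manipulation is allowed in here.
  std::printf("GC starting: %s\n",
              type == v8::kGCTypeScavenge ? "scavenge" : "mark-sweep/compact");
}

void InstallGCHook() {
  // kGCTypeAll asks for both scavenges and full mark-sweep/compact cycles.
  v8::V8::AddGCPrologueCallback(OnGCStart, v8::kGCTypeAll);
}
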
#define INSTANCE_TYPE_LIST(V)
Definition: objects.h:339
#define STRUCT_LIST(V)
Definition: objects.h:515
#define STRING_TYPE_LIST(V)
Definition: objects.h:457
#define CODE_KIND_LIST(V)
Definition: objects.h:4950
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V)
Definition: objects.h:800
#define TYPED_ARRAYS(V)
Definition: objects.h:4433
#define T(name, string, precedence)
Definition: token.cc:25