spaces.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/base/bits.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
12 #include "src/msan.h"
13 
14 namespace v8 {
15 namespace internal {
16 
17 
18 // ----------------------------------------------------------------------------
19 // HeapObjectIterator
20 
21 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
22  // You can't actually iterate over the anchor page. It is not a real page,
23  // just an anchor for the double linked page list. Initialize as if we have
24  // reached the end of the anchor page, then the first iteration will move on
25  // to the first page.
26  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
27 }
28 
29 
30 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
31  HeapObjectCallback size_func) {
32  // You can't actually iterate over the anchor page. It is not a real page,
33  // just an anchor for the double linked page list. Initialize the current
34  // address and end as NULL, then the first iteration will move on
35  // to the first page.
36  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
37 }
38 
39 
40 HeapObjectIterator::HeapObjectIterator(Page* page,
41  HeapObjectCallback size_func) {
42  Space* owner = page->owner();
43  DCHECK(owner == page->heap()->old_pointer_space() ||
44  owner == page->heap()->old_data_space() ||
45  owner == page->heap()->map_space() ||
46  owner == page->heap()->cell_space() ||
47  owner == page->heap()->property_cell_space() ||
48  owner == page->heap()->code_space());
49  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
50  page->area_end(), kOnePageOnly, size_func);
51  DCHECK(page->WasSwept() || page->SweepingCompleted());
52 }
53 
54 
55 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
56  HeapObjectIterator::PageMode mode,
57  HeapObjectCallback size_f) {
58  space_ = space;
59  cur_addr_ = cur;
60  cur_end_ = end;
61  page_mode_ = mode;
62  size_func_ = size_f;
63 }
64 
65 
66 // We have hit the end of the page and should advance to the next block of
67 // objects. This happens at the end of the page.
68 bool HeapObjectIterator::AdvanceToNextPage() {
69  DCHECK(cur_addr_ == cur_end_);
70  if (page_mode_ == kOnePageOnly) return false;
71  Page* cur_page;
72  if (cur_addr_ == NULL) {
73  cur_page = space_->anchor();
74  } else {
75  cur_page = Page::FromAddress(cur_addr_ - 1);
76  DCHECK(cur_addr_ == cur_page->area_end());
77  }
78  cur_page = cur_page->next_page();
79  if (cur_page == space_->anchor()) return false;
80  cur_addr_ = cur_page->area_start();
81  cur_end_ = cur_page->area_end();
82  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
83  return true;
84 }
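AdvanceToNextPage walks a circular, doubly linked page list whose anchor is a sentinel that holds no objects, so iteration naturally terminates when it wraps back around to the anchor. A minimal standalone sketch of that pattern (PageNode, InsertAfter and the ids below are illustrative, not V8 types):

#include <cstdio>

struct PageNode {
  int id;  // stands in for a real page's payload
  PageNode* next;
  PageNode* prev;
};

// Insert 'node' right after 'pos' in a circular list.
void InsertAfter(PageNode* pos, PageNode* node) {
  node->next = pos->next;
  node->prev = pos;
  pos->next->prev = node;
  pos->next = node;
}

int main() {
  PageNode anchor{0, nullptr, nullptr};
  anchor.next = anchor.prev = &anchor;  // empty list: anchor points at itself

  PageNode a{1}, b{2}, c{3};
  InsertAfter(&anchor, &a);
  InsertAfter(&a, &b);
  InsertAfter(&b, &c);

  // Walk real pages only; stop once we are back at the sentinel.
  for (PageNode* p = anchor.next; p != &anchor; p = p->next) {
    std::printf("page %d\n", p->id);
  }
}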
85 
86 
87 // -----------------------------------------------------------------------------
88 // CodeRange
89 
90 
91 CodeRange::CodeRange(Isolate* isolate)
92  : isolate_(isolate),
93  code_range_(NULL),
94  free_list_(0),
95  allocation_list_(0),
96  current_allocation_block_index_(0) {}
97 
98 
99 bool CodeRange::SetUp(size_t requested) {
100  DCHECK(code_range_ == NULL);
101 
102  if (requested == 0) {
103  // When a target requires the code range feature, we put all code objects
104  // in a kMaximalCodeRangeSize range of virtual address space, so that
105  // they can call each other with near calls.
106  if (kRequiresCodeRange) {
107  requested = kMaximalCodeRangeSize;
108  } else {
109  return true;
110  }
111  }
112 
114  code_range_ = new base::VirtualMemory(requested);
115  CHECK(code_range_ != NULL);
116  if (!code_range_->IsReserved()) {
117  delete code_range_;
118  code_range_ = NULL;
119  return false;
120  }
121 
122  // We are sure that we have mapped a block of requested addresses.
123  DCHECK(code_range_->size() == requested);
124  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
125  Address base = reinterpret_cast<Address>(code_range_->address());
126  Address aligned_base =
127  RoundUp(reinterpret_cast<Address>(code_range_->address()),
128  MemoryChunk::kAlignment);
129  size_t size = code_range_->size() - (aligned_base - base);
130  allocation_list_.Add(FreeBlock(aligned_base, size));
131  current_allocation_block_index_ = 0;
132  return true;
133 }
134 
135 
136 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
137  const FreeBlock* right) {
138  // The entire point of CodeRange is that the difference between two
139  // addresses in the range can be represented as a signed 32-bit int,
140  // so the cast is semantically correct.
141  return static_cast<int>(left->start - right->start);
142 }
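This comparator is only meaningful because CodeRange guarantees the whole reserved range spans well under 2 GB, so a pointer difference fits in an int. A standalone sketch of the same ordering that sidesteps the narrowing subtraction entirely (Block stands in for FreeBlock, std::sort for V8's List::Sort):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Block {
  uintptr_t start;
  size_t size;
};

int main() {
  std::vector<Block> blocks = {{0x3000, 0x1000}, {0x1000, 0x1000}, {0x6000, 0x2000}};
  // Compare the addresses directly instead of subtracting and casting.
  std::sort(blocks.begin(), blocks.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  return blocks.front().start == 0x1000 ? 0 : 1;
}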
143 
144 
145 bool CodeRange::GetNextAllocationBlock(size_t requested) {
146  for (current_allocation_block_index_++;
147  current_allocation_block_index_ < allocation_list_.length();
148  current_allocation_block_index_++) {
149  if (requested <= allocation_list_[current_allocation_block_index_].size) {
150  return true; // Found a large enough allocation block.
151  }
152  }
153 
154  // Sort and merge the free blocks on the free list and the allocation list.
155  free_list_.AddAll(allocation_list_);
156  allocation_list_.Clear();
157  free_list_.Sort(&CompareFreeBlockAddress);
158  for (int i = 0; i < free_list_.length();) {
159  FreeBlock merged = free_list_[i];
160  i++;
161  // Add adjacent free blocks to the current merged block.
162  while (i < free_list_.length() &&
163  free_list_[i].start == merged.start + merged.size) {
164  merged.size += free_list_[i].size;
165  i++;
166  }
167  if (merged.size > 0) {
168  allocation_list_.Add(merged);
169  }
170  }
171  free_list_.Clear();
172 
173  for (current_allocation_block_index_ = 0;
174  current_allocation_block_index_ < allocation_list_.length();
175  current_allocation_block_index_++) {
176  if (requested <= allocation_list_[current_allocation_block_index_].size) {
177  return true; // Found a large enough allocation block.
178  }
179  }
180 
181  // Code range is full or too fragmented.
182  return false;
183 }
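The middle of GetNextAllocationBlock coalesces an address-sorted free list by gluing together blocks whose ranges touch. The same merge loop in isolation, using std::vector instead of V8's List (Block and the addresses are illustrative):

#include <cstdint>
#include <vector>

struct Block {
  uintptr_t start;
  size_t size;
};

// Merge blocks that touch each other; input must be sorted by start address.
std::vector<Block> Coalesce(const std::vector<Block>& sorted) {
  std::vector<Block> out;
  for (size_t i = 0; i < sorted.size();) {
    Block merged = sorted[i++];
    while (i < sorted.size() && sorted[i].start == merged.start + merged.size) {
      merged.size += sorted[i++].size;  // adjacent: extend the merged block
    }
    if (merged.size > 0) out.push_back(merged);
  }
  return out;
}

int main() {
  // Two touching blocks and one detached block -> two blocks after merging.
  std::vector<Block> free = {{0x1000, 0x1000}, {0x2000, 0x1000}, {0x5000, 0x800}};
  return Coalesce(free).size() == 2 ? 0 : 1;
}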
184 
185 
186 Address CodeRange::AllocateRawMemory(const size_t requested_size,
187  const size_t commit_size,
188  size_t* allocated) {
189  DCHECK(commit_size <= requested_size);
190  DCHECK(allocation_list_.length() == 0 ||
191  current_allocation_block_index_ < allocation_list_.length());
192  if (allocation_list_.length() == 0 ||
193  requested_size > allocation_list_[current_allocation_block_index_].size) {
194  // Find an allocation block large enough.
195  if (!GetNextAllocationBlock(requested_size)) return NULL;
196  }
197  // Commit the requested memory at the start of the current allocation block.
198  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
199  FreeBlock current = allocation_list_[current_allocation_block_index_];
200  if (aligned_requested >= (current.size - Page::kPageSize)) {
201  // Don't leave a small free block, useless for a large object or chunk.
202  *allocated = current.size;
203  } else {
204  *allocated = aligned_requested;
205  }
206  DCHECK(*allocated <= current.size);
207  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
208  if (!isolate_->memory_allocator()->CommitExecutableMemory(
209  code_range_, current.start, commit_size, *allocated)) {
210  *allocated = 0;
211  return NULL;
212  }
213  allocation_list_[current_allocation_block_index_].start += *allocated;
214  allocation_list_[current_allocation_block_index_].size -= *allocated;
215  if (*allocated == current.size) {
216  // This block is used up, get the next one.
217  GetNextAllocationBlock(0);
218  }
219  return current.start;
220 }
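One detail above is worth isolating: if consuming only the aligned request would leave a tail smaller than a page, the whole block is taken instead, so no unusable sliver remains on the allocation list. A standalone sketch of that policy (the sizes and page size are made-up values):

#include <cstddef>

// Decide how much of a free block to consume: take the whole block when the
// leftover tail would be smaller than one page.
size_t BytesToTake(size_t block_size, size_t aligned_requested, size_t page_size) {
  if (aligned_requested >= block_size - page_size) return block_size;
  return aligned_requested;
}

int main() {
  const size_t kPage = 4096;
  // Leftover would be only 1 KB -> take the whole 64 KB block.
  bool whole = BytesToTake(64 * 1024, 63 * 1024, kPage) == 64 * 1024;
  // Leftover is 32 KB -> take just what was asked for.
  bool partial = BytesToTake(64 * 1024, 32 * 1024, kPage) == 32 * 1024;
  return (whole && partial) ? 0 : 1;
}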
221 
222 
223 bool CodeRange::CommitRawMemory(Address start, size_t length) {
224  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
225 }
226 
227 
228 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
229  return code_range_->Uncommit(start, length);
230 }
231 
232 
233 void CodeRange::FreeRawMemory(Address address, size_t length) {
235  free_list_.Add(FreeBlock(address, length));
236  code_range_->Uncommit(address, length);
237 }
238 
239 
240 void CodeRange::TearDown() {
241  delete code_range_; // Frees all memory in the virtual memory range.
242  code_range_ = NULL;
243  free_list_.Free();
244  allocation_list_.Free();
245 }
246 
247 
248 // -----------------------------------------------------------------------------
249 // MemoryAllocator
250 //
251 
253  : isolate_(isolate),
254  capacity_(0),
255  capacity_executable_(0),
256  size_(0),
257  size_executable_(0),
258  lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
259  highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
260 
261 
262 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
263  capacity_ = RoundUp(capacity, Page::kPageSize);
264  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
266 
267  size_ = 0;
268  size_executable_ = 0;
269 
270  return true;
271 }
272 
273 
274 void MemoryAllocator::TearDown() {
275  // Check that spaces were torn down before MemoryAllocator.
276  DCHECK(size_ == 0);
277  // TODO(gc) this will be true again when we fix FreeMemory.
278  // DCHECK(size_executable_ == 0);
279  capacity_ = 0;
280  capacity_executable_ = 0;
281 }
282 
283 
284 bool MemoryAllocator::CommitMemory(Address base, size_t size,
285  Executability executable) {
286  if (!base::VirtualMemory::CommitRegion(base, size,
287  executable == EXECUTABLE)) {
288  return false;
289  }
290  UpdateAllocatedSpaceLimits(base, base + size);
291  return true;
292 }
293 
294 
295 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
296  Executability executable) {
297  // TODO(gc) make code_range part of memory allocator?
298  DCHECK(reservation->IsReserved());
299  size_t size = reservation->size();
300  DCHECK(size_ >= size);
301  size_ -= size;
302 
303  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
304 
305  if (executable == EXECUTABLE) {
308  }
309  // Code which is part of the code-range does not have its own VirtualMemory.
310  DCHECK(isolate_->code_range() == NULL ||
312  static_cast<Address>(reservation->address())));
313  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
314  !isolate_->code_range()->valid());
315  reservation->Release();
316 }
317 
318 
319 void MemoryAllocator::FreeMemory(Address base, size_t size,
320  Executability executable) {
321  // TODO(gc) make code_range part of memory allocator?
322  DCHECK(size_ >= size);
323  size_ -= size;
324 
325  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
326 
327  if (executable == EXECUTABLE) {
330  }
331  if (isolate_->code_range() != NULL &&
332  isolate_->code_range()->contains(static_cast<Address>(base))) {
333  DCHECK(executable == EXECUTABLE);
335  } else {
336  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
337  !isolate_->code_range()->valid());
338  bool result = base::VirtualMemory::ReleaseRegion(base, size);
339  USE(result);
340  DCHECK(result);
341  }
342 }
343 
344 
345 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
346  base::VirtualMemory* controller) {
347  base::VirtualMemory reservation(size, alignment);
348 
349  if (!reservation.IsReserved()) return NULL;
350  size_ += reservation.size();
351  Address base =
352  RoundUp(static_cast<Address>(reservation.address()), alignment);
353  controller->TakeControl(&reservation);
354  return base;
355 }
356 
357 
358 Address MemoryAllocator::AllocateAlignedMemory(
359  size_t reserve_size, size_t commit_size, size_t alignment,
360  Executability executable, base::VirtualMemory* controller) {
361  DCHECK(commit_size <= reserve_size);
362  base::VirtualMemory reservation;
363  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
364  if (base == NULL) return NULL;
365 
366  if (executable == EXECUTABLE) {
367  if (!CommitExecutableMemory(&reservation, base, commit_size,
368  reserve_size)) {
369  base = NULL;
370  }
371  } else {
372  if (reservation.Commit(base, commit_size, false)) {
373  UpdateAllocatedSpaceLimits(base, base + commit_size);
374  } else {
375  base = NULL;
376  }
377  }
378 
379  if (base == NULL) {
380  // Failed to commit the body. Release the mapping and any partially
381  // committed regions inside it.
382  reservation.Release();
383  return NULL;
384  }
385 
386  controller->TakeControl(&reservation);
387  return base;
388 }
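ReserveAlignedMemory and AllocateAlignedMemory both depend on rounding the reserved base up to MemoryChunk::kAlignment. For a power-of-two alignment this is a single mask operation; a standalone sketch (RoundUpTo is a stand-in for V8's RoundUp helper):

#include <cassert>
#include <cstdint>

// Round 'value' up to the next multiple of 'alignment' (a power of two),
// the same arithmetic applied to the reserved base address above.
uintptr_t RoundUpTo(uintptr_t value, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(RoundUpTo(0x12345, 0x1000) == 0x13000);
  assert(RoundUpTo(0x13000, 0x1000) == 0x13000);  // already aligned
  return 0;
}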
389 
390 
391 void Page::InitializeAsAnchor(PagedSpace* owner) {
392  set_owner(owner);
393  set_prev_page(this);
394  set_next_page(this);
395 }
396 
397 
398 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
399  SemiSpace* semi_space) {
401  Address area_end = start + Page::kPageSize;
402 
403  MemoryChunk* chunk =
406  chunk->set_next_chunk(NULL);
407  chunk->set_prev_chunk(NULL);
408  chunk->initialize_scan_on_scavenge(true);
409  bool in_to_space = (semi_space->id() != kFromSpace);
410  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
412  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
414  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
416  return page;
417 }
418 
419 
422  set_next_chunk(this);
423  set_prev_chunk(this);
424  // Flags mark this invalid page as not being in new-space.
425  // All real new-space pages will be in new-space.
426  SetFlags(0, ~0);
427 }
428 
429 
431  Address area_start, Address area_end,
432  Executability executable, Space* owner) {
433  MemoryChunk* chunk = FromAddress(base);
434 
435  DCHECK(base == chunk->address());
436 
437  chunk->heap_ = heap;
438  chunk->size_ = size;
439  chunk->area_start_ = area_start;
440  chunk->area_end_ = area_end;
441  chunk->flags_ = 0;
442  chunk->set_owner(owner);
443  chunk->InitializeReservedMemory();
444  chunk->slots_buffer_ = NULL;
445  chunk->skip_list_ = NULL;
447  chunk->progress_bar_ = 0;
448  chunk->high_water_mark_ = static_cast<int>(area_start - base);
453  chunk->available_in_huge_free_list_ = 0;
454  chunk->non_available_small_blocks_ = 0;
455  chunk->ResetLiveBytes();
456  Bitmap::Clear(chunk);
457  chunk->initialize_scan_on_scavenge(false);
458  chunk->SetFlag(WAS_SWEPT);
459 
462 
463  if (executable == EXECUTABLE) {
464  chunk->SetFlag(IS_EXECUTABLE);
465  }
466 
467  if (owner == heap->old_data_space()) {
468  chunk->SetFlag(CONTAINS_ONLY_DATA);
469  }
470 
471  return chunk;
472 }
473 
474 
475 // Commit MemoryChunk area to the requested size.
476 bool MemoryChunk::CommitArea(size_t requested) {
477  size_t guard_size =
479  size_t header_size = area_start() - address() - guard_size;
480  size_t commit_size =
481  RoundUp(header_size + requested, base::OS::CommitPageSize());
482  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
484 
485  if (commit_size > committed_size) {
486  // Commit size should be less than or equal to the reserved size.
487  DCHECK(commit_size <= size() - 2 * guard_size);
488  // Append the committed area.
489  Address start = address() + committed_size + guard_size;
490  size_t length = commit_size - committed_size;
491  if (reservation_.IsReserved()) {
494  if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
495  executable)) {
496  return false;
497  }
498  } else {
499  CodeRange* code_range = heap_->isolate()->code_range();
500  DCHECK(code_range != NULL && code_range->valid() &&
502  if (!code_range->CommitRawMemory(start, length)) return false;
503  }
504 
505  if (Heap::ShouldZapGarbage()) {
506  heap_->isolate()->memory_allocator()->ZapBlock(start, length);
507  }
508  } else if (commit_size < committed_size) {
509  DCHECK(commit_size > 0);
510  // Shrink the committed area.
511  size_t length = committed_size - commit_size;
512  Address start = address() + committed_size + guard_size - length;
513  if (reservation_.IsReserved()) {
514  if (!reservation_.Uncommit(start, length)) return false;
515  } else {
516  CodeRange* code_range = heap_->isolate()->code_range();
517  DCHECK(code_range != NULL && code_range->valid() &&
519  if (!code_range->UncommitRawMemory(start, length)) return false;
520  }
521  }
522 
523  area_end_ = area_start_ + requested;
524  return true;
525 }
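CommitArea works in terms of two rounded sizes, both measured from the chunk header and rounded to the OS commit granularity: what is committed now and what the request needs. The sign of their difference decides whether to commit more or uncommit some. A standalone sketch with made-up sizes:

#include <cstddef>
#include <cstdio>

size_t RoundUpSize(size_t v, size_t granularity) {
  return ((v + granularity - 1) / granularity) * granularity;
}

int main() {
  const size_t kOsPage = 4096;
  size_t header = 1024;               // illustrative header size
  size_t committed_area = 8 * 1024;   // usable bytes committed now
  size_t requested_area = 20 * 1024;  // usable bytes the caller wants

  size_t committed = RoundUpSize(header + committed_area, kOsPage);
  size_t wanted = RoundUpSize(header + requested_area, kOsPage);

  if (wanted > committed) {
    std::printf("grow by %zu bytes\n", wanted - committed);
  } else if (wanted < committed) {
    std::printf("shrink by %zu bytes\n", committed - wanted);
  } else {
    std::printf("no change\n");
  }
}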
526 
527 
529  MemoryChunk* other_next = other->next_chunk();
530 
531  set_next_chunk(other_next);
532  set_prev_chunk(other);
533  other_next->set_prev_chunk(this);
534  other->set_next_chunk(this);
535 }
536 
537 
539  MemoryChunk* next_element = next_chunk();
540  MemoryChunk* prev_element = prev_chunk();
541  next_element->set_prev_chunk(prev_element);
542  prev_element->set_next_chunk(next_element);
545 }
546 
547 
548 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
549  intptr_t commit_area_size,
550  Executability executable,
551  Space* owner) {
552  DCHECK(commit_area_size <= reserve_area_size);
553 
554  size_t chunk_size;
555  Heap* heap = isolate_->heap();
556  Address base = NULL;
557  base::VirtualMemory reservation;
558  Address area_start = NULL;
559  Address area_end = NULL;
560 
561  //
562  // MemoryChunk layout:
563  //
564  // Executable
565  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
566  // | Header |
567  // +----------------------------+<- base + CodePageGuardStartOffset
568  // | Guard |
569  // +----------------------------+<- area_start_
570  // | Area |
571  // +----------------------------+<- area_end_ (area_start + commit_area_size)
572  // | Committed but not used |
573  // +----------------------------+<- aligned at OS page boundary
574  // | Reserved but not committed |
575  // +----------------------------+<- aligned at OS page boundary
576  // | Guard |
577  // +----------------------------+<- base + chunk_size
578  //
579  // Non-executable
580  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
581  // | Header |
582  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
583  // | Area |
584  // +----------------------------+<- area_end_ (area_start + commit_area_size)
585  // | Committed but not used |
586  // +----------------------------+<- aligned at OS page boundary
587  // | Reserved but not committed |
588  // +----------------------------+<- base + chunk_size
589  //
590 
591  if (executable == EXECUTABLE) {
592  chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
595 
596  // Check executable memory limit.
597  if (size_executable_ + chunk_size > capacity_executable_) {
598  LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
599  "V8 Executable Allocation capacity exceeded"));
600  return NULL;
601  }
602 
603  // Size of header (not executable) plus area (executable).
604  size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
606  // Allocate executable memory either from code range or from the
607  // OS.
608  if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
609  base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
610  &chunk_size);
611  DCHECK(
612  IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
613  if (base == NULL) return NULL;
614  size_ += chunk_size;
615  // Update executable memory size.
616  size_executable_ += chunk_size;
617  } else {
618  base = AllocateAlignedMemory(chunk_size, commit_size,
619  MemoryChunk::kAlignment, executable,
620  &reservation);
621  if (base == NULL) return NULL;
622  // Update executable memory size.
623  size_executable_ += reservation.size();
624  }
625 
626  if (Heap::ShouldZapGarbage()) {
628  ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
629  }
630 
631  area_start = base + CodePageAreaStartOffset();
632  area_end = area_start + commit_area_size;
633  } else {
634  chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
636  size_t commit_size =
637  RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
639  base =
640  AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
641  executable, &reservation);
642 
643  if (base == NULL) return NULL;
644 
645  if (Heap::ShouldZapGarbage()) {
646  ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
647  }
648 
649  area_start = base + Page::kObjectStartOffset;
650  area_end = area_start + commit_area_size;
651  }
652 
653  // Use chunk_size for statistics and callbacks because we assume that they
654  // treat reserved but not-yet committed memory regions of chunks as allocated.
655  isolate_->counters()->memory_allocated()->Increment(
656  static_cast<int>(chunk_size));
657 
658  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
659  if (owner != NULL) {
660  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
662  }
663 
665  heap, base, chunk_size, area_start, area_end, executable, owner);
666  result->set_reserved_memory(&reservation);
667  return result;
668 }
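The layout diagram inside AllocateChunk fixes where area_start and area_end fall relative to the chunk base. A standalone sketch for the non-executable case (the header size and addresses below are assumptions, not V8's real constants):

#include <cstddef>
#include <cstdio>

// Mirror of the non-executable layout sketched in the comment block above:
// [header | area (committed) | committed but unused | reserved, uncommitted].
int main() {
  const size_t kObjectStartOffset = 256;  // header size (assumed)
  const size_t kOsPage = 4096;

  size_t reserve_area = 100 * 1024;
  size_t commit_area = 40 * 1024;

  size_t chunk_size =
      ((kObjectStartOffset + reserve_area + kOsPage - 1) / kOsPage) * kOsPage;
  size_t base = 0x100000;  // pretend this came from the allocator
  size_t area_start = base + kObjectStartOffset;
  size_t area_end = area_start + commit_area;

  std::printf("chunk: [%zx, %zx)  area: [%zx, %zx)\n",
              base, base + chunk_size, area_start, area_end);
}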
669 
670 
677 }
678 
679 
681  Executability executable) {
682  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
683 
684  if (chunk == NULL) return NULL;
685 
686  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
687 }
688 
689 
691  Space* owner,
692  Executability executable) {
693  MemoryChunk* chunk =
694  AllocateChunk(object_size, object_size, executable, owner);
695  if (chunk == NULL) return NULL;
696  return LargePage::Initialize(isolate_->heap(), chunk);
697 }
698 
699 
701  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
702  if (chunk->owner() != NULL) {
704  static_cast<ObjectSpace>(1 << chunk->owner()->identity());
706  }
707 
708  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
709  chunk->IsEvacuationCandidate());
710 
711  delete chunk->slots_buffer();
712  delete chunk->skip_list();
713 
714  base::VirtualMemory* reservation = chunk->reserved_memory();
715  if (reservation->IsReserved()) {
716  FreeMemory(reservation, chunk->executable());
717  } else {
718  FreeMemory(chunk->address(), chunk->size(), chunk->executable());
719  }
720 }
721 
722 
724  Executability executable) {
725  if (!CommitMemory(start, size, executable)) return false;
726 
727  if (Heap::ShouldZapGarbage()) {
728  ZapBlock(start, size);
729  }
730 
731  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
732  return true;
733 }
734 
735 
737  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
738  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
739  return true;
740 }
741 
742 
744  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
745  Memory::Address_at(start + s) = kZapValue;
746  }
747 }
748 
749 
751  AllocationAction action,
752  size_t size) {
753  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
756  if ((registration.space & space) == space &&
757  (registration.action & action) == action)
758  registration.callback(space, action, static_cast<int>(size));
759  }
760 }
761 
762 
764  MemoryAllocationCallback callback) {
765  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
766  if (memory_allocation_callbacks_[i].callback == callback) return true;
767  }
768  return false;
769 }
770 
771 
774  AllocationAction action) {
775  DCHECK(callback != NULL);
776  MemoryAllocationCallbackRegistration registration(callback, space, action);
778  return memory_allocation_callbacks_.Add(registration);
779 }
780 
781 
783  MemoryAllocationCallback callback) {
784  DCHECK(callback != NULL);
785  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
786  if (memory_allocation_callbacks_[i].callback == callback) {
788  return;
789  }
790  }
791  UNREACHABLE();
792 }
793 
794 
795 #ifdef DEBUG
796 void MemoryAllocator::ReportStatistics() {
797  float pct = static_cast<float>(capacity_ - size_) / capacity_;
798  PrintF(" capacity: %" V8_PTR_PREFIX
799  "d"
800  ", used: %" V8_PTR_PREFIX
801  "d"
802  ", available: %%%d\n\n",
803  capacity_, size_, static_cast<int>(pct * 100));
804 }
805 #endif
806 
807 
809  // We are guarding code pages: the first OS page after the header
810  // will be protected as non-writable.
812 }
813 
814 
816  return static_cast<int>(base::OS::CommitPageSize());
817 }
818 
819 
821  // We are guarding code pages: the first OS page after the header
822  // will be protected as non-writable.
824 }
825 
826 
828  // We are guarding code pages: the last OS page will be protected as
829  // non-writable.
830  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
831 }
832 
833 
835  Address start, size_t commit_size,
836  size_t reserved_size) {
837  // Commit page header (not executable).
838  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
839  return false;
840  }
841 
842  // Create guard page after the header.
843  if (!vm->Guard(start + CodePageGuardStartOffset())) {
844  return false;
845  }
846 
847  // Commit page body (executable).
848  if (!vm->Commit(start + CodePageAreaStartOffset(),
849  commit_size - CodePageGuardStartOffset(), true)) {
850  return false;
851  }
852 
853  // Create guard page before the end.
854  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
855  return false;
856  }
857 
859  commit_size -
861  return true;
862 }
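CommitExecutableMemory brackets the executable body with guard pages so that any overrun or underrun faults immediately. A POSIX-only sketch of the guard-page idea using raw mmap/mprotect rather than V8's base::VirtualMemory wrapper (page count and sizes are illustrative):

#include <sys/mman.h>
#include <cassert>
#include <cstdint>

int main() {
  const size_t kPage = 4096;
  const size_t kPages = 4;
  void* base = mmap(nullptr, kPages * kPage, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  // The last page becomes the guard: no access allowed.
  void* guard = static_cast<uint8_t*>(base) + (kPages - 1) * kPage;
  int rc = mprotect(guard, kPage, PROT_NONE);
  assert(rc == 0);

  // Writes inside the usable part are fine; touching 'guard' would fault.
  static_cast<uint8_t*>(base)[0] = 42;

  munmap(base, kPages * kPage);
  return 0;
}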
863 
864 
865 // -----------------------------------------------------------------------------
866 // MemoryChunk implementation
867 
870  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
871  static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
872  }
873  chunk->IncrementLiveBytes(by);
874 }
875 
876 
877 // -----------------------------------------------------------------------------
878 // PagedSpace implementation
879 
880 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
881  Executability executable)
882  : Space(heap, id, executable),
883  free_list_(this),
884  unswept_free_bytes_(0),
885  end_of_unswept_pages_(NULL),
886  emergency_memory_(NULL) {
887  if (id == CODE_SPACE) {
889  } else {
891  }
892  max_capacity_ =
893  (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
894  accounting_stats_.Clear();
895 
896  allocation_info_.set_top(NULL);
897  allocation_info_.set_limit(NULL);
898 
900 }
901 
902 
903 bool PagedSpace::SetUp() { return true; }
904 
905 
906 bool PagedSpace::HasBeenSetUp() { return true; }
907 
908 
910  PageIterator iterator(this);
911  while (iterator.has_next()) {
912  heap()->isolate()->memory_allocator()->Free(iterator.next());
913  }
916  accounting_stats_.Clear();
917 }
918 
919 
923  size_t size = 0;
924  PageIterator it(this);
925  while (it.has_next()) {
926  size += it.next()->CommittedPhysicalMemory();
927  }
928  return size;
929 }
930 
931 
933  // Note: this function can only be called on iterable spaces.
934  DCHECK(!heap()->mark_compact_collector()->in_use());
935 
936  if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
937 
938  Page* p = Page::FromAddress(addr);
939  HeapObjectIterator it(p, NULL);
940  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
941  Address cur = obj->address();
942  Address next = cur + obj->Size();
943  if ((cur <= addr) && (addr < next)) return obj;
944  }
945 
946  UNREACHABLE();
947  return Smi::FromInt(0);
948 }
949 
950 
952  DCHECK(max_capacity_ % AreaSize() == 0);
953 
954  if (Capacity() == max_capacity_) return false;
955 
957 
958  // Are we going to exceed capacity for this space?
959  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
960 
961  return true;
962 }
963 
964 
966  if (!CanExpand()) return false;
967 
968  intptr_t size = AreaSize();
969 
970  if (anchor_.next_page() == &anchor_) {
971  size = SizeOfFirstPage();
972  }
973 
974  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
975  executable());
976  if (p == NULL) return false;
977 
979 
981 
982  return true;
983 }
984 
985 
987  // If using an ool constant pool then transfer the constant pool allowance
988  // from the code space to the old pointer space.
989  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
990  int size = 0;
991  switch (identity()) {
992  case OLD_POINTER_SPACE:
993  size = (112 + constant_pool_delta) * kPointerSize * KB;
994  break;
995  case OLD_DATA_SPACE:
996  size = 192 * KB;
997  break;
998  case MAP_SPACE:
999  size = 16 * kPointerSize * KB;
1000  break;
1001  case CELL_SPACE:
1002  size = 16 * kPointerSize * KB;
1003  break;
1004  case PROPERTY_CELL_SPACE:
1005  size = 8 * kPointerSize * KB;
1006  break;
1007  case CODE_SPACE: {
1008  CodeRange* code_range = heap()->isolate()->code_range();
1009  if (code_range != NULL && code_range->valid()) {
1010  // When code range exists, code pages are allocated in a special way
1011  // (from the reserved code range). That part of the code is not yet
1012  // upgraded to handle small pages.
1013  size = AreaSize();
1014  } else {
1015  size = RoundUp((480 - constant_pool_delta) * KB *
1016  FullCodeGenerator::kBootCodeSizeMultiplier / 100,
1017  kPointerSize);
1018  }
1019  break;
1020  }
1021  default:
1022  UNREACHABLE();
1023  }
1024  return Min(size, AreaSize());
1025 }
1026 
1027 
1029  PageIterator it(this);
1030  int count = 0;
1031  while (it.has_next()) {
1032  it.next();
1033  count++;
1034  }
1035  return count;
1036 }
1037 
1038 
1040  sizes->huge_size_ = page->available_in_huge_free_list();
1041  sizes->small_size_ = page->available_in_small_free_list();
1042  sizes->medium_size_ = page->available_in_medium_free_list();
1043  sizes->large_size_ = page->available_in_large_free_list();
1044 }
1045 
1046 
1048  PageIterator page_iterator(this);
1049  while (page_iterator.has_next()) {
1050  Page* page = page_iterator.next();
1051  page->ResetFreeListStatistics();
1052  }
1053 }
1054 
1055 
1057  accounting_stats_.ExpandSpace(size);
1058 }
1059 
1060 
1062  DCHECK(page->LiveBytes() == 0);
1063  DCHECK(AreaSize() == page->area_size());
1064 
1065  if (page->WasSwept()) {
1066  intptr_t size = free_list_.EvictFreeListItems(page);
1067  accounting_stats_.AllocateBytes(size);
1068  DCHECK_EQ(AreaSize(), static_cast<int>(size));
1069  } else {
1071  }
1072 
1076  }
1077 
1079 
1080  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1081  allocation_info_.set_top(NULL);
1082  allocation_info_.set_limit(NULL);
1083  }
1084 
1085  page->Unlink();
1087  heap()->isolate()->memory_allocator()->Free(page);
1088  } else {
1089  heap()->QueueMemoryChunkForFree(page);
1090  }
1091 
1092  DCHECK(Capacity() > 0);
1093  accounting_stats_.ShrinkSpace(AreaSize());
1094 }
1095 
1096 
1099  AreaSize(), AreaSize(), executable(), this);
1100 }
1101 
1102 
1104  Page* page = static_cast<Page*>(emergency_memory_);
1105  DCHECK(page->LiveBytes() == 0);
1106  DCHECK(AreaSize() == page->area_size());
1108  heap()->isolate()->memory_allocator()->Free(page);
1110 }
1111 
1112 
1114  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1115  page->InsertAfter(anchor_.prev_page());
1117 }
1118 
1119 
1120 #ifdef DEBUG
1121 void PagedSpace::Print() {}
1122 #endif
1123 
1124 #ifdef VERIFY_HEAP
1125 void PagedSpace::Verify(ObjectVisitor* visitor) {
1126  bool allocation_pointer_found_in_space =
1127  (allocation_info_.top() == allocation_info_.limit());
1128  PageIterator page_iterator(this);
1129  while (page_iterator.has_next()) {
1130  Page* page = page_iterator.next();
1131  CHECK(page->owner() == this);
1132  if (page == Page::FromAllocationTop(allocation_info_.top())) {
1133  allocation_pointer_found_in_space = true;
1134  }
1135  CHECK(page->WasSwept());
1136  HeapObjectIterator it(page, NULL);
1137  Address end_of_previous_object = page->area_start();
1138  Address top = page->area_end();
1139  int black_size = 0;
1140  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1141  CHECK(end_of_previous_object <= object->address());
1142 
1143  // The first word should be a map, and we expect all map pointers to
1144  // be in map space.
1145  Map* map = object->map();
1146  CHECK(map->IsMap());
1147  CHECK(heap()->map_space()->Contains(map));
1148 
1149  // Perform space-specific object verification.
1150  VerifyObject(object);
1151 
1152  // The object itself should look OK.
1153  object->ObjectVerify();
1154 
1155  // All the interior pointers should be contained in the heap.
1156  int size = object->Size();
1157  object->IterateBody(map->instance_type(), size, visitor);
1158  if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1159  black_size += size;
1160  }
1161 
1162  CHECK(object->address() + size <= top);
1163  end_of_previous_object = object->address() + size;
1164  }
1165  CHECK_LE(black_size, page->LiveBytes());
1166  }
1167  CHECK(allocation_pointer_found_in_space);
1168 }
1169 #endif // VERIFY_HEAP
1170 
1171 // -----------------------------------------------------------------------------
1172 // NewSpace implementation
1173 
1174 
1175 bool NewSpace::SetUp(int reserved_semispace_capacity,
1176  int maximum_semispace_capacity) {
1177  // Set up new space based on the preallocated memory block defined by
1178  // start and size. The provided space is divided into two semi-spaces.
1179  // To support fast containment testing in the new space, the size of
1180  // this chunk must be a power of two and it must be aligned to its size.
1181  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1182 
1183  size_t size = 2 * reserved_semispace_capacity;
1185  size, size, &reservation_);
1186  if (base == NULL) return false;
1187 
1188  chunk_base_ = base;
1189  chunk_size_ = static_cast<uintptr_t>(size);
1190  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1191 
1192  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1193  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1194 
1195  // Allocate and set up the histogram arrays if necessary.
1196  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1197  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1198 
1199 #define SET_NAME(name) \
1200  allocated_histogram_[name].set_name(#name); \
1201  promoted_histogram_[name].set_name(#name);
1203 #undef SET_NAME
1204 
1205  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1206  DCHECK(static_cast<intptr_t>(chunk_size_) >=
1207  2 * heap()->ReservedSemiSpaceSize());
1208  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1209 
1210  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1211  maximum_semispace_capacity);
1212  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1213  initial_semispace_capacity, maximum_semispace_capacity);
1214  if (!to_space_.Commit()) {
1215  return false;
1216  }
1217  DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1218 
1219  start_ = chunk_base_;
1220  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1222  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1223 
1225 
1226  return true;
1227 }
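Because the reserved chunk is aligned to twice the reserved semispace capacity, address_mask_ lets NewSpace answer containment queries with a mask and a compare instead of two range checks. A standalone sketch of that test (the size and start address are assumptions chosen to satisfy the alignment requirement):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kSpaceSize = 1u << 20;  // 1 MB, power of two
  const uintptr_t kStart = 16u << 20;     // start aligned to kSpaceSize
  const uintptr_t kAddressMask = ~(kSpaceSize - 1);

  // Membership is a single AND plus compare when start is size-aligned.
  auto contains = [&](uintptr_t addr) { return (addr & kAddressMask) == kStart; };

  assert(contains(kStart));
  assert(contains(kStart + kSpaceSize - 1));
  assert(!contains(kStart + kSpaceSize));
  return 0;
}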
1228 
1229 
1231  if (allocated_histogram_) {
1234  }
1235  if (promoted_histogram_) {
1238  }
1239 
1240  start_ = NULL;
1241  allocation_info_.set_top(NULL);
1242  allocation_info_.set_limit(NULL);
1243 
1244  to_space_.TearDown();
1246 
1247  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1248 
1251  NOT_EXECUTABLE);
1252  chunk_base_ = NULL;
1253  chunk_size_ = 0;
1254 }
1255 
1256 
1258 
1259 
1261  // Double the semispace size but only up to maximum capacity.
1263  int new_capacity =
1264  Min(MaximumCapacity(), 2 * static_cast<int>(TotalCapacity()));
1265  if (to_space_.GrowTo(new_capacity)) {
1266  // Only grow from space if we managed to grow to-space.
1267  if (!from_space_.GrowTo(new_capacity)) {
1268  // If we managed to grow to-space but couldn't grow from-space,
1269  // attempt to shrink to-space.
1271  // We are in an inconsistent state because we could not
1272  // commit/uncommit memory from new space.
1273  V8::FatalProcessOutOfMemory("Failed to grow new space.");
1274  }
1275  }
1276  }
1278 }
1279 
1280 
1282  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1283  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1284  if (rounded_new_capacity < TotalCapacity() &&
1285  to_space_.ShrinkTo(rounded_new_capacity)) {
1286  // Only shrink from-space if we managed to shrink to-space.
1287  from_space_.Reset();
1288  if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1289  // If we managed to shrink to-space but couldn't shrink from
1290  // space, attempt to grow to-space again.
1292  // We are in an inconsistent state because we could not
1293  // commit/uncommit memory from new space.
1294  V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1295  }
1296  }
1297  }
1299 }
1300 
1301 
1304  allocation_info_.set_top(to_space_.page_low());
1305  allocation_info_.set_limit(to_space_.page_high());
1308 }
1309 
1310 
1312  to_space_.Reset();
1314  pages_used_ = 0;
1315  // Clear all mark-bits in the to-space.
1316  NewSpacePageIterator it(&to_space_);
1317  while (it.has_next()) {
1318  Bitmap::Clear(it.next());
1319  }
1320 }
1321 
1322 
1323 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1324  if (heap()->inline_allocation_disabled()) {
1325  // Lowest limit when linear allocation was disabled.
1326  Address high = to_space_.page_high();
1327  Address new_top = allocation_info_.top() + size_in_bytes;
1328  allocation_info_.set_limit(Min(new_top, high));
1329  } else if (inline_allocation_limit_step() == 0) {
1330  // Normal limit is the end of the current page.
1331  allocation_info_.set_limit(to_space_.page_high());
1332  } else {
1333  // Lower limit during incremental marking.
1334  Address high = to_space_.page_high();
1335  Address new_top = allocation_info_.top() + size_in_bytes;
1336  Address new_limit = new_top + inline_allocation_limit_step_;
1337  allocation_info_.set_limit(Min(new_limit, high));
1338  }
1340 }
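UpdateInlineAllocationLimit picks one of three limits: barely past the current request when inline allocation is disabled, the page end when no incremental-marking step is wanted, and a lowered limit otherwise so that a marking step runs roughly every inline_allocation_limit_step_ bytes. A standalone sketch of the same decision, with plain integers standing in for addresses:

#include <algorithm>
#include <cassert>
#include <cstdint>

uintptr_t ComputeLimit(uintptr_t top, uintptr_t page_high, int size_in_bytes,
                       intptr_t step, bool inline_allocation_disabled) {
  if (inline_allocation_disabled) {
    return std::min(top + size_in_bytes, page_high);  // lowest possible limit
  }
  if (step == 0) return page_high;                    // end of current page
  return std::min(top + size_in_bytes + step, page_high);  // marking step soon
}

int main() {
  uintptr_t top = 1000, high = 5000;
  assert(ComputeLimit(top, high, 64, 0, true) == 1064);
  assert(ComputeLimit(top, high, 64, 0, false) == 5000);
  assert(ComputeLimit(top, high, 64, 512, false) == 1576);
  return 0;
}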
1341 
1342 
1344  Address top = allocation_info_.top();
1346  // The current page is already empty. Don't try to make another.
1347 
1348  // We should only get here if someone asks to allocate more
1349  // than what can be stored in a single page.
1350  // TODO(gc): Change the limit on new-space allocation to prevent this
1351  // from happening (all such allocations should go directly to LOSpace).
1352  return false;
1353  }
1354  if (!to_space_.AdvancePage()) {
1355  // Failed to get a new page in to-space.
1356  return false;
1357  }
1358 
1359  // Clear remainder of current page.
1361  if (heap()->gc_state() == Heap::SCAVENGE) {
1363  }
1364 
1365  int remaining_in_page = static_cast<int>(limit - top);
1366  heap()->CreateFillerObjectAt(top, remaining_in_page);
1367  pages_used_++;
1369 
1370  return true;
1371 }
1372 
1373 
1375  Address old_top = allocation_info_.top();
1376  Address high = to_space_.page_high();
1377  if (allocation_info_.limit() < high) {
1378  // Either the limit has been lowered because linear allocation was disabled
1379  // or because incremental marking wants to get a chance to do a step. Set
1380  // the new limit accordingly.
1381  Address new_top = old_top + size_in_bytes;
1382  int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1383  heap()->incremental_marking()->Step(bytes_allocated,
1385  UpdateInlineAllocationLimit(size_in_bytes);
1386  top_on_previous_step_ = new_top;
1387  return AllocateRaw(size_in_bytes);
1388  } else if (AddFreshPage()) {
1389  // Switched to new page. Try allocating again.
1390  int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1391  heap()->incremental_marking()->Step(bytes_allocated,
1394  return AllocateRaw(size_in_bytes);
1395  } else {
1396  return AllocationResult::Retry();
1397  }
1398 }
1399 
1400 
1401 #ifdef VERIFY_HEAP
1402 // We do not use the SemiSpaceIterator because verification doesn't assume
1403 // that it works (it depends on the invariants we are checking).
1404 void NewSpace::Verify() {
1405  // The allocation pointer should be in the space or at the very end.
1407 
1408  // There should be objects packed in from the low address up to the
1409  // allocation pointer.
1410  Address current = to_space_.first_page()->area_start();
1411  CHECK_EQ(current, to_space_.space_start());
1412 
1413  while (current != top()) {
1414  if (!NewSpacePage::IsAtEnd(current)) {
1415  // The allocation pointer should not be in the middle of an object.
1416  CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1417  current < top());
1418 
1419  HeapObject* object = HeapObject::FromAddress(current);
1420 
1421  // The first word should be a map, and we expect all map pointers to
1422  // be in map space.
1423  Map* map = object->map();
1424  CHECK(map->IsMap());
1425  CHECK(heap()->map_space()->Contains(map));
1426 
1427  // The object should not be code or a map.
1428  CHECK(!object->IsMap());
1429  CHECK(!object->IsCode());
1430 
1431  // The object itself should look OK.
1432  object->ObjectVerify();
1433 
1434  // All the interior pointers should be contained in the heap.
1435  VerifyPointersVisitor visitor;
1436  int size = object->Size();
1437  object->IterateBody(map->instance_type(), size, &visitor);
1438 
1439  current += size;
1440  } else {
1441  // At end of page, switch to next page.
1442  NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1443  // Next page should be valid.
1444  CHECK(!page->is_anchor());
1445  current = page->area_start();
1446  }
1447  }
1448 
1449  // Check semi-spaces.
1452  from_space_.Verify();
1453  to_space_.Verify();
1454 }
1455 #endif
1456 
1457 // -----------------------------------------------------------------------------
1458 // SemiSpace implementation
1459 
1460 void SemiSpace::SetUp(Address start, int initial_capacity,
1461  int maximum_capacity) {
1462  // Creates a space in the young generation. The constructor does not
1463  // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1464  // memory of size 'capacity' when set up, and does not grow or shrink
1465  // otherwise. In the mark-compact collector, the memory region of the from
1466  // space is used as the marking stack. It requires contiguous memory
1467  // addresses.
1468  DCHECK(maximum_capacity >= Page::kPageSize);
1469  initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1470  total_capacity_ = initial_capacity;
1471  maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1472  maximum_committed_ = 0;
1473  committed_ = false;
1474  start_ = start;
1475  address_mask_ = ~(maximum_capacity - 1);
1477  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1478  age_mark_ = start_;
1479 }
1480 
1481 
1483  start_ = NULL;
1484  total_capacity_ = 0;
1485 }
1486 
1487 
1489  DCHECK(!is_committed());
1490  int pages = total_capacity_ / Page::kPageSize;
1491  if (!heap()->isolate()->memory_allocator()->CommitBlock(
1493  return false;
1494  }
1495 
1496  NewSpacePage* current = anchor();
1497  for (int i = 0; i < pages; i++) {
1498  NewSpacePage* new_page =
1500  new_page->InsertAfter(current);
1501  current = new_page;
1502  }
1503 
1505  committed_ = true;
1506  Reset();
1507  return true;
1508 }
1509 
1510 
1512  DCHECK(is_committed());
1514  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1515  total_capacity_)) {
1516  return false;
1517  }
1518  anchor()->set_next_page(anchor());
1519  anchor()->set_prev_page(anchor());
1520 
1521  committed_ = false;
1522  return true;
1523 }
1524 
1525 
1527  if (!is_committed()) return 0;
1528  size_t size = 0;
1529  NewSpacePageIterator it(this);
1530  while (it.has_next()) {
1531  size += it.next()->CommittedPhysicalMemory();
1532  }
1533  return size;
1534 }
1535 
1536 
1537 bool SemiSpace::GrowTo(int new_capacity) {
1538  if (!is_committed()) {
1539  if (!Commit()) return false;
1540  }
1541  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1542  DCHECK(new_capacity <= maximum_total_capacity_);
1543  DCHECK(new_capacity > total_capacity_);
1544  int pages_before = total_capacity_ / Page::kPageSize;
1545  int pages_after = new_capacity / Page::kPageSize;
1546 
1547  size_t delta = new_capacity - total_capacity_;
1548 
1550  if (!heap()->isolate()->memory_allocator()->CommitBlock(
1551  start_ + total_capacity_, delta, executable())) {
1552  return false;
1553  }
1554  SetCapacity(new_capacity);
1555  NewSpacePage* last_page = anchor()->prev_page();
1556  DCHECK(last_page != anchor());
1557  for (int i = pages_before; i < pages_after; i++) {
1558  Address page_address = start_ + i * Page::kPageSize;
1559  NewSpacePage* new_page =
1560  NewSpacePage::Initialize(heap(), page_address, this);
1561  new_page->InsertAfter(last_page);
1562  Bitmap::Clear(new_page);
1563  // Duplicate the flags that were set on the old page.
1564  new_page->SetFlags(last_page->GetFlags(),
1566  last_page = new_page;
1567  }
1568  return true;
1569 }
1570 
1571 
1572 bool SemiSpace::ShrinkTo(int new_capacity) {
1573  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1574  DCHECK(new_capacity >= initial_total_capacity_);
1575  DCHECK(new_capacity < total_capacity_);
1576  if (is_committed()) {
1577  size_t delta = total_capacity_ - new_capacity;
1579 
1580  MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1581  if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1582  return false;
1583  }
1584 
1585  int pages_after = new_capacity / Page::kPageSize;
1586  NewSpacePage* new_last_page =
1587  NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1588  new_last_page->set_next_page(anchor());
1589  anchor()->set_prev_page(new_last_page);
1590  DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1591  }
1592 
1593  SetCapacity(new_capacity);
1594 
1595  return true;
1596 }
1597 
1598 
1599 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1600  anchor_.set_owner(this);
1601  // Fixup back-pointers to anchor. Address of anchor changes
1602  // when we swap.
1605 
1606  bool becomes_to_space = (id_ == kFromSpace);
1607  id_ = becomes_to_space ? kToSpace : kFromSpace;
1608  NewSpacePage* page = anchor_.next_page();
1609  while (page != &anchor_) {
1610  page->set_owner(this);
1611  page->SetFlags(flags, mask);
1612  if (becomes_to_space) {
1616  page->ResetLiveBytes();
1617  } else {
1620  }
1624  page = page->next_page();
1625  }
1626 }
1627 
1628 
1632 }
1633 
1634 
1636  // We won't be swapping semispaces without data in them.
1637  DCHECK(from->anchor_.next_page() != &from->anchor_);
1638  DCHECK(to->anchor_.next_page() != &to->anchor_);
1639 
1640  // Swap bits.
1641  SemiSpace tmp = *from;
1642  *from = *to;
1643  *to = tmp;
1644 
1645  // Fixup back-pointers to the page list anchor now that its address
1646  // has changed.
1647  // Swap to/from-space bits on pages.
1648  // Copy GC flags from old active space (from-space) to new (to-space).
1649  intptr_t flags = from->current_page()->GetFlags();
1651 
1652  from->FlipPages(0, 0);
1653 }
1654 
1655 
1656 void SemiSpace::SetCapacity(int new_capacity) {
1657  total_capacity_ = new_capacity;
1660  }
1661 }
1662 
1663 
1665  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1666  age_mark_ = mark;
1667  // Mark all pages up to the one containing mark.
1668  NewSpacePageIterator it(space_start(), mark);
1669  while (it.has_next()) {
1670  it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1671  }
1672 }
1673 
1674 
1675 #ifdef DEBUG
1676 void SemiSpace::Print() {}
1677 #endif
1678 
1679 #ifdef VERIFY_HEAP
1680 void SemiSpace::Verify() {
1681  bool is_from_space = (id_ == kFromSpace);
1682  NewSpacePage* page = anchor_.next_page();
1683  CHECK(anchor_.semi_space() == this);
1684  while (page != &anchor_) {
1685  CHECK(page->semi_space() == this);
1686  CHECK(page->InNewSpace());
1687  CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1689  CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1692  if (!is_from_space) {
1693  // The pointers-from-here-are-interesting flag isn't updated dynamically
1694  // on from-space pages, so it might be out of sync with the marking state.
1695  if (page->heap()->incremental_marking()->IsMarking()) {
1697  } else {
1698  CHECK(
1700  }
1701  // TODO(gc): Check that the live_bytes_count_ field matches the
1702  // black marking on the page (if we make it match in new-space).
1703  }
1704  CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1705  CHECK(page->prev_page()->next_page() == page);
1706  page = page->next_page();
1707  }
1708 }
1709 #endif
1710 
1711 #ifdef DEBUG
1712 void SemiSpace::AssertValidRange(Address start, Address end) {
1713  // Addresses belong to same semi-space
1714  NewSpacePage* page = NewSpacePage::FromLimit(start);
1715  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1716  SemiSpace* space = page->semi_space();
1717  CHECK_EQ(space, end_page->semi_space());
1718  // Start address is before end address, either on same page,
1719  // or end address is on a later page in the linked list of
1720  // semi-space pages.
1721  if (page == end_page) {
1722  CHECK(start <= end);
1723  } else {
1724  while (page != end_page) {
1725  page = page->next_page();
1726  CHECK_NE(page, space->anchor());
1727  }
1728  }
1729 }
1730 #endif
1731 
1732 
1733 // -----------------------------------------------------------------------------
1734 // SemiSpaceIterator implementation.
1736  Initialize(space->bottom(), space->top(), NULL);
1737 }
1738 
1739 
1741  HeapObjectCallback size_func) {
1742  Initialize(space->bottom(), space->top(), size_func);
1743 }
1744 
1745 
1747  Initialize(start, space->top(), NULL);
1748 }
1749 
1750 
1752  Initialize(from, to, NULL);
1753 }
1754 
1755 
1757  HeapObjectCallback size_func) {
1758  SemiSpace::AssertValidRange(start, end);
1759  current_ = start;
1760  limit_ = end;
1761  size_func_ = size_func;
1762 }
1763 
1764 
1765 #ifdef DEBUG
1766 // heap_histograms is shared, always clear it before using it.
1767 static void ClearHistograms(Isolate* isolate) {
1768 // We reset the name each time, though it hasn't changed.
1769 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1770  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1771 #undef DEF_TYPE_NAME
1772 
1773 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1774  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1775 #undef CLEAR_HISTOGRAM
1776 
1777  isolate->js_spill_information()->Clear();
1778 }
1779 
1780 
1781 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1782  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1783  code_kind_statistics[i] = 0;
1784  }
1785 }
1786 
1787 
1788 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1789  PrintF("\n Code kind histograms: \n");
1790  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1791  if (code_kind_statistics[i] > 0) {
1792  PrintF(" %-20s: %10d bytes\n",
1793  Code::Kind2String(static_cast<Code::Kind>(i)),
1794  code_kind_statistics[i]);
1795  }
1796  }
1797  PrintF("\n");
1798 }
1799 
1800 
1801 static int CollectHistogramInfo(HeapObject* obj) {
1802  Isolate* isolate = obj->GetIsolate();
1803  InstanceType type = obj->map()->instance_type();
1804  DCHECK(0 <= type && type <= LAST_TYPE);
1805  DCHECK(isolate->heap_histograms()[type].name() != NULL);
1806  isolate->heap_histograms()[type].increment_number(1);
1807  isolate->heap_histograms()[type].increment_bytes(obj->Size());
1808 
1809  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1810  JSObject::cast(obj)
1811  ->IncrementSpillStatistics(isolate->js_spill_information());
1812  }
1813 
1814  return obj->Size();
1815 }
1816 
1817 
1818 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1819  PrintF("\n Object Histogram:\n");
1820  for (int i = 0; i <= LAST_TYPE; i++) {
1821  if (isolate->heap_histograms()[i].number() > 0) {
1822  PrintF(" %-34s%10d (%10d bytes)\n",
1823  isolate->heap_histograms()[i].name(),
1824  isolate->heap_histograms()[i].number(),
1825  isolate->heap_histograms()[i].bytes());
1826  }
1827  }
1828  PrintF("\n");
1829 
1830  // Summarize string types.
1831  int string_number = 0;
1832  int string_bytes = 0;
1833 #define INCREMENT(type, size, name, camel_name) \
1834  string_number += isolate->heap_histograms()[type].number(); \
1835  string_bytes += isolate->heap_histograms()[type].bytes();
1837 #undef INCREMENT
1838  if (string_number > 0) {
1839  PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1840  string_bytes);
1841  }
1842 
1843  if (FLAG_collect_heap_spill_statistics && print_spill) {
1844  isolate->js_spill_information()->Print();
1845  }
1846 }
1847 #endif // DEBUG
1848 
1849 
1850 // Support for statistics gathering for --heap-stats and --log-gc.
1852  for (int i = 0; i <= LAST_TYPE; i++) {
1853  allocated_histogram_[i].clear();
1854  promoted_histogram_[i].clear();
1855  }
1856 }
1857 
1858 
1859 // Because the copying collector does not touch garbage objects, we iterate
1860 // the new space before a collection to get a histogram of allocated objects.
1861 // This only happens when the --log-gc flag is set.
1863  ClearHistograms();
1864  SemiSpaceIterator it(this);
1865  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1866  RecordAllocation(obj);
1867 }
1868 
1869 
1870 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1871  const char* description) {
1872  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1873  // Lump all the string types together.
1874  int string_number = 0;
1875  int string_bytes = 0;
1876 #define INCREMENT(type, size, name, camel_name) \
1877  string_number += info[type].number(); \
1878  string_bytes += info[type].bytes();
1880 #undef INCREMENT
1881  if (string_number > 0) {
1882  LOG(isolate,
1883  HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1884  }
1885 
1886  // Then do the other types.
1887  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1888  if (info[i].number() > 0) {
1889  LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
1890  info[i].bytes()));
1891  }
1892  }
1893  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1894 }
1895 
1896 
1898 #ifdef DEBUG
1899  if (FLAG_heap_stats) {
1900  float pct = static_cast<float>(Available()) / TotalCapacity();
1901  PrintF(" capacity: %" V8_PTR_PREFIX
1902  "d"
1903  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1904  TotalCapacity(), Available(), static_cast<int>(pct * 100));
1905  PrintF("\n Object Histogram:\n");
1906  for (int i = 0; i <= LAST_TYPE; i++) {
1907  if (allocated_histogram_[i].number() > 0) {
1908  PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
1909  allocated_histogram_[i].number(),
1910  allocated_histogram_[i].bytes());
1911  }
1912  }
1913  PrintF("\n");
1914  }
1915 #endif // DEBUG
1916 
1917  if (FLAG_log_gc) {
1918  Isolate* isolate = heap()->isolate();
1919  DoReportStatistics(isolate, allocated_histogram_, "allocated");
1920  DoReportStatistics(isolate, promoted_histogram_, "promoted");
1921  }
1922 }
1923 
1924 
1926  InstanceType type = obj->map()->instance_type();
1927  DCHECK(0 <= type && type <= LAST_TYPE);
1928  allocated_histogram_[type].increment_number(1);
1929  allocated_histogram_[type].increment_bytes(obj->Size());
1930 }
1931 
1932 
1934  InstanceType type = obj->map()->instance_type();
1935  DCHECK(0 <= type && type <= LAST_TYPE);
1936  promoted_histogram_[type].increment_number(1);
1937  promoted_histogram_[type].increment_bytes(obj->Size());
1938 }
1939 
1940 
1945  if (from_space_.is_committed()) {
1947  }
1948  return size;
1949 }
1950 
1951 
1952 // -----------------------------------------------------------------------------
1953 // Free lists for old object spaces implementation
1954 
1955 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1956  DCHECK(size_in_bytes > 0);
1957  DCHECK(IsAligned(size_in_bytes, kPointerSize));
1958 
1959  // We write a map and possibly size information to the block. If the block
1960  // is big enough to be a FreeSpace with at least one extra word (the next
1961  // pointer), we set its map to be the free space map and its size to an
1962  // appropriate array length for the desired size from HeapObject::Size().
1963  // If the block is too small (e.g. one or two words) to hold both a size
1964  // field and a next pointer, we give it a filler map that gives it the
1965  // correct size.
1966  if (size_in_bytes > FreeSpace::kHeaderSize) {
1967  // Can't use FreeSpace::cast because it fails during deserialization.
1968  // We have to set the size first with a release store before we store
1969  // the map because a concurrent store buffer scan on scavenge must not
1970  // observe a map with an invalid size.
1971  FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1972  this_as_free_space->nobarrier_set_size(size_in_bytes);
1973  synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1974  } else if (size_in_bytes == kPointerSize) {
1975  set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1976  } else if (size_in_bytes == 2 * kPointerSize) {
1977  set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1978  } else {
1979  UNREACHABLE();
1980  }
1981  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
1982  // deserialization because the free space map is not done yet.
1983 }
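set_size chooses among three encodings purely by block size. A standalone sketch of that decision (the 64-bit pointer size, the header size, and the enum names are assumptions, not V8 constants):

#include <cassert>

enum class FreeEncoding { kFreeSpace, kOnePointerFiller, kTwoPointerFiller };

// Blocks big enough to carry a length field and a next pointer become
// FreeSpace objects; one- and two-word blocks get dedicated filler maps.
FreeEncoding ChooseEncoding(int size_in_bytes) {
  const int kPointerSize = 8;
  const int kFreeSpaceHeaderSize = 2 * kPointerSize;  // map + length (assumed)
  if (size_in_bytes > kFreeSpaceHeaderSize) return FreeEncoding::kFreeSpace;
  if (size_in_bytes == kPointerSize) return FreeEncoding::kOnePointerFiller;
  return FreeEncoding::kTwoPointerFiller;  // size_in_bytes == 2 * kPointerSize
}

int main() {
  assert(ChooseEncoding(8) == FreeEncoding::kOnePointerFiller);
  assert(ChooseEncoding(16) == FreeEncoding::kTwoPointerFiller);
  assert(ChooseEncoding(64) == FreeEncoding::kFreeSpace);
  return 0;
}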
1984 
1985 
1987  DCHECK(IsFreeListNode(this));
1988  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
1989  DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
1990  return reinterpret_cast<FreeListNode*>(
1992  } else {
1993  return reinterpret_cast<FreeListNode*>(
1995  }
1996 }
1997 
1998 
2000  DCHECK(IsFreeListNode(this));
2001  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2003  return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
2004  } else {
2005  return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
2006  }
2007 }
2008 
2009 
2010 void FreeListNode::set_next(FreeListNode* next) {
2011  DCHECK(IsFreeListNode(this));
2012  // While we are booting the VM the free space map will actually be null. So
2013  // we have to make sure that we don't try to use it for anything at that
2014  // stage.
2015  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2016  DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
2017  base::NoBarrier_Store(
2018  reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
2019  reinterpret_cast<base::AtomicWord>(next));
2020  } else {
2021  base::NoBarrier_Store(
2022  reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
2023  reinterpret_cast<base::AtomicWord>(next));
2024  }
2025 }
2026 
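
next(), next_address() and set_next() above keep the list links inside the freed memory itself: the word after the free-space header (or the second word of a tiny filler) stores the pointer to the next free block. A minimal standalone sketch of such an intrusive free-list node is shown below; FreeNode and kNextOffset are illustrative stand-ins, and blocks are assumed to be at least kNextOffset plus one pointer in size.

#include <cstdint>

// A freed block is reinterpreted as a node; the link is stored in the block's
// own payload (here: the word right after a two-word header), so the free
// list costs no memory beyond the blocks it tracks.
struct FreeNode {
  static constexpr int kNextOffset = 2 * sizeof(uintptr_t);

  char* address() { return reinterpret_cast<char*>(this); }

  FreeNode** next_address() {
    return reinterpret_cast<FreeNode**>(address() + kNextOffset);
  }
  FreeNode* next() { return *next_address(); }
  void set_next(FreeNode* n) { *next_address() = n; }

  static FreeNode* FromAddress(void* block) {
    return static_cast<FreeNode*>(block);
  }
};
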
2027 
2028 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2029  intptr_t free_bytes = 0;
2030  if (category->top() != NULL) {
2031  // This is safe (not going to deadlock) since Concatenate operations
2032  // are never performed on the same free lists at the same time in
2033  // reverse order.
2034  base::LockGuard<base::Mutex> target_lock_guard(mutex());
2035  base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2036  DCHECK(category->end_ != NULL);
2037  free_bytes = category->available();
2038  if (end_ == NULL) {
2039  end_ = category->end();
2040  } else {
2041  category->end()->set_next(top());
2042  }
2043  set_top(category->top());
2044  base::NoBarrier_Store(&top_, category->top_);
2045  available_ += category->available();
2046  category->Reset();
2047  }
2048  return free_bytes;
2049 }
2050 
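
The comment in Concatenate() above relies on a lock-ordering argument: the target's mutex is taken before the source's, and no two threads ever concatenate the same pair of lists in opposite directions, which rules out an ABBA deadlock. A small sketch of the same discipline with std::mutex; the types and fields are illustrative, not V8's.

#include <mutex>

struct LockedFreeList {
  std::mutex mutex;
  long available = 0;
  // ... list head/tail omitted; only the locking protocol is shown ...
};

// The caller guarantees that no other thread ever concatenates the same
// (target, source) pair in the opposite direction, so taking the target's
// mutex first can never deadlock against a reversed acquisition order.
long ConcatenateInto(LockedFreeList* target, LockedFreeList* source) {
  std::lock_guard<std::mutex> target_guard(target->mutex);
  std::lock_guard<std::mutex> source_guard(source->mutex);
  long moved = source->available;
  target->available += moved;
  source->available = 0;
  return moved;
}
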
2051 
2052 void FreeListCategory::Reset() {
2053  set_top(NULL);
2054  set_end(NULL);
2055  set_available(0);
2056 }
2057 
2058 
2059 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2060  int sum = 0;
2061  FreeListNode* t = top();
2062  FreeListNode** n = &t;
2063  while (*n != NULL) {
2064  if (Page::FromAddress((*n)->address()) == p) {
2065  FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2066  sum += free_space->Size();
2067  *n = (*n)->next();
2068  } else {
2069  n = (*n)->next_address();
2070  }
2071  }
2072  set_top(t);
2073  if (top() == NULL) {
2074  set_end(NULL);
2075  }
2076  available_ -= sum;
2077  return sum;
2078 }
2079 
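
EvictFreeListItemsInList() walks the list through a pointer-to-pointer (FreeListNode** n) so that a node on the evacuated page can be spliced out without special-casing the head or tracking a previous node. The same pattern in isolation, with an invented Node type rather than V8's:

struct Node {
  Node* next;
  const void* owner;  // the page this free block lives on
  int size;
};

// Remove every node owned by `page`, returning the total bytes removed.
// Walking with a pointer-to-pointer lets the head and interior nodes be
// unlinked by exactly the same code path, with no "previous" bookkeeping.
int EvictNodesOwnedBy(Node** head, const void* page) {
  int evicted_bytes = 0;
  Node** cursor = head;
  while (*cursor != nullptr) {
    if ((*cursor)->owner == page) {
      evicted_bytes += (*cursor)->size;
      *cursor = (*cursor)->next;   // splice the node out in place
    } else {
      cursor = &(*cursor)->next;   // advance to the next link slot
    }
  }
  return evicted_bytes;
}
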
2080 
2081 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2082  FreeListNode* node = top();
2083  while (node != NULL) {
2084  if (Page::FromAddress(node->address()) == p) return true;
2085  node = node->next();
2086  }
2087  return false;
2088 }
2089 
2090 
2091 FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
2092  FreeListNode* node = top();
2093 
2094  if (node == NULL) return NULL;
2095 
2096  while (node != NULL &&
2097  Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2098  available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
2099  node = node->next();
2100  }
2101 
2102  if (node != NULL) {
2103  set_top(node->next());
2104  *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2105  available_ -= *node_size;
2106  } else {
2107  set_top(NULL);
2108  }
2109 
2110  if (top() == NULL) {
2111  set_end(NULL);
2112  }
2113 
2114  return node;
2115 }
2116 
2117 
2118 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2119  int* node_size) {
2120  FreeListNode* node = PickNodeFromList(node_size);
2121  if (node != NULL && *node_size < size_in_bytes) {
2122  Free(node, *node_size);
2123  *node_size = 0;
2124  return NULL;
2125  }
2126  return node;
2127 }
2128 
2129 
2130 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2131  node->set_next(top());
2132  set_top(node);
2133  if (end_ == NULL) {
2134  end_ = node;
2135  }
2136  available_ += size_in_bytes;
2137 }
2138 
2139 
2140 void FreeListCategory::RepairFreeList(Heap* heap) {
2141  FreeListNode* n = top();
2142  while (n != NULL) {
2143  Map** map_location = reinterpret_cast<Map**>(n->address());
2144  if (*map_location == NULL) {
2145  *map_location = heap->free_space_map();
2146  } else {
2147  DCHECK(*map_location == heap->free_space_map());
2148  }
2149  n = n->next();
2150  }
2151 }
2152 
2153 
2154 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2155  Reset();
2156 }
2157 
2158 
2159 intptr_t FreeList::Concatenate(FreeList* free_list) {
2160  intptr_t free_bytes = 0;
2161  free_bytes += small_list_.Concatenate(free_list->small_list());
2162  free_bytes += medium_list_.Concatenate(free_list->medium_list());
2163  free_bytes += large_list_.Concatenate(free_list->large_list());
2164  free_bytes += huge_list_.Concatenate(free_list->huge_list());
2165  return free_bytes;
2166 }
2167 
2168 
2169 void FreeList::Reset() {
2170  small_list_.Reset();
2171  medium_list_.Reset();
2172  large_list_.Reset();
2173  huge_list_.Reset();
2174 }
2175 
2176 
2177 int FreeList::Free(Address start, int size_in_bytes) {
2178  if (size_in_bytes == 0) return 0;
2179 
2180  FreeListNode* node = FreeListNode::FromAddress(start);
2181  node->set_size(heap_, size_in_bytes);
2182  Page* page = Page::FromAddress(start);
2183 
2184  // Early return to drop too-small blocks on the floor.
2185  if (size_in_bytes < kSmallListMin) {
2186  page->add_non_available_small_blocks(size_in_bytes);
2187  return size_in_bytes;
2188  }
2189 
2190  // Insert other blocks at the head of a free list of the appropriate
2191  // magnitude.
2192  if (size_in_bytes <= kSmallListMax) {
2193  small_list_.Free(node, size_in_bytes);
2194  page->add_available_in_small_free_list(size_in_bytes);
2195  } else if (size_in_bytes <= kMediumListMax) {
2196  medium_list_.Free(node, size_in_bytes);
2197  page->add_available_in_medium_free_list(size_in_bytes);
2198  } else if (size_in_bytes <= kLargeListMax) {
2199  large_list_.Free(node, size_in_bytes);
2200  page->add_available_in_large_free_list(size_in_bytes);
2201  } else {
2202  huge_list_.Free(node, size_in_bytes);
2203  page->add_available_in_huge_free_list(size_in_bytes);
2204  }
2205 
2206  DCHECK(IsVeryLong() || available() == SumFreeLists());
2207  return 0;
2208 }
2209 
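
FreeList::Free() above routes a block purely by size: blocks below kSmallListMin are dropped and only accounted as waste on their page, and everything else goes to the small, medium, large or huge list. A standalone sketch of that bucketing decision, with made-up thresholds rather than V8's actual limits:

enum class SizeClass { kTooSmall, kSmall, kMedium, kLarge, kHuge };

// Thresholds are illustrative only; they are not V8's kSmallListMin/Max etc.
constexpr int kMinListed = 24;
constexpr int kSmallMax = 256;
constexpr int kMediumMax = 2048;
constexpr int kLargeMax = 16384;

SizeClass ClassifyFreeBlock(int size_in_bytes) {
  if (size_in_bytes < kMinListed) return SizeClass::kTooSmall;  // waste, not listed
  if (size_in_bytes <= kSmallMax) return SizeClass::kSmall;
  if (size_in_bytes <= kMediumMax) return SizeClass::kMedium;
  if (size_in_bytes <= kLargeMax) return SizeClass::kLarge;
  return SizeClass::kHuge;
}
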
2210 
2211 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2212  FreeListNode* node = NULL;
2213  Page* page = NULL;
2214 
2215  if (size_in_bytes <= kSmallAllocationMax) {
2216  node = small_list_.PickNodeFromList(node_size);
2217  if (node != NULL) {
2218  DCHECK(size_in_bytes <= *node_size);
2219  page = Page::FromAddress(node->address());
2220  page->add_available_in_small_free_list(-(*node_size));
2221  DCHECK(IsVeryLong() || available() == SumFreeLists());
2222  return node;
2223  }
2224  }
2225 
2226  if (size_in_bytes <= kMediumAllocationMax) {
2227  node = medium_list_.PickNodeFromList(node_size);
2228  if (node != NULL) {
2229  DCHECK(size_in_bytes <= *node_size);
2230  page = Page::FromAddress(node->address());
2231  page->add_available_in_medium_free_list(-(*node_size));
2232  DCHECK(IsVeryLong() || available() == SumFreeLists());
2233  return node;
2234  }
2235  }
2236 
2237  if (size_in_bytes <= kLargeAllocationMax) {
2238  node = large_list_.PickNodeFromList(node_size);
2239  if (node != NULL) {
2240  DCHECK(size_in_bytes <= *node_size);
2241  page = Page::FromAddress(node->address());
2242  page->add_available_in_large_free_list(-(*node_size));
2243  DCHECK(IsVeryLong() || available() == SumFreeLists());
2244  return node;
2245  }
2246  }
2247 
2248  int huge_list_available = huge_list_.available();
2249  FreeListNode* top_node = huge_list_.top();
2250  for (FreeListNode** cur = &top_node; *cur != NULL;
2251  cur = (*cur)->next_address()) {
2252  FreeListNode* cur_node = *cur;
2253  while (cur_node != NULL &&
2254  Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2255  int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2256  huge_list_available -= size;
2257  page = Page::FromAddress(cur_node->address());
2258  page->add_available_in_huge_free_list(-size);
2259  cur_node = cur_node->next();
2260  }
2261 
2262  *cur = cur_node;
2263  if (cur_node == NULL) {
2264  huge_list_.set_end(NULL);
2265  break;
2266  }
2267 
2268  DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
2269  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2270  int size = cur_as_free_space->Size();
2271  if (size >= size_in_bytes) {
2272  // Large enough node found. Unlink it from the list.
2273  node = *cur;
2274  *cur = node->next();
2275  *node_size = size;
2276  huge_list_available -= size;
2277  page = Page::FromAddress(node->address());
2278  page->add_available_in_huge_free_list(-size);
2279  break;
2280  }
2281  }
2282 
2283  huge_list_.set_top(top_node);
2284  if (huge_list_.top() == NULL) {
2285  huge_list_.set_end(NULL);
2286  }
2287  huge_list_.set_available(huge_list_available);
2288 
2289  if (node != NULL) {
2290  DCHECK(IsVeryLong() || available() == SumFreeLists());
2291  return node;
2292  }
2293 
2294  if (size_in_bytes <= kSmallListMax) {
2295  node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2296  if (node != NULL) {
2297  DCHECK(size_in_bytes <= *node_size);
2298  page = Page::FromAddress(node->address());
2299  page->add_available_in_small_free_list(-(*node_size));
2300  }
2301  } else if (size_in_bytes <= kMediumListMax) {
2302  node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2303  if (node != NULL) {
2304  DCHECK(size_in_bytes <= *node_size);
2305  page = Page::FromAddress(node->address());
2306  page->add_available_in_medium_free_list(-(*node_size));
2307  }
2308  } else if (size_in_bytes <= kLargeListMax) {
2309  node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2310  if (node != NULL) {
2311  DCHECK(size_in_bytes <= *node_size);
2312  page = Page::FromAddress(node->address());
2313  page->add_available_in_large_free_list(-(*node_size));
2314  }
2315  }
2316 
2317  DCHECK(IsVeryLong() || available() == SumFreeLists());
2318  return node;
2319 }
2320 
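
For the huge list, FindNodeFor() above falls back to a first-fit scan: nodes on evacuation-candidate pages are unlinked as it goes, and the first remaining node that is large enough is taken. The first-fit part in isolation looks roughly like the sketch below; HugeNode is an invented type, not V8's.

struct HugeNode {
  HugeNode* next;
  int size;
};

// First fit: unlink and return the first node with size >= requested, or
// nullptr if none qualifies. `head` is updated in place when the match is
// the first node.
HugeNode* TakeFirstFit(HugeNode** head, int requested, int* node_size) {
  for (HugeNode** cursor = head; *cursor != nullptr;
       cursor = &(*cursor)->next) {
    if ((*cursor)->size >= requested) {
      HugeNode* match = *cursor;
      *cursor = match->next;  // unlink from the list
      *node_size = match->size;
      return match;
    }
  }
  return nullptr;
}
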
2321 
2322 // Allocation on the old space free list. If it succeeds then a new linear
2323 // allocation space has been set up with the top and limit of the space. If
2324 // the allocation fails then NULL is returned, and the caller can perform a GC
2325 // or allocate a new page before retrying.
2326 HeapObject* FreeList::Allocate(int size_in_bytes) {
2327  DCHECK(0 < size_in_bytes);
2328  DCHECK(size_in_bytes <= kMaxBlockSize);
2329  DCHECK(IsAligned(size_in_bytes, kPointerSize));
2330  // Don't free list allocate if there is linear space available.
2331  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2332 
2333  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2334  // Mark the old linear allocation area with a free space map so it can be
2335  // skipped when scanning the heap. This also puts it back in the free list
2336  // if it is big enough.
2337  owner_->Free(owner_->top(), old_linear_size);
2338 
2339  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2340  old_linear_size);
2341 
2342  int new_node_size = 0;
2343  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2344  if (new_node == NULL) {
2345  owner_->SetTopAndLimit(NULL, NULL);
2346  return NULL;
2347  }
2348 
2349  int bytes_left = new_node_size - size_in_bytes;
2350  DCHECK(bytes_left >= 0);
2351 
2352 #ifdef DEBUG
2353  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2354  reinterpret_cast<Object**>(new_node->address())[i] =
2355  Smi::FromInt(kCodeZapValue);
2356  }
2357 #endif
2358 
2359  // The old-space-step might have finished sweeping and restarted marking.
2360  // Verify that it did not turn the page of the new node into an evacuation
2361  // candidate.
2362  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2363 
2364  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2365 
2366  // Memory in the linear allocation area is counted as allocated. We may free
2367  // a little of this again immediately - see below.
2368  owner_->Allocate(new_node_size);
2369 
2370  if (owner_->heap()->inline_allocation_disabled()) {
2371  // Keep the linear allocation area empty if requested to do so, just
2372  // return area back to the free list instead.
2373  owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2374  DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2375  } else if (bytes_left > kThreshold &&
2376  owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2377  FLAG_incremental_marking_steps) {
2378  int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2379  // We don't want to give too large linear areas to the allocator while
2380  // incremental marking is going on, because we won't check again whether
2381  // we want to do another increment until the linear area is used up.
2382  owner_->Free(new_node->address() + size_in_bytes + linear_size,
2383  new_node_size - size_in_bytes - linear_size);
2384  owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2385  new_node->address() + size_in_bytes + linear_size);
2386  } else if (bytes_left > 0) {
2387  // Normally we give the rest of the node to the allocator as its new
2388  // linear allocation area.
2389  owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2390  new_node->address() + new_node_size);
2391  } else {
2392  // TODO(gc) Try not freeing linear allocation region when bytes_left
2393  // are zero.
2394  owner_->SetTopAndLimit(NULL, NULL);
2395  }
2396 
2397  return new_node;
2398 }
2399 
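
Allocate() above finds a node that may be larger than requested and splits it: the first size_in_bytes become the object, and the remainder either becomes the new linear allocation area or is handed back to the free list (with a cap while incremental marking is running). A simplified sketch of just the split, ignoring the marking-related policy; the types and parameter names are invented for illustration.

struct CarveResult {
  char* object;               // start of the allocated object
  char* linear_top;           // new bump-pointer area is [linear_top, linear_limit)
  char* linear_limit;
  int returned_to_free_list;  // tail bytes handed back instead of kept linear
};

// `node` is a free block of `node_size` bytes. `keep_linear_at_most` caps how
// much of the remainder is kept as the linear allocation area; the rest would
// be re-freed by the caller, as FreeList::Allocate does above.
CarveResult CarveAllocation(char* node, int node_size, int size_in_bytes,
                            int keep_linear_at_most) {
  int bytes_left = node_size - size_in_bytes;
  int linear = bytes_left < keep_linear_at_most ? bytes_left : keep_linear_at_most;
  CarveResult r;
  r.object = node;
  r.linear_top = node + size_in_bytes;
  r.linear_limit = r.linear_top + linear;
  r.returned_to_free_list = bytes_left - linear;
  return r;
}
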
2400 
2401 intptr_t FreeList::EvictFreeListItems(Page* p) {
2402  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2403  p->set_available_in_huge_free_list(0);
2404 
2405  if (sum < p->area_size()) {
2406  sum += small_list_.EvictFreeListItemsInList(p) +
2407  medium_list_.EvictFreeListItemsInList(p) +
2408  large_list_.EvictFreeListItemsInList(p);
2409  p->set_available_in_small_free_list(0);
2410  p->set_available_in_medium_free_list(0);
2411  p->set_available_in_large_free_list(0);
2412  }
2413 
2414  return sum;
2415 }
2416 
2417 
2418 bool FreeList::ContainsPageFreeListItems(Page* p) {
2419  return huge_list_.ContainsPageFreeListItemsInList(p) ||
2420  small_list_.ContainsPageFreeListItemsInList(p) ||
2421  medium_list_.ContainsPageFreeListItemsInList(p) ||
2422  large_list_.ContainsPageFreeListItemsInList(p);
2423 }
2424 
2425 
2426 void FreeList::RepairLists(Heap* heap) {
2427  small_list_.RepairFreeList(heap);
2428  medium_list_.RepairFreeList(heap);
2429  large_list_.RepairFreeList(heap);
2430  huge_list_.RepairFreeList(heap);
2431 }
2432 
2433 
2434 #ifdef DEBUG
2435 intptr_t FreeListCategory::SumFreeList() {
2436  intptr_t sum = 0;
2437  FreeListNode* cur = top();
2438  while (cur != NULL) {
2439  DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2440  FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2441  sum += cur_as_free_space->nobarrier_size();
2442  cur = cur->next();
2443  }
2444  return sum;
2445 }
2446 
2447 
2448 static const int kVeryLongFreeList = 500;
2449 
2450 
2451 int FreeListCategory::FreeListLength() {
2452  int length = 0;
2453  FreeListNode* cur = top();
2454  while (cur != NULL) {
2455  length++;
2456  cur = cur->next();
2457  if (length == kVeryLongFreeList) return length;
2458  }
2459  return length;
2460 }
2461 
2462 
2463 bool FreeList::IsVeryLong() {
2464  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2465  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2466  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2467  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2468  return false;
2469 }
2470 
2471 
2472 // This can take a very long time because it is linear in the number of entries
2473 // on the free list, so it should not be called if FreeListLength returns
2474 // kVeryLongFreeList.
2475 intptr_t FreeList::SumFreeLists() {
2476  intptr_t sum = small_list_.SumFreeList();
2477  sum += medium_list_.SumFreeList();
2478  sum += large_list_.SumFreeList();
2479  sum += huge_list_.SumFreeList();
2480  return sum;
2481 }
2482 #endif
2483 
2484 
2485 // -----------------------------------------------------------------------------
2486 // OldSpace implementation
2487 
2488 void PagedSpace::PrepareForMarkCompact() {
2489  // We don't have a linear allocation area while sweeping. It will be restored
2490  // on the first allocation after the sweep.
2491  EmptyAllocationInfo();
2492 
2493  // This counter will be increased for pages which will be swept by the
2494  // sweeper threads.
2495  unswept_free_bytes_ = 0;
2496 
2497  // Clear the free list before a full GC---it will be rebuilt afterward.
2498  free_list_.Reset();
2499 }
2500 
2501 
2502 intptr_t PagedSpace::SizeOfObjects() {
2503  DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
2504  (unswept_free_bytes_ == 0));
2505  return Size() - unswept_free_bytes_ - (limit() - top());
2506 }
2507 
2508 
2509 // After we have booted, we have created a map which represents free space
2510 // on the heap. If there was already a free list then the elements on it
2511 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2512 // fix them.
2513 void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
2514 
2515 
2516 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2517  if (allocation_info_.top() >= allocation_info_.limit()) return;
2518 
2519  if (Page::FromAllocationTop(allocation_info_.top())
2520  ->IsEvacuationCandidate()) {
2521  // Create filler object to keep page iterable if it was iterable.
2522  int remaining =
2523  static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2524  heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2525 
2526  allocation_info_.set_top(NULL);
2527  allocation_info_.set_limit(NULL);
2528  }
2529 }
2530 
2531 
2532 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2533  int size_in_bytes) {
2534  MarkCompactCollector* collector = heap()->mark_compact_collector();
2535  if (collector->sweeping_in_progress()) {
2536  // Wait for the sweeper threads here and complete the sweeping phase.
2537  collector->EnsureSweepingCompleted();
2538 
2539  // After waiting for the sweeper threads, there may be new free-list
2540  // entries.
2541  return free_list_.Allocate(size_in_bytes);
2542  }
2543  return NULL;
2544 }
2545 
2546 
2547 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2548  // Allocation in this space has failed.
2549 
2550  MarkCompactCollector* collector = heap()->mark_compact_collector();
2551  // Sweeping is still in progress.
2552  if (collector->sweeping_in_progress()) {
2553  // First try to refill the free-list, concurrent sweeper threads
2554  // may have freed some objects in the meantime.
2555  collector->RefillFreeList(this);
2556 
2557  // Retry the free list allocation.
2558  HeapObject* object = free_list_.Allocate(size_in_bytes);
2559  if (object != NULL) return object;
2560 
2561  // If sweeping is still in progress try to sweep pages on the main thread.
2562  int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2563  collector->RefillFreeList(this);
2564  if (free_chunk >= size_in_bytes) {
2565  HeapObject* object = free_list_.Allocate(size_in_bytes);
2566  // We should be able to allocate an object here since we just freed that
2567  // much memory.
2568  DCHECK(object != NULL);
2569  if (object != NULL) return object;
2570  }
2571  }
2572 
2573  // Free list allocation failed and there is no next page. Fail if we have
2574  // hit the old generation size limit that should cause a garbage
2575  // collection.
2576  if (!heap()->always_allocate() &&
2577  heap()->OldGenerationAllocationLimitReached()) {
2578  // If sweeper threads are active, wait for them at that point and steal
2579  // elements from their free-lists.
2580  HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2581  if (object != NULL) return object;
2582  }
2583 
2584  // Try to expand the space and allocate in the new next page.
2585  if (Expand()) {
2586  DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2587  return free_list_.Allocate(size_in_bytes);
2588  }
2589 
2590  // If sweeper threads are active, wait for them at that point and steal
2591  // elements from their free-lists. Allocation may still fail here, which
2592  // would indicate that there is not enough memory for the given allocation.
2593  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2594 }
2595 
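
SlowAllocateRaw() above is essentially a retry ladder: refill the free list from concurrent sweeping, sweep a little on the main thread, respect the old-generation limit, try to expand, and only then block on the sweeper threads. A condensed sketch of that control flow follows; the callbacks are placeholders for the collector and space operations, not V8 APIs.

#include <functional>

// Each step either produces a non-null object or defers to the next one.
void* SlowAllocate(int size_in_bytes,
                   bool sweeping_in_progress, bool old_gen_limit_reached,
                   const std::function<void*(int)>& try_free_list,
                   const std::function<void(int)>& sweep_a_little,
                   const std::function<bool()>& expand,
                   const std::function<void*(int)>& wait_for_sweepers) {
  if (sweeping_in_progress) {
    if (void* obj = try_free_list(size_in_bytes)) return obj;  // refill helped
    sweep_a_little(size_in_bytes);                             // main-thread sweep
    if (void* obj = try_free_list(size_in_bytes)) return obj;
  }
  if (old_gen_limit_reached) {
    // At the old-generation limit: drain the sweepers and retry first.
    if (void* obj = wait_for_sweepers(size_in_bytes)) return obj;
  }
  if (expand()) return try_free_list(size_in_bytes);  // a new page was added
  return wait_for_sweepers(size_in_bytes);            // last resort
}
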
2596 
2597 #ifdef DEBUG
2598 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2599  CommentStatistic* comments_statistics =
2600  isolate->paged_space_comments_statistics();
2601  ReportCodeKindStatistics(isolate->code_kind_statistics());
2602  PrintF(
2603  "Code comment statistics (\" [ comment-txt : size/ "
2604  "count (average)\"):\n");
2605  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2606  const CommentStatistic& cs = comments_statistics[i];
2607  if (cs.size > 0) {
2608  PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2609  cs.size / cs.count);
2610  }
2611  }
2612  PrintF("\n");
2613 }
2614 
2615 
2616 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2617  CommentStatistic* comments_statistics =
2618  isolate->paged_space_comments_statistics();
2619  ClearCodeKindStatistics(isolate->code_kind_statistics());
2620  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2621  comments_statistics[i].Clear();
2622  }
2623  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2624  comments_statistics[CommentStatistic::kMaxComments].size = 0;
2625  comments_statistics[CommentStatistic::kMaxComments].count = 0;
2626 }
2627 
2628 
2629 // Adds comment to 'comment_statistics' table. Performance OK as long as
2630 // 'kMaxComments' is small
2631 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2632  CommentStatistic* comments_statistics =
2633  isolate->paged_space_comments_statistics();
2634  // Do not count empty comments
2635  if (delta <= 0) return;
2636  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2637  // Search for a free or matching entry in 'comments_statistics': 'cs'
2638  // points to result.
2639  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2640  if (comments_statistics[i].comment == NULL) {
2641  cs = &comments_statistics[i];
2642  cs->comment = comment;
2643  break;
2644  } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2645  cs = &comments_statistics[i];
2646  break;
2647  }
2648  }
2649  // Update entry for 'comment'
2650  cs->size += delta;
2651  cs->count += 1;
2652 }
2653 
2654 
2655 // Call for each nested comment start (start marked with '[ xxx', end marked
2656 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2657 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2658  DCHECK(!it->done());
2659  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2660  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2661  if (tmp[0] != '[') {
2662  // Not a nested comment; skip
2663  return;
2664  }
2665 
2666  // Search for end of nested comment or a new nested comment
2667  const char* const comment_txt =
2668  reinterpret_cast<const char*>(it->rinfo()->data());
2669  const byte* prev_pc = it->rinfo()->pc();
2670  int flat_delta = 0;
2671  it->next();
2672  while (true) {
2673  // All nested comments must be terminated properly, and therefore exit
2674  // from loop.
2675  DCHECK(!it->done());
2676  if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2677  const char* const txt =
2678  reinterpret_cast<const char*>(it->rinfo()->data());
2679  flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2680  if (txt[0] == ']') break; // End of nested comment
2681  // A new comment
2682  CollectCommentStatistics(isolate, it);
2683  // Skip code that was covered with previous comment
2684  prev_pc = it->rinfo()->pc();
2685  }
2686  it->next();
2687  }
2688  EnterComment(isolate, comment_txt, flat_delta);
2689 }
2690 
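
EnterComment() and CollectCommentStatistics() above attribute instruction bytes to nested '[ ...' / ']' comment markers, recursing so that bytes covered by an inner comment are not double-counted against the outer one. The same accounting over a flat, invented token stream is sketched below; well-formed nesting is assumed, as the DCHECKs above require.

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct CommentToken {
  std::string text;  // "[ foo" opens a comment, "]" closes one
  int pc;            // position in the instruction stream
};

// Attribute to the comment opened at tokens[i] only the bytes not covered by
// nested comments; returns the index of the "]" that closes it.
std::size_t CollectNested(const std::vector<CommentToken>& tokens, std::size_t i) {
  const std::string& name = tokens[i].text;
  int prev_pc = tokens[i].pc;
  int flat_delta = 0;
  std::size_t j = i + 1;
  for (;; ++j) {
    flat_delta += tokens[j].pc - prev_pc;
    prev_pc = tokens[j].pc;
    if (tokens[j].text == "]") break;  // this comment ends here
    j = CollectNested(tokens, j);      // nested comment: recurse into it
    prev_pc = tokens[j].pc;            // skip bytes the child accounted for
  }
  std::printf("%-24s %d flat bytes\n", name.c_str(), flat_delta);
  return j;
}
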
2691 
2692 // Collects code size statistics:
2693 // - by code kind
2694 // - by code comment
2695 void PagedSpace::CollectCodeStatistics() {
2696  Isolate* isolate = heap()->isolate();
2697  HeapObjectIterator obj_it(this);
2698  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2699  if (obj->IsCode()) {
2700  Code* code = Code::cast(obj);
2701  isolate->code_kind_statistics()[code->kind()] += code->Size();
2702  RelocIterator it(code);
2703  int delta = 0;
2704  const byte* prev_pc = code->instruction_start();
2705  while (!it.done()) {
2706  if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2707  delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2708  CollectCommentStatistics(isolate, &it);
2709  prev_pc = it.rinfo()->pc();
2710  }
2711  it.next();
2712  }
2713 
2714  DCHECK(code->instruction_start() <= prev_pc &&
2715  prev_pc <= code->instruction_end());
2716  delta += static_cast<int>(code->instruction_end() - prev_pc);
2717  EnterComment(isolate, "NoComment", delta);
2718  }
2719  }
2720 }
2721 
2722 
2723 void PagedSpace::ReportStatistics() {
2724  int pct = static_cast<int>(Available() * 100 / Capacity());
2725  PrintF(" capacity: %" V8_PTR_PREFIX
2726  "d"
2727  ", waste: %" V8_PTR_PREFIX
2728  "d"
2729  ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2730  Capacity(), Waste(), Available(), pct);
2731 
2732  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2733  heap()->mark_compact_collector()->EnsureSweepingCompleted();
2734  }
2735  ClearHistograms(heap()->isolate());
2736  HeapObjectIterator obj_it(this);
2737  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2738  CollectHistogramInfo(obj);
2739  ReportHistogram(heap()->isolate(), true);
2740 }
2741 #endif
2742 
2743 
2744 // -----------------------------------------------------------------------------
2745 // MapSpace implementation
2746 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2747 // there is at least one non-inlined virtual function. I would prefer to hide
2748 // the VerifyObject definition behind VERIFY_HEAP.
2749 
2750 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2751 
2752 
2753 // -----------------------------------------------------------------------------
2754 // CellSpace and PropertyCellSpace implementation
2755 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2756 // there is at least one non-inlined virtual function. I would prefer to hide
2757 // the VerifyObject definition behind VERIFY_HEAP.
2758 
2759 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2760 
2761 
2762 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2763  CHECK(object->IsPropertyCell());
2764 }
2765 
2766 
2767 // -----------------------------------------------------------------------------
2768 // LargeObjectIterator
2769 
2770 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2771  current_ = space->first_page_;
2772  size_func_ = NULL;
2773 }
2774 
2775 
2776 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2777  HeapObjectCallback size_func) {
2778  current_ = space->first_page_;
2779  size_func_ = size_func;
2780 }
2781 
2782 
2783 HeapObject* LargeObjectIterator::Next() {
2784  if (current_ == NULL) return NULL;
2785 
2786  HeapObject* object = current_->GetObject();
2787  current_ = current_->next_page();
2788  return object;
2789 }
2790 
2791 
2792 // -----------------------------------------------------------------------------
2793 // LargeObjectSpace
2794 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2795 
2796 
2797 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2798  AllocationSpace id)
2799  : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2800  max_capacity_(max_capacity),
2801  first_page_(NULL),
2802  size_(0),
2803  page_count_(0),
2804  objects_size_(0),
2805  chunk_map_(ComparePointers, 1024) {}
2806 
2807 
2808 bool LargeObjectSpace::SetUp() {
2809  first_page_ = NULL;
2810  size_ = 0;
2811  maximum_committed_ = 0;
2812  page_count_ = 0;
2813  objects_size_ = 0;
2814  chunk_map_.Clear();
2815  return true;
2816 }
2817 
2818 
2819 void LargeObjectSpace::TearDown() {
2820  while (first_page_ != NULL) {
2821  LargePage* page = first_page_;
2822  first_page_ = first_page_->next_page();
2823  LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2824 
2825  ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2826  heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2827  space, kAllocationActionFree, page->size());
2828  heap()->isolate()->memory_allocator()->Free(page);
2829  }
2830  SetUp();
2831 }
2832 
2833 
2834 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2835  Executability executable) {
2836  // Check if we want to force a GC before growing the old space further.
2837  // If so, fail the allocation.
2838  if (!heap()->always_allocate() &&
2839  heap()->OldGenerationAllocationLimitReached()) {
2840  return AllocationResult::Retry(identity());
2841  }
2842 
2843  if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
2844 
2844 
2845  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2846  object_size, this, executable);
2847  if (page == NULL) return AllocationResult::Retry(identity());
2848  DCHECK(page->area_size() >= object_size);
2849 
2850  size_ += static_cast<int>(page->size());
2851  objects_size_ += object_size;
2852  page_count_++;
2853  page->set_next_page(first_page_);
2854  first_page_ = page;
2855 
2856  if (size_ > maximum_committed_) {
2857  maximum_committed_ = size_;
2858  }
2859 
2860  // Register all MemoryChunk::kAlignment-aligned chunks covered by
2861  // this large page in the chunk map.
2862  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2863  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2864  for (uintptr_t key = base; key <= limit; key++) {
2865  HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2866  static_cast<uint32_t>(key), true);
2867  DCHECK(entry != NULL);
2868  entry->value = page;
2869  }
2870 
2871  HeapObject* object = page->GetObject();
2872 
2873  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2874 
2875  if (Heap::ShouldZapGarbage()) {
2876  // Make the object consistent so the heap can be verified in OldSpaceStep.
2877  // We only need to do this in debug builds or if verify_heap is on.
2878  reinterpret_cast<Object**>(object->address())[0] =
2879  heap()->fixed_array_map();
2880  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2881  }
2882 
2883  heap()->incremental_marking()->OldSpaceStep(object_size);
2884  return object;
2885 }
2886 
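
AllocateRaw() above registers every kAlignment-granularity slot covered by the new large page in chunk_map_, which is what lets FindPage() below answer "which large page contains this address" with a single hash lookup. A standalone sketch of the same idea with std::unordered_map standing in for V8's HashMap; the alignment constant and types are illustrative.

#include <cstddef>
#include <cstdint>
#include <unordered_map>

constexpr uintptr_t kChunkAlignment = uintptr_t{1} << 20;  // illustrative granularity

struct LargePageInfo {
  uintptr_t base;
  std::size_t size;
};

using ChunkMap = std::unordered_map<uintptr_t, LargePageInfo*>;

// Register every alignment-granularity key covered by [base, base + size).
void RegisterLargePage(ChunkMap* map, LargePageInfo* page) {
  uintptr_t first = page->base / kChunkAlignment;
  uintptr_t last = (page->base + page->size - 1) / kChunkAlignment;
  for (uintptr_t key = first; key <= last; ++key) (*map)[key] = page;
}

// O(1) lookup of the page containing `addr`, or nullptr if the address is not
// inside any registered large page.
LargePageInfo* FindLargePage(const ChunkMap& map, uintptr_t addr) {
  auto it = map.find(addr / kChunkAlignment);
  if (it == map.end()) return nullptr;
  LargePageInfo* page = it->second;
  return (addr >= page->base && addr < page->base + page->size) ? page : nullptr;
}
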
2887 
2888 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2889  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2890  size_t size = 0;
2891  LargePage* current = first_page_;
2892  while (current != NULL) {
2893  size += current->CommittedPhysicalMemory();
2894  current = current->next_page();
2895  }
2896  return size;
2897 }
2898 
2899 
2900 // GC support
2901 Object* LargeObjectSpace::FindObject(Address a) {
2902  LargePage* page = FindPage(a);
2903  if (page != NULL) {
2904  return page->GetObject();
2905  }
2906  return Smi::FromInt(0); // Signaling not found.
2907 }
2908 
2909 
2910 LargePage* LargeObjectSpace::FindPage(Address a) {
2911  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2912  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2913  static_cast<uint32_t>(key), false);
2914  if (e != NULL) {
2915  DCHECK(e->value != NULL);
2916  LargePage* page = reinterpret_cast<LargePage*>(e->value);
2917  DCHECK(page->is_valid());
2918  if (page->Contains(a)) {
2919  return page;
2920  }
2921  }
2922  return NULL;
2923 }
2924 
2925 
2926 void LargeObjectSpace::FreeUnmarkedObjects() {
2927  LargePage* previous = NULL;
2928  LargePage* current = first_page_;
2929  while (current != NULL) {
2930  HeapObject* object = current->GetObject();
2931  // Can this large page contain pointers to non-trivial objects? No other
2932  // pointer object is this big.
2933  bool is_pointer_object = object->IsFixedArray();
2934  MarkBit mark_bit = Marking::MarkBitFrom(object);
2935  if (mark_bit.Get()) {
2936  mark_bit.Clear();
2937  Page::FromAddress(object->address())->ResetProgressBar();
2938  Page::FromAddress(object->address())->ResetLiveBytes();
2939  previous = current;
2940  current = current->next_page();
2941  } else {
2942  LargePage* page = current;
2943  // Cut the chunk out from the chunk list.
2944  current = current->next_page();
2945  if (previous == NULL) {
2946  first_page_ = current;
2947  } else {
2948  previous->set_next_page(current);
2949  }
2950 
2951  // Free the chunk.
2952  heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
2953  heap()->isolate());
2954  size_ -= static_cast<int>(page->size());
2955  objects_size_ -= object->Size();
2956  page_count_--;
2957 
2958  // Remove entries belonging to this page.
2959  // Use variable alignment to help pass length check (<= 80 characters)
2960  // of single line in tools/presubmit.py.
2961  const intptr_t alignment = MemoryChunk::kAlignment;
2962  uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
2963  uintptr_t limit = base + (page->size() - 1) / alignment;
2964  for (uintptr_t key = base; key <= limit; key++) {
2965  chunk_map_.Remove(reinterpret_cast<void*>(key),
2966  static_cast<uint32_t>(key));
2967  }
2968 
2969  if (is_pointer_object) {
2970  heap()->QueueMemoryChunkForFree(page);
2971  } else {
2972  heap()->isolate()->memory_allocator()->Free(page);
2973  }
2974  }
2975  }
2976  heap()->FreeQueuedChunks();
2977 }
2978 
2979 
2980 bool LargeObjectSpace::Contains(HeapObject* object) {
2981  Address address = object->address();
2982  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2983 
2984  bool owned = (chunk->owner() == this);
2985 
2986  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
2987 
2988  return owned;
2989 }
2990 
2991 
2992 #ifdef VERIFY_HEAP
2993 // We do not assume that the large object iterator works, because it depends
2994 // on the invariants we are checking during verification.
2995 void LargeObjectSpace::Verify() {
2996  for (LargePage* chunk = first_page_; chunk != NULL;
2997  chunk = chunk->next_page()) {
2998  // Each chunk contains an object that starts at the large object page's
2999  // object area start.
3000  HeapObject* object = chunk->GetObject();
3001  Page* page = Page::FromAddress(object->address());
3002  CHECK(object->address() == page->area_start());
3003 
3004  // The first word should be a map, and we expect all map pointers to be
3005  // in map space.
3006  Map* map = object->map();
3007  CHECK(map->IsMap());
3008  CHECK(heap()->map_space()->Contains(map));
3009 
3010  // We have only code, sequential strings, external strings
3011  // (sequential strings that have been morphed into external
3012  // strings), fixed arrays, byte arrays, and constant pool arrays in the
3013  // large object space.
3014  CHECK(object->IsCode() || object->IsSeqString() ||
3015  object->IsExternalString() || object->IsFixedArray() ||
3016  object->IsFixedDoubleArray() || object->IsByteArray() ||
3017  object->IsConstantPoolArray());
3018 
3019  // The object itself should look OK.
3020  object->ObjectVerify();
3021 
3022  // Byte arrays and strings don't have interior pointers.
3023  if (object->IsCode()) {
3024  VerifyPointersVisitor code_visitor;
3025  object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3026  } else if (object->IsFixedArray()) {
3027  FixedArray* array = FixedArray::cast(object);
3028  for (int j = 0; j < array->length(); j++) {
3029  Object* element = array->get(j);
3030  if (element->IsHeapObject()) {
3031  HeapObject* element_object = HeapObject::cast(element);
3032  CHECK(heap()->Contains(element_object));
3033  CHECK(element_object->map()->IsMap());
3034  }
3035  }
3036  }
3037  }
3038 }
3039 #endif
3040 
3041 
3042 #ifdef DEBUG
3043 void LargeObjectSpace::Print() {
3044  OFStream os(stdout);
3045  LargeObjectIterator it(this);
3046  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3047  obj->Print(os);
3048  }
3049 }
3050 
3051 
3052 void LargeObjectSpace::ReportStatistics() {
3053  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3054  int num_objects = 0;
3055  ClearHistograms(heap()->isolate());
3056  LargeObjectIterator it(this);
3057  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3058  num_objects++;
3059  CollectHistogramInfo(obj);
3060  }
3061 
3062  PrintF(
3063  " number of objects %d, "
3064  "size of objects %" V8_PTR_PREFIX "d\n",
3065  num_objects, objects_size_);
3066  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3067 }
3068 
3069 
3070 void LargeObjectSpace::CollectCodeStatistics() {
3071  Isolate* isolate = heap()->isolate();
3072  LargeObjectIterator obj_it(this);
3073  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3074  if (obj->IsCode()) {
3075  Code* code = Code::cast(obj);
3076  isolate->code_kind_statistics()[code->kind()] += code->Size();
3077  }
3078  }
3079 }
3080 
3081 
3082 void Page::Print() {
3083  // Make a best-effort to print the objects in the page.
3084  PrintF("Page@%p in %s\n", this->address(),
3085  AllocationSpaceName(this->owner()->identity()));
3086  printf(" --------------------------------------\n");
3087  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3088  unsigned mark_size = 0;
3089  for (HeapObject* object = objects.Next(); object != NULL;
3090  object = objects.Next()) {
3091  bool is_marked = Marking::MarkBitFrom(object).Get();
3092  PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3093  if (is_marked) {
3094  mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3095  }
3096  object->ShortPrint();
3097  PrintF("\n");
3098  }
3099  printf(" --------------------------------------\n");
3100  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3101 }
3102 
3103 #endif // DEBUG
3104 }
3105 } // namespace v8::internal
#define SLOW_DCHECK(condition)
Definition: checks.h:30
A JavaScript object (ECMA-262, 4.3.3)
Definition: v8.h:2283
static size_t AllocateAlignment()
static intptr_t CommitPageSize()
bool Commit(void *address, size_t size, bool is_executable)
static bool UncommitRegion(void *base, size_t size)
static bool ReleaseRegion(void *base, size_t size)
bool Guard(void *address)
bool Uncommit(void *address, size_t size)
static bool CommitRegion(void *base, size_t size, bool is_executable)
void TakeControl(VirtualMemory *from)
Definition: platform.h:377
static AllocationResult Retry(AllocationSpace space=NEW_SPACE)
Definition: spaces.h:1616
static void Clear(MemoryChunk *chunk)
Definition: spaces-inl.h:21
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2759
bool contains(Address address)
Definition: spaces.h:887
int current_allocation_block_index_
Definition: spaces.h:933
base::VirtualMemory * code_range_
Definition: spaces.h:907
void FreeRawMemory(Address buf, size_t length)
Definition: spaces.cc:233
CodeRange(Isolate *isolate)
Definition: spaces.cc:91
static int CompareFreeBlockAddress(const FreeBlock *left, const FreeBlock *right)
Definition: spaces.cc:136
List< FreeBlock > allocation_list_
Definition: spaces.h:932
Isolate * isolate_
Definition: spaces.h:904
bool SetUp(size_t requested_size)
Definition: spaces.cc:99
List< FreeBlock > free_list_
Definition: spaces.h:929
bool GetNextAllocationBlock(size_t requested)
Definition: spaces.cc:145
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, const size_t commit_size, size_t *allocated)
Definition: spaces.cc:186
bool CommitRawMemory(Address start, size_t length)
Definition: spaces.cc:223
bool UncommitRawMemory(Address start, size_t length)
Definition: spaces.cc:228
static const char * Kind2String(Kind kind)
Definition: objects.cc:10627
FreeListNode * end() const
Definition: spaces.h:1465
FreeListNode * top() const
Definition: spaces.h:1456
void set_available(int available)
Definition: spaces.h:1470
void RepairFreeList(Heap *heap)
Definition: spaces.cc:2140
void Free(FreeListNode *node, int size_in_bytes)
Definition: spaces.cc:2130
base::AtomicWord top_
Definition: spaces.h:1483
void set_end(FreeListNode *end)
Definition: spaces.h:1466
intptr_t EvictFreeListItemsInList(Page *p)
Definition: spaces.cc:2059
void set_top(FreeListNode *top)
Definition: spaces.h:1460
intptr_t Concatenate(FreeListCategory *category)
Definition: spaces.cc:2028
bool ContainsPageFreeListItemsInList(Page *p)
Definition: spaces.cc:2081
FreeListNode * PickNodeFromList(int *node_size)
Definition: spaces.cc:2091
static FreeListNode * FromAddress(Address address)
Definition: spaces.h:1406
void set_next(FreeListNode *next)
Definition: spaces.cc:2010
static bool IsFreeListNode(HeapObject *object)
Definition: spaces-inl.h:303
static const int kNextOffset
Definition: spaces.h:1430
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:1955
FreeListNode * next()
Definition: spaces.cc:1986
FreeListNode ** next_address()
Definition: spaces.cc:1999
FreeListCategory huge_list_
Definition: spaces.h:1601
intptr_t available()
Definition: spaces.h:1525
bool ContainsPageFreeListItems(Page *p)
Definition: spaces.cc:2418
FreeListNode * FindNodeFor(int size_in_bytes, int *node_size)
Definition: spaces.cc:2211
FreeListCategory medium_list_
Definition: spaces.h:1599
intptr_t Concatenate(FreeList *free_list)
Definition: spaces.cc:2159
MUST_USE_RESULT HeapObject * Allocate(int size_in_bytes)
Definition: spaces.cc:2326
FreeListCategory large_list_
Definition: spaces.h:1600
FreeList(PagedSpace *owner)
Definition: spaces.cc:2154
FreeListCategory * medium_list()
Definition: spaces.h:1577
static const int kMaxBlockSize
Definition: spaces.h:1584
FreeListCategory * huge_list()
Definition: spaces.h:1579
static const int kSmallListMax
Definition: spaces.h:1592
PagedSpace * owner_
Definition: spaces.h:1588
static const int kLargeListMax
Definition: spaces.h:1594
FreeListCategory * small_list()
Definition: spaces.h:1576
FreeListCategory * large_list()
Definition: spaces.h:1578
static const int kSmallAllocationMax
Definition: spaces.h:1595
static const int kLargeAllocationMax
Definition: spaces.h:1597
int Free(Address start, int size_in_bytes)
Definition: spaces.cc:2177
static const int kMediumListMax
Definition: spaces.h:1593
static const int kSmallListMin
Definition: spaces.h:1591
intptr_t EvictFreeListItems(Page *p)
Definition: spaces.cc:2401
void RepairLists(Heap *heap)
Definition: spaces.cc:2426
FreeListCategory small_list_
Definition: spaces.h:1598
static const int kMediumAllocationMax
Definition: spaces.h:1596
void nobarrier_set_size(int value)
int nobarrier_size() const
static const int kHeaderSize
Definition: objects.h:4423
void Initialize(PagedSpace *owner, Address start, Address end, PageMode mode, HeapObjectCallback size_func)
Definition: spaces.cc:55
HeapObjectCallback size_func_
Definition: spaces.h:1213
HeapObjectIterator(PagedSpace *space)
Definition: spaces.cc:21
void synchronized_set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1429
void set_map_no_write_barrier(Map *value)
Definition: objects-inl.h:1435
Heap * GetHeap() const
Definition: objects-inl.h:1379
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1464
OldSpace * old_pointer_space()
Definition: heap.h:594
PropertyCellSpace * property_cell_space()
Definition: heap.h:599
OldSpace * code_space()
Definition: heap.h:596
void FreeQueuedChunks()
Definition: heap.cc:6025
void QueueMemoryChunkForFree(MemoryChunk *chunk)
Definition: heap.cc:6019
PromotionQueue * promotion_queue()
Definition: heap.h:753
void RememberUnmappedPage(Address page, bool compacted)
Definition: heap.cc:6072
bool inline_allocation_disabled()
Definition: heap.h:1044
CellSpace * cell_space()
Definition: heap.h:598
void CreateFillerObjectAt(Address addr, int size)
Definition: heap.cc:3221
HeapObjectCallback GcSafeSizeOfOldObjectFunction()
Definition: heap.h:874
Isolate * isolate()
Definition: heap-inl.h:589
int InitialSemiSpaceSize()
Definition: heap.h:554
OldSpace * old_data_space()
Definition: heap.h:595
IncrementalMarking * incremental_marking()
Definition: heap.h:1205
static bool ShouldZapGarbage()
Definition: heap.h:926
void decrement_scan_on_scavenge_pages()
Definition: heap.h:746
MarkCompactCollector * mark_compact_collector()
Definition: heap.h:1197
MapSpace * map_space()
Definition: heap.h:597
static const intptr_t kAllocatedThreshold
void Step(intptr_t allocated, CompletionAction action, bool force_marking=false)
void OldSpaceStep(intptr_t allocated)
void SetNewSpacePageFlags(NewSpacePage *chunk)
CodeRange * code_range()
Definition: isolate.h:863
MemoryAllocator * memory_allocator()
Definition: isolate.h:883
Counters * counters()
Definition: isolate.h:857
LargeObjectIterator(LargeObjectSpace *space)
Definition: spaces.cc:2770
HeapObjectCallback size_func_
Definition: spaces.h:2815
friend class LargeObjectIterator
Definition: spaces.h:2796
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.cc:2797
MUST_USE_RESULT AllocationResult AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2834
bool Contains(HeapObject *obj)
Definition: spaces.cc:2980
bool CanAllocateSize(int size)
Definition: spaces.h:2735
LargePage * FindPage(Address a)
Definition: spaces.cc:2910
Object * FindObject(Address a)
Definition: spaces.cc:2901
HeapObject * GetObject()
Definition: spaces.h:798
static LargePage * Initialize(Heap *heap, MemoryChunk *chunk)
Definition: spaces-inl.h:292
LargePage * next_page() const
Definition: spaces.h:800
void set_next_page(LargePage *page)
Definition: spaces.h:804
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2750
InstanceType instance_type()
Definition: objects-inl.h:4323
static void ReportDeleteIfNeeded(HeapObject *obj, Isolate *isolate)
int SweepInParallel(PagedSpace *space, int required_freed_bytes)
void RefillFreeList(PagedSpace *space)
static int CodePageGuardSize()
Definition: spaces.cc:815
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
Definition: spaces.cc:690
List< MemoryAllocationCallbackRegistration > memory_allocation_callbacks_
Definition: spaces.h:1143
static int CodePageAreaEndOffset()
Definition: spaces.cc:827
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
Definition: spaces.cc:763
bool CommitMemory(Address addr, size_t size, Executability executable)
Definition: spaces.cc:284
bool UncommitBlock(Address start, size_t size)
Definition: spaces.cc:736
MemoryChunk * AllocateChunk(intptr_t reserve_area_size, intptr_t commit_area_size, Executability executable, Space *space)
Definition: spaces.cc:548
void UpdateAllocatedSpaceLimits(void *low, void *high)
Definition: spaces.h:1152
static int CodePageAreaSize()
Definition: spaces.h:1103
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:262
void FreeMemory(base::VirtualMemory *reservation, Executability executable)
Definition: spaces.cc:295
void Free(MemoryChunk *chunk)
Definition: spaces.cc:700
void ZapBlock(Address start, size_t size)
Definition: spaces.cc:743
static int CodePageAreaStartOffset()
Definition: spaces.cc:820
Address ReserveAlignedMemory(size_t requested, size_t alignment, base::VirtualMemory *controller)
Definition: spaces.cc:345
MemoryAllocator(Isolate *isolate)
Definition: spaces.cc:252
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, size_t alignment, Executability executable, base::VirtualMemory *controller)
Definition: spaces.cc:358
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
Definition: spaces.cc:750
static int CodePageGuardStartOffset()
Definition: spaces.cc:808
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
Definition: spaces.cc:782
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory *vm, Address start, size_t commit_size, size_t reserved_size)
Definition: spaces.cc:834
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
Definition: spaces.cc:680
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.cc:772
bool CommitBlock(Address start, size_t size, Executability executable)
Definition: spaces.cc:723
bool IsFlagSet(int flag)
Definition: spaces.h:417
void initialize_scan_on_scavenge(bool scan)
Definition: spaces.h:334
SkipList * skip_list()
Definition: spaces.h:613
Executability executable()
Definition: spaces.h:563
void set_owner(Space *space)
Definition: spaces.h:317
static const int kFlagsOffset
Definition: spaces.h:605
void set_reserved_memory(base::VirtualMemory *reservation)
Definition: spaces.h:328
SkipList * skip_list_
Definition: spaces.h:662
intptr_t available_in_large_free_list_
Definition: spaces.h:676
base::VirtualMemory * reserved_memory()
Definition: spaces.h:324
Heap * heap() const
Definition: spaces.h:603
bool CommitArea(size_t requested)
Definition: spaces.cc:476
intptr_t non_available_small_blocks_
Definition: spaces.h:678
bool Contains(Address addr)
Definition: spaces.h:348
void SetFlags(intptr_t flags, intptr_t mask)
Definition: spaces.h:424
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:868
void IncrementLiveBytes(int by)
Definition: spaces.h:471
intptr_t available_in_medium_free_list_
Definition: spaces.h:675
static const int kObjectStartOffset
Definition: spaces.h:550
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:299
void InitializeReservedMemory()
Definition: spaces.h:326
MemoryChunk * prev_chunk() const
Definition: spaces.h:295
void set_prev_chunk(MemoryChunk *prev)
Definition: spaces.h:303
void SetFlag(int flag)
Definition: spaces.h:405
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
Definition: spaces.cc:430
Space * owner() const
Definition: spaces.h:307
static const int kWriteBarrierCounterGranularity
Definition: spaces.h:362
intptr_t available_in_small_free_list_
Definition: spaces.h:674
intptr_t available_in_huge_free_list_
Definition: spaces.h:677
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:276
MemoryChunk * next_chunk() const
Definition: spaces.h:291
bool IsEvacuationCandidate()
Definition: spaces.h:607
base::VirtualMemory reservation_
Definition: spaces.h:650
SlotsBuffer * slots_buffer_
Definition: spaces.h:661
size_t CommittedPhysicalMemory()
Definition: spaces.h:637
SlotsBuffer * slots_buffer()
Definition: spaces.h:617
void set_parallel_sweeping(ParallelSweepingState state)
Definition: spaces.h:450
intptr_t write_barrier_counter_
Definition: spaces.h:663
static const intptr_t kLiveBytesOffset
Definition: spaces.h:530
void ClearFlag(int flag)
Definition: spaces.h:407
static const intptr_t kAlignment
Definition: spaces.h:523
static void UpdateHighWaterMark(Address mark)
Definition: spaces-inl.h:189
void InsertAfter(MemoryChunk *other)
Definition: spaces.cc:528
size_t size() const
Definition: spaces.h:554
static Address & Address_at(Address addr)
Definition: v8memory.h:56
NewSpacePage * prev_page() const
Definition: spaces.h:1999
static bool IsAtEnd(Address addr)
Definition: spaces.h:2014
SemiSpace * semi_space()
Definition: spaces.h:2005
void set_prev_page(NewSpacePage *page)
Definition: spaces.h:2003
static NewSpacePage * Initialize(Heap *heap, Address start, SemiSpace *semi_space)
Definition: spaces.cc:398
static bool IsAtStart(Address addr)
Definition: spaces.h:2009
void InitializeAsAnchor(SemiSpace *owner)
Definition: spaces.cc:420
static const intptr_t kCopyOnFlipFlagsMask
Definition: spaces.h:1986
void set_next_page(NewSpacePage *page)
Definition: spaces.h:1997
static NewSpacePage * FromAddress(Address address_in_page)
Definition: spaces.h:2021
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:2031
NewSpacePage * next_page() const
Definition: spaces.h:1993
SemiSpace to_space_
Definition: spaces.h:2558
intptr_t Available()
Definition: spaces.h:2400
uintptr_t chunk_size_
Definition: spaces.h:2555
base::VirtualMemory reservation_
Definition: spaces.h:2560
void RecordPromotion(HeapObject *obj)
Definition: spaces.cc:1933
AllocationInfo allocation_info_
Definition: spaces.h:2571
Address top_on_previous_step_
Definition: spaces.h:2579
MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:1374
intptr_t CommittedMemory()
Definition: spaces.h:2385
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1941
uintptr_t address_mask_
Definition: spaces.h:2565
void ResetAllocationInfo()
Definition: spaces.cc:1311
intptr_t inline_allocation_limit_step_
Definition: spaces.h:2577
bool SetUp(int reserved_semispace_size_, int max_semi_space_size)
Definition: spaces.cc:1175
void UpdateInlineAllocationLimit(int size_in_bytes)
Definition: spaces.cc:1323
void UpdateAllocationInfo()
Definition: spaces.cc:1302
void RecordAllocation(HeapObject *obj)
Definition: spaces.cc:1925
intptr_t TotalCapacity()
Definition: spaces.h:2379
HistogramInfo * promoted_histogram_
Definition: spaces.h:2582
SemiSpace from_space_
Definition: spaces.h:2559
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2544
int InitialTotalCapacity()
Definition: spaces.h:2412
HistogramInfo * allocated_histogram_
Definition: spaces.h:2581
uintptr_t object_mask_
Definition: spaces.h:2566
uintptr_t object_expected_
Definition: spaces.h:2567
bool Contains(Address a)
Definition: spaces.h:2349
bool WasSwept()
Definition: spaces.h:766
static const int kPageSize
Definition: spaces.h:748
void InitializeAsAnchor(PagedSpace *owner)
Definition: spaces.cc:391
void ResetFreeListStatistics()
Definition: spaces.cc:671
static const intptr_t kPageAlignmentMask
Definition: spaces.h:757
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
Definition: spaces-inl.h:136
void set_next_page(Page *page)
Definition: spaces-inl.h:221
Page * next_page()
Definition: spaces-inl.h:209
Page * prev_page()
Definition: spaces-inl.h:215
void set_prev_page(Page *page)
Definition: spaces-inl.h:227
bool Contains(Address a)
Definition: spaces-inl.h:150
intptr_t CommittedMemory()
Definition: spaces.h:1692
size_t CommittedPhysicalMemory()
Definition: spaces.cc:920
void SetTopAndLimit(Address top, Address limit)
Definition: spaces.h:1780
AllocationInfo allocation_info_
Definition: spaces.h:1901
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1770
intptr_t Available()
Definition: spaces.h:1733
void Allocate(int bytes)
Definition: spaces.h:1797
intptr_t unswept_free_bytes_
Definition: spaces.h:1905
void ObtainFreeListStatistics(Page *p, SizeStats *sizes)
Definition: spaces.cc:1039
intptr_t SizeOfFirstPage()
Definition: spaces.cc:986
void ResetFreeListStatistics()
Definition: spaces.cc:1047
virtual intptr_t Waste()
Definition: spaces.h:1748
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.cc:880
AllocationStats accounting_stats_
Definition: spaces.h:1892
void ReleasePage(Page *page)
Definition: spaces.cc:1061
void DecreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1845
MUST_USE_RESULT HeapObject * WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes)
Definition: spaces.cc:2532
MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:2547
virtual intptr_t Size()
Definition: spaces.h:1739
Object * FindObject(Address addr)
Definition: spaces.cc:932
void EvictEvacuationCandidatesFromFreeLists()
Definition: spaces.cc:2516
void IncreaseCapacity(int size)
Definition: spaces.cc:1056
void RepairFreeListsAfterBoot()
Definition: spaces.cc:2513
virtual intptr_t SizeOfObjects()
Definition: spaces.cc:2502
friend class PageIterator
Definition: spaces.h:1936
MemoryChunk * emergency_memory_
Definition: spaces.h:1916
void SetNewLimit(Address limit)
Definition: heap.h:392
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2762
void Initialize(Address start, Address end, HeapObjectCallback size_func)
Definition: spaces.cc:1756
HeapObjectCallback size_func_
Definition: spaces.h:2277
SemiSpaceIterator(NewSpace *space)
Definition: spaces.cc:1735
Address space_start()
Definition: spaces.h:2098
friend class NewSpacePageIterator
Definition: spaces.h:2221
static void Swap(SemiSpace *from, SemiSpace *to)
Definition: spaces.cc:1635
SemiSpace(Heap *heap, SemiSpaceId semispace)
Definition: spaces.h:2069
void FlipPages(intptr_t flags, intptr_t flag_mask)
Definition: spaces.cc:1599
intptr_t maximum_committed_
Definition: spaces.h:2202
void SetCapacity(int new_capacity)
Definition: spaces.cc:1656
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1526
uintptr_t object_mask_
Definition: spaces.h:2211
NewSpacePage * current_page()
Definition: spaces.h:2151
bool ShrinkTo(int new_capacity)
Definition: spaces.cc:1572
uintptr_t address_mask_
Definition: spaces.h:2210
NewSpacePage * current_page_
Definition: spaces.h:2218
NewSpacePage * first_page()
Definition: spaces.h:2150
NewSpacePage * anchor()
Definition: spaces.h:2195
void set_age_mark(Address mark)
Definition: spaces.cc:1664
void SetUp(Address start, int initial_capacity, int maximum_capacity)
Definition: spaces.cc:1460
bool GrowTo(int new_capacity)
Definition: spaces.cc:1537
SemiSpaceId id()
Definition: spaces.h:2177
NewSpacePage anchor_
Definition: spaces.h:2217
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:2165
uintptr_t object_expected_
Definition: spaces.h:2212
static Smi * FromInt(int value)
Definition: objects-inl.h:1321
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:838
AllocationSpace identity()
Definition: spaces.h:829
Executability executable()
Definition: spaces.h:826
Heap * heap() const
Definition: spaces.h:823
Entry * Lookup(void *key, uint32_t hash, bool insert, AllocationPolicy allocator=AllocationPolicy())
Definition: hashmap.h:114
void * Remove(void *key, uint32_t hash)
Definition: hashmap.h:145
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
#define LOG(isolate, Call)
Definition: log.h:69
#define UNREACHABLE()
Definition: logging.h:30
#define CHECK_EQ(expected, value)
Definition: logging.h:169
#define CHECK(condition)
Definition: logging.h:36
#define CHECK_NE(unexpected, value)
Definition: logging.h:173
#define CHECK_LE(a, b)
Definition: logging.h:180
#define DCHECK_GE(v1, v2)
Definition: logging.h:208
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
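The CHECK/DCHECK family above backs the invariant assertions throughout spaces.cc: CHECK variants fire in every build, DCHECK variants only in debug builds. The sketch below shows how such release/debug assertion macros are commonly structured; it uses MY_-prefixed names and is a simplified stand-in, not the definitions from logging.h.

// Illustrative release vs. debug assertion macros in the spirit of
// CHECK/DCHECK. Simplified stand-ins, not V8's logging.h.
#include <cstdio>
#include <cstdlib>

// Fires in all build modes.
#define MY_CHECK(condition)                                            \
  do {                                                                 \
    if (!(condition)) {                                                \
      std::fprintf(stderr, "Check failed: %s (%s:%d)\n", #condition,   \
                   __FILE__, __LINE__);                                \
      std::abort();                                                    \
    }                                                                  \
  } while (false)

// Compiles away outside debug builds.
#ifdef DEBUG
#define MY_DCHECK(condition) MY_CHECK(condition)
#else
#define MY_DCHECK(condition) ((void)0)
#endif

#define MY_CHECK_EQ(expected, value) MY_CHECK((expected) == (value))

int main() {
  MY_CHECK_EQ(4, 2 + 2);           // Passes silently.
  MY_DCHECK(sizeof(void*) >= 4);   // No-op in release builds.
  return 0;
}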
void USE(T)
Definition: macros.h:322
#define V8_PTR_PREFIX
Definition: macros.h:360
T RoundDown(T x, intptr_t m)
Definition: macros.h:399
#define OFFSET_OF(type, field)
Definition: macros.h:22
#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s)
Definition: msan.h:29
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
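RoundDown and IsPowerOfTwo32 above are the alignment helpers the allocator leans on when snapping sizes and addresses to page and chunk boundaries. A self-contained sketch of the usual mask-based formulation, assuming (as the allocator does) that the alignment is a power of two; these are simplified stand-ins, not the bodies from macros.h and bits.h.

// Sketch of the two alignment helpers listed above.
#include <cassert>
#include <cstdint>
#include <cstdio>

bool IsPowerOfTwo32Sketch(uint32_t value) {
  // A power of two has exactly one bit set, so value & (value - 1) is zero.
  return value != 0 && (value & (value - 1)) == 0;
}

template <typename T>
T RoundDownSketch(T x, intptr_t m) {
  assert(IsPowerOfTwo32Sketch(static_cast<uint32_t>(m)));
  return x & ~static_cast<T>(m - 1);  // Clear the low log2(m) bits.
}

int main() {
  std::printf("%d %d\n", RoundDownSketch(4097, 4096),     // 4096
              RoundDownSketch(8192, 4096));               // 8192
  std::printf("%d %d\n", IsPowerOfTwo32Sketch(1u << 20),  // 1
              IsPowerOfTwo32Sketch(12u));                  // 0
  return 0;
}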
intptr_t AtomicWord
Definition: atomicops.h:57
void NoBarrier_Store(volatile Atomic8 *ptr, Atomic8 value)
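NoBarrier_Store above is the atomicops primitive used for lock-free flag updates on memory chunks. In portable C++ terms it corresponds roughly to an atomic store with relaxed memory ordering, as in this sketch; the std::atomic wrapper is a stand-in, not the platform-specific atomicops implementation.

// Rough portable equivalent of a NoBarrier_Store: atomicity without any
// ordering guarantee (relaxed memory order).
#include <atomic>
#include <cstdint>
#include <cstdio>

using Atomic8 = int8_t;

void NoBarrierStoreSketch(std::atomic<Atomic8>* ptr, Atomic8 value) {
  ptr->store(value, std::memory_order_relaxed);
}

int main() {
  std::atomic<Atomic8> flag{0};
  NoBarrierStoreSketch(&flag, 1);
  std::printf("%d\n", flag.load(std::memory_order_relaxed));  // 1
  return 0;
}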
const intptr_t kHeapObjectTagMask
Definition: v8.h:5739
void DeleteArray(T *array)
Definition: allocation.h:68
const int kPointerSize
Definition: globals.h:129
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
Definition: utils.h:129
const int KB
Definition: globals.h:106
@ NOT_EXECUTABLE
Definition: globals.h:391
static LifetimePosition Min(LifetimePosition a, LifetimePosition b)
const size_t kMaximalCodeRangeSize
Definition: globals.h:156
const Address kZapValue
Definition: globals.h:269
@ FIRST_NONSTRING_TYPE
Definition: objects.h:758
const bool kRequiresCodeRange
Definition: globals.h:155
static LifetimePosition Max(LifetimePosition a, LifetimePosition b)
byte * Address
Definition: globals.h:101
void PrintF(const char *format,...)
Definition: utils.cc:80
static void DoReportStatistics(Isolate *isolate, HistogramInfo *info, const char *description)
Definition: spaces.cc:1870
@ OLD_DATA_SPACE
Definition: globals.h:361
@ PROPERTY_CELL_SPACE
Definition: globals.h:365
@ OLD_POINTER_SPACE
Definition: globals.h:360
const int kHeapObjectTag
Definition: v8.h:5737
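kHeapObjectTag here, together with the kHeapObjectTagMask listed earlier, implements the low-bit pointer tagging that lets the heap code distinguish heap object pointers from small integers. The sketch below shows the shape of that check; the concrete values (tag 1, mask 3, small integers keeping the low bit clear) are stated as assumptions rather than quoted from v8.h.

// Sketch of a low-bit pointer-tag check. Values are assumptions.
#include <cstdint>
#include <cstdio>

constexpr intptr_t kTagSketch = 1;      // Assumed heap-object tag.
constexpr intptr_t kTagMaskSketch = 3;  // Assumed tag mask (low two bits).

bool HasHeapObjectTagSketch(const void* value) {
  return (reinterpret_cast<intptr_t>(value) & kTagMaskSketch) == kTagSketch;
}

int main() {
  void* tagged = reinterpret_cast<void*>(0x1000 | kTagSketch);
  void* smi = reinterpret_cast<void*>(static_cast<intptr_t>(42) << 1);
  std::printf("%d %d\n", HasHeapObjectTagSketch(tagged),
              HasHeapObjectTagSketch(smi));  // 1 0
  return 0;
}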
int(* HeapObjectCallback)(HeapObject *obj)
Definition: globals.h:429
static void RoundUp(Vector< char > buffer, int *length, int *decimal_point)
Definition: fixed-dtoa.cc:171
const int kCodeZapValue
Definition: globals.h:278
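kZapValue (listed earlier) and kCodeZapValue are the debug "zap" patterns written over freed or retired memory so that stale pointers fail loudly. A sketch of the zapping idiom follows; the pattern value used here is illustrative, since the real constants in globals.h differ per platform.

// Sketch of debug zapping: fill a dead region with a recognizable marker.
#include <cstddef>
#include <cstdint>
#include <cstdio>

void ZapBlockSketch(uintptr_t* start, size_t size_in_bytes, uintptr_t zap) {
  for (size_t i = 0; i < size_in_bytes / sizeof(uintptr_t); i++) {
    start[i] = zap;  // Every word of the region now holds the marker.
  }
}

int main() {
  uintptr_t block[8] = {};
  ZapBlockSketch(block, sizeof(block), static_cast<uintptr_t>(0xdeadbeef));
  std::printf("%p\n", reinterpret_cast<void*>(block[3]));  // Prints the marker.
  return 0;
}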
bool IsAligned(T value, U alignment)
Definition: utils.h:123
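IsAligned above, and the IsAddressAligned helper listed earlier, are the predicates the allocator uses to assert that addresses and sizes sit on power-of-two boundaries. The sketch below restates the usual mask test; the Address alias mirrors the byte* typedef above, and the power-of-two assumption matches how the allocator calls these helpers.

// Sketch of the alignment predicates: a value is aligned when the low bits
// selected by (alignment - 1) are zero. Simplified stand-ins for utils.h.
#include <cstdint>
#include <cstdio>

using Address = uint8_t*;  // Mirrors the byte* Address typedef above.

template <typename T, typename U>
bool IsAlignedSketch(T value, U alignment) {
  return (value & (alignment - 1)) == 0;
}

bool IsAddressAlignedSketch(Address addr, intptr_t alignment, int offset = 0) {
  // The offset lets callers test alignment of a field at addr + offset,
  // e.g. an object body that sits behind a header.
  intptr_t p = reinterpret_cast<intptr_t>(addr + offset);
  return IsAlignedSketch(p, alignment);
}

int main() {
  Address base = reinterpret_cast<Address>(static_cast<uintptr_t>(0x10000));
  std::printf("%d %d\n", IsAddressAlignedSketch(base, 4096),
              IsAddressAlignedSketch(base + 8, 4096));  // 1 0
  return 0;
}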
const char * AllocationSpaceName(AllocationSpace space)
static bool ComparePointers(void *key1, void *key2)
Definition: spaces.cc:2794
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
void(* MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
Definition: v8.h:4171
AllocationAction
Definition: v8.h:4165
@ kAllocationActionAllocate
Definition: v8.h:4166
@ kAllocationActionFree
Definition: v8.h:4167
ObjectSpace
Definition: v8.h:4152
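MemoryAllocationCallback, AllocationAction, and ObjectSpace above are the embedder-visible hooks the memory allocator fires when it commits or releases chunks. The sketch below shows a callback matching that void(ObjectSpace, AllocationAction, int) signature; the enums are simplified stand-ins for the v8.h declarations, and the registration step is only described in a comment rather than performed.

// Sketch of an embedder callback matching the signature listed above.
#include <cstdio>

enum class ObjectSpace { kNewSpace, kOldSpace, kCodeSpace, kLargeObjectSpace };
enum class AllocationAction { kAllocationActionAllocate, kAllocationActionFree };

using MemoryAllocationCallback = void (*)(ObjectSpace, AllocationAction, int);

// Example callback: tally the bytes the allocator reports as committed/freed.
static long long g_committed_bytes = 0;

void TrackCommittedMemory(ObjectSpace space, AllocationAction action, int size) {
  if (action == AllocationAction::kAllocationActionAllocate) {
    g_committed_bytes += size;
  } else {
    g_committed_bytes -= size;
  }
  std::printf("space=%d action=%d size=%d total=%lld\n",
              static_cast<int>(space), static_cast<int>(action), size,
              g_committed_bytes);
}

int main() {
  // In a real embedder this function pointer would be handed to V8's
  // memory-allocation-callback registration API; here we just invoke it.
  MemoryAllocationCallback cb = &TrackCommittedMemory;
  cb(ObjectSpace::kCodeSpace, AllocationAction::kAllocationActionAllocate, 4096);
  cb(ObjectSpace::kCodeSpace, AllocationAction::kAllocationActionFree, 4096);
  return 0;
}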
#define INSTANCE_TYPE_LIST(V)
Definition: objects.h:339
#define STRING_TYPE_LIST(V)
Definition: objects.h:457
#define INCREMENT(type, size, name, camel_name)
#define SET_NAME(name)
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space)
Definition: spaces.h:2611