96 current_allocation_block_index_(0) {}
102 if (requested == 0) {
141 return static_cast<int>(left->start - right->start);
167 if (merged.size > 0) {
187 const size_t commit_size,
189 DCHECK(commit_size <= requested_size);
202 *allocated = current.size;
204 *allocated = aligned_requested;
215 if (*allocated == current.size) {
219 return current.start;
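// The excerpts above come from CodeRange's free-block bookkeeping: free
// blocks are compared by start address, adjacent blocks are merged, and an
// allocation is served from the current block. The sketch below is
// illustrative only; Block and MergeAdjacentBlocks are hypothetical names,
// not V8's API.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
  uintptr_t start;
  size_t size;
};

// Sort candidate blocks by start address, then coalesce blocks that touch.
std::vector<Block> MergeAdjacentBlocks(std::vector<Block> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  std::vector<Block> merged;
  for (const Block& b : blocks) {
    if (!merged.empty() &&
        merged.back().start + merged.back().size == b.start) {
      merged.back().size += b.size;  // adjacent: extend the previous block
    } else if (b.size > 0) {
      merged.push_back(b);
    }
  }
  return merged;
}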
255 capacity_executable_(0),
258 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
259 highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
359 size_t reserve_size, size_t commit_size, size_t alignment,
361 DCHECK(commit_size <= reserve_size);
372 if (reservation.Commit(base, commit_size, false)) {
485 if (commit_size > committed_size) {
487 DCHECK(commit_size <= size() - 2 * guard_size);
490 size_t length = commit_size - committed_size;
494 if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
508 } else if (commit_size < committed_size) {
511 size_t length = committed_size - commit_size;
549 intptr_t commit_area_size,
552 DCHECK(commit_area_size <= reserve_area_size);
598 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
599 "V8 Executable Allocation capacity exceeded"));
632 area_end = area_start + commit_area_size;
641 executable, &reservation);
650 area_end = area_start + commit_area_size;
656 static_cast<int>(chunk_size));
658 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
665 heap, base, chunk_size, area_start, area_end, executable, owner);
757 (registration.action & action) == action)
796 void MemoryAllocator::ReportStatistics() {
802 ", available: %%%d\n\n",
835 Address start, size_t commit_size,
836 size_t reserved_size) {
871 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
882 : Space(heap, id, executable),
884 unswept_free_bytes_(0),
885 end_of_unswept_pages_(NULL),
886 emergency_memory_(NULL) {
911 while (iterator.has_next()) {
925 while (it.has_next()) {
926 size += it.next()->CommittedPhysicalMemory();
934 DCHECK(!heap()->mark_compact_collector()->in_use());
942 Address next = cur + obj->Size();
943 if ((cur <= addr) && (addr < next)) return obj;
976 if (p == NULL) return false;
989 static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
1009 if (code_range != NULL && code_range->valid()) {
1016 FullCodeGenerator::kBootCodeSizeMultiplier / 100,
1031 while (it.has_next()) {
1040 sizes->huge_size_ = page->available_in_huge_free_list();
1041 sizes->small_size_ = page->available_in_small_free_list();
1042 sizes->medium_size_ = page->available_in_medium_free_list();
1043 sizes->large_size_ = page->available_in_large_free_list();
1049 while (page_iterator.has_next()) {
1050 Page* page = page_iterator.next();
1121 void PagedSpace::Print() {}
1125 void PagedSpace::Verify(ObjectVisitor* visitor) {
1126 bool allocation_pointer_found_in_space =
1129 while (page_iterator.has_next()) {
1130 Page* page = page_iterator.next();
1131 CHECK(page->owner() == this);
1133 allocation_pointer_found_in_space = true;
1135 CHECK(page->WasSwept());
1136 HeapObjectIterator it(page, NULL);
1137 Address end_of_previous_object = page->area_start();
1140 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1141 CHECK(end_of_previous_object <= object->address());
1145 Map* map = object->map();
1150 VerifyObject(object);
1153 object->ObjectVerify();
1156 int size = object->Size();
1157 object->IterateBody(map->instance_type(), size, visitor);
1158 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1163 end_of_previous_object = object->address() + size;
1165 CHECK_LE(black_size, page->LiveBytes());
1167 CHECK(allocation_pointer_found_in_space);
1176 int maximum_semispace_capacity) {
1183 size_t size = 2 * reserved_semispace_capacity;
1186 if (base == NULL) return false;
1192 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1199 #define SET_NAME(name) \
1200 allocated_histogram_[name].set_name(#name); \
1201 promoted_histogram_[name].set_name(#name);
1205 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1207 2 * heap()->ReservedSemiSpaceSize());
1211 maximum_semispace_capacity);
1213 initial_semispace_capacity, maximum_semispace_capacity);
1317 while (it.has_next()) {
1324 if (heap()->inline_allocation_disabled()) {
1365 int remaining_in_page = static_cast<int>(limit - top);
1381 Address new_top = old_top + size_in_bytes;
1387 return AllocateRaw(size_in_bytes);
1394 return AllocateRaw(size_in_bytes);
1404 void NewSpace::Verify() {
1413 while (current != top()) {
1423 Map* map = object->map();
1428 CHECK(!object->IsMap());
1429 CHECK(!object->IsCode());
1432 object->ObjectVerify();
1436 int size = object->Size();
1437 object->IterateBody(map->instance_type(), size, &visitor);
1444 CHECK(!page->is_anchor());
1445 current = page->area_start();
1461 int maximum_capacity) {
1491 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1497 for (int i = 0; i < pages; i++) {
1514 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1530 while (it.has_next()) {
1531 size += it.next()->CommittedPhysicalMemory();
1539 if (!Commit()) return false;
1550 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1557 for (int i = pages_before; i < pages_after; i++) {
1566 last_page = new_page;
1612 if (becomes_to_space) {
1638 DCHECK(to->anchor_.next_page() != &to->anchor_);
1669 while (it.has_next()) {
1676 void SemiSpace::Print() {}
1680 void SemiSpace::Verify() {
1685 CHECK(page->semi_space() == this);
1686 CHECK(page->InNewSpace());
1692 if (!is_from_space) {
1695 if (page->heap()->incremental_marking()->IsMarking()) {
1705 CHECK(page->prev_page()->next_page() == page);
1706 page = page->next_page();
1721 if (page == end_page) {
1722 CHECK(start <= end);
1724 while (page != end_page) {
1725 page = page->next_page();
1767 static void ClearHistograms(Isolate* isolate) {
1769 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1771 #undef DEF_TYPE_NAME
1773 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1775 #undef CLEAR_HISTOGRAM
1777 isolate->js_spill_information()->Clear();
1781 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1783 code_kind_statistics[i] = 0;
1788 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1789 PrintF("\n Code kind histograms: \n");
1791 if (code_kind_statistics[i] > 0) {
1792 PrintF(" %-20s: %10d bytes\n",
1794 code_kind_statistics[i]);
1801 static int CollectHistogramInfo(HeapObject* obj) {
1802 Isolate* isolate = obj->GetIsolate();
1805 DCHECK(isolate->heap_histograms()[type].name() != NULL);
1806 isolate->heap_histograms()[type].increment_number(1);
1807 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1809 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1811 ->IncrementSpillStatistics(isolate->js_spill_information());
1818 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1819 PrintF("\n Object Histogram:\n");
1821 if (isolate->heap_histograms()[i].number() > 0) {
1822 PrintF(" %-34s%10d (%10d bytes)\n",
1823 isolate->heap_histograms()[i].name(),
1824 isolate->heap_histograms()[i].number(),
1825 isolate->heap_histograms()[i].bytes());
1831 int string_number = 0;
1832 int string_bytes = 0;
1833 #define INCREMENT(type, size, name, camel_name) \
1834 string_number += isolate->heap_histograms()[type].number(); \
1835 string_bytes += isolate->heap_histograms()[type].bytes();
1838 if (string_number > 0) {
1839 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1843 if (FLAG_collect_heap_spill_statistics && print_spill) {
1844 isolate->js_spill_information()->Print();
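// The histogram excerpts above follow one pattern: each object bumps a
// per-instance-type (number, bytes) pair, and reporting walks the table and
// prints only non-empty rows. A minimal, self-contained sketch of that
// pattern; HistogramRow and ReportRows are hypothetical names, not V8's.
#include <cstdio>

struct HistogramRow {
  const char* name;
  int number;
  int bytes;
};

void ReportRows(const HistogramRow* rows, int count) {
  for (int i = 0; i < count; i++) {
    if (rows[i].number > 0) {
      std::printf("    %-34s%10d (%10d bytes)\n", rows[i].name,
                  rows[i].number, rows[i].bytes);
    }
  }
}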
1871 const char* description) {
1872 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1874 int string_number = 0;
1875 int string_bytes = 0;
1876 #define INCREMENT(type, size, name, camel_name) \
1877 string_number += info[type].number(); \
1878 string_bytes += info[type].bytes();
1881 if (string_number > 0) {
1883 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1888 if (info[i].number() > 0) {
1889 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
1893 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1899 if (FLAG_heap_stats) {
1905 PrintF("\n Object Histogram:\n");
1956 DCHECK(size_in_bytes > 0);
1988 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2001 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2015 if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2029 intptr_t free_bytes = 0;
2030 if (category->top() != NULL) {
2034 base::LockGuard<base::Mutex> target_lock_guard(mutex());
2035 base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2063 while (*n != NULL) {
2066 sum += free_space->Size();
2083 while (node != NULL) {
2085 node = node->next();
2096 while (node != NULL &&
2099 node = node->next();
2104 *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2121 if (node != NULL && *node_size < size_in_bytes) {
2122 Free(node, *node_size);
2144 if (*map_location == NULL) {
2145 *map_location = heap->free_space_map();
2147 DCHECK(*map_location == heap->free_space_map());
2160 intptr_t free_bytes = 0;
2178 if (size_in_bytes == 0) return 0;
2186 page->add_non_available_small_blocks(size_in_bytes);
2187 return size_in_bytes;
2194 page->add_available_in_small_free_list(size_in_bytes);
2197 page->add_available_in_medium_free_list(size_in_bytes);
2200 page->add_available_in_large_free_list(size_in_bytes);
2203 page->add_available_in_huge_free_list(size_in_bytes);
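// Free() above routes a freed region into one of four categories (small,
// medium, large, huge) by size and keeps the per-page availability counters
// in step. Below is a sketch of the bucketing decision only; the thresholds
// are made up for illustration, and the real limits (kSmallListMax and
// friends) are defined in spaces.h.
enum class FreeCategory { kSmall, kMedium, kLarge, kHuge };

FreeCategory CategoryFor(int size_in_bytes) {
  const int kSmallMax = 256;    // hypothetical threshold
  const int kMediumMax = 2048;  // hypothetical threshold
  const int kLargeMax = 16384;  // hypothetical threshold
  if (size_in_bytes <= kSmallMax) return FreeCategory::kSmall;
  if (size_in_bytes <= kMediumMax) return FreeCategory::kMedium;
  if (size_in_bytes <= kLargeMax) return FreeCategory::kLarge;
  return FreeCategory::kHuge;
}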
2218 DCHECK(size_in_bytes <= *node_size);
2220 page->add_available_in_small_free_list(-(*node_size));
2229 DCHECK(size_in_bytes <= *node_size);
2231 page->add_available_in_medium_free_list(-(*node_size));
2240 DCHECK(size_in_bytes <= *node_size);
2242 page->add_available_in_large_free_list(-(*node_size));
2251 cur = (*cur)->next_address()) {
2253 while (cur_node != NULL &&
2256 huge_list_available -= size;
2258 page->add_available_in_huge_free_list(-size);
2259 cur_node = cur_node->next();
2263 if (cur_node == NULL) {
2268 DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
2270 int size = cur_as_free_space->Size();
2271 if (size >= size_in_bytes) {
2274 *cur = node->next();
2276 huge_list_available -= size;
2278 page->add_available_in_huge_free_list(-size);
2297 DCHECK(size_in_bytes <= *node_size);
2299 page->add_available_in_small_free_list(-(*node_size));
2304 DCHECK(size_in_bytes <= *node_size);
2306 page->add_available_in_medium_free_list(-(*node_size));
2311 DCHECK(size_in_bytes <= *node_size);
2313 page->add_available_in_large_free_list(-(*node_size));
2327 DCHECK(0 < size_in_bytes);
2342 int new_node_size = 0;
2344 if (new_node == NULL) {
2349 int bytes_left = new_node_size - size_in_bytes;
2362 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2375 }
else if (bytes_left > kThreshold &&
2377 FLAG_incremental_marking_steps) {
2383 new_node_size - size_in_bytes - linear_size);
2385 new_node->address() + size_in_bytes + linear_size);
2386 } else if (bytes_left > 0) {
2390 new_node->address() + new_node_size);
2403 p->set_available_in_huge_free_list(0);
2405 if (sum < p->area_size()) {
2409 p->set_available_in_small_free_list(0);
2410 p->set_available_in_medium_free_list(0);
2411 p->set_available_in_large_free_list(0);
2435 intptr_t FreeListCategory::SumFreeList() {
2438 while (cur != NULL) {
2448 static const int kVeryLongFreeList = 500;
2451 int FreeListCategory::FreeListLength() {
2453 FreeListNode* cur = top();
2454 while (cur != NULL) {
2457 if (length == kVeryLongFreeList) return length;
2463 bool FreeList::IsVeryLong() {
2464 if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2465 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2466 if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2467 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
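// FreeListLength() and IsVeryLong() above walk a singly linked free list but
// cap the walk at kVeryLongFreeList so statistics collection stays cheap on
// degenerate lists. A standalone sketch of the same idea; Node is a
// hypothetical stand-in for FreeListNode.
struct Node {
  Node* next;
};

int CappedLength(const Node* cur) {
  const int kVeryLongFreeList = 500;
  int length = 0;
  while (cur != nullptr) {
    length++;
    if (length == kVeryLongFreeList) return length;  // "very long", stop here
    cur = cur->next;
  }
  return length;
}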
2475 intptr_t FreeList::SumFreeLists() {
2503 DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
2520 ->IsEvacuationCandidate()) {
2533 int size_in_bytes) {
2559 if (object != NULL) return object;
2564 if (free_chunk >= size_in_bytes) {
2569 if (object != NULL) return object;
2576 if (!heap()->always_allocate() &&
2577 heap()->OldGenerationAllocationLimitReached()) {
2581 if (object != NULL) return object;
2598 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2599 CommentStatistic* comments_statistics =
2600 isolate->paged_space_comments_statistics();
2601 ReportCodeKindStatistics(isolate->code_kind_statistics());
2603 "Code comment statistics (\" [ comment-txt : size/ "
2604 "count (average)\"):\n");
2605 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2606 const CommentStatistic& cs = comments_statistics[i];
2608 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2609 cs.size / cs.count);
2616 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2617 CommentStatistic* comments_statistics =
2618 isolate->paged_space_comments_statistics();
2619 ClearCodeKindStatistics(isolate->code_kind_statistics());
2620 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2621 comments_statistics[i].Clear();
2623 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2624 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2625 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2631 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2632 CommentStatistic* comments_statistics =
2633 isolate->paged_space_comments_statistics();
2635 if (delta <= 0) return;
2636 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2639 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2640 if (comments_statistics[i].comment == NULL) {
2641 cs = &comments_statistics[i];
2642 cs->comment = comment;
2644 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2645 cs = &comments_statistics[i];
2657 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2660 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2661 if (tmp[0] != '[') {
2667 const char* const comment_txt =
2668 reinterpret_cast<const char*>(it->rinfo()->data());
2669 const byte* prev_pc = it->rinfo()->pc();
2677 const char* const txt =
2678 reinterpret_cast<const char*>(it->rinfo()->data());
2679 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2680 if (txt[0] == ']') break;
2682 CollectCommentStatistics(isolate, it);
2684 prev_pc = it->rinfo()->pc();
2688 EnterComment(isolate, comment_txt, flat_delta);
2695 void PagedSpace::CollectCodeStatistics() {
2697 HeapObjectIterator obj_it(this);
2698 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2699 if (obj->IsCode()) {
2700 Code* code = Code::cast(obj);
2701 isolate->code_kind_statistics()[code->kind()] += code->Size();
2702 RelocIterator it(code);
2704 const byte* prev_pc = code->instruction_start();
2705 while (!it.done()) {
2707 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2708 CollectCommentStatistics(isolate, &it);
2709 prev_pc = it.rinfo()->pc();
2714 DCHECK(code->instruction_start() <= prev_pc &&
2715 prev_pc <= code->instruction_end());
2716 delta += static_cast<int>(code->instruction_end() - prev_pc);
2717 EnterComment(isolate, "NoComment", delta);
2723 void PagedSpace::ReportStatistics() {
2732 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2735 ClearHistograms(heap()->isolate());
2736 HeapObjectIterator obj_it(this);
2737 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2738 CollectHistogramInfo(obj);
2739 ReportHistogram(heap()->isolate(), true);
2763 CHECK(object->IsPropertyCell());
2800 max_capacity_(max_capacity),
2823 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2838 if (!heap()->always_allocate() &&
2839 heap()->OldGenerationAllocationLimitReached()) {
2850 size_ += static_cast<int>(page->size());
2864 for (uintptr_t key = base; key <= limit; key++) {
2868 entry->value = page;
2878 reinterpret_cast<Object**>(object->address())[0] =
2879 heap()->fixed_array_map();
2892 while (current != NULL) {
2913 static_cast<uint32_t>(key), false);
2929 while (current != NULL) {
2933 bool is_pointer_object = object->IsFixedArray();
2934 MarkBit mark_bit = Marking::MarkBitFrom(object);
2935 if (mark_bit.Get()) {
2945 if (previous == NULL) {
2954 size_ -= static_cast<int>(page->size());
2964 for (uintptr_t key = base; key <= limit; key++) {
2969 if (is_pointer_object) {
2981 Address address = object->address();
2984 bool owned = (chunk->owner() == this);
2995 void LargeObjectSpace::Verify() {
2997 chunk = chunk->next_page()) {
3006 Map* map = object->map();
3014 CHECK(object->IsCode() || object->IsSeqString() ||
3015 object->IsExternalString() || object->IsFixedArray() ||
3016 object->IsFixedDoubleArray() || object->IsByteArray() ||
3017 object->IsConstantPoolArray());
3020 object->ObjectVerify();
3023 if (object->IsCode()) {
3025 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3026 } else if (object->IsFixedArray()) {
3027 FixedArray* array = FixedArray::cast(object);
3028 for (int j = 0; j < array->length(); j++) {
3029 Object* element = array->get(j);
3030 if (element->IsHeapObject()) {
3031 HeapObject* element_object = HeapObject::cast(element);
3033 CHECK(element_object->map()->IsMap());
3043 void LargeObjectSpace::Print() {
3044 OFStream os(stdout);
3046 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3052 void LargeObjectSpace::ReportStatistics() {
3054 int num_objects = 0;
3055 ClearHistograms(heap()->isolate());
3057 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3059 CollectHistogramInfo(obj);
3063 " number of objects %d, "
3066 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3070 void LargeObjectSpace::CollectCodeStatistics() {
3073 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3074 if (obj->IsCode()) {
3075 Code* code = Code::cast(obj);
3076 isolate->code_kind_statistics()[code->kind()] += code->Size();
3082 void Page::Print() {
3086 printf(" --------------------------------------\n");
3087 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3088 unsigned mark_size = 0;
3089 for (HeapObject* object = objects.Next(); object != NULL;
3090 object = objects.Next()) {
3091 bool is_marked = Marking::MarkBitFrom(object).Get();
3092 PrintF(" %c ", (is_marked ? '!' : ' '));
3096 object->ShortPrint();
3099 printf(" --------------------------------------\n");
3100 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());