#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
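These checks rely on power-of-two alignment: a page-aligned address has all bits below the page size cleared, so masking with the page alignment mask must yield zero. A minimal standalone sketch of the same mask-based test, using hypothetical constants in place of Page::kPageSize and Page::kPageAlignmentMask:

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for Page::kPageSize / Page::kPageAlignmentMask.
constexpr uintptr_t kExamplePageSize = 1u << 20;  // 1 MB pages
constexpr uintptr_t kExamplePageAlignmentMask = kExamplePageSize - 1;

// An address is page aligned iff the low bits selected by the mask are zero.
bool IsPageAligned(uintptr_t address) {
  return (address & kExamplePageAlignmentMask) == 0;
}

int main() {
  assert(IsPageAligned(0));
  assert(IsPageAligned(3 * kExamplePageSize));
  assert(!IsPageAligned(3 * kExamplePageSize + 8));
  return 0;
}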
class MemoryAllocator;
return reinterpret_cast<Bitmap*>(addr);
return MarkBit(cell, mask, data_only);
for (uint32_t mask = 1; mask != 0; mask <<= 1) {
  if ((mask & himask) != 0) PrintF("[");
  PrintF((mask & word) ? "1" : "0");
  if ((mask & himask) != 0) PrintF("]");
static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
if (cells()[i] != 0) {
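IsSeq treats a 32-bit mark-bitmap cell as "sequential" when every bit agrees (all zero or all one), and the surrounding loop scans cells for any set bit. A self-contained sketch of the same two cell tests, assuming only that a cell is a plain uint32_t of mark bits:

#include <cassert>
#include <cstdint>

// A cell is "sequential" when all 32 mark bits agree.
bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

// A bitmap region is clean when no cell has any bit set.
bool IsClean(const uint32_t* cells, int cell_count) {
  for (int i = 0; i < cell_count; i++) {
    if (cells[i] != 0) return false;
  }
  return true;
}

int main() {
  uint32_t cells[3] = {0, 0, 0};
  assert(IsClean(cells, 3));
  cells[1] = 1u << 7;  // mark one object
  assert(!IsClean(cells, 3));
  assert(IsSeq(0) && IsSeq(0xFFFFFFFF) && !IsSeq(cells[1]));
  return 0;
}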
return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
if (FLAG_gc_verbose) {
  PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
if (FLAG_gc_verbose) {
  printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
int offset = static_cast<int>(a - address());
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

#undef FRAGMENTATION_STATS_ACCESSORS
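The macro stamps out a getter, a setter, and an accumulator for each fragmentation counter and is #undef'd after use. As an illustration, expanding it by hand for a field such as available_in_small_free_list_ (which appears in the MemoryChunk member list below) would produce the equivalent of the following toy class:

#include <cstdint>

// Hand expansion of FRAGMENTATION_STATS_ACCESSORS(intptr_t,
// available_in_small_free_list), shown on a standalone class for clarity.
class ExampleChunkStats {
 public:
  intptr_t available_in_small_free_list() { return available_in_small_free_list_; }
  void set_available_in_small_free_list(intptr_t available_in_small_free_list) {
    available_in_small_free_list_ = available_in_small_free_list;
  }
  void add_available_in_small_free_list(intptr_t available_in_small_free_list) {
    available_in_small_free_list_ += available_in_small_free_list;
  }

 private:
  intptr_t available_in_small_free_list_ = 0;
};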
virtual void Print() = 0;
bool SetUp(size_t requested_size);
if (!valid()) return false;
const size_t commit_size,
for (int idx = 0; idx < kSize; idx++) {
for (int idx = start_region; idx <= end_region; idx++) {
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
void ReportStatistics();
intptr_t commit_area_size,
Address start, size_t commit_size,
size_t reserved_size);
if (next_obj != NULL) return next_obj;
bool VerifyPagedAllocation() {
  return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
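VerifyPagedAllocation checks, among other things, that the allocation top and limit map to the same page; Page::FromAllocationTop appears in the member list below and turns an address (including a one-past-the-end top) back into its page. A minimal sketch of that same-page test, assuming power-of-two sized and aligned pages and a hypothetical page size:

#include <cassert>
#include <cstdint>

// Hypothetical page size; real pages are likewise power-of-two sized and aligned.
constexpr uintptr_t kExamplePageSize = 1u << 20;

// Rounds an allocation top down to the start of its page. In this sketch a top
// that sits exactly on a page boundary belongs to the preceding page, hence -1.
uintptr_t PageFromAllocationTop(uintptr_t top) {
  return (top - 1) & ~(kExamplePageSize - 1);
}

bool OnSamePage(uintptr_t top, uintptr_t limit) {
  return PageFromAllocationTop(top) == PageFromAllocationTop(limit);
}

int main() {
  uintptr_t page = 3 * kExamplePageSize;
  assert(OnSamePage(page + 64, page + kExamplePageSize));    // limit at page end
  assert(!OnSamePage(page + 64, page + kExamplePageSize + 8));
  return 0;
}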
capacity_ += size_in_bytes;
size_ += size_in_bytes;
if (capacity_ > max_capacity_) {
  max_capacity_ = capacity_;
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
size_ += size_in_bytes;
size_ -= size_in_bytes;
DCHECK(size_in_bytes >= 0);
waste_ += size_in_bytes;
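These fragments belong to the allocation accounting helpers (ExpandSpace, ShrinkSpace, AllocateBytes, DeallocateBytes, WasteBytes in the member list below): capacity_ tracks committed bytes, size_ tracks bytes handed out to objects, max_capacity_ records the high-water mark, and waste_ accumulates unusable slack. A compact sketch of that bookkeeping, under the assumption that all counters are plain intptr_t values with no overflow checking:

#include <cstdint>

// Minimal sketch of the accounting pattern; names mirror the accessors listed
// below but this class is illustrative, not the header's AllocationStats.
class ExampleAllocationStats {
 public:
  void ExpandSpace(intptr_t size_in_bytes) {   // commit more memory
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) max_capacity_ = capacity_;
  }
  void ShrinkSpace(intptr_t size_in_bytes) {   // uncommit memory
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
  }
  void AllocateBytes(intptr_t size_in_bytes) { size_ += size_in_bytes; }
  void DeallocateBytes(intptr_t size_in_bytes) { size_ -= size_in_bytes; }
  void WasteBytes(intptr_t size_in_bytes) { waste_ += size_in_bytes; }

  intptr_t Capacity() const { return capacity_; }
  intptr_t Size() const { return size_; }
  intptr_t Waste() const { return waste_; }

 private:
  intptr_t capacity_ = 0;      // bytes committed to the space
  intptr_t max_capacity_ = 0;  // high-water mark of capacity_
  intptr_t size_ = 0;          // bytes currently allocated to objects
  intptr_t waste_ = 0;         // bytes lost to fragmentation
};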
intptr_t SumFreeList();
int FreeListLength();
return maximum_freed;
intptr_t SumFreeLists();
template <typename T>
return size_in_bytes - wasted;
int old_linear_size = static_cast<int>(limit() - top());
virtual void VerifyObject(HeapObject* obj) {}
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
void SetUp(Address start, int initial_capacity, int maximum_capacity);
bool GrowTo(int new_capacity);
if (next_page == anchor()) return false;
virtual void Verify();
virtual void Print();
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
virtual void Verify();
virtual void Print() { to_space_.Print(); }
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())
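The macro asserts the linear-allocation invariant for a semispace page: the allocation top must lie inside the current page, and the limit must not run past the page end. A self-contained illustration of the same invariant, using hypothetical structs and addresses rather than the header's AllocationInfo and NewSpace types:

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for (info) and (space) in the macro above.
struct ExampleAllocationInfo {
  uintptr_t top;    // next allocation address
  uintptr_t limit;  // end of the linear allocation area
};
struct ExampleSemiSpacePage {
  uintptr_t page_low;   // first allocatable address on the current page
  uintptr_t page_high;  // one past the last allocatable address
};

void CheckSemiSpaceAllocationInfo(const ExampleAllocationInfo& info,
                                  const ExampleSemiSpacePage& space) {
  assert(space.page_low <= info.top);
  assert(info.top <= space.page_high);
  assert(info.limit <= space.page_high);
}

int main() {
  ExampleSemiSpacePage page{0x1000, 0x2000};
  ExampleAllocationInfo info{0x1400, 0x2000};
  CheckSemiSpaceAllocationInfo(info, page);
  return 0;
}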
virtual void Verify();
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
case kOldPointerState: {
  if (old_pointer_iterator_.has_next()) {
    return old_pointer_iterator_.next();
if (map_iterator_.has_next()) {
  return map_iterator_.next();
state_ = kLargeObjectState;
case kLargeObjectState: {
  heap_object = lo_iterator_.Next();
  if (heap_object == NULL) {
    state_ = kFinishedState;
} while (!heap_object->IsFixedArray());
case kFinishedState:
enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
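The switch above is part of a state machine that walks several spaces in turn (old-pointer pages, map pages, then large objects) and advances state_ whenever the current source is exhausted. A reduced sketch of the same pattern over two hypothetical sources, keeping only the state-advance logic:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

// Reduced model: iterate two hypothetical chunk lists in sequence, moving to
// the next state when the current list runs out, as the switch above does.
class ExampleChunkIterator {
 public:
  ExampleChunkIterator(std::vector<int> first, std::vector<int> second)
      : first_(std::move(first)), second_(std::move(second)) {}

  // Returns a pointer to the next chunk, or nullptr when finished.
  const int* Next() {
    switch (state_) {
      case kFirstState:
        if (index_ < first_.size()) return &first_[index_++];
        state_ = kSecondState;
        index_ = 0;
        // Intentional fall through: continue with the next source.
      case kSecondState:
        if (index_ < second_.size()) return &second_[index_++];
        state_ = kFinishedState;
        // Intentional fall through.
      case kFinishedState:
      default:
        return nullptr;
    }
  }

 private:
  enum State { kFirstState, kSecondState, kFinishedState };
  State state_ = kFirstState;
  std::size_t index_ = 0;
  std::vector<int> first_;
  std::vector<int> second_;
};

int main() {
  ExampleChunkIterator it({1, 2}, {10});
  while (const int* chunk = it.Next()) std::printf("chunk %d\n", *chunk);
  return 0;
}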
struct CommentStatistic {
  const char* comment;
static const int kMaxComments = 64;
#define SLOW_DCHECK(condition)
Isolate represents an isolated instance of the V8 engine.
void TakeControl(VirtualMemory *from)
INLINE(void set_limit(Address limit))
INLINE(Address top()) const
INLINE(Address limit()) const
Address * limit_address()
INLINE(void set_top(Address top))
AllocationResult(Object *object)
AllocationSpace RetrySpace()
AllocationSpace retry_space_
Object * ToObjectChecked()
static AllocationResult Retry(AllocationSpace space=NEW_SPACE)
AllocationResult(AllocationSpace space)
void increment_number(int num)
LargeObjectIterator lo_iterator_
PageIterator(PagedSpace *space)
NewSpacePageIterator(NewSpace *space)
void AllocateBytes(intptr_t size_in_bytes)
NewSpacePage * prev_page_
void WasteBytes(int size_in_bytes)
void ExpandSpace(int size_in_bytes)
NewSpacePage * next_page_
void ShrinkSpace(int size_in_bytes)
PageIterator map_iterator_
void increment_bytes(int size)
NewSpacePage * last_page_
PointerChunkIterator(Heap *heap)
PageIterator old_pointer_iterator_
NewSpacePageIterator(Address start, Address limit)
NewSpacePageIterator(SemiSpace *space)
void DeallocateBytes(intptr_t size_in_bytes)
void Print(uint32_t pos, uint32_t cell)
static bool IsSeq(uint32_t cell)
static const uint32_t kBytesPerCell
INLINE(Address address())
static void PrintWord(uint32_t word, uint32_t himask=0)
INLINE(static Bitmap *FromAddress(Address addr))
INLINE(static uint32_t IndexToCell(uint32_t index))
MarkBit MarkBitFromIndex(uint32_t index, bool data_only=false)
static const uint32_t kBytesPerCellLog2
static const size_t kLength
INLINE(static uint32_t CellAlignIndex(uint32_t index))
static const size_t kSize
static const uint32_t kBitIndexMask
static const uint32_t kBitsPerCellLog2
static int SizeFor(int cells_count)
INLINE(static uint32_t CellToIndex(uint32_t index))
static void Clear(MemoryChunk *chunk)
static int CellsForLength(int length)
static const uint32_t kBitsPerCell
INLINE(MarkBit::CellType *cells())
virtual void VerifyObject(HeapObject *obj)
virtual int RoundSizeDownToObjectAlignment(int size)
CellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
FreeBlock(Address start_arg, size_t size_arg)
FreeBlock(void *start_arg, size_t size_arg)
bool contains(Address address)
int current_allocation_block_index_
base::VirtualMemory * code_range_
void FreeRawMemory(Address buf, size_t length)
CodeRange(Isolate *isolate)
DISALLOW_COPY_AND_ASSIGN(CodeRange)
static int CompareFreeBlockAddress(const FreeBlock *left, const FreeBlock *right)
List< FreeBlock > allocation_list_
bool SetUp(size_t requested_size)
List< FreeBlock > free_list_
bool GetNextAllocationBlock(size_t requested)
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, const size_t commit_size, size_t *allocated)
bool CommitRawMemory(Address start, size_t length)
bool UncommitRawMemory(Address start, size_t length)
FreeListNode * end() const
int * GetAvailableAddress()
FreeListNode * top() const
void set_available(int available)
void RepairFreeList(Heap *heap)
void Free(FreeListNode *node, int size_in_bytes)
void set_end(FreeListNode *end)
intptr_t EvictFreeListItemsInList(Page *p)
void set_top(FreeListNode *top)
intptr_t Concatenate(FreeListCategory *category)
bool ContainsPageFreeListItemsInList(Page *p)
FreeListNode * PickNodeFromList(int *node_size)
FreeListNode ** GetEndAddress()
static FreeListNode * FromAddress(Address address)
void set_next(FreeListNode *next)
static bool IsFreeListNode(HeapObject *object)
static const int kNextOffset
void set_size(Heap *heap, int size_in_bytes)
static FreeListNode * cast(Object *object)
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode)
FreeListNode ** next_address()
FreeListCategory huge_list_
static int GuaranteedAllocatable(int maximum_freed)
bool ContainsPageFreeListItems(Page *p)
FreeListNode * FindNodeFor(int size_in_bytes, int *node_size)
FreeListCategory medium_list_
intptr_t Concatenate(FreeList *free_list)
MUST_USE_RESULT HeapObject * Allocate(int size_in_bytes)
FreeListCategory large_list_
FreeList(PagedSpace *owner)
FreeListCategory * medium_list()
static const int kMaxBlockSize
FreeListCategory * huge_list()
static const int kSmallListMax
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList)
static const int kLargeListMax
static const int kMinBlockSize
FreeListCategory * small_list()
FreeListCategory * large_list()
static const int kSmallAllocationMax
static const int kLargeAllocationMax
int Free(Address start, int size_in_bytes)
static const int kMediumListMax
static const int kSmallListMin
intptr_t EvictFreeListItems(Page *p)
void RepairLists(Heap *heap)
FreeListCategory small_list_
static const int kMediumAllocationMax
static const int kHeaderSize
HeapObject * FromCurrentPage()
void Initialize(PagedSpace *owner, Address start, Address end, PageMode mode, HeapObjectCallback size_func)
virtual HeapObject * next_object()
HeapObjectCallback size_func_
HeapObjectIterator(PagedSpace *space)
static HeapObject * FromAddress(Address address)
void set_name(const char *name)
virtual HeapObject * next_object()
LargeObjectIterator(LargeObjectSpace *space)
HeapObjectCallback size_func_
void FreeUnmarkedObjects()
virtual intptr_t SizeOfObjects()
virtual ~LargeObjectSpace()
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
intptr_t MaximumCommittedMemory()
MUST_USE_RESULT AllocationResult AllocateRaw(int object_size, Executability executable)
bool Contains(HeapObject *obj)
static intptr_t ObjectSizeFor(intptr_t chunk_size)
bool CanAllocateSize(int size)
LargePage * FindPage(Address a)
Object * FindObject(Address a)
intptr_t CommittedMemory()
intptr_t maximum_committed_
bool SlowContains(Address addr)
size_t CommittedPhysicalMemory()
static LargePage * Initialize(Heap *heap, MemoryChunk *chunk)
LargePage * next_page() const
void set_next_page(LargePage *page)
static const int kMapsPerPage
virtual void VerifyObject(HeapObject *obj)
static const int kMaxMapPageIndex
int CompactionThreshold()
virtual int RoundSizeDownToObjectAlignment(int size)
MapSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
const int max_map_space_pages_
MarkBit(CellType *cell, CellType mask, bool data_only)
static int CodePageGuardSize()
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
List< MemoryAllocationCallbackRegistration > memory_allocation_callbacks_
static int CodePageAreaEndOffset()
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
bool CommitMemory(Address addr, size_t size, Executability executable)
bool UncommitBlock(Address start, size_t size)
MemoryChunk * AllocateChunk(intptr_t reserve_area_size, intptr_t commit_area_size, Executability executable, Space *space)
void UpdateAllocatedSpaceLimits(void *low, void *high)
static int CodePageAreaSize()
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
void FreeMemory(base::VirtualMemory *reservation, Executability executable)
void Free(MemoryChunk *chunk)
void ZapBlock(Address start, size_t size)
static int CodePageAreaStartOffset()
void * lowest_ever_allocated_
Page * InitializePagesInChunk(int chunk_id, int pages_in_chunk, PagedSpace *owner)
Address ReserveAlignedMemory(size_t requested, size_t alignment, base::VirtualMemory *controller)
intptr_t AvailableExecutable()
MemoryAllocator(Isolate *isolate)
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, size_t alignment, Executability executable, base::VirtualMemory *controller)
size_t capacity_executable_
void * highest_ever_allocated_
intptr_t SizeExecutable()
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator)
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
static int CodePageGuardStartOffset()
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory *vm, Address start, size_t commit_size, size_t reserved_size)
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
bool CommitBlock(Address start, size_t size, Executability executable)
bool IsOutsideAllocatedSpace(const void *address) const
void initialize_scan_on_scavenge(bool scan)
static const size_t kWriteBarrierCounterOffset
static const int kPointersFromHereAreInterestingMask
Executability executable()
void set_owner(Space *space)
static const int kFlagsOffset
void set_reserved_memory(base::VirtualMemory *reservation)
intptr_t available_in_large_free_list_
base::VirtualMemory * reserved_memory()
bool CommitArea(size_t requested)
intptr_t non_available_small_blocks_
bool Contains(Address addr)
SlotsBuffer ** slots_buffer_address()
void SetFlags(intptr_t flags, intptr_t mask)
static void IncrementLiveBytesFromMutator(Address address, int by)
void IncrementLiveBytes(int by)
void set_write_barrier_counter(int counter)
bool IsLeftOfProgressBar(Object **slot)
intptr_t available_in_medium_free_list_
static const int kObjectStartOffset
static const int kEvacuationCandidateMask
void set_next_chunk(MemoryChunk *next)
int store_buffer_counter()
static void IncrementLiveBytesFromGC(Address address, int by)
bool ContainsLimit(Address addr)
void MarkEvacuationCandidate()
base::AtomicWord parallel_sweeping_
static uint32_t FastAddressToMarkbitIndex(Address addr)
void set_scan_on_scavenge(bool scan)
Address MarkbitIndexToAddress(uint32_t index)
void set_store_buffer_counter(int counter)
void InitializeReservedMemory()
MemoryChunk * prev_chunk() const
void SetFlagTo(int flag, bool value)
static const int kBodyOffset
void set_prev_chunk(MemoryChunk *prev)
static const MemoryChunk * FromAddress(const byte *a)
POINTERS_FROM_HERE_ARE_INTERESTING
NEW_SPACE_BELOW_AGE_MARK
POINTERS_TO_HERE_ARE_INTERESTING
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
static const int kWriteBarrierCounterGranularity
static const intptr_t kSizeOffset
intptr_t available_in_small_free_list_
int write_barrier_counter()
intptr_t available_in_huge_free_list_
static const size_t kHeaderSize
static MemoryChunk * FromAddress(Address a)
MemoryChunk * next_chunk() const
bool TryParallelSweeping()
bool IsEvacuationCandidate()
base::VirtualMemory reservation_
static const intptr_t kAlignmentMask
static MemoryChunk * FromAnyPointerAddress(Heap *heap, Address addr)
SlotsBuffer * slots_buffer_
size_t CommittedPhysicalMemory()
uint32_t AddressToMarkbitIndex(Address addr)
static const int kPointersToHereAreInterestingMask
void set_skip_list(SkipList *skip_list)
ParallelSweepingState parallel_sweeping()
static const int kSkipEvacuationSlotsRecordingMask
static const int kObjectStartAlignment
SlotsBuffer * slots_buffer()
bool ShouldSkipEvacuationSlotRecording()
void set_size(size_t size)
void set_parallel_sweeping(ParallelSweepingState state)
intptr_t write_barrier_counter_
static const intptr_t kLiveBytesOffset
void SetArea(Address area_start, Address area_end)
base::AtomicWord next_chunk_
void ClearEvacuationCandidate()
static const intptr_t kAlignment
static const size_t kSlotsBufferOffset
static void UpdateHighWaterMark(Address mark)
void InsertAfter(MemoryChunk *other)
base::AtomicWord prev_chunk_
int store_buffer_counter_
void set_progress_bar(int progress_bar)
NewSpacePage * prev_page() const
static bool IsAtEnd(Address addr)
void set_prev_page(NewSpacePage *page)
NewSpacePage(SemiSpace *owner)
static bool OnSamePage(Address address1, Address address2)
static const int kAreaSize
static NewSpacePage * Initialize(Heap *heap, Address start, SemiSpace *semi_space)
static bool IsAtStart(Address addr)
void InitializeAsAnchor(SemiSpace *owner)
static const intptr_t kCopyOnFlipFlagsMask
void set_next_page(NewSpacePage *page)
static NewSpacePage * FromAddress(Address address_in_page)
static NewSpacePage * FromLimit(Address address_limit)
NewSpacePage * next_page() const
bool IsAtMaximumCapacity()
void LowerInlineAllocationLimit(intptr_t step)
base::VirtualMemory reservation_
void RecordPromotion(HeapObject *obj)
SemiSpace * active_space()
Address FromSpacePageHigh()
AllocationInfo allocation_info_
Address top_on_previous_step_
MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes)
bool FromSpaceContains(Object *o)
bool ToSpaceContains(Object *o)
intptr_t CommittedMemory()
size_t CommittedPhysicalMemory()
INLINE(uint32_t AddressToMarkbitIndex(Address addr))
void set_age_mark(Address mark)
Address * allocation_top_address()
void ResetAllocationInfo()
intptr_t inline_allocation_limit_step_
intptr_t MaximumCommittedMemory()
bool SetUp(int reserved_semispace_size_, int max_semi_space_size)
void set_top(Address top)
void UpdateInlineAllocationLimit(int size_in_bytes)
void UpdateAllocationInfo()
bool CommitFromSpaceIfNeeded()
void RecordAllocation(HeapObject *obj)
Address * allocation_limit_address()
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes))
bool FromSpaceContains(Address address)
Address FromSpacePageLow()
HistogramInfo * promoted_histogram_
bool ToSpaceContains(Address address)
intptr_t inline_allocation_limit_step()
int InitialTotalCapacity()
HistogramInfo * allocated_histogram_
uintptr_t object_expected_
INLINE(Address MarkbitIndexToAddress(uint32_t index))
virtual ~ObjectIterator()
virtual HeapObject * next_object()=0
OldSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
INLINE(int Offset(Address a))
static const int kPageSize
INLINE(static Page *FromAllocationTop(Address top))
void InitializeAsAnchor(PagedSpace *owner)
void ResetFreeListStatistics()
static const intptr_t kPageAlignmentMask
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
void set_next_page(Page *page)
static const int kMaxRegularHeapObjectSize
INLINE(static Page *FromAddress(Address a))
static bool IsAlignedToPageSize(Address a)
Address OffsetToAddress(int offset)
void set_prev_page(Page *page)
Address * allocation_limit_address()
void ResetUnsweptFreeBytes()
intptr_t CommittedMemory()
void CreateEmergencyMemory()
size_t CommittedPhysicalMemory()
intptr_t MaximumCommittedMemory()
void SetTopAndLimit(Address top, Address limit)
AllocationInfo allocation_info_
void UseEmergencyMemory()
HeapObject * AllocateLinearly(int size_in_bytes)
int Free(Address start, int size_in_bytes)
void IncreaseUnsweptFreeBytes(Page *p)
intptr_t unswept_free_bytes_
void ObtainFreeListStatistics(Page *p, SizeStats *sizes)
intptr_t SizeOfFirstPage()
void ResetFreeListStatistics()
void EmptyAllocationInfo()
static bool ShouldBeSweptBySweeperThreads(Page *p)
Address * allocation_top_address()
bool EnsureSweeperProgress(intptr_t size_in_bytes)
void PrepareForMarkCompact()
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
AllocationStats accounting_stats_
bool Contains(HeapObject *o)
void AddToAccountingStats(intptr_t bytes)
void ReleasePage(Page *page)
void DecreaseUnsweptFreeBytes(Page *p)
Page * end_of_unswept_pages()
MUST_USE_RESULT HeapObject * WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes)
MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
bool HasEmergencyMemory()
Object * FindObject(Address addr)
void set_end_of_unswept_pages(Page *page)
void EvictEvacuationCandidatesFromFreeLists()
void IncreaseCapacity(int size)
void IncrementUnsweptFreeBytes(intptr_t by)
void DecrementUnsweptFreeBytes(intptr_t by)
void RepairFreeListsAfterBoot()
virtual intptr_t SizeOfObjects()
friend class PageIterator
void FreeEmergencyMemory()
MemoryChunk * emergency_memory_
Page * end_of_unswept_pages_
MUST_USE_RESULT AllocationResult AllocateRaw(int size_in_bytes)
PropertyCellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
virtual int RoundSizeDownToObjectAlignment(int size)
virtual void VerifyObject(HeapObject *obj)
void Initialize(Address start, Address end, HeapObjectCallback size_func)
HeapObjectCallback size_func_
virtual HeapObject * next_object()
SemiSpaceIterator(NewSpace *space)
friend class NewSpacePageIterator
size_t MaximumCommittedMemory()
int maximum_total_capacity_
static void Swap(SemiSpace *from, SemiSpace *to)
SemiSpace(Heap *heap, SemiSpaceId semispace)
void FlipPages(intptr_t flags, intptr_t flag_mask)
intptr_t maximum_committed_
void SetCapacity(int new_capacity)
size_t CommittedPhysicalMemory()
NewSpacePage * current_page()
bool ShrinkTo(int new_capacity)
NewSpacePage * current_page_
int InitialTotalCapacity()
NewSpacePage * first_page()
void set_age_mark(Address mark)
void SetUp(Address start, int initial_capacity, int maximum_capacity)
bool GrowTo(int new_capacity)
int initial_total_capacity_
static void AssertValidRange(Address from, Address to)
uintptr_t object_expected_
int MaximumTotalCapacity()
Address StartFor(Address addr)
static const int kRegionSizeLog2
void AddObject(Address addr, int size)
static void Update(Address addr, int size)
static int RegionNumber(Address addr)
STATIC_ASSERT(Page::kPageSize % kRegionSize==0)
static const int kRegionSize
Space(Heap *heap, AllocationSpace id, Executability executable)
virtual intptr_t Size()=0
virtual int RoundSizeDownToObjectAlignment(int size)
virtual intptr_t SizeOfObjects()
AllocationSpace identity()
Executability executable()
Executability executable_
#define CODE_POINTER_ALIGN(value)
#define POINTER_SIZE_ALIGN(value)
#define TRACK_MEMORY(name)
#define DCHECK_LE(v1, v2)
#define DCHECK_NOT_NULL(p)
#define DCHECK(condition)
T RoundDown(T x, intptr_t m)
bool IsPowerOfTwo32(uint32_t value)
Atomic32 Acquire_Load(volatile const Atomic32 *ptr)
void Release_Store(volatile Atomic32 *ptr, Atomic32 value)
void NoBarrier_Store(volatile Atomic8 *ptr, Atomic8 value)
Atomic8 NoBarrier_Load(volatile const Atomic8 *ptr)
Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)
bool operator==(const StoreRepresentation &rep1, const StoreRepresentation &rep2)
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
static LifetimePosition Min(LifetimePosition a, LifetimePosition b)
const intptr_t kCodeAlignment
const intptr_t kPageHeaderTagMask
const int kPointerSizeLog2
const int kBitsPerByteLog2
static LifetimePosition Max(LifetimePosition a, LifetimePosition b)
void PrintF(const char *format,...)
int(* HeapObjectCallback)(HeapObject *obj)
intptr_t HeapObjectTagMask()
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
bool IsAligned(T value, U alignment)
Debugger support for the V8 JavaScript engine.
void(* MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
#define FRAGMENTATION_STATS_ACCESSORS(type, name)
#define DCHECK_PAGE_OFFSET(offset)
MemoryAllocationCallback callback
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
#define T(name, string, precedence)