#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"

namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}
// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}
bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
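// Illustrative usage sketch, not part of the original spaces-inl.h. It only
// demonstrates the has_next()/next() protocol of PageIterator; the helper name
// CountPagesIn is hypothetical. The iterator walks the space's circular page
// list and stops once next_page_ wraps back to the anchor page.
static inline int CountPagesIn(PagedSpace* space) {
  int pages = 0;
  PageIterator it(space);
  while (it.has_next()) {
    Page* page = it.next();
    (void)page;  // A real caller would inspect or sweep the page here.
    pages++;
  }
  return pages;
}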
// -----------------------------------------------------------------------------
// NewSpacePageIterator

NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}
NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}
bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
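// Illustrative usage sketch, not part of the original spaces-inl.h. The helper
// name VisitToSpacePages is hypothetical; it shows that the NewSpace overload
// above iterates exactly the pages of to-space, from ToSpaceStart() up to the
// page containing ToSpaceEnd().
static inline void VisitToSpacePages(NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    (void)page;  // A real caller would process the semi-space page here.
  }
}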
// In HeapObjectIterator::FromCurrentPage(): filler objects are skipped so that
// only real heap objects are returned to the caller.
if (!obj->IsFiller()) {
  DCHECK_OBJECT_SIZE(obj_size);
  return obj;
}
// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION
void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}
void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif
// In Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
// PagedSpace* owner): the freshly committed memory chunk is reused in place as
// a Page belonging to the owning paged space.
Page* page = reinterpret_cast<Page*>(chunk);
bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}
// In MemoryChunk::FromAnyPointerAddress(), while scanning the large-object
// space: fixed arrays are the only pointer-containing objects there, so other
// large objects can be ignored.
if (o->IsFixedArray()) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
  if (chunk->Contains(addr)) return chunk;
}
void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Subtract one from the mark: when a chunk is full, the top points to the
  // first address after the chunk, which effectively belongs to another chunk.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) chunk->high_water_mark_ = new_mark;
}
// -----------------------------------------------------------------------------
// PointerChunkIterator

PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}
// -----------------------------------------------------------------------------
// PagedSpace: raw allocation.

HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
  // Fast path: bump-pointer allocation in the current linear allocation area.
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object == NULL) {
    // The linear area is exhausted; fall back to the free list.
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      // Still nothing; take the slow path, which may expand the space.
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}
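// Illustrative usage sketch, not part of the original spaces-inl.h. The helper
// name TryAllocate is hypothetical, and it assumes the AllocationResult
// interface where To() returns true on success and a retry result means the
// caller should garbage-collect the indicated space and try again.
static inline HeapObject* TryAllocate(PagedSpace* space, int size_in_bytes) {
  AllocationResult allocation = space->AllocateRaw(size_in_bytes);
  HeapObject* object = NULL;
  if (allocation.To(&object)) return object;
  // Allocation failed; a real caller would trigger a GC and retry.
  return NULL;
}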
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map() ||
         map == heap->raw_unchecked_one_pointer_filler_map() ||
         map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_