spaces-inl.h
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
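
// Usage sketch (not from the original file; assumes a Heap* heap with an old
// pointer space): callers walk every page of a paged space with the
// has_next()/next() pair, stopping when the iterator wraps back to the
// space's anchor page.
//
//   PageIterator it(heap->old_pointer_space());
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit page ...
//   }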


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
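
// Usage sketch (not from the original file; assumes a NewSpace* new_space):
// the NewSpace constructor overload visits every page of to-space.
//
//   NewSpacePageIterator it(new_space);
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     // ... visit page ...
//   }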


// -----------------------------------------------------------------------------
// HeapObjectIterator
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
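
// Usage sketch (not from the original file; assumes a PagedSpace* space):
// HeapObjectIterator drives FromCurrentPage() page by page, so callers just
// pull objects until NULL. The unused gap between top and limit, and filler
// objects, are skipped by the loop above.
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }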


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}
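
// Worked example (not from the original file; hypothetical addresses,
// assuming 1 MB pages): the fast path masks the address down to a page
// boundary and trusts the result when that chunk has an owner; only pointers
// into large-object space fall through to the linear scan above.
//
//   addr  == 0x40f1230
//   maybe == reinterpret_cast<MemoryChunk*>(0x40f1230 & ~Page::kPageAlignmentMask)
//         == the chunk starting at 0x4000000, returned if it has an owner.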


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}
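
// Worked example (not from the original file; hypothetical numbers, assuming
// 1 MB chunks): if a chunk starts at 0x4000000 and is completely full, the
// allocation top equals the chunk end, e.g. mark == 0x4100000, which is the
// first address of the *next* chunk. FromAddress(mark - 1) maps the mark back
// to the chunk that actually owns the bytes, and new_mark becomes 0x100000.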


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}


Page* Page::next_page() {
  DCHECK(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  DCHECK(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}
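
// Worked example (not from the original file; hypothetical values): with
// top == 0x2fe0 and limit == 0x3000, AllocateLinearly(0x20) returns the
// object at 0x2fe0 and bumps top to 0x3000; a subsequent
// AllocateLinearly(0x10) computes new_top == 0x3010 > limit and returns
// NULL, telling the caller to fall back to the free list or the slow path.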


// Raw allocation.
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}
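
// Caller sketch (not from the original file; assumes a PagedSpace* space):
// an AllocationResult either holds an object or signals that the allocation
// should be retried after a GC in this space, so callers typically unwrap it
// like this.
//
//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
//   HeapObject* result;
//   if (allocation.To(&result)) {
//     // ... initialize result; the returned memory is uninitialized ...
//   } else {
//     // ... collect garbage in space->identity(), then retry ...
//   }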


// -----------------------------------------------------------------------------
// NewSpace


AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}
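
// Fast-path sketch (not from the original file; hypothetical values): with
// top == 0x1000 and limit == 0x1400, AllocateRaw(0x80) returns the object at
// 0x1000 and bumps top to 0x1080. A request larger than limit - top goes to
// SlowAllocateRaw(), which may advance to a fresh to-space page before
// retrying or signal that a GC is needed.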


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map() ||
         map == heap->raw_unchecked_one_pointer_filler_map() ||
         map == heap->raw_unchecked_two_pointer_filler_map();
}
}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_