spaces.h
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/log.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) {}

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};


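// Usage sketch (illustrative; 'first' and 'count' are placeholders): walking
// a run of consecutive mark bits with MarkBit::Next(). Next() shifts the mask
// within the current 32-bit cell and steps to cell_ + 1 when the mask
// overflows, so a loop like this touches each bit exactly once.
//
//   MarkBit current = first;           // Some MarkBit obtained elsewhere.
//   for (int i = 0; i < count; i++) {
//     current.Clear();                 // Clear the bit in its cell.
//     current = current.Next();        // Within-cell shift or next cell.
//   }
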
// Bitmap is a sequence of cells each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};


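// Usage sketch (illustrative; 'bitmap_start' is a placeholder for the
// bitmap's base address): how an index maps to a (cell, mask) pair. For
// index i the cell is i >> kBitsPerCellLog2 and the mask is
// 1 << (i & kBitIndexMask), which is exactly what MarkBitFromIndex() computes.
//
//   Bitmap* bitmap = Bitmap::FromAddress(bitmap_start);
//   MarkBit bit = bitmap->MarkBitFromIndex(37);  // Cell 1, mask 1 << 5.
//   bit.Set();
//   DCHECK(bit.Get());
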
class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                ~kAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
  }

  MemoryChunk* prev_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
  }

  void set_next_chunk(MemoryChunk* next) {
    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
  }

  void set_prev_chunk(MemoryChunk* prev) {
    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
  }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  void InitializeReservedMemory() { reservation_.Reset(); }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
    // otherwise marking bits are still intact.
    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);


  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }


  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
  // not be performed on that page.
  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
  // not touch the page memory anymore.
  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
  // SWEEPING_PENDING - This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    SWEEPING_DONE,
    SWEEPING_FINALIZE,
    SWEEPING_IN_PROGRESS,
    SWEEPING_PENDING
  };

  ParallelSweepingState parallel_sweeping() {
    return static_cast<ParallelSweepingState>(
        base::Acquire_Load(&parallel_sweeping_));
  }

  void set_parallel_sweeping(ParallelSweepingState state) {
    base::Release_Store(&parallel_sweeping_, state);
  }

  bool TryParallelSweeping() {
    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
                                        SWEEPING_IN_PROGRESS) ==
           SWEEPING_PENDING;
  }

  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
             live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    DCHECK(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar();
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const size_t kHeaderSize =
      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

  size_t size() const { return size_; }

  void set_size(size_t size) { size_ = size; }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() const { return heap_; }

  static const int kFlagsOffset = kPointerSize;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  inline bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  void MarkEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_; }

  static inline void UpdateHighWaterMark(Address mark);

 protected:
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
  // in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  int high_water_mark_;

  base::AtomicWord parallel_sweeping_;

  // PagedSpace free-list statistics.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;

  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

 private:
  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord prev_chunk_;

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);

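// Usage sketch (illustrative; 'addr' is a placeholder): the round trip from a
// heap address to its mark bit, combining the chunk alignment mask, the
// per-chunk bitmap stored right after the header, and the one-bit-per-word
// index helpers declared above.
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);  // Mask low bits.
//   uint32_t index = chunk->AddressToMarkbitIndex(addr);  // Word index.
//   MarkBit mark_bit = chunk->markbits()->MarkBitFromIndex(index);
//   mark_bit.Set();
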
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  // This only works if the object is in fact in a page. See also MemoryChunk::
  // FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // it with kPointerSize first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset to this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that fits in a page. Objects larger than that size
  // are allocated in large object space and are never moved in memory. This
  // also applies to new space allocation, since objects are never migrated
  // from new space to large object space. Takes double alignment into account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

  void ResetFreeListStatistics();

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);

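// Usage sketch (illustrative; 'obj_addr' is a placeholder): because pages are
// kPageSize-aligned, the page/offset decomposition of an address is pure mask
// arithmetic and is reversible without any lookup structures.
//
//   Page* p = Page::FromAddress(obj_addr);
//   int offset = p->Offset(obj_addr);             // In [0, kPageSize).
//   DCHECK(obj_addr == p->OffsetToAddress(offset));
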
class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};


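// Usage sketch (illustrative; 'code_addr' and 'code_size' are placeholders):
// the skip list records, per 8KB region (1 << kRegionSizeLog2 bytes), the
// lowest start address of any object overlapping that region, so a scan can
// begin there instead of at the page start. Recording happens at allocation
// time via Update().
//
//   SkipList::Update(code_addr, code_size);
//   // Later, start iteration near an arbitrary inner address:
//   SkipList* list = Page::FromAddress(code_addr)->skip_list();
//   Address scan_start = list->StartFor(code_addr);
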
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(intptr_t size, PagedSpace* owner,
                     Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                               Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable, Space* space);

  Address ReserveAlignedMemory(size_t requested, size_t alignment,
                               base::VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                base::VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space, AllocationAction action);

  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
                                              size_t reserved_size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {}
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space specific object iterators also implement their own next()
// method, which is used to avoid using virtual functions when
// iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() {}

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() { return Next(); }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner, Address start, Address end,
                         PageMode mode, HeapObjectCallback size_func);
};


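// Usage sketch (illustrative; 'space' is a placeholder for any PagedSpace*):
// iterating every live object in a paged space. Next() skips fillers and the
// linear allocation gap, so the loop below only sees real objects.
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Visit obj; obj->Size() is valid here.
//   }
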
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {}

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == NULL ||
                (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == NULL ||
                (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  INLINE(void set_limit(Address limit)) {
    SLOW_DCHECK(limit == NULL ||
                (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_DCHECK(limit_ == NULL ||
                (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
                    0);
    return limit_;
  }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};


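// Usage sketch (illustrative; 'info' and 'size_in_bytes' are placeholders):
// the bump-pointer fast path that AllocationInfo supports.
// PagedSpace::AllocateLinearly() works along these lines; the check against
// limit() is what forces the slow (free-list) path on overflow.
//
//   Address top = info->top();
//   if (info->limit() - top < size_in_bytes) return NULL;  // Slow path.
//   info->set_top(top + size_in_bytes);  // One pointer bump per object.
//   return top;
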
// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
    DCHECK(size_ >= 0);
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the size)
  // and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    DCHECK(size_ >= 0);
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
    DCHECK(size_ >= 0);
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    DCHECK(size_ >= 0);
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    DCHECK(size_in_bytes >= 0);
    waste_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t max_capacity_;
  intptr_t size_;
  intptr_t waste_;
};


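// Usage sketch (illustrative): the invariant is
//   capacity == size + waste + available,
// where 'available' is implicit (capacity - size - waste). Growing by a page
// and freeing it onto a free list keeps the books balanced:
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kPageSize);      // capacity += N, size += N.
//   stats.DeallocateBytes(Page::kPageSize);  // size -> available.
//   stats.AllocateBytes(64);                 // available -> size.
//   stats.WasteBytes(8);                     // available -> waste.
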
// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode : public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  inline void Zap();

  static inline FreeListNode* cast(Object* object) {
    return reinterpret_cast<FreeListNode*>(object);
  }

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};


// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory() : top_(0), end_(NULL), available_(0) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeListNode* node, int size_in_bytes);

  FreeListNode* PickNodeFromList(int* node_size);
  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);

  intptr_t EvictFreeListItemsInList(Page* p);
  bool ContainsPageFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  FreeListNode* top() const {
    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
  }

  void set_top(FreeListNode* top) {
    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
  }

  FreeListNode** GetEndAddress() { return &end_; }
  FreeListNode* end() const { return end_; }
  void set_end(FreeListNode* end) { end_ = end; }

  int* GetAvailableAddress() { return &available_; }
  int available() const { return available_; }
  void set_available(int available) { available_ = available; }

  base::Mutex* mutex() { return &mutex_; }

  bool IsEmpty() { return top() == 0; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // top_ points to the top FreeListNode* in the free list category.
  base::AtomicWord top_;
  FreeListNode* end_;
  base::Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
};


// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.

// The old space free list is organized in categories.
// 1-31 words: Such small free areas are discarded for efficiency reasons.
// They can be reclaimed by the compactor. However the distance between top
// and limit may be this small.
// 32-255 words: There is a list of spaces this large. It is used for top and
// limit when the object we need to allocate is 1-31 words in size. These
// spaces are called small.
// 256-2047 words: There is a list of spaces this large. It is used for top and
// limit when the object we need to allocate is 32-255 words in size. These
// spaces are called medium.
// 2048-16383 words: There is a list of spaces this large. It is used for top
// and limit when the object we need to allocate is 256-2047 words in size.
// These spaces are called large.
// At least 16384 words: This list is for objects of 2048 words or larger.
// Empty pages are added to this list. These spaces are called huge.
class FreeList {
 public:
  explicit FreeList(PagedSpace* owner);

  intptr_t Concatenate(FreeList* free_list);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // This method returns how much memory can be allocated after freeing
  // maximum_freed memory.
  static inline int GuaranteedAllocatable(int maximum_freed) {
    if (maximum_freed < kSmallListMin) {
      return 0;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

  bool IsEmpty() {
    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
           large_list_.IsEmpty() && huge_list_.IsEmpty();
  }

#ifdef DEBUG
  void Zap();
  intptr_t SumFreeLists();
  bool IsVeryLong();
#endif

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};


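// Dispatch sketch (illustrative): which category a freed block lands in,
// mirroring the kSmallListMin/..Max limits above. FreeList::Free() performs
// this dispatch internally.
//
//   size < kSmallListMin (32 words)  -> too small, accounted as waste
//   kSmallListMin .. kSmallListMax   -> small_list_
//   up to kMediumListMax             -> medium_list_
//   up to kLargeListMax              -> large_list_
//   anything larger                  -> huge_list_
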
class AllocationResult {
 public:
  // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
      : object_(object),
        retry_space_(INVALID_SPACE) {}

  AllocationResult() : object_(NULL), retry_space_(NEW_SPACE) {}

  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

  Object* ToObjectChecked() {
    CHECK(!IsRetry());
    return object_;
  }

  AllocationSpace RetrySpace() {
    DCHECK(IsRetry());
    return retry_space_;
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(NULL), retry_space_(space) {}

  Object* object_;
  AllocationSpace retry_space_;
};


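// Usage sketch (illustrative; 'space' and 'size_in_bytes' are placeholders
// for a real allocation site): the intended calling convention.
//
//   HeapObject* obj = NULL;
//   AllocationResult result = space->AllocateRaw(size_in_bytes);
//   if (!result.To(&obj)) {
//     // Retry case: result.RetrySpace() names the space to GC before
//     // retrying; ToObjectChecked() would CHECK-fail here.
//   }
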
1648 class PagedSpace : public Space {
1649  public:
1650  // Creates a space with a maximum capacity, and an id.
1651  PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
1653 
1654  virtual ~PagedSpace() {}
1655 
1656  // Set up the space using the given address range of virtual memory (from
1657  // the memory allocator's initial chunk) if possible. If the block of
1658  // addresses is not big enough to contain a single page-aligned page, a
1659  // fresh chunk will be allocated.
1660  bool SetUp();
1661 
1662  // Returns true if the space has been successfully set up and not
1663  // subsequently torn down.
1664  bool HasBeenSetUp();
1665 
1666  // Cleans up the space, frees all pages in this space except those belonging
1667  // to the initial chunk, uncommits addresses in the initial chunk.
1668  void TearDown();
1669 
1670  // Checks whether an object/address is in this space.
1671  inline bool Contains(Address a);
1672  bool Contains(HeapObject* o) { return Contains(o->address()); }
1673 
1674  // Given an address occupied by a live object, return that object if it is
1675  // in this space, or a Smi if it is not. The implementation iterates over
1676  // objects in the page containing the address, the cost is linear in the
1677  // number of objects in the page. It may be slow.
1678  Object* FindObject(Address addr);
1679 
1680  // During boot the free_space_map is created, and afterwards we may need
1681  // to write it into the free list nodes that were already created.
1682  void RepairFreeListsAfterBoot();
1683 
1684  // Prepares for a mark-compact GC.
1685  void PrepareForMarkCompact();
1686 
1687  // Current capacity without growing (Size() + Available()).
1688  intptr_t Capacity() { return accounting_stats_.Capacity(); }
1689 
1690  // Total amount of memory committed for this space. For paged
1691  // spaces this equals the capacity.
1692  intptr_t CommittedMemory() { return Capacity(); }
1693 
1694  // The maximum amount of memory ever committed for this space.
1695  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
1696 
1697  // Approximate amount of physical memory committed for this space.
1698  size_t CommittedPhysicalMemory();
1699 
1700  struct SizeStats {
1701  intptr_t Total() {
1703  }
1704 
1705  intptr_t small_size_;
1706  intptr_t medium_size_;
1707  intptr_t large_size_;
1708  intptr_t huge_size_;
1709  };
1710 
1711  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1712  void ResetFreeListStatistics();
1713 
1714  // Sets the capacity, the available space and the wasted space to zero.
1715  // The stats are rebuilt during sweeping by adding each page to the
1716  // capacity and the size when it is encountered. As free spaces are
1717  // discovered during the sweeping they are subtracted from the size and added
1718  // to the available and wasted totals.
1719  void ClearStats() {
1720  accounting_stats_.ClearSizeWaste();
1722  }
1723 
1724  // Increases the number of available bytes of that space.
1725  void AddToAccountingStats(intptr_t bytes) {
1726  accounting_stats_.DeallocateBytes(bytes);
1727  }
1728 
1729  // Available bytes without growing. These are the bytes on the free list.
1730  // The bytes in the linear allocation area are not included in this total
1731  // because updating the stats would slow down allocation. New pages are
1732  // immediately added to the free list so they show up here.
1733  intptr_t Available() { return free_list_.available(); }
1734 
1735  // Allocated bytes in this space. Garbage bytes that were not found due to
1736  // concurrent sweeping are counted as being allocated! The bytes in the
1737  // current linear allocation area (between top and limit) are also counted
1738  // here.
1739  virtual intptr_t Size() { return accounting_stats_.Size(); }
1740 
1741  // As size, but the bytes in lazily swept pages are estimated and the bytes
1742  // in the current linear allocation area are not included.
1743  virtual intptr_t SizeOfObjects();
1744 
1745  // Wasted bytes in this space. These are just the bytes that were thrown away
1746  // due to being too small to use for allocation. They do not include the
1747  // free bytes that were not found at all due to lazy sweeping.
1748  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1749 
1750  // Returns the allocation pointer in this space.
1751  Address top() { return allocation_info_.top(); }
1752  Address limit() { return allocation_info_.limit(); }
1753 
1754  // The allocation top address.
1756 
1757  // The allocation limit address.
1760  }
1761 
1762  // Allocate the requested number of bytes in the space if possible, return a
1763  // failure object if not.
1764  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
1765 
1766  // Give a block of memory to the space's free list. It might be added to
1767  // the free list or accounted as waste.
1768  // If add_to_freelist is false then just accounting stats are updated and
1769  // no attempt to add area to free list is made.
1770  int Free(Address start, int size_in_bytes) {
1771  int wasted = free_list_.Free(start, size_in_bytes);
1772  accounting_stats_.DeallocateBytes(size_in_bytes);
1773  accounting_stats_.WasteBytes(wasted);
1774  return size_in_bytes - wasted;
1775  }
1776 
1778 
1779  // Set space allocation info.
1781  DCHECK(top == limit ||
1784  allocation_info_.set_top(top);
1785  allocation_info_.set_limit(limit);
1786  }
1787 
1788  // Empty space allocation info, returning unused area to free list.
1790  // Mark the old linear allocation area with a free space map so it can be
1791  // skipped when scanning the heap.
1792  int old_linear_size = static_cast<int>(limit() - top());
1793  Free(top(), old_linear_size);
1795  }
1796 
1797  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
1798 
1799  void IncreaseCapacity(int size);
1800 
1801  // Releases an unused page and shrinks the space.
1802  void ReleasePage(Page* page);
1803 
1804  // The dummy page that anchors the linked list of pages.
1805  Page* anchor() { return &anchor_; }
1806 
1807 #ifdef VERIFY_HEAP
1808  // Verify integrity of this space.
1809  virtual void Verify(ObjectVisitor* visitor);
1810 
1811  // Overridden by subclasses to verify space-specific object
1812  // properties (e.g., only maps or free-list nodes are in map space).
1813  virtual void VerifyObject(HeapObject* obj) {}
1814 #endif
1815 
1816 #ifdef DEBUG
1817  // Print meta info and objects in this space.
1818  virtual void Print();
1819 
1820  // Reports statistics for the space
1821  void ReportStatistics();
1822 
1823  // Report code object related statistics
1824  void CollectCodeStatistics();
1825  static void ReportCodeStatistics(Isolate* isolate);
1826  static void ResetCodeStatistics(Isolate* isolate);
1827 #endif
1828 
1829  // Evacuation candidates are swept by evacuator. Needs to return a valid
1830  // result before _and_ after evacuation has finished.
1832  return !p->IsEvacuationCandidate() &&
1834  }
1835 
1836  void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
1837 
1840  unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1841  }
1842 
1843  void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
1844 
1847  unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1848  }
1849 
1851 
1852  // This function tries to steal size_in_bytes memory from the sweeper threads
1853  // free-lists. If it does not succeed stealing enough memory, it will wait
1854  // for the sweeper threads to finish sweeping.
1855  // It returns true when sweeping is completed and false otherwise.
1856  bool EnsureSweeperProgress(intptr_t size_in_bytes);
1857 
1859 
1861 
1862  Page* FirstPage() { return anchor_.next_page(); }
1863  Page* LastPage() { return anchor_.prev_page(); }
1864 
1866 
1867  bool CanExpand();
1868 
1869  // Returns the number of total pages in this space.
1870  int CountTotalPages();
1871 
1872  // Return size of allocatable area on a page in this space.
1873  inline int AreaSize() { return area_size_; }
1874 
1875  void CreateEmergencyMemory();
1876  void FreeEmergencyMemory();
1877  void UseEmergencyMemory();
1878 
1880 
1881  protected:
1883 
1885 
1886  // Maximum capacity of this space.
1887  intptr_t max_capacity_;
1888 
1889  intptr_t SizeOfFirstPage();
1890 
1891  // Accounting information for this space.
1892  AllocationStats accounting_stats_;
1893 
1894  // The dummy page that anchors the double linked list of pages.
1896 
1897  // The space's free list.
1899 
1900  // Normal allocation information.
1902 
1903  // The number of free bytes which could be reclaimed by advancing the
1904  // concurrent sweeper threads.
1906 
1907  // The sweeper threads iterate over the list of pointer and data space pages
1908  // and sweep these pages concurrently. They will stop sweeping after the
1909  // end_of_unswept_pages_ page.
1910  Page* end_of_unswept_pages_;
1911 
1912  // Emergency memory is the memory of a full page for a given space, allocated
1913  // conservatively before evacuating a page. If compaction fails due to an
1914  // out-of-memory error, the emergency memory can be used to complete
1915  // compaction. If not used, the emergency memory is released after compaction.
1916  MemoryChunk* emergency_memory_;
1917 
1918  // Expands the space by allocating a fixed number of pages. Returns false if
1919  // it cannot allocate requested number of pages from OS, or if the hard heap
1920  // size limit has been hit.
1921  bool Expand();
1922 
1923  // Generic fast case allocation function that tries linear allocation at the
1924  // address denoted by top in allocation_info_.
1925  inline HeapObject* AllocateLinearly(int size_in_bytes);
1926 
1927  // If sweeping is still in progress try to sweep unswept pages. If that is
1928  // not successful, wait for the sweeper threads and re-try free-list
1929  // allocation.
1930  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
1931  int size_in_bytes);
1932 
1933  // Slow path of AllocateRaw. This function is space-dependent.
1934  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1935 
1936  friend class PageIterator;
1937  friend class MarkCompactCollector;
1938 };
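// Minimal allocation sketch against the PagedSpace interface above (assumed
// caller code; AllocationResult::To() is assumed to unwrap a successful
// allocation):
//
//   AllocationResult result = space->AllocateRaw(size_in_bytes);
//   HeapObject* obj = NULL;
//   if (!result.To(&obj)) {
//     // Allocation failed; the caller is expected to trigger a GC and retry.
//   }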
1939 
1940 
1941 class NumberAndSizeInfo BASE_EMBEDDED {
1942  public:
1943  NumberAndSizeInfo() : number_(0), bytes_(0) {}
1944 
1945  int number() const { return number_; }
1946  void increment_number(int num) { number_ += num; }
1947 
1948  int bytes() const { return bytes_; }
1949  void increment_bytes(int size) { bytes_ += size; }
1950 
1951  void clear() {
1952  number_ = 0;
1953  bytes_ = 0;
1954  }
1955 
1956  private:
1957  int number_;
1958  int bytes_;
1959 };
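// Usage sketch for the counter pair above (assumed caller code; object_size
// is a hypothetical name):
//
//   NumberAndSizeInfo info;
//   info.increment_number(1);          // one more object in this bucket
//   info.increment_bytes(object_size); // and its size in bytes
//   // info.number() == 1, info.bytes() == object_size; clear() resets both.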
1960 
1961 
1962 // HistogramInfo class for recording a single "bar" of a histogram. This
1963 // class is used for collecting statistics to print to the log file.
1964 class HistogramInfo : public NumberAndSizeInfo {
1965  public:
1966  HistogramInfo() : name_(NULL) {}
1967 
1968  const char* name() { return name_; }
1969  void set_name(const char* name) { name_ = name; }
1970 
1971  private:
1972  const char* name_;
1973 };
1974 
1975 
1976 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
1977 
1978 
1979 class SemiSpace;
1980 
1981 
1982 class NewSpacePage : public MemoryChunk {
1983  public:
1984  // GC related flags copied from from-space to to-space when
1985  // flipping semispaces.
1986  static const intptr_t kCopyOnFlipFlagsMask =
1987  (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1988  (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1989  (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1990 
1991  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
1992 
1993  inline NewSpacePage* next_page() const {
1994  return static_cast<NewSpacePage*>(next_chunk());
1995  }
1996 
1997  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
1998 
1999  inline NewSpacePage* prev_page() const {
2000  return static_cast<NewSpacePage*>(prev_chunk());
2001  }
2002 
2003  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
2004 
2005  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
2006 
2007  bool is_anchor() { return !this->InNewSpace(); }
2008 
2009  static bool IsAtStart(Address addr) {
2010  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2011  kObjectStartOffset;
2012  }
2013 
2014  static bool IsAtEnd(Address addr) {
2015  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2016  }
2017 
2018  Address address() { return reinterpret_cast<Address>(this); }
2019 
2020  // Finds the NewSpacePage containing the given address.
2021  static inline NewSpacePage* FromAddress(Address address_in_page) {
2022  Address page_start =
2023  reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2024  ~Page::kPageAlignmentMask);
2025  NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2026  return page;
2027  }
2028 
2029  // Find the page for a limit address. A limit address is either an address
2030  // inside a page, or the address right after the last byte of a page.
2031  static inline NewSpacePage* FromLimit(Address address_limit) {
2032  return NewSpacePage::FromAddress(address_limit - 1);
2033  }
2034 
2035  // Checks if address1 and address2 are on the same new space page.
2036  static inline bool OnSamePage(Address address1, Address address2) {
2037  return NewSpacePage::FromAddress(address1) ==
2038  NewSpacePage::FromAddress(address2);
2039  }
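// Worked example of the masking above (illustrative values, assuming 1MB
// pages, i.e. kPageAlignmentMask == 0xFFFFF): FromAddress(0x4D2ABCDE) clears
// the low 20 bits and yields the page at 0x4D200000. FromLimit(0x4D300000)
// first backs up one byte, so a one-past-the-end limit still maps to the
// page it terminates rather than to the next page.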
2040 
2041  private:
2042  // Create a NewSpacePage object that is only used as anchor
2043  // for the doubly-linked list of real pages.
2044  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
2045 
2046  static NewSpacePage* Initialize(Heap* heap, Address start,
2047  SemiSpace* semi_space);
2048 
2049  // Initialize a fake NewSpacePage used as sentinel at the ends
2050  // of a doubly-linked list of real NewSpacePages.
2051  // Only uses the prev/next links, and sets flags to not be in new-space.
2052  void InitializeAsAnchor(SemiSpace* owner);
2053 
2054  friend class SemiSpace;
2055  friend class SemiSpaceIterator;
2056 };
2057 
2058 
2059 // -----------------------------------------------------------------------------
2060 // SemiSpace in young generation
2061 //
2062 // A semispace is a contiguous chunk of memory holding page-like memory
2063 // chunks. The mark-compact collector uses the memory of the first page in
2064 // the from space as a marking stack when tracing live objects.
2065 
2066 class SemiSpace : public Space {
2067  public:
2068  // Constructor.
2069  SemiSpace(Heap* heap, SemiSpaceId semispace)
2070  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2071  start_(NULL),
2072  age_mark_(NULL),
2073  id_(semispace),
2074  anchor_(this),
2075  current_page_(NULL) {}
2076 
2077  // Sets up the semispace using the given chunk.
2078  void SetUp(Address start, int initial_capacity, int maximum_capacity);
2079 
2080  // Tear down the space. Heap memory was not allocated by the space, so it
2081  // is not deallocated here.
2082  void TearDown();
2083 
2084  // True if the space has been set up but not torn down.
2085  bool HasBeenSetUp() { return start_ != NULL; }
2086 
2087  // Grow the semispace to the new capacity. The new capacity
2088  // requested must be larger than the current capacity and less than
2089  // the maximum capacity.
2090  bool GrowTo(int new_capacity);
2091 
2092  // Shrinks the semispace to the new capacity. The new capacity
2093  // requested must be more than the amount of used memory in the
2094  // semispace and less than the current capacity.
2095  bool ShrinkTo(int new_capacity);
2096 
2097  // Returns the start address of the first page of the space.
2098  Address space_start() {
2099  DCHECK(anchor_.next_page() != &anchor_);
2100  return anchor_.next_page()->area_start();
2101  }
2102 
2103  // Returns the start address of the current page of the space.
2104  Address page_low() { return current_page_->area_start(); }
2105 
2106  // Returns one past the end address of the space.
2107  Address space_end() { return anchor_.prev_page()->area_end(); }
2108 
2109  // Returns one past the end address of the current page of the space.
2110  Address page_high() { return current_page_->area_end(); }
2111 
2112  bool AdvancePage() {
2113  NewSpacePage* next_page = current_page_->next_page();
2114  if (next_page == anchor()) return false;
2115  current_page_ = next_page;
2116  return true;
2117  }
2118 
2119  // Resets the space to using the first page.
2120  void Reset();
2121 
2122  // Age mark accessors.
2123  Address age_mark() { return age_mark_; }
2124  void set_age_mark(Address mark);
2125 
2126  // True if the address is in the address range of this semispace (not
2127  // necessarily below the allocation pointer).
2128  bool Contains(Address a) {
2129  return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2130  reinterpret_cast<uintptr_t>(start_);
2131  }
2132 
2133  // True if the object is a heap object in the address range of this
2134  // semispace (not necessarily below the allocation pointer).
2135  bool Contains(Object* o) {
2136  return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
2137  }
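// Worked example of the containment test above (illustrative, assuming a
// 4MB semispace starting at 0x10000000): address_mask_ == ~(4MB - 1) ==
// 0xFFC00000, so Contains(0x10123456) computes
// 0x10123456 & 0xFFC00000 == 0x10000000 == start_, i.e. true.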
2138 
2139  // If we don't have these here then SemiSpace will be abstract. However
2140  // they should never be called.
2141  virtual intptr_t Size() {
2142  UNREACHABLE();
2143  return 0;
2144  }
2145 
2146  bool is_committed() { return committed_; }
2147  bool Commit();
2148  bool Uncommit();
2149 
2150  NewSpacePage* first_page() { return anchor_.next_page(); }
2151  NewSpacePage* current_page() { return current_page_; }
2152 
2153 #ifdef VERIFY_HEAP
2154  virtual void Verify();
2155 #endif
2156 
2157 #ifdef DEBUG
2158  virtual void Print();
2159  // Validate a range of addresses in a SemiSpace.
2160  // The "from" address must be on a page prior to the "to" address,
2161  // in the linked page order, or it must be earlier on the same page.
2162  static void AssertValidRange(Address from, Address to);
2163 #else
2164  // Do nothing.
2165  inline static void AssertValidRange(Address from, Address to) {}
2166 #endif
2167 
2168  // Returns the current total capacity of the semispace.
2169  int TotalCapacity() { return total_capacity_; }
2170 
2171  // Returns the maximum total capacity of the semispace.
2172  int MaximumTotalCapacity() { return maximum_total_capacity_; }
2173 
2174  // Returns the initial capacity of the semispace.
2175  int InitialTotalCapacity() { return initial_total_capacity_; }
2176 
2177  SemiSpaceId id() { return id_; }
2178 
2179  static void Swap(SemiSpace* from, SemiSpace* to);
2180 
2181  // Returns the maximum amount of memory ever committed by the semispace.
2182  size_t MaximumCommittedMemory() { return maximum_committed_; }
2183 
2184  // Approximate amount of physical memory committed for this space.
2185  size_t CommittedPhysicalMemory();
2186 
2187  private:
2188  // Flips the semispace between being from-space and to-space.
2189  // Copies the flags into the masked positions on all pages in the space.
2190  void FlipPages(intptr_t flags, intptr_t flag_mask);
2191 
2192  // Updates Capacity and MaximumCommitted based on new capacity.
2193  void SetCapacity(int new_capacity);
2194 
2195  NewSpacePage* anchor() { return &anchor_; }
2196 
2197  // The current and maximum total capacity of the space.
2198  int total_capacity_;
2199  int maximum_total_capacity_;
2200  int initial_total_capacity_;
2201 
2202  intptr_t maximum_committed_;
2203 
2204  // The start address of the space.
2205  Address start_;
2206  // Used to govern object promotion during mark-compact collection.
2207  Address age_mark_;
2208 
2209  // Masks and comparison values to test for containment in this semispace.
2210  uintptr_t address_mask_;
2211  uintptr_t object_mask_;
2212  uintptr_t object_expected_;
2213 
2214  bool committed_;
2215  SemiSpaceId id_;
2216 
2217  NewSpacePage anchor_;
2218  NewSpacePage* current_page_;
2219 
2220  friend class SemiSpaceIterator;
2221  friend class NewSpacePageIterator;
2222 
2223  public:
2224  TRACK_MEMORY("SemiSpace")
2225 };
2226 
2227 
2228 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2229 // semispace of the heap's new space. It iterates over the objects in the
2230 // semispace from a given start address (defaulting to the bottom of the
2231 // semispace) to the top of the semispace. New objects allocated after the
2232 // iterator is created are not iterated.
2233 class SemiSpaceIterator : public ObjectIterator {
2234  public:
2235  // Create an iterator over the objects in the given space. If no start
2236  // address is given, the iterator starts from the bottom of the space. If
2237  // no size function is given, the iterator calls Object::Size().
2238 
2239  // Iterate over all of allocated to-space.
2240  explicit SemiSpaceIterator(NewSpace* space);
2241  // Iterate over all of allocated to-space, with a custom size function.
2242  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2243  // Iterate over part of allocated to-space, from start to the end
2244  // of allocation.
2245  SemiSpaceIterator(NewSpace* space, Address start);
2246  // Iterate from one address to another in the same semi-space.
2247  SemiSpaceIterator(Address from, Address to);
2248 
2249  HeapObject* Next() {
2250  if (current_ == limit_) return NULL;
2251  if (NewSpacePage::IsAtEnd(current_)) {
2252  NewSpacePage* page = NewSpacePage::FromLimit(current_);
2253  page = page->next_page();
2254  DCHECK(!page->is_anchor());
2255  current_ = page->area_start();
2256  if (current_ == limit_) return NULL;
2257  }
2258 
2259  HeapObject* object = HeapObject::FromAddress(current_);
2260  int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2261 
2262  current_ += size;
2263  return object;
2264  }
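// Assumed usage sketch for Next() above: walk every object in to-space.
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL;
//        object = it.Next()) {
//     // visit object
//   }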
2265 
2266  // Implementation of the ObjectIterator functions.
2267  virtual HeapObject* next_object() { return Next(); }
2268 
2269  private:
2270  void Initialize(Address start, Address end, HeapObjectCallback size_func);
2271 
2272  // The current iteration point.
2273  Address current_;
2274  // The end of iteration.
2275  Address limit_;
2276  // The callback function.
2277  HeapObjectCallback size_func_;
2278 };
2279 
2280 
2281 // -----------------------------------------------------------------------------
2282 // A PageIterator iterates the pages in a semi-space.
2283 class NewSpacePageIterator BASE_EMBEDDED {
2284  public:
2285  // Make an iterator that runs over all pages in to-space.
2286  inline explicit NewSpacePageIterator(NewSpace* space);
2287 
2288  // Make an iterator that runs over all pages in the given semispace,
2289  // even those not used in allocation.
2290  inline explicit NewSpacePageIterator(SemiSpace* space);
2291 
2292  // Make iterator that iterates from the page containing start
2293  // to the page that contains limit in the same semispace.
2294  inline NewSpacePageIterator(Address start, Address limit);
2295 
2296  inline bool has_next();
2297  inline NewSpacePage* next();
2298 
2299  private:
2300  NewSpacePage* prev_page_; // Previous page returned.
2301  // Next page that will be returned. Cached here so that we can use this
2302  // iterator for operations that deallocate pages.
2303  NewSpacePage* next_page_;
2304  // Last page returned.
2305  NewSpacePage* last_page_;
2306 };
2307 
2308 
2309 // -----------------------------------------------------------------------------
2310 // The young generation space.
2311 //
2312 // The new space consists of a contiguous pair of semispaces. It simply
2313 // forwards most functions to the appropriate semispace.
2314 
2315 class NewSpace : public Space {
2316  public:
2317  // Constructor.
2318  explicit NewSpace(Heap* heap)
2319  : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2320  to_space_(heap, kToSpace),
2321  from_space_(heap, kFromSpace),
2322  reservation_(),
2323  inline_allocation_limit_step_(0) {}
2324 
2325  // Sets up the new space using the given chunk.
2326  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
2327 
2328  // Tears down the space. Heap memory was not allocated by the space, so it
2329  // is not deallocated here.
2330  void TearDown();
2331 
2332  // True if the space has been set up but not torn down.
2333  bool HasBeenSetUp() {
2334  return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2335  }
2336 
2337  // Flip the pair of spaces.
2338  void Flip();
2339 
2340  // Grow the capacity of the semispaces. Assumes that they are not at
2341  // their maximum capacity.
2342  void Grow();
2343 
2344  // Shrink the capacity of the semispaces.
2345  void Shrink();
2346 
2347  // True if the address or object lies in the address range of either
2348  // semispace (not necessarily below the allocation pointer).
2349  bool Contains(Address a) {
2350  return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2351  reinterpret_cast<uintptr_t>(start_);
2352  }
2353 
2354  bool Contains(Object* o) {
2355  Address a = reinterpret_cast<Address>(o);
2356  return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2357  }
2358 
2359  // Return the allocated bytes in the active semispace.
2360  virtual intptr_t Size() {
2361  return pages_used_ * NewSpacePage::kAreaSize +
2362  static_cast<int>(top() - to_space_.page_low());
2363  }
2364 
2365  // The same, but returning an int. We have to have the one that returns
2366  // intptr_t because it is inherited, but if we know we are dealing with the
2367  // new space, which can't get as big as the other spaces, then this is useful:
2368  int SizeAsInt() { return static_cast<int>(Size()); }
2369 
2370  // Return the allocatable capacity of a semispace.
2371  intptr_t Capacity() {
2372  SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2373  return (to_space_.TotalCapacity() / Page::kPageSize) *
2374  NewSpacePage::kAreaSize;
2375  }
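// Example of the arithmetic above (illustrative, assuming 1MB pages): a 4MB
// semispace spans 4 pages, so Capacity() returns 4 * NewSpacePage::kAreaSize,
// i.e. slightly under 4MB once each page's non-allocatable header is
// excluded.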
2376 
2377  // Return the current size of a semispace, allocatable and non-allocatable
2378  // memory.
2379  intptr_t TotalCapacity() {
2380  DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2381  return to_space_.TotalCapacity();
2382  }
2383 
2384  // Return the total amount of memory committed for new space.
2385  intptr_t CommittedMemory() {
2386  if (from_space_.is_committed()) return 2 * Capacity();
2387  return TotalCapacity();
2388  }
2389 
2390  // Return the maximum amount of memory ever committed for new space.
2391  intptr_t MaximumCommittedMemory() {
2392  return to_space_.MaximumCommittedMemory() +
2393  from_space_.MaximumCommittedMemory();
2394  }
2395 
2396  // Approximate amount of physical memory committed for this space.
2397  size_t CommittedPhysicalMemory();
2398 
2399  // Return the available bytes without growing.
2400  intptr_t Available() { return Capacity() - Size(); }
2401 
2402  // Return the maximum capacity of a semispace.
2403  int MaximumCapacity() {
2404  DCHECK(to_space_.MaximumTotalCapacity() ==
2405  from_space_.MaximumTotalCapacity());
2406  return to_space_.MaximumTotalCapacity();
2407  }
2408 
2409  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2410 
2411  // Returns the initial capacity of a semispace.
2412  int InitialTotalCapacity() {
2413  DCHECK(to_space_.InitialTotalCapacity() ==
2414  from_space_.InitialTotalCapacity());
2415  return to_space_.InitialTotalCapacity();
2416  }
2417 
2418  // Return the address of the allocation pointer in the active semispace.
2419  Address top() {
2420  DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2421  return allocation_info_.top();
2422  }
2423 
2424  void set_top(Address top) {
2425  DCHECK(to_space_.current_page()->ContainsLimit(top));
2426  allocation_info_.set_top(top);
2427  }
2428 
2429  // Return the address of the allocation pointer limit in the active semispace.
2430  Address limit() {
2431  DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2432  return allocation_info_.limit();
2433  }
2434 
2435  // Return the address of the first object in the active semispace.
2436  Address bottom() { return to_space_.space_start(); }
2437 
2438  // Get the age mark of the inactive semispace.
2439  Address age_mark() { return from_space_.age_mark(); }
2440  // Set the age mark in the active semispace.
2441  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2442 
2443  // The start address of the space and a bit mask. Anding an address in the
2444  // new space with the mask will result in the start address.
2445  Address start() { return start_; }
2446  uintptr_t mask() { return address_mask_; }
2447 
2448  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2449  DCHECK(Contains(addr));
2450  DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
2451  IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2452  return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2453  }
2454 
2455  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2456  return reinterpret_cast<Address>(index << kPointerSizeLog2);
2457  }
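// Worked example (illustrative, assuming 8-byte pointers, so
// kPointerSizeLog2 == 3): an address at start_ + 0x40 maps to markbit index
// 0x40 >> 3 == 8, and MarkbitIndexToAddress(8) maps back to offset 0x40.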
2458 
2459  // The allocation top and limit address.
2460  Address* allocation_top_address() { return allocation_info_.top_address(); }
2461 
2462  // The allocation limit address.
2463  Address* allocation_limit_address() {
2464  return allocation_info_.limit_address();
2465  }
2466 
2467  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
2468 
2469  // Reset the allocation pointer to the beginning of the active semispace.
2470  void ResetAllocationInfo();
2471 
2472  void UpdateInlineAllocationLimit(int size_in_bytes);
2473  void LowerInlineAllocationLimit(intptr_t step) {
2474  inline_allocation_limit_step_ = step;
2475  UpdateInlineAllocationLimit(0);
2476  top_on_previous_step_ = step ? allocation_info_.top() : 0;
2477  }
2478 
2479  // Get the extent of the inactive semispace (for use as a marking stack,
2480  // or to zap it). Notice: space-addresses are not necessarily on the
2481  // same page, so FromSpaceStart() might be above FromSpaceEnd().
2482  Address FromSpacePageLow() { return from_space_.page_low(); }
2483  Address FromSpacePageHigh() { return from_space_.page_high(); }
2484  Address FromSpaceStart() { return from_space_.space_start(); }
2485  Address FromSpaceEnd() { return from_space_.space_end(); }
2486 
2487  // Get the extent of the active semispace's pages' memory.
2488  Address ToSpaceStart() { return to_space_.space_start(); }
2489  Address ToSpaceEnd() { return to_space_.space_end(); }
2490 
2491  inline bool ToSpaceContains(Address address) {
2492  return to_space_.Contains(address);
2493  }
2494  inline bool FromSpaceContains(Address address) {
2495  return from_space_.Contains(address);
2496  }
2497 
2498  // True if the object is a heap object in the address range of the
2499  // respective semispace (not necessarily below the allocation pointer of the
2500  // semispace).
2501  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2502  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2503 
2504  // Try to switch the active semispace to a new, empty, page.
2505  // Returns false if this isn't possible or reasonable (i.e., there
2506  // are no pages, or the current page is already empty), or true
2507  // if successful.
2508  bool AddFreshPage();
2509 
2510 #ifdef VERIFY_HEAP
2511  // Verify the active semispace.
2512  virtual void Verify();
2513 #endif
2514 
2515 #ifdef DEBUG
2516  // Print the active semispace.
2517  virtual void Print() { to_space_.Print(); }
2518 #endif
2519 
2520  // Iterates the active semispace to collect statistics.
2521  void CollectStatistics();
2522  // Reports previously collected statistics of the active semispace.
2523  void ReportStatistics();
2524  // Clears previously collected statistics.
2525  void ClearHistograms();
2526 
2527  // Record the allocation or promotion of a heap object. Note that we don't
2528  // record every single allocation, but only those that happen in the
2529  // to space during a scavenge GC.
2530  void RecordAllocation(HeapObject* obj);
2531  void RecordPromotion(HeapObject* obj);
2532 
2533  // Return whether the operation succeeded.
2534  bool CommitFromSpaceIfNeeded() {
2535  if (from_space_.is_committed()) return true;
2536  return from_space_.Commit();
2537  }
2538 
2539  bool UncommitFromSpace() {
2540  if (!from_space_.is_committed()) return true;
2541  return from_space_.Uncommit();
2542  }
2543 
2544  inline intptr_t inline_allocation_limit_step() {
2545  return inline_allocation_limit_step_;
2546  }
2547 
2548  SemiSpace* active_space() { return &to_space_; }
2549 
2550  private:
2551  // Update allocation info to match the current to-space page.
2552  void UpdateAllocationInfo();
2553 
2554  Address chunk_base_;
2555  uintptr_t chunk_size_;
2556 
2557  // The semispaces.
2558  SemiSpace to_space_;
2559  SemiSpace from_space_;
2560  base::VirtualMemory reservation_;
2561  int pages_used_;
2562 
2563  // Start address and bit mask for containment testing.
2564  Address start_;
2565  uintptr_t address_mask_;
2566  uintptr_t object_mask_;
2567  uintptr_t object_expected_;
2568 
2569  // Allocation pointer and limit for normal allocation and allocation during
2570  // mark-compact collection.
2571  AllocationInfo allocation_info_;
2572 
2573  // When incremental marking is active we will set allocation_info_.limit
2574  // to be lower than actual limit and then will gradually increase it
2575  // in steps to guarantee that we do incremental marking steps even
2576  // when all allocation is performed from inlined generated code.
2577  intptr_t inline_allocation_limit_step_;
2578 
2579  Address top_on_previous_step_;
2580 
2581  HistogramInfo* allocated_histogram_;
2582  HistogramInfo* promoted_histogram_;
2583 
2584  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
2585 
2586  friend class SemiSpaceIterator;
2587 
2588  public:
2589  TRACK_MEMORY("NewSpace")
2590 };
2591 
2592 
2593 // -----------------------------------------------------------------------------
2594 // Old object space (excluding map objects)
2595 
2596 class OldSpace : public PagedSpace {
2597  public:
2598  // Creates an old space object with a given maximum capacity.
2599  // The constructor does not allocate pages from OS.
2600  OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
2601  Executability executable)
2602  : PagedSpace(heap, max_capacity, id, executable) {}
2603 
2604  public:
2605  TRACK_MEMORY("OldSpace")
2606 };
2607 
2608 
2609 // For contiguous spaces, top should be in the space (or at the end) and limit
2610 // should be the end of the space.
2611 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2612  SLOW_DCHECK((space).page_low() <= (info).top() && \
2613  (info).top() <= (space).page_high() && \
2614  (info).limit() <= (space).page_high())
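// Assumed usage of the macro above (caller code): verify an AllocationInfo
// against its semispace before trusting top/limit.
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);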
2615 
2616 
2617 // -----------------------------------------------------------------------------
2618 // Old space for all map objects
2619 
2620 class MapSpace : public PagedSpace {
2621  public:
2622  // Creates a map space object with a maximum capacity.
2623  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2624  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2625  max_map_space_pages_(kMaxMapPageIndex - 1) {}
2626 
2627  // Given an index, returns the page address.
2628  // TODO(1600): this limit is artificial just to keep code compilable
2629  static const int kMaxMapPageIndex = 1 << 16;
2630 
2631  virtual int RoundSizeDownToObjectAlignment(int size) {
2632  if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2633  return RoundDown(size, Map::kSize);
2634  } else {
2635  return (size / Map::kSize) * Map::kSize;
2636  }
2637  }
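// Worked example of the rounding above (illustrative sizes): if Map::kSize
// were 88 bytes (not a power of two), RoundSizeDownToObjectAlignment(1000)
// returns (1000 / 88) * 88 == 968; a power-of-two Map::kSize would instead
// take the cheaper RoundDown() masking path.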
2638 
2639  protected:
2640  virtual void VerifyObject(HeapObject* obj);
2641 
2642  private:
2643  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2644 
2645  // Do map space compaction if there is a page gap.
2646  int CompactionThreshold() {
2647  return kMapsPerPage * (max_map_space_pages_ - 1);
2648  }
2649 
2650  const int max_map_space_pages_;
2651 
2652  public:
2653  TRACK_MEMORY("MapSpace")
2654 };
2655 
2656 
2657 // -----------------------------------------------------------------------------
2658 // Old space for simple property cell objects
2659 
2660 class CellSpace : public PagedSpace {
2661  public:
2662  // Creates a property cell space object with a maximum capacity.
2663  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2664  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
2665 
2666  virtual int RoundSizeDownToObjectAlignment(int size) {
2667  if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
2668  return RoundDown(size, Cell::kSize);
2669  } else {
2670  return (size / Cell::kSize) * Cell::kSize;
2671  }
2672  }
2673 
2674  protected:
2675  virtual void VerifyObject(HeapObject* obj);
2676 
2677  public:
2678  TRACK_MEMORY("CellSpace")
2679 };
2680 
2681 
2682 // -----------------------------------------------------------------------------
2683 // Old space for all global object property cell objects
2684 
2685 class PropertyCellSpace : public PagedSpace {
2686  public:
2687  // Creates a property cell space object with a maximum capacity.
2688  PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2689  : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
2690 
2691  virtual int RoundSizeDownToObjectAlignment(int size) {
2692  if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
2693  return RoundDown(size, PropertyCell::kSize);
2694  } else {
2695  return (size / PropertyCell::kSize) * PropertyCell::kSize;
2696  }
2697  }
2698 
2699  protected:
2700  virtual void VerifyObject(HeapObject* obj);
2701 
2702  public:
2703  TRACK_MEMORY("PropertyCellSpace")
2704 };
2705 
2706 
2707 // -----------------------------------------------------------------------------
2708 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2709 // the large object space. A large object is allocated from OS heap with
2710 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2711 // A large object always starts at Page::kObjectStartOffset into a page.
2712 // Large objects do not move during garbage collections.
2713 
2714 class LargeObjectSpace : public Space {
2715  public:
2716  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2717  virtual ~LargeObjectSpace() {}
2718 
2719  // Initializes internal data structures.
2720  bool SetUp();
2721 
2722  // Releases internal resources, frees objects in this space.
2723  void TearDown();
2724 
2725  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2726  if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2727  return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2728  }
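// Worked example of ObjectSizeFor() above (illustrative, assuming 1MB pages):
// a 3MB chunk yields 3MB - 1MB - Page::kObjectStartOffset, i.e. just under
// 2MB of usable bytes, while any chunk no larger than one page plus the
// header yields 0.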
2729 
2730  // Shared implementation of AllocateRaw, AllocateRawCode and
2731  // AllocateRawFixedArray.
2732  MUST_USE_RESULT AllocationResult
2733  AllocateRaw(int object_size, Executability executable);
2734 
2735  bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
2736 
2737  // Available bytes for objects in this space.
2738  inline intptr_t Available();
2739 
2740  virtual intptr_t Size() { return size_; }
2741 
2742  virtual intptr_t SizeOfObjects() { return objects_size_; }
2743 
2744  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
2745 
2746  intptr_t CommittedMemory() { return Size(); }
2747 
2748  // Approximate amount of physical memory committed for this space.
2749  size_t CommittedPhysicalMemory();
2750 
2751  int PageCount() { return page_count_; }
2752 
2753  // Finds an object for a given address, returns a Smi if it is not found.
2754  // The function iterates through all objects in this space, may be slow.
2755  Object* FindObject(Address a);
2756 
2757  // Finds a large object page containing the given address, returns NULL
2758  // if such a page doesn't exist.
2759  LargePage* FindPage(Address a);
2760 
2761  // Frees unmarked objects.
2762  void FreeUnmarkedObjects();
2763 
2764  // Checks whether a heap object is in this space; O(1).
2765  bool Contains(HeapObject* obj);
2766 
2767  // Checks whether the space is empty.
2768  bool IsEmpty() { return first_page_ == NULL; }
2769 
2770  LargePage* first_page() { return first_page_; }
2771 
2772 #ifdef VERIFY_HEAP
2773  virtual void Verify();
2774 #endif
2775 
2776 #ifdef DEBUG
2777  virtual void Print();
2778  void ReportStatistics();
2779  void CollectCodeStatistics();
2780 #endif
2781  // Checks whether an address is in the object area in this space. It
2782  // iterates all objects in the space. May be slow.
2783  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
2784 
2785  private:
2786  intptr_t max_capacity_;
2787  intptr_t maximum_committed_;
2788  // The head of the linked list of large object chunks.
2789  LargePage* first_page_;
2790  intptr_t size_; // allocated bytes
2791  int page_count_; // number of chunks
2792  intptr_t objects_size_; // size of objects
2793  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2794  HashMap chunk_map_;
2795 
2796  friend class LargeObjectIterator;
2797 
2798  public:
2799  TRACK_MEMORY("LargeObjectSpace")
2800 };
2801 
2802 
2803 class LargeObjectIterator : public ObjectIterator {
2804  public:
2805  explicit LargeObjectIterator(LargeObjectSpace* space);
2806  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2807 
2808  HeapObject* Next();
2809 
2810  // Implementation of ObjectIterator.
2811  virtual HeapObject* next_object() { return Next(); }
2812 
2813  private:
2814  LargePage* current_;
2815  HeapObjectCallback size_func_;
2816 };
2817 
2818 
2819 // Iterates over the chunks (pages and large object pages) that can contain
2820 // pointers to new space.
2821 class PointerChunkIterator BASE_EMBEDDED {
2822  public:
2823  inline explicit PointerChunkIterator(Heap* heap);
2824 
2825  // Return NULL when the iterator is done.
2826  MemoryChunk* next() {
2827  switch (state_) {
2828  case kOldPointerState: {
2829  if (old_pointer_iterator_.has_next()) {
2830  return old_pointer_iterator_.next();
2831  }
2832  state_ = kMapState;
2833  // Fall through.
2834  }
2835  case kMapState: {
2836  if (map_iterator_.has_next()) {
2837  return map_iterator_.next();
2838  }
2839  state_ = kLargeObjectState;
2840  // Fall through.
2841  }
2842  case kLargeObjectState: {
2843  HeapObject* heap_object;
2844  do {
2845  heap_object = lo_iterator_.Next();
2846  if (heap_object == NULL) {
2847  state_ = kFinishedState;
2848  return NULL;
2849  }
2850  // Fixed arrays are the only pointer-containing objects in large
2851  // object space.
2852  } while (!heap_object->IsFixedArray());
2853  MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2854  return answer;
2855  }
2856  case kFinishedState:
2857  return NULL;
2858  default:
2859  break;
2860  }
2861  UNREACHABLE();
2862  return NULL;
2863  }
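// Assumed usage sketch for the state machine above: drain the iterator until
// it reports NULL.
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // visit intergenerational pointers on chunk
//   }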
2864 
2865 
2866  private:
2867  enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
2868  State state_;
2869  PageIterator old_pointer_iterator_;
2870  PageIterator map_iterator_;
2871  LargeObjectIterator lo_iterator_;
2872 };
2873 
2874 
2875 #ifdef DEBUG
2876 struct CommentStatistic {
2877  const char* comment;
2878  int size;
2879  int count;
2880  void Clear() {
2881  comment = NULL;
2882  size = 0;
2883  count = 0;
2884  }
2885  // Must be small, since an iteration is used for lookup.
2886  static const int kMaxComments = 64;
2887 };
2888 #endif
2889 }
2890 } // namespace v8::internal
2891 
2892 #endif // V8_HEAP_SPACES_H_
const int kPageSizeBits
Definition: build_config.h:159
#define SLOW_DCHECK(condition)
Definition: checks.h:30
Isolate represents an isolated instance of the V8 engine.
Definition: v8.h:4356
void TakeControl(VirtualMemory *from)
Definition: platform.h:377
INLINE(void set_limit(Address limit))
Definition: spaces.h:1273
INLINE(Address top()) const
Definition: spaces.h:1265
INLINE(Address limit()) const
Definition: spaces.h:1279
INLINE(void set_top(Address top))
Definition: spaces.h:1259
AllocationResult(Object *object)
Definition: spaces.h:1610
AllocationSpace RetrySpace()
Definition: spaces.h:1634
AllocationSpace retry_space_
Definition: spaces.h:1644
static AllocationResult Retry(AllocationSpace space=NEW_SPACE)
Definition: spaces.h:1616
AllocationResult(AllocationSpace space)
Definition: spaces.h:1640
void increment_number(int num)
Definition: spaces.h:1946
LargeObjectIterator lo_iterator_
Definition: spaces.h:2871
PageIterator(PagedSpace *space)
NewSpacePageIterator(NewSpace *space)
void AllocateBytes(intptr_t size_in_bytes)
Definition: spaces.h:1369
NewSpacePage * prev_page_
Definition: spaces.h:2300
void WasteBytes(int size_in_bytes)
Definition: spaces.h:1381
void ExpandSpace(int size_in_bytes)
Definition: spaces.h:1350
NewSpacePage * next_page_
Definition: spaces.h:2303
void ShrinkSpace(int size_in_bytes)
Definition: spaces.h:1362
PageIterator map_iterator_
Definition: spaces.h:2870
void increment_bytes(int size)
Definition: spaces.h:1949
NewSpacePage * last_page_
Definition: spaces.h:2305
PageIterator old_pointer_iterator_
Definition: spaces.h:2869
NewSpacePageIterator(Address start, Address limit)
MemoryChunk * next()
Definition: spaces.h:2826
NewSpacePageIterator(SemiSpace *space)
void DeallocateBytes(intptr_t size_in_bytes)
Definition: spaces.h:1375
void Print(uint32_t pos, uint32_t cell)
Definition: spaces.h:210
static bool IsSeq(uint32_t cell)
Definition: spaces.h:238
static const uint32_t kBytesPerCell
Definition: spaces.h:149
INLINE(Address address())
Definition: spaces.h:184
static void PrintWord(uint32_t word, uint32_t himask=0)
Definition: spaces.h:198
INLINE(static Bitmap *FromAddress(Address addr))
Definition: spaces.h:186
INLINE(static uint32_t IndexToCell(uint32_t index))
Definition: spaces.h:168
MarkBit MarkBitFromIndex(uint32_t index, bool data_only=false)
Definition: spaces.h:190
static const uint32_t kBytesPerCellLog2
Definition: spaces.h:150
static const size_t kLength
Definition: spaces.h:152
INLINE(static uint32_t CellAlignIndex(uint32_t index))
Definition: spaces.h:176
static const size_t kSize
Definition: spaces.h:154
static const uint32_t kBitIndexMask
Definition: spaces.h:148
static const uint32_t kBitsPerCellLog2
Definition: spaces.h:147
static int SizeFor(int cells_count)
Definition: spaces.h:164
INLINE(static uint32_t CellToIndex(uint32_t index))
Definition: spaces.h:172
static void Clear(MemoryChunk *chunk)
Definition: spaces-inl.h:21
static int CellsForLength(int length)
Definition: spaces.h:158
static const uint32_t kBitsPerCell
Definition: spaces.h:146
INLINE(MarkBit::CellType *cells())
Definition: spaces.h:180
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2759
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2666
CellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2663
static const int kSize
Definition: objects.h:9447
FreeBlock(Address start_arg, size_t size_arg)
Definition: spaces.h:911
FreeBlock(void *start_arg, size_t size_arg)
Definition: spaces.h:916
bool contains(Address address)
Definition: spaces.h:887
int current_allocation_block_index_
Definition: spaces.h:933
base::VirtualMemory * code_range_
Definition: spaces.h:907
void FreeRawMemory(Address buf, size_t length)
Definition: spaces.cc:233
CodeRange(Isolate *isolate)
Definition: spaces.cc:91
DISALLOW_COPY_AND_ASSIGN(CodeRange)
static int CompareFreeBlockAddress(const FreeBlock *left, const FreeBlock *right)
Definition: spaces.cc:136
List< FreeBlock > allocation_list_
Definition: spaces.h:932
Isolate * isolate_
Definition: spaces.h:904
bool SetUp(size_t requested_size)
Definition: spaces.cc:99
List< FreeBlock > free_list_
Definition: spaces.h:929
bool GetNextAllocationBlock(size_t requested)
Definition: spaces.cc:145
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, const size_t commit_size, size_t *allocated)
Definition: spaces.cc:186
bool CommitRawMemory(Address start, size_t length)
Definition: spaces.cc:223
bool UncommitRawMemory(Address start, size_t length)
Definition: spaces.cc:228
FreeListNode * end() const
Definition: spaces.h:1465
FreeListNode * top() const
Definition: spaces.h:1456
void set_available(int available)
Definition: spaces.h:1470
void RepairFreeList(Heap *heap)
Definition: spaces.cc:2140
void Free(FreeListNode *node, int size_in_bytes)
Definition: spaces.cc:2130
base::AtomicWord top_
Definition: spaces.h:1483
void set_end(FreeListNode *end)
Definition: spaces.h:1466
intptr_t EvictFreeListItemsInList(Page *p)
Definition: spaces.cc:2059
void set_top(FreeListNode *top)
Definition: spaces.h:1460
intptr_t Concatenate(FreeListCategory *category)
Definition: spaces.cc:2028
bool ContainsPageFreeListItemsInList(Page *p)
Definition: spaces.cc:2081
FreeListNode * PickNodeFromList(int *node_size)
Definition: spaces.cc:2091
FreeListNode ** GetEndAddress()
Definition: spaces.h:1464
static FreeListNode * FromAddress(Address address)
Definition: spaces.h:1406
void set_next(FreeListNode *next)
Definition: spaces.cc:2010
static bool IsFreeListNode(HeapObject *object)
Definition: spaces-inl.h:303
static const int kNextOffset
Definition: spaces.h:1430
void set_size(Heap *heap, int size_in_bytes)
Definition: spaces.cc:1955
FreeListNode * next()
Definition: spaces.cc:1986
static FreeListNode * cast(Object *object)
Definition: spaces.h:1425
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode)
FreeListNode ** next_address()
Definition: spaces.cc:1999
FreeListCategory huge_list_
Definition: spaces.h:1601
static int GuaranteedAllocatable(int maximum_freed)
Definition: spaces.h:1540
intptr_t available()
Definition: spaces.h:1525
bool ContainsPageFreeListItems(Page *p)
Definition: spaces.cc:2418
FreeListNode * FindNodeFor(int size_in_bytes, int *node_size)
Definition: spaces.cc:2211
FreeListCategory medium_list_
Definition: spaces.h:1599
intptr_t Concatenate(FreeList *free_list)
Definition: spaces.cc:2159
MUST_USE_RESULT HeapObject * Allocate(int size_in_bytes)
Definition: spaces.cc:2326
FreeListCategory large_list_
Definition: spaces.h:1600
FreeList(PagedSpace *owner)
Definition: spaces.cc:2154
FreeListCategory * medium_list()
Definition: spaces.h:1577
static const int kMaxBlockSize
Definition: spaces.h:1584
FreeListCategory * huge_list()
Definition: spaces.h:1579
static const int kSmallListMax
Definition: spaces.h:1592
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList)
PagedSpace * owner_
Definition: spaces.h:1588
static const int kLargeListMax
Definition: spaces.h:1594
static const int kMinBlockSize
Definition: spaces.h:1583
FreeListCategory * small_list()
Definition: spaces.h:1576
FreeListCategory * large_list()
Definition: spaces.h:1578
static const int kSmallAllocationMax
Definition: spaces.h:1595
static const int kLargeAllocationMax
Definition: spaces.h:1597
int Free(Address start, int size_in_bytes)
Definition: spaces.cc:2177
static const int kMediumListMax
Definition: spaces.h:1593
static const int kSmallListMin
Definition: spaces.h:1591
intptr_t EvictFreeListItems(Page *p)
Definition: spaces.cc:2401
void RepairLists(Heap *heap)
Definition: spaces.cc:2426
FreeListCategory small_list_
Definition: spaces.h:1598
static const int kMediumAllocationMax
Definition: spaces.h:1596
static const int kHeaderSize
Definition: objects.h:4423
void Initialize(PagedSpace *owner, Address start, Address end, PageMode mode, HeapObjectCallback size_func)
Definition: spaces.cc:55
virtual HeapObject * next_object()
Definition: spaces.h:1206
HeapObjectCallback size_func_
Definition: spaces.h:1213
HeapObjectIterator(PagedSpace *space)
Definition: spaces.cc:21
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1464
const char * name()
Definition: spaces.h:1968
void set_name(const char *name)
Definition: spaces.h:1969
virtual HeapObject * next_object()
Definition: spaces.h:2811
LargeObjectIterator(LargeObjectSpace *space)
Definition: spaces.cc:2770
HeapObjectCallback size_func_
Definition: spaces.h:2815
virtual intptr_t SizeOfObjects()
Definition: spaces.h:2742
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.cc:2797
intptr_t MaximumCommittedMemory()
Definition: spaces.h:2744
MUST_USE_RESULT AllocationResult AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2834
bool Contains(HeapObject *obj)
Definition: spaces.cc:2980
virtual intptr_t Size()
Definition: spaces.h:2740
static intptr_t ObjectSizeFor(intptr_t chunk_size)
Definition: spaces.h:2725
bool CanAllocateSize(int size)
Definition: spaces.h:2735
LargePage * FindPage(Address a)
Definition: spaces.cc:2910
Object * FindObject(Address a)
Definition: spaces.cc:2901
bool SlowContains(Address addr)
Definition: spaces.h:2783
HeapObject * GetObject()
Definition: spaces.h:798
static LargePage * Initialize(Heap *heap, MemoryChunk *chunk)
Definition: spaces-inl.h:292
LargePage * next_page() const
Definition: spaces.h:800
void set_next_page(LargePage *page)
Definition: spaces.h:804
static const int kMapsPerPage
Definition: spaces.h:2643
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2750
static const int kMaxMapPageIndex
Definition: spaces.h:2629
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2631
MapSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2623
const int max_map_space_pages_
Definition: spaces.h:2650
static const int kSize
Definition: objects.h:6202
uint32_t CellType
Definition: spaces.h:103
CellType * cell_
Definition: spaces.h:133
MarkBit(CellType *cell, CellType mask, bool data_only)
Definition: spaces.h:105
CellType mask()
Definition: spaces.h:109
CellType * cell()
Definition: spaces.h:108
MarkBit Next()
Definition: spaces.h:123
static int CodePageGuardSize()
Definition: spaces.cc:815
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
Definition: spaces.cc:690
List< MemoryAllocationCallbackRegistration > memory_allocation_callbacks_
Definition: spaces.h:1143
static int CodePageAreaEndOffset()
Definition: spaces.cc:827
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
Definition: spaces.cc:763
bool CommitMemory(Address addr, size_t size, Executability executable)
Definition: spaces.cc:284
bool UncommitBlock(Address start, size_t size)
Definition: spaces.cc:736
MemoryChunk * AllocateChunk(intptr_t reserve_area_size, intptr_t commit_area_size, Executability executable, Space *space)
Definition: spaces.cc:548
void UpdateAllocatedSpaceLimits(void *low, void *high)
Definition: spaces.h:1152
static int CodePageAreaSize()
Definition: spaces.h:1103
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
Definition: spaces.cc:262
void FreeMemory(base::VirtualMemory *reservation, Executability executable)
Definition: spaces.cc:295
void Free(MemoryChunk *chunk)
Definition: spaces.cc:700
void ZapBlock(Address start, size_t size)
Definition: spaces.cc:743
static int CodePageAreaStartOffset()
Definition: spaces.cc:820
Page * InitializePagesInChunk(int chunk_id, int pages_in_chunk, PagedSpace *owner)
Address ReserveAlignedMemory(size_t requested, size_t alignment, base::VirtualMemory *controller)
Definition: spaces.cc:345
intptr_t AvailableExecutable()
Definition: spaces.h:1026
MemoryAllocator(Isolate *isolate)
Definition: spaces.cc:252
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, size_t alignment, Executability executable, base::VirtualMemory *controller)
Definition: spaces.cc:358
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator)
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
Definition: spaces.cc:750
static int CodePageGuardStartOffset()
Definition: spaces.cc:808
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
Definition: spaces.cc:782
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory *vm, Address start, size_t commit_size, size_t reserved_size)
Definition: spaces.cc:834
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
Definition: spaces.cc:680
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.cc:772
bool CommitBlock(Address start, size_t size, Executability executable)
Definition: spaces.cc:723
bool IsOutsideAllocatedSpace(const void *address) const
Definition: spaces.h:1041
bool IsFlagSet(int flag)
Definition: spaces.h:417
void initialize_scan_on_scavenge(bool scan)
Definition: spaces.h:334
static const size_t kWriteBarrierCounterOffset
Definition: spaces.h:536
SkipList * skip_list()
Definition: spaces.h:613
static const int kPointersFromHereAreInterestingMask
Definition: spaces.h:395
Executability executable()
Definition: spaces.h:563
void set_owner(Space *space)
Definition: spaces.h:317
static const int kFlagsOffset
Definition: spaces.h:605
void set_reserved_memory(base::VirtualMemory *reservation)
Definition: spaces.h:328
SkipList * skip_list_
Definition: spaces.h:662
intptr_t available_in_large_free_list_
Definition: spaces.h:676
base::VirtualMemory * reserved_memory()
Definition: spaces.h:324
Heap * heap() const
Definition: spaces.h:603
bool CommitArea(size_t requested)
Definition: spaces.cc:476
intptr_t non_available_small_blocks_
Definition: spaces.h:678
bool Contains(Address addr)
Definition: spaces.h:348
SlotsBuffer ** slots_buffer_address()
Definition: spaces.h:619
void SetFlags(intptr_t flags, intptr_t mask)
Definition: spaces.h:424
static void IncrementLiveBytesFromMutator(Address address, int by)
Definition: spaces.cc:868
void IncrementLiveBytes(int by)
Definition: spaces.h:471
void set_write_barrier_counter(int counter)
Definition: spaces.h:489
bool IsLeftOfProgressBar(Object **slot)
Definition: spaces.h:510
intptr_t available_in_medium_free_list_
Definition: spaces.h:675
static const int kObjectStartOffset
Definition: spaces.h:550
static const int kEvacuationCandidateMask
Definition: spaces.h:398
void set_next_chunk(MemoryChunk *next)
Definition: spaces.h:299
static void IncrementLiveBytesFromGC(Address address, int by)
Definition: spaces.h:517
bool ContainsLimit(Address addr)
Definition: spaces.h:355
void MarkEvacuationCandidate()
Definition: spaces.h:621
base::AtomicWord parallel_sweeping_
Definition: spaces.h:671
static uint32_t FastAddressToMarkbitIndex(Address addr)
Definition: spaces.h:590
void set_scan_on_scavenge(bool scan)
Definition: spaces-inl.h:157
Address MarkbitIndexToAddress(uint32_t index)
Definition: spaces.h:596
void set_store_buffer_counter(int counter)
Definition: spaces.h:344
void InitializeReservedMemory()
Definition: spaces.h:326
MemoryChunk * prev_chunk() const
Definition: spaces.h:295
void SetFlagTo(int flag, bool value)
Definition: spaces.h:409
static const int kBodyOffset
Definition: spaces.h:543
void set_prev_chunk(MemoryChunk *prev)
Definition: spaces.h:303
void SetFlag(int flag)
Definition: spaces.h:405
static const MemoryChunk * FromAddress(const byte *a)
Definition: spaces.h:279
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
Definition: spaces.cc:430
Space * owner() const
Definition: spaces.h:307
static const int kWriteBarrierCounterGranularity
Definition: spaces.h:362
static const intptr_t kSizeOffset
Definition: spaces.h:528
intptr_t available_in_small_free_list_
Definition: spaces.h:674
intptr_t available_in_huge_free_list_
Definition: spaces.h:677
static const size_t kHeaderSize
Definition: spaces.h:539
static MemoryChunk * FromAddress(Address a)
Definition: spaces.h:276
MemoryChunk * next_chunk() const
Definition: spaces.h:291
bool IsEvacuationCandidate()
Definition: spaces.h:607
base::VirtualMemory reservation_
Definition: spaces.h:650
static const intptr_t kAlignmentMask
Definition: spaces.h:526
static MemoryChunk * FromAnyPointerAddress(Heap *heap, Address addr)
Definition: spaces-inl.h:169
SlotsBuffer * slots_buffer_
Definition: spaces.h:661
size_t CommittedPhysicalMemory()
Definition: spaces.h:637
uint32_t AddressToMarkbitIndex(Address addr)
Definition: spaces.h:586
static const int kPointersToHereAreInterestingMask
Definition: spaces.h:392
void set_skip_list(SkipList *skip_list)
Definition: spaces.h:615
ParallelSweepingState parallel_sweeping()
Definition: spaces.h:445
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:400
static const int kObjectStartAlignment
Definition: spaces.h:549
SlotsBuffer * slots_buffer()
Definition: spaces.h:617
bool ShouldSkipEvacuationSlotRecording()
Definition: spaces.h:609
void set_size(size_t size)
Definition: spaces.h:556
void set_parallel_sweeping(ParallelSweepingState state)
Definition: spaces.h:450
intptr_t write_barrier_counter_
Definition: spaces.h:663
static const intptr_t kLiveBytesOffset
Definition: spaces.h:530
void SetArea(Address area_start, Address area_end)
Definition: spaces.h:558
void ClearFlag(int flag)
Definition: spaces.h:407
base::AtomicWord next_chunk_
Definition: spaces.h:686
void ClearEvacuationCandidate()
Definition: spaces.h:626
static const intptr_t kAlignment
Definition: spaces.h:523
static const size_t kSlotsBufferOffset
Definition: spaces.h:534
static void UpdateHighWaterMark(Address mark)
Definition: spaces-inl.h:189
void InsertAfter(MemoryChunk *other)
Definition: spaces.cc:528
size_t size() const
Definition: spaces.h:554
base::AtomicWord prev_chunk_
Definition: spaces.h:688
void set_progress_bar(int progress_bar)
Definition: spaces.h:498
NewSpacePage * prev_page() const
Definition: spaces.h:1999
static bool IsAtEnd(Address addr)
Definition: spaces.h:2014
SemiSpace * semi_space()
Definition: spaces.h:2005
void set_prev_page(NewSpacePage *page)
Definition: spaces.h:2003
NewSpacePage(SemiSpace *owner)
Definition: spaces.h:2044
static bool OnSamePage(Address address1, Address address2)
Definition: spaces.h:2036
static const int kAreaSize
Definition: spaces.h:1991
static NewSpacePage * Initialize(Heap *heap, Address start, SemiSpace *semi_space)
Definition: spaces.cc:398
static bool IsAtStart(Address addr)
Definition: spaces.h:2009
void InitializeAsAnchor(SemiSpace *owner)
Definition: spaces.cc:420
static const intptr_t kCopyOnFlipFlagsMask
Definition: spaces.h:1986
void set_next_page(NewSpacePage *page)
Definition: spaces.h:1997
static NewSpacePage * FromAddress(Address address_in_page)
Definition: spaces.h:2021
static NewSpacePage * FromLimit(Address address_limit)
Definition: spaces.h:2031
NewSpacePage * next_page() const
Definition: spaces.h:1993
SemiSpace to_space_
Definition: spaces.h:2558
intptr_t Available()
Definition: spaces.h:2400
bool IsAtMaximumCapacity()
Definition: spaces.h:2409
uintptr_t chunk_size_
Definition: spaces.h:2555
void LowerInlineAllocationLimit(intptr_t step)
Definition: spaces.h:2473
base::VirtualMemory reservation_
Definition: spaces.h:2560
void RecordPromotion(HeapObject *obj)
Definition: spaces.cc:1933
Address ToSpaceEnd()
Definition: spaces.h:2489
SemiSpace * active_space()
Definition: spaces.h:2548
intptr_t Capacity()
Definition: spaces.h:2371
Address FromSpaceEnd()
Definition: spaces.h:2485
Address FromSpacePageHigh()
Definition: spaces.h:2483
AllocationInfo allocation_info_
Definition: spaces.h:2571
Address top_on_previous_step_
Definition: spaces.h:2579
bool Contains(Object *o)
Definition: spaces.h:2354
MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:1374
bool FromSpaceContains(Object *o)
Definition: spaces.h:2502
bool ToSpaceContains(Object *o)
Definition: spaces.h:2501
intptr_t CommittedMemory()
Definition: spaces.h:2385
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1941
INLINE(uint32_t AddressToMarkbitIndex(Address addr))
Definition: spaces.h:2448
void set_age_mark(Address mark)
Definition: spaces.h:2441
virtual intptr_t Size()
Definition: spaces.h:2360
Address ToSpaceStart()
Definition: spaces.h:2488
uintptr_t address_mask_
Definition: spaces.h:2565
Address * allocation_top_address()
Definition: spaces.h:2460
void ResetAllocationInfo()
Definition: spaces.cc:1311
intptr_t inline_allocation_limit_step_
Definition: spaces.h:2577
intptr_t MaximumCommittedMemory()
Definition: spaces.h:2391
bool SetUp(int reserved_semispace_size_, int max_semi_space_size)
Definition: spaces.cc:1175
void set_top(Address top)
Definition: spaces.h:2424
void UpdateInlineAllocationLimit(int size_in_bytes)
Definition: spaces.cc:1323
void UpdateAllocationInfo()
Definition: spaces.cc:1302
bool CommitFromSpaceIfNeeded()
Definition: spaces.h:2534
void RecordAllocation(HeapObject *obj)
Definition: spaces.cc:1925
bool UncommitFromSpace()
Definition: spaces.h:2539
Address * allocation_limit_address()
Definition: spaces.h:2463
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes))
bool FromSpaceContains(Address address)
Definition: spaces.h:2494
Address age_mark()
Definition: spaces.h:2439
intptr_t TotalCapacity()
Definition: spaces.h:2379
NewSpace(Heap *heap)
Definition: spaces.h:2318
Address FromSpacePageLow()
Definition: spaces.h:2482
HistogramInfo * promoted_histogram_
Definition: spaces.h:2582
bool ToSpaceContains(Address address)
Definition: spaces.h:2491
Address FromSpaceStart()
Definition: spaces.h:2484
SemiSpace from_space_
Definition: spaces.h:2559
intptr_t inline_allocation_limit_step()
Definition: spaces.h:2544
int InitialTotalCapacity()
Definition: spaces.h:2412
HistogramInfo * allocated_histogram_
Definition: spaces.h:2581
uintptr_t object_mask_
Definition: spaces.h:2566
uintptr_t object_expected_
Definition: spaces.h:2567
bool Contains(Address a)
Definition: spaces.h:2349
INLINE(Address MarkbitIndexToAddress(uint32_t index))
Definition: spaces.h:2455
uintptr_t mask()
Definition: spaces.h:2446
virtual HeapObject * next_object()=0
OldSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.h:2600
void SetWasSwept()
Definition: spaces.h:767
bool WasSwept()
Definition: spaces.h:766
INLINE(int Offset(Address a))
Definition: spaces.h:734
static const int kPageSize
Definition: spaces.h:748
INLINE(static Page *FromAllocationTop(Address top))
Definition: spaces.h:717
void InitializeAsAnchor(PagedSpace *owner)
Definition: spaces.cc:391
void ResetFreeListStatistics()
Definition: spaces.cc:671
static const intptr_t kPageAlignmentMask
Definition: spaces.h:757
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
Definition: spaces-inl.h:136
void set_next_page(Page *page)
Definition: spaces-inl.h:221
static const int kMaxRegularHeapObjectSize
Definition: spaces.h:754
INLINE(static Page *FromAddress(Address a))
Definition: spaces.h:709
static bool IsAlignedToPageSize(Address a)
Definition: spaces.h:729
Page * next_page()
Definition: spaces-inl.h:209
Address OffsetToAddress(int offset)
Definition: spaces.h:740
void ClearWasSwept()
Definition: spaces.h:768
Page * prev_page()
Definition: spaces-inl.h:215
void set_prev_page(Page *page)
Definition: spaces-inl.h:227
bool Contains(Address a)
Definition: spaces-inl.h:150
Address * allocation_limit_address()
Definition: spaces.h:1758
void ResetUnsweptFreeBytes()
Definition: spaces.h:1850
intptr_t CommittedMemory()
Definition: spaces.h:1692
size_t CommittedPhysicalMemory()
Definition: spaces.cc:920
intptr_t MaximumCommittedMemory()
Definition: spaces.h:1695
void SetTopAndLimit(Address top, Address limit)
Definition: spaces.h:1780
AllocationInfo allocation_info_
Definition: spaces.h:1901
HeapObject * AllocateLinearly(int size_in_bytes)
Definition: spaces-inl.h:237
int Free(Address start, int size_in_bytes)
Definition: spaces.h:1770
void IncreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1838
intptr_t Available()
Definition: spaces.h:1733
void Allocate(int bytes)
Definition: spaces.h:1797
intptr_t unswept_free_bytes_
Definition: spaces.h:1905
void ObtainFreeListStatistics(Page *p, SizeStats *sizes)
Definition: spaces.cc:1039
intptr_t SizeOfFirstPage()
Definition: spaces.cc:986
void ResetFreeListStatistics()
Definition: spaces.cc:1047
static bool ShouldBeSweptBySweeperThreads(Page *p)
Definition: spaces.h:1831
virtual intptr_t Waste()
Definition: spaces.h:1748
Address * allocation_top_address()
Definition: spaces.h:1755
bool EnsureSweeperProgress(intptr_t size_in_bytes)
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
Definition: spaces.cc:880
AllocationStats accounting_stats_
Definition: spaces.h:1892
bool Contains(HeapObject *o)
Definition: spaces.h:1672
void AddToAccountingStats(intptr_t bytes)
Definition: spaces.h:1725
void ReleasePage(Page *page)
Definition: spaces.cc:1061
void DecreaseUnsweptFreeBytes(Page *p)
Definition: spaces.h:1845
Page * end_of_unswept_pages()
Definition: spaces.h:1860
MUST_USE_RESULT HeapObject * WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes)
Definition: spaces.cc:2532
MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
Definition: spaces.cc:2547
virtual intptr_t Size()
Definition: spaces.h:1739
Object * FindObject(Address addr)
Definition: spaces.cc:932
void set_end_of_unswept_pages(Page *page)
Definition: spaces.h:1858
void EvictEvacuationCandidatesFromFreeLists()
Definition: spaces.cc:2516
void IncreaseCapacity(int size)
Definition: spaces.cc:1056
void IncrementUnsweptFreeBytes(intptr_t by)
Definition: spaces.h:1836
void DecrementUnsweptFreeBytes(intptr_t by)
Definition: spaces.h:1843
void RepairFreeListsAfterBoot()
Definition: spaces.cc:2513
FreeList * free_list()
Definition: spaces.h:1882
virtual intptr_t SizeOfObjects()
Definition: spaces.cc:2502
friend class PageIterator
Definition: spaces.h:1936
MemoryChunk * emergency_memory_
Definition: spaces.h:1916
Page * end_of_unswept_pages_
Definition: spaces.h:1910
MUST_USE_RESULT AllocationResult AllocateRaw(int size_in_bytes)
Definition: spaces-inl.h:248
PropertyCellSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
Definition: spaces.h:2688
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:2691
virtual void VerifyObject(HeapObject *obj)
Definition: spaces.cc:2762
static const int kSize
Definition: objects.h:9496
void Initialize(Address start, Address end, HeapObjectCallback size_func)
Definition: spaces.cc:1756
HeapObjectCallback size_func_
Definition: spaces.h:2277
virtual HeapObject * next_object()
Definition: spaces.h:2267
SemiSpaceIterator(NewSpace *space)
Definition: spaces.cc:1735
Address space_start()
Definition: spaces.h:2098
friend class NewSpacePageIterator
Definition: spaces.h:2221
size_t MaximumCommittedMemory()
Definition: spaces.h:2182
static void Swap(SemiSpace *from, SemiSpace *to)
Definition: spaces.cc:1635
SemiSpace(Heap *heap, SemiSpaceId semispace)
Definition: spaces.h:2069
void FlipPages(intptr_t flags, intptr_t flag_mask)
Definition: spaces.cc:1599
bool Contains(Address a)
Definition: spaces.h:2128
intptr_t maximum_committed_
Definition: spaces.h:2202
void SetCapacity(int new_capacity)
Definition: spaces.cc:1656
size_t CommittedPhysicalMemory()
Definition: spaces.cc:1526
uintptr_t object_mask_
Definition: spaces.h:2211
NewSpacePage * current_page()
Definition: spaces.h:2151
bool Contains(Object *o)
Definition: spaces.h:2135
bool ShrinkTo(int new_capacity)
Definition: spaces.cc:1572
uintptr_t address_mask_
Definition: spaces.h:2210
NewSpacePage * current_page_
Definition: spaces.h:2218
NewSpacePage * first_page()
Definition: spaces.h:2150
NewSpacePage * anchor()
Definition: spaces.h:2195
void set_age_mark(Address mark)
Definition: spaces.cc:1664
void SetUp(Address start, int initial_capacity, int maximum_capacity)
Definition: spaces.cc:1460
bool GrowTo(int new_capacity)
Definition: spaces.cc:1537
SemiSpaceId id()
Definition: spaces.h:2177
NewSpacePage anchor_
Definition: spaces.h:2217
static void AssertValidRange(Address from, Address to)
Definition: spaces.h:2165
virtual intptr_t Size()
Definition: spaces.h:2141
uintptr_t object_expected_
Definition: spaces.h:2212
Address StartFor(Address addr)
Definition: spaces.h:958
static const int kRegionSizeLog2
Definition: spaces.h:984
void AddObject(Address addr, int size)
Definition: spaces.h:960
static void Update(Address addr, int size)
Definition: spaces.h:972
Address starts_[kSize]
Definition: spaces.h:990
static const int kSize
Definition: spaces.h:986
static int RegionNumber(Address addr)
Definition: spaces.h:968
STATIC_ASSERT(Page::kPageSize % kRegionSize==0)
static const int kRegionSize
Definition: spaces.h:985
Space(Heap *heap, AllocationSpace id, Executability executable)
Definition: spaces.h:818
virtual intptr_t Size()=0
virtual int RoundSizeDownToObjectAlignment(int size)
Definition: spaces.h:838
AllocationSpace id_
Definition: spaces.h:852
virtual intptr_t SizeOfObjects()
Definition: spaces.h:836
AllocationSpace identity()
Definition: spaces.h:829
virtual ~Space()
Definition: spaces.h:821
Executability executable()
Definition: spaces.h:826
Heap * heap() const
Definition: spaces.h:823
Executability executable_
Definition: spaces.h:853
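The Space entries above outline an abstract base class: every space carries an allocation-space id and an executability bit, and each subclass must report its size. A minimal sketch of that shape (enum names are illustrative, and SizeOfObjects defaulting to Size() is an assumption read off the entries, not a quoted definition):

#include <cstdint>

enum class AllocationSpaceId { kNew, kOld, kMap, kCode };
enum class Executability { kNotExecutable, kExecutable };

class SpaceSketch {
 public:
  SpaceSketch(AllocationSpaceId id, Executability exec)
      : id_(id), executable_(exec) {}
  virtual ~SpaceSketch() = default;

  AllocationSpaceId identity() const { return id_; }
  Executability executable() const { return executable_; }

  virtual intptr_t Size() = 0;  // allocated bytes; pure virtual
  virtual intptr_t SizeOfObjects() { return Size(); }  // assumed default

 private:
  AllocationSpaceId id_;
  Executability executable_;
};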
DEFINE_STRING(raw_context_file, ...)
DEFINE_BOOL(enable_always_align_csp, ...)
(flag-definition macro index entries; their concatenated help-string expansions elided)
#define CODE_POINTER_ALIGN(value)
Definition: globals.h:586
#define POINTER_SIZE_ALIGN(value)
Definition: globals.h:582
#define TRACK_MEMORY(name)
Definition: globals.h:606
#define UNREACHABLE()
Definition: logging.h:30
#define DCHECK_LE(v1, v2)
Definition: logging.h:210
#define CHECK(condition)
Definition: logging.h:36
#define DCHECK_NOT_NULL(p)
Definition: logging.h:213
#define DCHECK(condition)
Definition: logging.h:205
intptr_t OffsetFrom(T x)
Definition: macros.h:383
T RoundDown(T x, intptr_t m)
Definition: macros.h:399
#define MUST_USE_RESULT
Definition: macros.h:266
bool IsPowerOfTwo32(uint32_t value)
Definition: bits.h:77
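RoundDown and IsPowerOfTwo32 above are the standard bit tricks for power-of-two arithmetic: a nonzero value v is a power of two iff v & (v - 1) == 0, and for a power-of-two m, x & -m clears the low log2(m) bits. A short sketch of both (the Sketch suffix marks these as illustrative re-implementations):

#include <cassert>
#include <cstdint>

inline bool IsPowerOfTwo32Sketch(uint32_t value) {
  // Clearing the lowest set bit of a power of two yields zero.
  return value != 0 && (value & (value - 1)) == 0;
}

inline intptr_t RoundDownSketch(intptr_t x, intptr_t m) {
  assert(m > 0 && (m & (m - 1)) == 0);  // m must be a power of two
  return x & -m;  // -m has all bits set above log2(m)
}

// Example: RoundDownSketch(0x1234, 0x100) == 0x1200.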
intptr_t AtomicWord
Definition: atomicops.h:57
Atomic32 Acquire_Load(volatile const Atomic32 *ptr)
void Release_Store(volatile Atomic32 *ptr, Atomic32 value)
void NoBarrier_Store(volatile Atomic8 *ptr, Atomic8 value)
Atomic8 NoBarrier_Load(volatile const Atomic8 *ptr)
Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value)
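The Acquire_Load / Release_Store pair above implements the usual publish/consume protocol: a writer stores a flag with release semantics after preparing its data, and a reader that observes the flag with an acquire load is guaranteed to see that data. A sketch in std::atomic terms (V8's atomicops layer predates std::atomic; the mapping is this sketch's assumption):

#include <atomic>

std::atomic<int> ready{0};
int payload = 0;

void Publish() {
  payload = 42;                              // ordinary write
  ready.store(1, std::memory_order_release); // like Release_Store
}

bool TryConsume(int* out) {
  if (ready.load(std::memory_order_acquire) == 1) {  // like Acquire_Load
    *out = payload;  // guaranteed to observe payload == 42
    return true;
  }
  return false;
}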
bool operator==(const StoreRepresentation &rep1, const StoreRepresentation &rep2)
const int kPointerSize
Definition: globals.h:129
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
Definition: utils.h:129
@ NOT_EXECUTABLE
Definition: globals.h:391
static LifetimePosition Min(LifetimePosition a, LifetimePosition b)
const intptr_t kCodeAlignment
Definition: globals.h:240
const intptr_t kPageHeaderTagMask
Definition: globals.h:251
const int kPointerSizeLog2
Definition: globals.h:147
const int kPageHeaderTag
Definition: globals.h:249
const int kBitsPerByteLog2
Definition: globals.h:163
static LifetimePosition Max(LifetimePosition a, LifetimePosition b)
byte * Address
Definition: globals.h:101
void PrintF(const char *format,...)
Definition: utils.cc:80
const int kIntSize
Definition: globals.h:124
@ INVALID_SPACE
Definition: globals.h:367
int (*HeapObjectCallback)(HeapObject *obj)
Definition: globals.h:429
kFeedbackVectorOffset flag
Definition: objects-inl.h:5418
intptr_t HeapObjectTagMask()
Definition: checks.cc:12
STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register))
const int kBitsPerByte
Definition: globals.h:162
bool IsAligned(T value, U alignment)
Definition: utils.h:123
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
void (*MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
Definition: v8.h:4171
AllocationAction
Definition: v8.h:4165
ObjectSpace
Definition: v8.h:4152
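MemoryAllocationCallback, ObjectSpace, and AllocationAction above form the embedder-visible hook for space-level allocation and free notifications. A minimal callback matching the typedef (the logging body is illustrative, and the registration entry point is not shown in this index, so it is left out):

#include <v8.h>
#include <cstdio>

// Logs every space-level allocate/free notification delivered to the
// embedder through the MemoryAllocationCallback interface.
void LogAllocation(v8::ObjectSpace space, v8::AllocationAction action,
                   int size) {
  std::printf("space=%d action=%d size=%d\n",
              static_cast<int>(space), static_cast<int>(action), size);
}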
#define FRAGMENTATION_STATS_ACCESSORS(type, name)
Definition: spaces.h:772
#define DCHECK_PAGE_OFFSET(offset)
Definition: spaces.h:87
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
Definition: spaces.h:1133
#define T(name, string, precedence)
Definition: token.cc:25
#define V8_INLINE
Definition: v8config.h:306