store-buffer.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {}


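// Reserves and partially commits the virtual memory for the new-entry buffer
// and the old (promoted) entry buffer, and allocates the filtering hash sets.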
void StoreBuffer::SetUp() {
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
  DCHECK(initial_length > 0);
  DCHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                    (old_limit_ - old_start_) * kPointerSize,
                                    false));

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  USE(vm_limit);
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


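// Releases the memory reservations and the filtering hash sets and resets all
// buffer pointers.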
void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


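// Called when the new-entry buffer overflows: moves its contents into the old
// buffer and bumps the overflow counter.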
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  DCHECK(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}


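// Makes room for at least |space_needed| more entries in the old buffer by
// growing the committed region, compacting, filtering entries on
// scan-on-scavenge pages and, as a last resort, exempting popular pages.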
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize, false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
      {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
      {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
      {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
      {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
      {1, 0}};
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


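// Drops every old-buffer entry that lies on a page with the given flag set.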
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


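// Sorts the old buffer and removes duplicates and entries that no longer
// point at new space.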
void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


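// Compacts the buffer and filters out entries on scan-on-scavenge pages.
// Returns true if such pages exist and therefore have to be scanned as well.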
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}


static Address* in_store_buffer_1_element_cache = NULL;


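// Debug-only check: linearly searches both buffers for the given slot
// address; the one-element cache speeds up repeated queries for the same slot.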
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


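// Zeroes the two hash sets that Compact() uses to filter out duplicates.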
void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


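// Invalidates the filtering hash sets and records that a GC is in progress.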
void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


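// Heap verification only: walks the fixed arrays in the given large object
// space and evaluates Heap::InNewSpace() on every slot.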
#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy predicate point into
        // the active semispace.
        Object* object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


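// Visits every slot in [start, end) that points into new space, invoking the
// callback and re-recording the slot if it still points there afterwards.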
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start; slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    if (heap_->InNewSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      DCHECK(heap_object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(heap_object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
      object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InNewSpace(object)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


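// Replays the old-buffer entries: for each recorded slot that still points
// into from-space the callback is invoked, and the slot is re-entered into
// the store buffer if it still points into new space afterwards.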
void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                               bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        if (heap_->InNewSpace(object)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that callback will rebuild the store buffer thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer. We must scan them now. As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer. If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer. If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSwept());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                DCHECK(heap_object->IsMap());
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + Map::kPointerFieldsBeginOffset,
                    heap_object->address() + Map::kPointerFieldsEndOffset,
                    slot_callback, clear_maps);
              }
            }
          } else {
            if (!page->SweepingCompleted()) {
              heap_->mark_compact_collector()->SweepInParallel(page, owner);
              if (!page->SweepingCompleted()) {
                // We were not able to sweep that page, i.e., a concurrent
                // sweeper thread currently owns this page.
                // TODO(hpayer): This may introduce a huge pause here. We
                // just care about finish sweeping of the scan on scavenge page.
                heap_->mark_compact_collector()->EnsureSweepingCompleted();
              }
            }
            CHECK(page->owner() == heap_->old_pointer_space());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We iterate over objects that contain new space pointers only.
              if (!heap_object->MayContainRawValues()) {
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + HeapObject::kHeaderSize,
                    heap_object->address() + heap_object->Size(), slot_callback,
                    clear_maps);
              }
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


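// Moves the entries accumulated between start_ and the current top into the
// old buffer, using the two filtering hash sets to cheaply drop most
// duplicates.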
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates. In the interest of speed this is a lossy operation. Some
  // duplicates will remain. We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->cell_space()->Contains(*current));
    DCHECK(!heap_->code_space()->Contains(*current));
    DCHECK(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries. This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}
}
}  // namespace v8::internal