// StoreBuffer constructor: member-initializer list (excerpt).
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      store_buffer_rebuilding_enabled_(false),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_sets_are_empty_(true) {}
DCHECK(initial_length > 0);

// StoreBuffer::StoreBufferOverflow: record the overflow in the isolate's
// counters.
isolate->counters()->store_buffer_overflows()->Increment();

// Skip entries that are equal to the previous one (duplicate removal).
if (current != previous) {
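The fragments above show the store buffer reacting to overflow (bumping a counter; the declarations at the end of this listing include StoreBufferOverflow(Isolate*)) and skipping duplicate entries. As a rough, standalone illustration of the general pattern only, a write-barrier buffer that records slot addresses and hands them to an overflow handler might look like the sketch below; every name in it (SimpleStoreBuffer, Record, on_overflow) is invented, and none of this is V8 code.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

// Invented illustration, not V8 code: a bounded buffer of slot addresses
// with a caller-supplied overflow handler.
class SimpleStoreBuffer {
 public:
  using Address = uintptr_t;
  using OverflowHandler = std::function<void(std::vector<Address>*)>;

  SimpleStoreBuffer(size_t capacity, OverflowHandler on_overflow)
      : capacity_(capacity), on_overflow_(std::move(on_overflow)) {
    entries_.reserve(capacity_);
  }

  // Called from a write barrier: remember the address of a slot that was
  // just written to.
  void Record(Address slot) {
    entries_.push_back(slot);
    if (entries_.size() >= capacity_) {
      overflows_++;
      // The handler typically deduplicates, filters, or drains the buffer.
      on_overflow_(&entries_);
    }
  }

  size_t overflows() const { return overflows_; }

 private:
  size_t capacity_;
  size_t overflows_ = 0;
  std::vector<Address> entries_;
  OverflowHandler on_overflow_;
};

An overflow handler corresponding to the duplicate-skipping fragment could, for example, sort the recorded addresses and drop runs of equal entries before copying them elsewhere.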
// Check whether any pointer-containing page currently has the
// scan-on-scavenge flag set.
bool page_has_scan_on_scavenge_flag = false;

PointerChunkIterator it(heap_);
while ((chunk = it.next()) != NULL) {
  // ...
  page_has_scan_on_scavenge_flag = true;
  // ...
}

if (page_has_scan_on_scavenge_flag) {
// Progressively finer sampling passes, used to find and exempt pages that
// occupy a large share of the store buffer (see ExemptPopularPages in the
// declarations below).
static const int kSampleFinenesses = 5;
static const struct Samples {
  int prime_sample_step;
  // ...
} samples[kSampleFinenesses] = {
  // ...
};
for (int i = 0; i < kSampleFinenesses; i++) {
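The sampling table and loop above suggest a sequence of progressively finer passes that continue until enough space has been reclaimed (ExemptPopularPages(prime_sample_step, threshold) and SpaceAvailable(space_needed) appear in the declarations at the end of this listing). A self-contained sketch of such an escalation loop, with illustrative thresholds and the two hooks supplied by the caller, could look like this:

#include <cstddef>
#include <functional>

// Sketch only: try sparse, cheap sampling first, then denser sampling with
// lower thresholds, until the caller reports that enough space is free.
void EnsureSpaceBySampling(
    size_t space_needed,
    const std::function<void(int prime_sample_step, int threshold)>&
        exempt_popular_pages,
    const std::function<bool(size_t)>& space_available) {
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
      {97, 64}, {23, 32}, {7, 16}, {3, 4}, {1, 0}};  // illustrative values only
  for (int i = 0; i < kSampleFinenesses; i++) {
    exempt_popular_pages(samples[i].prime_sample_step, samples[i].threshold);
    if (space_available(space_needed)) return;
  }
}

In this sketch the final {1, 0} pass samples every entry with a zero threshold, so it ends up exempting every page that still has buffered entries if the earlier, cheaper passes did not free enough room.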
// Sample the store buffer: count, per memory chunk, how many sampled entries
// point into it, caching the previously hit chunk to avoid repeated lookups.
PointerChunkIterator it(heap_);
while ((chunk = it.next()) != NULL) {
  // ...
}
bool created_new_scan_on_scavenge_pages = false;
// ...
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
  containing_chunk = previous_chunk;
}
// ...
// Chunks whose counter reaches the threshold are switched to scan-on-scavenge
// mode, so their entries can be dropped from the buffer.
if (old_counter >= threshold) {
  // ...
  created_new_scan_on_scavenge_pages = true;
}
// ...
previous_chunk = containing_chunk;

if (created_new_scan_on_scavenge_pages) {
  // ...
}

// The same previous-chunk cache is used when filtering the buffer:
if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
  containing_chunk = previous_chunk;
}
// ...
previous_chunk = containing_chunk;
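Putting the sampled-counting fragments together: walk the buffered addresses with a prime stride, keep a per-page hit count (with a cache of the previously hit page, as in the previous_chunk checks above), and report pages whose count reaches the threshold. The following standalone sketch shows that idea with invented types; real pages and chunks are stood in for by a fixed page size and a hash map.

#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Address = uintptr_t;
constexpr Address kSketchPageSize = 1u << 20;  // 1 MB pages, illustration only

inline Address PageOf(Address addr) { return addr & ~(kSketchPageSize - 1); }

// Returns the pages that contributed at least `threshold` sampled entries.
// Such "popular" pages would be switched to whole-page scanning and their
// entries dropped from the buffer. `prime_sample_step` must be positive.
std::unordered_set<Address> FindPopularPages(const std::vector<Address>& buffer,
                                             int prime_sample_step,
                                             int threshold) {
  std::unordered_map<Address, int> counts;
  std::unordered_set<Address> popular;
  Address previous_page = 0;
  int* previous_count = nullptr;
  for (size_t i = 0; i < buffer.size(); i += prime_sample_step) {
    Address page = PageOf(buffer[i]);
    // Cache the last page that was looked up, mirroring the previous_chunk
    // checks in the fragments above.
    if (previous_count == nullptr || page != previous_page) {
      previous_page = page;
      previous_count = &counts[page];  // unordered_map references are stable
    }
    if (++*previous_count >= threshold) popular.insert(page);
  }
  return popular;
}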
// StoreBuffer::PrepareForIteration: report whether any page will need a full
// scan-on-scavenge pass.
PointerChunkIterator it(heap_);
bool page_has_scan_on_scavenge_flag = false;
while ((chunk = it.next()) != NULL) {
  // ...
  page_has_scan_on_scavenge_flag = true;
  // ...
}

if (page_has_scan_on_scavenge_flag) {
  // ...
}
// ...
return page_has_scan_on_scavenge_flag;
void StoreBuffer::Clean() {
  // ...
}

// One-element cache: remembers the last store-buffer slot that matched a
// membership query (see CellIsInStoreBuffer below).
static Address* in_store_buffer_1_element_cache = NULL;
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  // Fast path: the same cell as last time.
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  // Slow path: linear scan of the buffer, newest entries first.
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  // ...and the same scan over the old part of the buffer:
  if (*current == cell_address) {
    in_store_buffer_1_element_cache = current;
    return true;
  }
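The CellIsInStoreBuffer fragment combines a one-element cache with a backwards linear scan. A minimal standalone version of the same lookup pattern (invented names, a plain std::vector instead of the real buffer) is shown below; caching a raw pointer like this assumes the underlying storage is not reallocated between queries.

#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Remembers the last matching entry to short-circuit repeated queries for the
// same address (valid only while the buffer's storage stays in place).
static const Address* last_hit = nullptr;

bool BufferContains(const std::vector<Address>& buffer, Address needle) {
  if (last_hit != nullptr && *last_hit == needle) return true;
  // Scan newest-to-oldest: recently recorded slots are the likeliest matches.
  for (auto it = buffer.rbegin(); it != buffer.rend(); ++it) {
    if (*it == needle) {
      last_hit = &*it;
      return true;
    }
  }
  return false;
}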
// Walk every pointer-sized slot of each FixedArray in the space.
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
  if (object->IsFixedArray()) {
    Address slot_address = object->address();
    Address end = object->address() + object->Size();

    while (slot_address < end) {
if (FLAG_verify_heap) {
  // ...
}

// Visit each slot in [start, end); slots that hold new-space pointers are
// passed to the callback, and the slot is re-read afterwards because the
// callback may have updated it.
for (Address slot_address = start; slot_address < end;
     slot_address += kPointerSize) {
  Object** slot = reinterpret_cast<Object**>(slot_address);
  // ...
  DCHECK(heap_object->IsHeapObject());
  // ...
  slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
  object = reinterpret_cast<Object*>(
      /* ...atomic re-load of *slot... */);
  // ...
}

// The same callback-then-reload pattern is used when draining the store
// buffer itself:
slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
object = reinterpret_cast<Object*>(
    /* ...atomic re-load of *slot... */);
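The region-scanning fragments follow a recurring shape: for each pointer-sized slot, check whether it points into new space, invoke the callback if so, then re-read the slot because the callback may have moved the object and updated the slot. A freestanding sketch of that shape, using a plain address range in place of V8's Heap::InNewSpace and invented callback types, could look like this:

#include <cstdint>

using Address = uintptr_t;
using SlotCallback = void (*)(Address* slot, Address target);
using RememberSlot = void (*)(Address slot_address);

// Does `value` fall inside the pretend "new space" [from, to)?
inline bool InPretendNewSpace(Address value, Address from, Address to) {
  return value >= from && value < to;
}

// Walk every pointer-sized slot in [start, end). Slots that point into new
// space are handed to `callback`; afterwards the slot is re-read, and if it
// still points into new space it is re-remembered via `remember_slot`.
void ScanRegionForNewSpacePointers(Address start, Address end,
                                   Address new_space_from, Address new_space_to,
                                   SlotCallback callback,
                                   RememberSlot remember_slot) {
  for (Address slot_address = start; slot_address < end;
       slot_address += sizeof(Address)) {
    Address* slot = reinterpret_cast<Address*>(slot_address);
    Address value = *slot;
    if (InPretendNewSpace(value, new_space_from, new_space_to)) {
      callback(slot, value);
      value = *slot;  // the callback may have rewritten the slot
      if (InPretendNewSpace(value, new_space_from, new_space_to)) {
        remember_slot(slot_address);
      }
    }
  }
}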
// After draining the buffer, re-scan every page that is in scan-on-scavenge
// mode.
if (some_pages_to_scan) {
  // ...
  PointerChunkIterator it(heap_);
  while ((chunk = it.next()) != NULL) {
    // ...
    // Large-object chunks hold a single array; scan it in full.
    DCHECK(array->IsFixedArray());
    // ...
    Page* page = reinterpret_cast<Page*>(chunk);
    // ...
    // Map pages: only the pointer fields of each map need scanning.
    for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
         heap_object = iterator.Next()) {
      // Skip free-space fillers.
      if (!heap_object->IsFiller()) {
        DCHECK(heap_object->IsMap());
        FindPointersToNewSpaceInRegion(
            /* ...map pointer-field range... */ slot_callback, clear_maps);
      }
    }
    // Ordinary pages: scan only objects that can contain heap pointers.
    for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
         heap_object = iterator.Next()) {
      if (!heap_object->MayContainRawValues()) {
        FindPointersToNewSpaceInRegion(
            /* ...start of object body... */
            heap_object->address() + heap_object->Size(), slot_callback,
            clear_maps);
      }
    }
  }
}
// Nothing to do if the fast part of the buffer is empty.
if (top == start_) return;
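The early return above appears to guard the step that drains the small, fast part of the buffer (between start_ and the current top) into the larger old buffer. The referenced declarations below (kHashSetLength, kHashSetLengthLog2, ClearFilteringHashSets, hash_sets_are_empty_) indicate that small filtering hash sets are used to suppress duplicates along the way. A standalone sketch of one such approximate duplicate filter, with invented names and sizes, is:

#include <cstdint>
#include <vector>

using Address = uintptr_t;
constexpr int kSketchHashSetLengthLog2 = 10;
constexpr int kSketchHashSetLength = 1 << kSketchHashSetLengthLog2;

// Approximate duplicate filter: a small direct-mapped hash set. Collisions
// overwrite older entries, so some duplicates can slip through; that is fine
// as long as the consumer of the old buffer tolerates duplicates.
struct DedupFilter {
  Address slots[kSketchHashSetLength] = {};

  // Returns true if `addr` was not seen recently (and records it).
  bool Insert(Address addr) {
    uintptr_t h = addr >> 3;  // drop alignment bits
    h = (h ^ (h >> kSketchHashSetLengthLog2)) & (kSketchHashSetLength - 1);
    if (slots[h] == addr) return false;  // recent duplicate
    slots[h] = addr;
    return true;
  }
};

// Drain the fast buffer into the old buffer, skipping recent duplicates.
void DrainFastBuffer(std::vector<Address>* fast_buffer,
                     std::vector<Address>* old_buffer, DedupFilter* filter) {
  if (fast_buffer->empty()) return;  // mirrors `if (top == start_) return;`
  for (Address addr : *fast_buffer) {
    if (filter->Insert(addr)) old_buffer->push_back(addr);
  }
  fast_buffer->clear();
}

Treat this purely as an illustration of the duplicate-suppression idea; the real store buffer's filtering is more involved than a single direct-mapped set.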
// Declarations referenced by the listing above:
static intptr_t CommitPageSize()
bool Commit(void* address, size_t size, bool is_executable)
static const int kHeaderSize
OldSpace* old_pointer_space()
LargeObjectSpace* lo_space()
void public_set_store_buffer_top(Address* top)
bool InNewSpace(Object* object)
StoreBuffer* store_buffer()
bool InFromSpace(Object* object)
OldSpace* old_data_space()
MarkCompactCollector* mark_compact_collector()
static const int kPointerFieldsEndOffset
static const int kPointerFieldsBeginOffset
int SweepInParallel(PagedSpace* space, int required_freed_bytes)
void EnsureSweepingCompleted()
bool Contains(Address addr)
int store_buffer_counter()
void set_scan_on_scavenge(bool scan)
void set_store_buffer_counter(int counter)
static MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr)
static const int kPageSize
static const intptr_t kPageAlignmentMask
static const int kHashSetLength
void EnterDirectlyIntoStoreBuffer(Address addr)
void FindPointersToNewSpaceInRegion(Address start, Address end, ObjectSlotCallback slot_callback, bool clear_maps)
Address* old_reserved_limit_
static const int kStoreBufferOverflowBit
void ExemptPopularPages(int prime_sample_step, int threshold)
bool old_buffer_is_sorted_
void IteratePointersToNewSpace(ObjectSlotCallback callback)
StoreBufferCallback callback_
bool may_move_store_buffer_entries_
base::VirtualMemory* virtual_memory_
void ClearFilteringHashSets()
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback, bool clear_maps)
bool SpaceAvailable(intptr_t space_needed)
static void StoreBufferOverflow(Isolate* isolate)
static const int kOldStoreBufferLength
void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback)
bool PrepareForIteration()
void EnsureSpace(intptr_t space_needed)
void ClearDeadObject(HeapObject* object)
bool old_buffer_is_filtered_
bool hash_sets_are_empty_
static const int kStoreBufferSize
static const int kHashSetLengthLog2
base::VirtualMemory* old_virtual_memory_
#define DCHECK(condition)
Atomic8 NoBarrier_Load(volatile const Atomic8* ptr)
void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to)
const int kPointerSizeLog2
kStoreBufferScanningPageEvent
kStoreBufferStartScanningPagesEvent
const bool FLAG_enable_slow_asserts
static void RoundUp(Vector<char> buffer, int* length, int* decimal_point)