profile-generator.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/profile-generator-inl.h"

#include "src/compiler.h"
#include "src/debug.h"
#include "src/global-handles.h"
#include "src/sampler.h"
#include "src/scopeinfo.h"
#include "src/unicode.h"
#include "src/zone-inl.h"

namespace v8 {
namespace internal {


bool StringsStorage::StringsMatch(void* key1, void* key2) {
  return strcmp(reinterpret_cast<char*>(key1),
                reinterpret_cast<char*>(key2)) == 0;
}


StringsStorage::StringsStorage(Heap* heap)
    : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}


StringsStorage::~StringsStorage() {
  for (HashMap::Entry* p = names_.Start();
       p != NULL;
       p = names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }
}


const char* StringsStorage::GetCopy(const char* src) {
  int len = static_cast<int>(strlen(src));
  HashMap::Entry* entry = GetEntry(src, len);
  if (entry->value == NULL) {
    Vector<char> dst = Vector<char>::New(len + 1);
    StrNCpy(dst, src, len);
    dst[len] = '\0';
    entry->key = dst.start();
    entry->value = entry->key;
  }
  return reinterpret_cast<const char*>(entry->value);
}
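
// Note on GetCopy() above: names_ maps each stored C string to itself, so
// repeated calls with equal contents return one interned pointer. A minimal
// usage sketch (illustrative only; assumes a live Heap* named heap):
//
//   StringsStorage storage(heap);
//   const char* a = storage.GetCopy("tick");
//   const char* b = storage.GetCopy("tick");
//   DCHECK(a == b);  // equal contents intern to the same pointer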


const char* StringsStorage::GetFormatted(const char* format, ...) {
  va_list args;
  va_start(args, format);
  const char* result = GetVFormatted(format, args);
  va_end(args);
  return result;
}


const char* StringsStorage::AddOrDisposeString(char* str, int len) {
  HashMap::Entry* entry = GetEntry(str, len);
  if (entry->value == NULL) {
    // New entry added.
    entry->key = str;
    entry->value = str;
  } else {
    DeleteArray(str);
  }
  return reinterpret_cast<const char*>(entry->value);
}
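
// Note on AddOrDisposeString() above: it takes ownership of |str|. If an
// equal string is already interned, the argument is freed with DeleteArray
// and the existing copy is returned instead.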


const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
  Vector<char> str = Vector<char>::New(1024);
  int len = VSNPrintF(str, format, args);
  if (len == -1) {
    DeleteArray(str.start());
    return GetCopy(format);
  }
  return AddOrDisposeString(str.start(), len);
}
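
// Note on GetVFormatted() above: output longer than the fixed 1024-byte
// buffer makes VSNPrintF return -1, in which case the format string itself
// is interned as a fallback, so callers always receive a valid string.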


const char* StringsStorage::GetName(Name* name) {
  if (name->IsString()) {
    String* str = String::cast(name);
    int length = Min(kMaxNameSize, str->length());
    int actual_length = 0;
    SmartArrayPointer<char> data =
        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
                       &actual_length);
    return AddOrDisposeString(data.Detach(), actual_length);
  } else if (name->IsSymbol()) {
    return "<symbol>";
  }
  return "";
}


const char* StringsStorage::GetName(int index) {
  return GetFormatted("%d", index);
}


const char* StringsStorage::GetFunctionName(Name* name) {
  return GetName(name);
}


const char* StringsStorage::GetFunctionName(const char* name) {
  return GetCopy(name);
}


size_t StringsStorage::GetUsedMemorySize() const {
  size_t size = sizeof(*this);
  size += sizeof(HashMap::Entry) * names_.capacity();
  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
    size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
  }
  return size;
}


HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
  uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
  return names_.Lookup(const_cast<char*>(str), hash, true);
}
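
// Note on GetEntry() above: keys are hashed with the heap's hash seed
// captured at construction time, so hash values are stable for the lifetime
// of one StringsStorage instance but not meaningful outside it.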


const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";


CodeEntry::~CodeEntry() {
  delete no_frame_ranges_;
}


uint32_t CodeEntry::GetCallUid() const {
  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
  if (shared_id_ != 0) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}
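
// Note on GetCallUid() above: because all names are interned in
// StringsStorage, hashing the raw name_prefix_/name_/resource_name_
// pointers is equivalent to hashing the string contents, since equal
// strings share a single interned pointer.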


bool CodeEntry::IsSameAs(CodeEntry* entry) const {
  return this == entry
      || (tag_ == entry->tag_
          && shared_id_ == entry->shared_id_
          && (shared_id_ != 0
              || (name_prefix_ == entry->name_prefix_
                  && name_ == entry->name_
                  && resource_name_ == entry->resource_name_
                  && line_number_ == entry->line_number_)));
}


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  tag_ = Logger::BUILTIN_TAG;
  builtin_id_ = id;
}


ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}


ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (map_entry->value == NULL) {
    // New node added.
    ProfileNode* new_node = new ProfileNode(tree_, entry);
    map_entry->value = new_node;
    children_list_.Add(new_node);
  }
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}
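
// Note on FindOrAddChild() above: children_ is a HashMap keyed by
// CodeEntry (via CodeEntryHash), so each distinct entry maps to exactly
// one child node, while children_list_ preserves insertion order for
// iteration and printing.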


void ProfileNode::Print(int indent) {
  base::OS::Print("%5u %*s %s%s %d #%d %s", self_ticks_, indent, "",
                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
                  id(), entry_->bailout_reason());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};


ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)) {
}


ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start() + path.length() - 1;
       entry != path.start() - 1;
       --entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
  return node;
}
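
// Note on AddPathFromEnd() above: a sampled path is stored top-frame-first
// (see RecordTickSample), so walking it from the end places the outermost
// caller directly under the root and the sampled function at the deepest
// node. For a sample [foo, bar, main] the tree grows
// (root) -> main -> bar -> foo, and foo's node receives the self tick.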


void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start();
       entry != path.start() + path.length();
       ++entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
}


struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
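
// Note on TraverseDepthFirst() above: the explicit Position stack replaces
// recursion. A node stays on the stack until all of its children have been
// visited, giving the post-order guarantee that DeleteNodesCallback relies
// on in ~ProfileTree: children are deleted strictly before their parent.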


CpuProfile::CpuProfile(const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(base::TimeTicks::HighResolutionNow()) {
}


void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const Vector<CodeEntry*>& path) {
  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
  if (record_samples_) {
    timestamps_.Add(timestamp);
    samples_.Add(top_frame_node);
  }
}


void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = base::TimeTicks::HighResolutionNow();
}


void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}


CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;


void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}


void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
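
// Note on DeleteAllCoveredCode() above: the scan walks backwards from
// end - 1 via FindGreatestLessThan, collecting every entry whose
// [start2, end2) range overlaps [start, end); removal happens in a second
// pass so the tree is not mutated while it is being traversed.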


CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
  CodeTree::Locator locator;
  if (tree_.FindGreatestLessThan(addr, &locator)) {
    // locator.key() <= addr. Need to check that addr is within entry.
    const CodeEntryInfo& entry = locator.value();
    if (addr < (locator.key() + entry.size)) {
      if (start) {
        *start = locator.key();
      }
      return entry.entry;
    }
  }
  return NULL;
}
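
// Note on FindEntry() above: FindGreatestLessThan locates the entry with
// the highest start address <= addr; the size check then confirms addr
// actually falls inside that code object rather than in a gap after it.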


int CodeMap::GetSharedId(Address addr) {
  CodeTree::Locator locator;
  // For shared function entries, 'size' field is used to store their IDs.
  if (tree_.Find(addr, &locator)) {
    const CodeEntryInfo& entry = locator.value();
    DCHECK(entry.entry == kSharedFunctionCodeEntry);
    return entry.size;
  } else {
    tree_.Insert(addr, &locator);
    int id = next_shared_id_++;
    locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
    return id;
  }
}
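
// Note on GetSharedId() above: shared function infos live in the same tree
// as code entries, marked with the kSharedFunctionCodeEntry sentinel and
// with the 'size' field reused to hold the synthetic ID. This avoids a
// second map at the cost of the DCHECK guarding the sentinel invariant.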


void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  AddCode(to, entry.entry, entry.size);
}


void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  // For shared function entries, 'size' field is used to store their IDs.
  if (value.entry == kSharedFunctionCodeEntry) {
    base::OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
  } else {
    base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
  }
}


void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}


CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      current_profiles_semaphore_(1) {
}


static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}


bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it to collect a sample.
      return true;
    }
  }
  current_profiles_.Add(new CpuProfile(title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}
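
// Note on StartProfiling() above: current_profiles_semaphore_ is created
// with a count of 1 and used as a binary mutex. The VM thread takes it
// here and in StopProfiling, while the sampling side takes it in
// AddPathToCurrentProfiles, so list mutation never races with tick
// recording.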


CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relative to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(timestamp, path);
  }
  current_profiles_semaphore_.Signal();
}


CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag,
    const char* name,
    const char* name_prefix,
    const char* resource_name,
    int line_number,
    int column_number) {
  CodeEntry* code_entry = new CodeEntry(tag,
                                        name,
                                        name_prefix,
                                        resource_name,
                                        line_number,
                                        column_number);
  code_entries_.Add(code_entry);
  return code_entry;
}


const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";


ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}


void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As the actual number of decoded code entries may vary, initialize
  // the entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));
  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use PC when in external callback code, as it can point
      // inside the callback's code, and we will erroneously report
      // that a callback calls itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      Address start;
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
      // If pc is in the function code before it has set up its stack frame
      // or after the frame was destroyed, SafeStackFrameIterator incorrectly
      // thinks that ebp contains the return address of the current function
      // and skips the caller's frame. Check for this case and just skip
      // such samples.
      if (pc_entry) {
        List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
        if (ranges) {
          Code* code = Code::cast(HeapObject::FromAddress(start));
          int pc_offset = static_cast<int>(
              sample.pc - code->instruction_start());
          for (int i = 0; i < ranges->length(); i++) {
            OffsetRange& range = ranges->at(i);
            if (range.from <= pc_offset && pc_offset < range.to) {
              return;
            }
          }
        }
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
            pc_entry->builtin_id() == Builtins::kFunctionApply) {
          // When the current function is the FunctionCall or FunctionApply
          // builtin, the top frame is either the frame of the calling JS
          // function or an internal frame. In the latter case we know the
          // caller for sure, but in the former case we don't, so we simply
          // replace the frame with an 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    for (const Address* stack_pos = sample.stack,
           *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry++ = code_map_.FindEntry(*stack_pos);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries);
}
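
// Note on RecordTickSample() above: the resulting entries vector is laid
// out as [pc or external callback entry][optional unresolved marker]
// [stack frame entries...], with NULL holes wherever an address could not
// be symbolized; AddPathFromEnd skips NULL entries when growing the tree.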


CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return gc_entry_;
    case JS:
    case COMPILER:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return program_entry_;
    case IDLE:
      return idle_entry_;
    default: return NULL;
  }
}

} }  // namespace v8::internal