V8 Project
serialize.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/v8.h"
6 
7 #include "src/accessors.h"
8 #include "src/api.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/global-handles.h"
15 #include "src/ic/ic.h"
16 #include "src/ic/stub-cache.h"
17 #include "src/natives.h"
18 #include "src/objects.h"
19 #include "src/runtime/runtime.h"
20 #include "src/serialize.h"
21 #include "src/snapshot.h"
23 #include "src/v8threads.h"
24 #include "src/version.h"
25 
26 namespace v8 {
27 namespace internal {
28 
29 
30 // -----------------------------------------------------------------------------
31 // Coding of external references.
32 
33 // The encoding of an external reference. The type is in the high word.
34 // The id is in the low word.
36  return static_cast<uint32_t>(type) << 16 | id;
37 }
38 
39 
40 static int* GetInternalPointer(StatsCounter* counter) {
41  // All counters refer to dummy_counter, if deserializing happens without
42  // setting up counters.
43  static int dummy_counter = 0;
44  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
45 }
46 
47 
49  ExternalReferenceTable* external_reference_table =
50  isolate->external_reference_table();
51  if (external_reference_table == NULL) {
52  external_reference_table = new ExternalReferenceTable(isolate);
53  isolate->set_external_reference_table(external_reference_table);
54  }
55  return external_reference_table;
56 }
57 
58 
60  uint16_t id,
61  const char* name,
62  Isolate* isolate) {
64  switch (type) {
65  case C_BUILTIN: {
66  ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
67  address = ref.address();
68  break;
69  }
70  case BUILTIN: {
71  ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
72  address = ref.address();
73  break;
74  }
75  case RUNTIME_FUNCTION: {
76  ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
77  address = ref.address();
78  break;
79  }
80  case IC_UTILITY: {
81  ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
82  isolate);
83  address = ref.address();
84  break;
85  }
86  default:
87  UNREACHABLE();
88  return;
89  }
90  Add(address, type, id, name);
91 }
92 
93 
95  TypeCode type,
96  uint16_t id,
97  const char* name) {
100  entry.address = address;
101  entry.code = EncodeExternal(type, id);
102  entry.name = name;
103  DCHECK_NE(0, entry.code);
104  // Assert that the code is added in ascending order to rule out duplicates.
105  DCHECK((size() == 0) || (code(size() - 1) < entry.code));
106  refs_.Add(entry);
107  if (id > max_id_[type]) max_id_[type] = id;
108 }
109 
110 
112  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
113  max_id_[type_code] = 0;
114  }
115 
116  // Miscellaneous
117  Add(ExternalReference::roots_array_start(isolate).address(),
118  "Heap::roots_array_start()");
119  Add(ExternalReference::address_of_stack_limit(isolate).address(),
120  "StackGuard::address_of_jslimit()");
121  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
122  "StackGuard::address_of_real_jslimit()");
123  Add(ExternalReference::new_space_start(isolate).address(),
124  "Heap::NewSpaceStart()");
125  Add(ExternalReference::new_space_mask(isolate).address(),
126  "Heap::NewSpaceMask()");
127  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
128  "Heap::NewSpaceAllocationLimitAddress()");
129  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
130  "Heap::NewSpaceAllocationTopAddress()");
131  Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()");
132  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
133  "Debug::step_in_fp_addr()");
134  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
135  "mod_two_doubles");
136  // Keyed lookup cache.
137  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
138  "KeyedLookupCache::keys()");
139  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
140  "KeyedLookupCache::field_offsets()");
141  Add(ExternalReference::handle_scope_next_address(isolate).address(),
142  "HandleScope::next");
143  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
144  "HandleScope::limit");
145  Add(ExternalReference::handle_scope_level_address(isolate).address(),
146  "HandleScope::level");
147  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
148  "Deoptimizer::New()");
149  Add(ExternalReference::compute_output_frames_function(isolate).address(),
150  "Deoptimizer::ComputeOutputFrames()");
151  Add(ExternalReference::address_of_min_int().address(),
152  "LDoubleConstant::min_int");
153  Add(ExternalReference::address_of_one_half().address(),
154  "LDoubleConstant::one_half");
155  Add(ExternalReference::isolate_address(isolate).address(), "isolate");
156  Add(ExternalReference::address_of_negative_infinity().address(),
157  "LDoubleConstant::negative_infinity");
158  Add(ExternalReference::power_double_double_function(isolate).address(),
159  "power_double_double_function");
160  Add(ExternalReference::power_double_int_function(isolate).address(),
161  "power_double_int_function");
162  Add(ExternalReference::math_log_double_function(isolate).address(),
163  "std::log");
164  Add(ExternalReference::store_buffer_top(isolate).address(),
165  "store_buffer_top");
166  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
167  "canonical_nan");
168  Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
169  Add(ExternalReference::get_date_field_function(isolate).address(),
170  "JSDate::GetField");
171  Add(ExternalReference::date_cache_stamp(isolate).address(),
172  "date_cache_stamp");
173  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
174  "address_of_pending_message_obj");
175  Add(ExternalReference::address_of_has_pending_message(isolate).address(),
176  "address_of_has_pending_message");
177  Add(ExternalReference::address_of_pending_message_script(isolate).address(),
178  "pending_message_script");
179  Add(ExternalReference::get_make_code_young_function(isolate).address(),
180  "Code::MakeCodeYoung");
181  Add(ExternalReference::cpu_features().address(), "cpu_features");
182  Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
183  "Runtime::AllocateInNewSpace");
184  Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
185  "Runtime::AllocateInTargetSpace");
186  Add(ExternalReference::old_pointer_space_allocation_top_address(isolate)
187  .address(),
188  "Heap::OldPointerSpaceAllocationTopAddress");
189  Add(ExternalReference::old_pointer_space_allocation_limit_address(isolate)
190  .address(),
191  "Heap::OldPointerSpaceAllocationLimitAddress");
192  Add(ExternalReference::old_data_space_allocation_top_address(isolate)
193  .address(),
194  "Heap::OldDataSpaceAllocationTopAddress");
195  Add(ExternalReference::old_data_space_allocation_limit_address(isolate)
196  .address(),
197  "Heap::OldDataSpaceAllocationLimitAddress");
198  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
199  "Heap::allocation_sites_list_address()");
200  Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
201  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
202  "Code::MarkCodeAsExecuted");
203  Add(ExternalReference::is_profiling_address(isolate).address(),
204  "CpuProfiler::is_profiling");
205  Add(ExternalReference::scheduled_exception_address(isolate).address(),
206  "Isolate::scheduled_exception");
207  Add(ExternalReference::invoke_function_callback(isolate).address(),
208  "InvokeFunctionCallback");
209  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
210  "InvokeAccessorGetterCallback");
211  Add(ExternalReference::flush_icache_function(isolate).address(),
212  "CpuFeatures::FlushICache");
213  Add(ExternalReference::log_enter_external_function(isolate).address(),
214  "Logger::EnterExternal");
215  Add(ExternalReference::log_leave_external_function(isolate).address(),
216  "Logger::LeaveExternal");
217  Add(ExternalReference::address_of_minus_one_half().address(),
218  "double_constants.minus_one_half");
219  Add(ExternalReference::stress_deopt_count(isolate).address(),
220  "Isolate::stress_deopt_count_address()");
221  Add(ExternalReference::incremental_marking_record_write_function(isolate)
222  .address(),
223  "IncrementalMarking::RecordWriteFromCode");
224 
225  // Debug addresses
226  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
227  "Debug::after_break_target_address()");
228  Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
229  .address(),
230  "Debug::restarter_frame_function_pointer_address()");
231  Add(ExternalReference::debug_is_active_address(isolate).address(),
232  "Debug::is_active_address()");
233 
234 #ifndef V8_INTERPRETED_REGEXP
235  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
236  "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
237  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
238  "RegExpMacroAssembler*::CheckStackGuardState()");
239  Add(ExternalReference::re_grow_stack(isolate).address(),
240  "NativeRegExpMacroAssembler::GrowStack()");
241  Add(ExternalReference::re_word_character_map().address(),
242  "NativeRegExpMacroAssembler::word_character_map");
243  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
244  "RegExpStack::limit_address()");
245  Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
246  .address(),
247  "RegExpStack::memory_address()");
248  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
249  "RegExpStack::memory_size()");
250  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
251  "OffsetsVector::static_offsets_vector");
252 #endif // V8_INTERPRETED_REGEXP
253 
254  // The following populates all of the different type of external references
255  // into the ExternalReferenceTable.
256  //
257  // NOTE: This function was originally 100k of code. It has since been
258  // rewritten to be mostly table driven, as the callback macro style tends to
259  // very easily cause code bloat. Please be careful in the future when adding
260  // new references.
261 
262  struct RefTableEntry {
263  TypeCode type;
264  uint16_t id;
265  const char* name;
266  };
267 
268  static const RefTableEntry ref_table[] = {
269  // Builtins
270 #define DEF_ENTRY_C(name, ignored) \
271  { C_BUILTIN, \
272  Builtins::c_##name, \
273  "Builtins::" #name },
274 
276 #undef DEF_ENTRY_C
277 
278 #define DEF_ENTRY_C(name, ignored) \
279  { BUILTIN, \
280  Builtins::k##name, \
281  "Builtins::" #name },
282 #define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)
283 
287 #undef DEF_ENTRY_C
288 #undef DEF_ENTRY_A
289 
290  // Runtime functions
291 #define RUNTIME_ENTRY(name, nargs, ressize) \
292  { RUNTIME_FUNCTION, \
293  Runtime::k##name, \
294  "Runtime::" #name },
295 
298 #undef RUNTIME_ENTRY
299 
300 #define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
301  { RUNTIME_FUNCTION, \
302  Runtime::kInlineOptimized##name, \
303  "Runtime::" #name },
304 
306 #undef INLINE_OPTIMIZED_ENTRY
307 
308  // IC utilities
309 #define IC_ENTRY(name) \
310  { IC_UTILITY, \
311  IC::k##name, \
312  "IC::" #name },
313 
315 #undef IC_ENTRY
316  }; // end of ref_table[].
317 
318  for (size_t i = 0; i < arraysize(ref_table); ++i) {
319  AddFromId(ref_table[i].type,
320  ref_table[i].id,
321  ref_table[i].name,
322  isolate);
323  }
324 
325  // Stat counters
326  struct StatsRefTableEntry {
327  StatsCounter* (Counters::*counter)();
328  uint16_t id;
329  const char* name;
330  };
331 
332  const StatsRefTableEntry stats_ref_table[] = {
333 #define COUNTER_ENTRY(name, caption) \
334  { &Counters::name, \
335  Counters::k_##name, \
336  "Counters::" #name },
337 
340 #undef COUNTER_ENTRY
341  }; // end of stats_ref_table[].
342 
343  Counters* counters = isolate->counters();
344  for (size_t i = 0; i < arraysize(stats_ref_table); ++i) {
345  Add(reinterpret_cast<Address>(GetInternalPointer(
346  (counters->*(stats_ref_table[i].counter))())),
348  stats_ref_table[i].id,
349  stats_ref_table[i].name);
350  }
351 
352  // Top addresses
353 
354  const char* AddressNames[] = {
355 #define BUILD_NAME_LITERAL(CamelName, hacker_name) \
356  "Isolate::" #hacker_name "_address",
358  NULL
359 #undef BUILD_NAME_LITERAL
360  };
361 
362  for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
364  TOP_ADDRESS, i, AddressNames[i]);
365  }
366 
367  // Accessors
368 #define ACCESSOR_INFO_DECLARATION(name) \
369  Add(FUNCTION_ADDR(&Accessors::name##Getter), \
370  ACCESSOR, \
371  Accessors::k##name##Getter, \
372  "Accessors::" #name "Getter"); \
373  Add(FUNCTION_ADDR(&Accessors::name##Setter), \
374  ACCESSOR, \
375  Accessors::k##name##Setter, \
376  "Accessors::" #name "Setter");
378 #undef ACCESSOR_INFO_DECLARATION
379 
380  StubCache* stub_cache = isolate->stub_cache();
381 
382  // Stub cache tables
383  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
384  STUB_CACHE_TABLE, 1, "StubCache::primary_->key");
385  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
386  STUB_CACHE_TABLE, 2, "StubCache::primary_->value");
387  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
388  STUB_CACHE_TABLE, 3, "StubCache::primary_->map");
389  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
390  STUB_CACHE_TABLE, 4, "StubCache::secondary_->key");
391  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
392  STUB_CACHE_TABLE, 5, "StubCache::secondary_->value");
393  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
394  STUB_CACHE_TABLE, 6, "StubCache::secondary_->map");
395 
396  // Runtime entries
397  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
398  RUNTIME_ENTRY, 1, "HandleScope::DeleteExtensions");
399  Add(ExternalReference::incremental_marking_record_write_function(isolate)
400  .address(),
401  RUNTIME_ENTRY, 2, "IncrementalMarking::RecordWrite");
402  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
403  RUNTIME_ENTRY, 3, "StoreBuffer::StoreBufferOverflow");
404 
405  // Add a small set of deopt entry addresses to encoder without generating the
406  // deopt table code, which isn't possible at deserialization time.
407  HandleScope scope(isolate);
408  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
410  isolate,
411  entry,
414  Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
415  }
416 }
417 
418 
420  : encodings_(HashMap::PointersMatch),
421  isolate_(isolate) {
422  ExternalReferenceTable* external_references =
424  for (int i = 0; i < external_references->size(); ++i) {
425  Put(external_references->address(i), i);
426  }
427 }
428 
429 
431  int index = IndexOf(key);
432  DCHECK(key == NULL || index >= 0);
433  return index >= 0 ?
435 }
436 
437 
439  int index = IndexOf(key);
440  return index >= 0 ? ExternalReferenceTable::instance(isolate_)->name(index)
441  : "<unknown>";
442 }
443 
444 
446  if (key == NULL) return -1;
447  HashMap::Entry* entry =
448  const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
449  return entry == NULL
450  ? -1
451  : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
452 }
453 
454 
456  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
457  entry->value = reinterpret_cast<void*>(index);
458 }
459 
460 
462  : encodings_(NewArray<Address*>(kTypeCodeCount)),
463  isolate_(isolate) {
464  ExternalReferenceTable* external_references =
466  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
467  int max = external_references->max_id(type) + 1;
468  encodings_[type] = NewArray<Address>(max + 1);
469  }
470  for (int i = 0; i < external_references->size(); ++i) {
471  Put(external_references->code(i), external_references->address(i));
472  }
473 }
474 
475 
477  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
478  DeleteArray(encodings_[type]);
479  }
481 }
482 
483 
485  public:
486  explicit CodeAddressMap(Isolate* isolate)
487  : isolate_(isolate) {
488  isolate->logger()->addCodeEventListener(this);
489  }
490 
491  virtual ~CodeAddressMap() {
493  }
494 
495  virtual void CodeMoveEvent(Address from, Address to) {
497  }
498 
  // Deliberate no-op: this listener only maintains an address-to-name map,
  // and disabling optimization changes neither of those for existing code.
  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
  }
501 
502  virtual void CodeDeleteEvent(Address from) {
504  }
505 
  // Returns the name recorded for a code object's address, or NULL if no
  // name was logged for it.
  const char* Lookup(Address address) {
    return address_to_name_map_.Lookup(address);
  }
509 
510  private:
511  class NameMap {
512  public:
513  NameMap() : impl_(HashMap::PointersMatch) {}
514 
516  for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
517  DeleteArray(static_cast<const char*>(p->value));
518  }
519  }
520 
521  void Insert(Address code_address, const char* name, int name_size) {
522  HashMap::Entry* entry = FindOrCreateEntry(code_address);
523  if (entry->value == NULL) {
524  entry->value = CopyName(name, name_size);
525  }
526  }
527 
528  const char* Lookup(Address code_address) {
529  HashMap::Entry* entry = FindEntry(code_address);
530  return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
531  }
532 
533  void Remove(Address code_address) {
534  HashMap::Entry* entry = FindEntry(code_address);
535  if (entry != NULL) {
536  DeleteArray(static_cast<char*>(entry->value));
537  RemoveEntry(entry);
538  }
539  }
540 
    // Transfers the name recorded for |from| to |to| when a code object is
    // moved by the GC. The source entry must exist and the destination must
    // not already carry a name (checked in debug builds only).
    void Move(Address from, Address to) {
      if (from == to) return;
      HashMap::Entry* from_entry = FindEntry(from);
      DCHECK(from_entry != NULL);
      // Save the value first: the entry is removed before the new one is
      // created — NOTE(review): presumably because inserting could rehash
      // and invalidate |from_entry|; confirm against HashMap's contract.
      void* value = from_entry->value;
      RemoveEntry(from_entry);
      HashMap::Entry* to_entry = FindOrCreateEntry(to);
      DCHECK(to_entry->value == NULL);
      to_entry->value = value;
    }
551 
552  private:
553  static char* CopyName(const char* name, int name_size) {
554  char* result = NewArray<char>(name_size + 1);
555  for (int i = 0; i < name_size; ++i) {
556  char c = name[i];
557  if (c == '\0') c = ' ';
558  result[i] = c;
559  }
560  result[name_size] = '\0';
561  return result;
562  }
563 
    // Looks up |code_address|, inserting a fresh entry (with NULL value)
    // when it is not yet present.
    HashMap::Entry* FindOrCreateEntry(Address code_address) {
      return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
    }
567 
568  HashMap::Entry* FindEntry(Address code_address) {
569  return impl_.Lookup(code_address,
570  ComputePointerHash(code_address),
571  false);
572  }
573 
    // Unlinks |entry| from the map. Freeing the entry's value is the
    // caller's responsibility (see Remove() and Move()).
    void RemoveEntry(HashMap::Entry* entry) {
      impl_.Remove(entry->key, entry->hash);
    }
577 
579 
581  };
582 
583  virtual void LogRecordedBuffer(Code* code,
585  const char* name,
586  int length) {
587  address_to_name_map_.Insert(code->address(), name, length);
588  }
589 
592 };
593 
594 
595 Deserializer::Deserializer(SnapshotByteSource* source)
596  : isolate_(NULL),
597  attached_objects_(NULL),
598  source_(source),
599  external_reference_decoder_(NULL),
600  deserialized_large_objects_(0) {
601  for (int i = 0; i < kNumberOfSpaces; i++) {
603  }
604 }
605 
606 
608  PageIterator it(isolate_->heap()->code_space());
609  while (it.has_next()) {
610  Page* p = it.next();
612  }
613 }
614 
615 
617  isolate_ = isolate;
618  DCHECK(isolate_ != NULL);
620  // No active threads.
622  // No active handles.
623  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
626  isolate_->heap()->IterateSmiRoots(this);
630 
632  isolate_->heap()->undefined_value());
634  isolate_->heap()->undefined_value());
635 
636  // The allocation site list is build during root iteration, but if no sites
637  // were encountered then it needs to be initialized to undefined.
640  isolate_->heap()->undefined_value());
641  }
642 
644 
645  // Update data pointers to the external strings containing natives sources.
646  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
647  Object* source = isolate_->heap()->natives_source_cache()->get(i);
648  if (!source->IsUndefined()) {
649  ExternalOneByteString::cast(source)->update_data_cache();
650  }
651  }
652 
654 
655  // Issue code events for newly deserialized code objects.
656  LOG_CODE_EVENT(isolate_, LogCodeObjects());
657  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
658 }
659 
660 
662  isolate_ = isolate;
663  for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
665  }
666  Heap* heap = isolate->heap();
670  }
671 
673 
674  // Keep track of the code space start and end pointers in case new
675  // code objects were unserialized
676  OldSpace* code_space = isolate_->heap()->code_space();
677  Address start_address = code_space->top();
678  VisitPointer(root);
679 
680  // There's no code deserialized here. If this assert fires
681  // then that's changed and logging should be added to notify
682  // the profiler et al of the new code.
683  CHECK_EQ(start_address, code_space->top());
684 }
685 
686 
688  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
689  // DCHECK(source_->AtEOF());
693  }
694  if (attached_objects_) attached_objects_->Dispose();
695 }
696 
697 
698 // This is called on the roots. It is the driver of the deserialization
699 // process. It is also called on the body of each function.
701  // The space must be new space. Any other space would cause ReadChunk to try
702  // to update the remembered using NULL as the address.
703  ReadChunk(start, end, NEW_SPACE, NULL);
704 }
705 
706 
709  site->set_weak_next(isolate_->heap()->undefined_value());
710  } else {
711  site->set_weak_next(isolate_->heap()->allocation_sites_list());
712  }
714 }
715 
716 
717 // Used to insert a deserialized internalized string into the string table.
719  public:
720  explicit StringTableInsertionKey(String* string)
721  : string_(string), hash_(HashForObject(string)) {
722  DCHECK(string->IsInternalizedString());
723  }
724 
  // Returns true when |string| has the same content as this key's string.
  virtual bool IsMatch(Object* string) {
    // We know that all entries in a hash table had their hash keys created.
    // Use that knowledge to have fast failure: mismatching hashes cannot
    // be equal strings.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }
732 
733  virtual uint32_t Hash() OVERRIDE { return hash_; }
734 
736  return String::cast(key)->Hash();
737  }
738 
740  OVERRIDE {
741  return handle(string_, isolate);
742  }
743 
746 };
747 
748 
750  if (obj->IsString()) {
751  String* string = String::cast(obj);
752  // Uninitialize hash field as the hash seed may have changed.
753  string->set_hash_field(String::kEmptyHashField);
754  if (string->IsInternalizedString()) {
756  HandleScope scope(isolate_);
757  StringTableInsertionKey key(string);
758  String* canonical = *StringTable::LookupKey(isolate_, &key);
759  string->SetForwardedInternalizedString(canonical);
760  return canonical;
761  }
762  }
763  return obj;
764 }
765 
766 
768  if (obj->IsInternalizedString()) {
769  return String::cast(obj)->GetForwardedInternalizedString();
770  }
771  return obj;
772 }
773 
774 
775 // This routine writes the new object into the pointer provided and then
776 // returns true if the new object was in young space and false otherwise.
777 // The reason for this strange interface is that otherwise the object is
778 // written very late, which means the FreeSpace map is not set up by the
779 // time we need to use it to mark the space at the end of a page free.
780 void Deserializer::ReadObject(int space_number,
781  Object** write_back) {
782  int size = source_->GetInt() << kObjectAlignmentBits;
783  Address address = Allocate(space_number, size);
784  HeapObject* obj = HeapObject::FromAddress(address);
786  Object** current = reinterpret_cast<Object**>(address);
787  Object** limit = current + (size >> kPointerSizeLog2);
788  if (FLAG_log_snapshot_positions) {
789  LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
790  }
791  ReadChunk(current, limit, space_number, address);
792 
793  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
794  // as a (weak) root. If this root is relocated correctly,
795  // RelinkAllocationSite() isn't necessary.
796  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
797 
798  // Fix up strings from serialized user code.
800 
801  *write_back = obj;
802 #ifdef DEBUG
803  if (obj->IsCode()) {
804  DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
805  } else {
806  DCHECK(space_number != CODE_SPACE);
807  }
808 #endif
809 }
810 
811 
812 // We know the space requirements before deserialization and can
813 // pre-allocate that reserved space. During deserialization, all we need
814 // to do is to bump up the pointer for each space in the reserved
815 // space. This is also used for fixing back references.
816 // Since multiple large objects cannot be folded into one large object
817 // space allocation, we have to do an actual allocation when deserializing
818 // each large object. Instead of tracking offset for back references, we
819 // reference large objects by index.
820 Address Deserializer::Allocate(int space_index, int size) {
821  if (space_index == LO_SPACE) {
823  LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
824  Executability exec = static_cast<Executability>(source_->GetInt());
825  AllocationResult result = lo_space->AllocateRaw(size, exec);
826  HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
828  return obj->address();
829  } else {
830  DCHECK(space_index < kNumberOfPreallocatedSpaces);
831  Address address = high_water_[space_index];
832  high_water_[space_index] = address + size;
833  return address;
834  }
835 }
836 
838  Object** limit,
839  int source_space,
840  Address current_object_address) {
841  Isolate* const isolate = isolate_;
842  // Write barrier support costs around 1% in startup time. In fact there
843  // are no new space objects in current boot snapshots, so it's not needed,
844  // but that may change.
845  bool write_barrier_needed = (current_object_address != NULL &&
846  source_space != NEW_SPACE &&
847  source_space != CELL_SPACE &&
848  source_space != PROPERTY_CELL_SPACE &&
849  source_space != CODE_SPACE &&
850  source_space != OLD_DATA_SPACE);
851  while (current < limit) {
852  int data = source_->Get();
853  switch (data) {
854 #define CASE_STATEMENT(where, how, within, space_number) \
855  case where + how + within + space_number: \
856  STATIC_ASSERT((where & ~kPointedToMask) == 0); \
857  STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
858  STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
859  STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
860 
861 #define CASE_BODY(where, how, within, space_number_if_any) \
862  { \
863  bool emit_write_barrier = false; \
864  bool current_was_incremented = false; \
865  int space_number = space_number_if_any == kAnyOldSpace \
866  ? (data & kSpaceMask) \
867  : space_number_if_any; \
868  if (where == kNewObject && how == kPlain && within == kStartOfObject) { \
869  ReadObject(space_number, current); \
870  emit_write_barrier = (space_number == NEW_SPACE); \
871  } else { \
872  Object* new_object = NULL; /* May not be a real Object pointer. */ \
873  if (where == kNewObject) { \
874  ReadObject(space_number, &new_object); \
875  } else if (where == kRootArray) { \
876  int root_id = source_->GetInt(); \
877  new_object = isolate->heap()->roots_array_start()[root_id]; \
878  emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
879  } else if (where == kPartialSnapshotCache) { \
880  int cache_index = source_->GetInt(); \
881  new_object = isolate->serialize_partial_snapshot_cache()[cache_index]; \
882  emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
883  } else if (where == kExternalReference) { \
884  int skip = source_->GetInt(); \
885  current = reinterpret_cast<Object**>( \
886  reinterpret_cast<Address>(current) + skip); \
887  int reference_id = source_->GetInt(); \
888  Address address = external_reference_decoder_->Decode(reference_id); \
889  new_object = reinterpret_cast<Object*>(address); \
890  } else if (where == kBackref) { \
891  emit_write_barrier = (space_number == NEW_SPACE); \
892  new_object = GetAddressFromEnd(data & kSpaceMask); \
893  if (deserializing_user_code()) { \
894  new_object = ProcessBackRefInSerializedCode(new_object); \
895  } \
896  } else if (where == kBuiltin) { \
897  DCHECK(deserializing_user_code()); \
898  int builtin_id = source_->GetInt(); \
899  DCHECK_LE(0, builtin_id); \
900  DCHECK_LT(builtin_id, Builtins::builtin_count); \
901  Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \
902  new_object = isolate->builtins()->builtin(name); \
903  emit_write_barrier = false; \
904  } else if (where == kAttachedReference) { \
905  DCHECK(deserializing_user_code()); \
906  int index = source_->GetInt(); \
907  new_object = *attached_objects_->at(index); \
908  emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
909  } else { \
910  DCHECK(where == kBackrefWithSkip); \
911  int skip = source_->GetInt(); \
912  current = reinterpret_cast<Object**>( \
913  reinterpret_cast<Address>(current) + skip); \
914  emit_write_barrier = (space_number == NEW_SPACE); \
915  new_object = GetAddressFromEnd(data & kSpaceMask); \
916  if (deserializing_user_code()) { \
917  new_object = ProcessBackRefInSerializedCode(new_object); \
918  } \
919  } \
920  if (within == kInnerPointer) { \
921  if (space_number != CODE_SPACE || new_object->IsCode()) { \
922  Code* new_code_object = reinterpret_cast<Code*>(new_object); \
923  new_object = \
924  reinterpret_cast<Object*>(new_code_object->instruction_start()); \
925  } else { \
926  DCHECK(space_number == CODE_SPACE); \
927  Cell* cell = Cell::cast(new_object); \
928  new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \
929  } \
930  } \
931  if (how == kFromCode) { \
932  Address location_of_branch_data = reinterpret_cast<Address>(current); \
933  Assembler::deserialization_set_special_target_at( \
934  location_of_branch_data, \
935  Code::cast(HeapObject::FromAddress(current_object_address)), \
936  reinterpret_cast<Address>(new_object)); \
937  location_of_branch_data += Assembler::kSpecialTargetSize; \
938  current = reinterpret_cast<Object**>(location_of_branch_data); \
939  current_was_incremented = true; \
940  } else { \
941  *current = new_object; \
942  } \
943  } \
944  if (emit_write_barrier && write_barrier_needed) { \
945  Address current_address = reinterpret_cast<Address>(current); \
946  isolate->heap()->RecordWrite( \
947  current_object_address, \
948  static_cast<int>(current_address - current_object_address)); \
949  } \
950  if (!current_was_incremented) { \
951  current++; \
952  } \
953  break; \
954  }
955 
956 // This generates a case and a body for the new space (which has to do extra
957 // write barrier handling) and handles the other spaces with 8 fall-through
958 // cases and one body.
// NEW_SPACE gets its own CASE_BODY (it needs write-barrier handling on
// deserialized pointers); the eight old spaces are emitted as fall-through
// case labels that share the single kAnyOldSpace body.
959 #define ALL_SPACES(where, how, within) \
 960  CASE_STATEMENT(where, how, within, NEW_SPACE) \
 961  CASE_BODY(where, how, within, NEW_SPACE) \
 962  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
 963  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
 964  CASE_STATEMENT(where, how, within, CODE_SPACE) \
 965  CASE_STATEMENT(where, how, within, MAP_SPACE) \
 966  CASE_STATEMENT(where, how, within, CELL_SPACE) \
 967  CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
 968  CASE_STATEMENT(where, how, within, LO_SPACE) \
 969  CASE_BODY(where, how, within, kAnyOldSpace)
970 
// Expands to four consecutive switch case labels:
// byte_code, byte_code + 1, byte_code + 2, byte_code + 3.
971 #define FOUR_CASES(byte_code) \
 972  case byte_code: \
 973  case byte_code + 1: \
 974  case byte_code + 2: \
 975  case byte_code + 3:
976 
// Expands to sixteen consecutive switch case labels:
// byte_code through byte_code + 15 (built from four FOUR_CASES blocks).
977 #define SIXTEEN_CASES(byte_code) \
 978  FOUR_CASES(byte_code) \
 979  FOUR_CASES(byte_code + 4) \
 980  FOUR_CASES(byte_code + 8) \
 981  FOUR_CASES(byte_code + 12)
982 
// Invokes f(n) for every raw-data length n (in words) from 1 to 31 —
// the lengths that get a dedicated one-byte opcode (kRawData + n) instead
// of a separately-encoded length int. Used for both the deserializer's
// case generation and the serializer's fixed-length fast path.
983 #define COMMON_RAW_LENGTHS(f) \
 984  f(1) \
 985  f(2) \
 986  f(3) \
 987  f(4) \
 988  f(5) \
 989  f(6) \
 990  f(7) \
 991  f(8) \
 992  f(9) \
 993  f(10) \
 994  f(11) \
 995  f(12) \
 996  f(13) \
 997  f(14) \
 998  f(15) \
 999  f(16) \
 1000  f(17) \
 1001  f(18) \
 1002  f(19) \
 1003  f(20) \
 1004  f(21) \
 1005  f(22) \
 1006  f(23) \
 1007  f(24) \
 1008  f(25) \
 1009  f(26) \
 1010  f(27) \
 1011  f(28) \
 1012  f(29) \
 1013  f(30) \
 1014  f(31)
1015 
 1016  // We generate 31 cases and bodies that process special tags that combine
 1017  // the raw data tag and the length into one byte.
// Case body for kRawData + index: copies exactly `index` words of raw data
// from the source into the current position. The length is encoded in the
// opcode itself, so — unlike the generic kRawData case below — no length
// int follows in the byte stream.
1018 #define RAW_CASE(index) \
 1019  case kRawData + index: { \
 1020  byte* raw_data_out = reinterpret_cast<byte*>(current); \
 1021  source_->CopyRaw(raw_data_out, index * kPointerSize); \
 1022  current = \
 1023  reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
 1024  break; \
 1025  }
1027 #undef RAW_CASE
1028 
1029  // Deserialize a chunk of raw data that doesn't have one of the popular
1030  // lengths.
1031  case kRawData: {
1032  int size = source_->GetInt();
1033  byte* raw_data_out = reinterpret_cast<byte*>(current);
1034  source_->CopyRaw(raw_data_out, size);
1035  break;
1036  }
1037 
1040  int root_id = RootArrayConstantFromByteCode(data);
1041  Object* object = isolate->heap()->roots_array_start()[root_id];
1042  DCHECK(!isolate->heap()->InNewSpace(object));
1043  *current++ = object;
1044  break;
1045  }
1046 
1049  int root_id = RootArrayConstantFromByteCode(data);
1050  int skip = source_->GetInt();
1051  current = reinterpret_cast<Object**>(
1052  reinterpret_cast<intptr_t>(current) + skip);
1053  Object* object = isolate->heap()->roots_array_start()[root_id];
1054  DCHECK(!isolate->heap()->InNewSpace(object));
1055  *current++ = object;
1056  break;
1057  }
1058 
1059  case kRepeat: {
1060  int repeats = source_->GetInt();
1061  Object* object = current[-1];
1062  DCHECK(!isolate->heap()->InNewSpace(object));
1063  for (int i = 0; i < repeats; i++) current[i] = object;
1064  current += repeats;
1065  break;
1066  }
1067 
1070  STATIC_ASSERT(kMaxRepeats == 13);
1071  case kConstantRepeat:
1075  int repeats = RepeatsForCode(data);
1076  Object* object = current[-1];
1077  DCHECK(!isolate->heap()->InNewSpace(object));
1078  for (int i = 0; i < repeats; i++) current[i] = object;
1079  current += repeats;
1080  break;
1081  }
1082 
1083  // Deserialize a new object and write a pointer to it to the current
1084  // object.
1086  // Support for direct instruction pointers in functions. It's an inner
1087  // pointer because it points at the entry point, not at the start of the
1088  // code object.
1091  // Deserialize a new code object and write a pointer to its first
1092  // instruction to the current code object.
1094  // Find a recently deserialized object using its offset from the current
1095  // allocation point and write a pointer to it to the current object.
1098 #if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
1099  defined(V8_TARGET_ARCH_MIPS64)
1100  // Deserialize a new object from pointer found in code and write
1101  // a pointer to it to the current object. Required only for MIPS or ARM
1102  // with ool constant pool, and omitted on the other architectures because
1103  // it is fully unrolled and would cause bloat.
1105  // Find a recently deserialized code object using its offset from the
1106  // current allocation point and write a pointer to it to the current
1107  // object. Required only for MIPS or ARM with ool constant pool.
1110 #endif
1111  // Find a recently deserialized code object using its offset from the
1112  // current allocation point and write a pointer to its first instruction
1113  // to the current code object or the instruction pointer in a function
1114  // object.
1119  // Find an object in the roots array and write a pointer to it to the
1120  // current object.
1123 #if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
1124  defined(V8_TARGET_ARCH_MIPS64)
1125  // Find an object in the roots array and write a pointer to it to in code.
1128 #endif
1129  // Find an object in the partial snapshots cache and write a pointer to it
1130  // to the current object.
1133  kPlain,
1135  0)
1136  // Find an code entry in the partial snapshots cache and
1137  // write a pointer to it to the current object.
1140  kPlain,
1141  kInnerPointer,
1142  0)
1143  // Find an external reference and write a pointer to it to the current
1144  // object.
1147  kPlain,
1149  0)
1150  // Find an external reference and write a pointer to it in the current
1151  // code object.
1154  kFromCode,
1156  0)
1157  // Find a builtin and write a pointer to it to the current object.
1160 #if V8_OOL_CONSTANT_POOL
1161  // Find a builtin code entry and write a pointer to it to the current
1162  // object.
1165 #endif
1166  // Find a builtin and write a pointer to it in the current code object.
1169  // Find an object in the attached references and write a pointer to it to
1170  // the current object.
1177 
1178 #undef CASE_STATEMENT
1179 #undef CASE_BODY
1180 #undef ALL_SPACES
1181 
1182  case kSkip: {
1183  int size = source_->GetInt();
1184  current = reinterpret_cast<Object**>(
1185  reinterpret_cast<intptr_t>(current) + size);
1186  break;
1187  }
1188 
1189  case kNativesStringResource: {
1190  int index = source_->Get();
1191  Vector<const char> source_vector = Natives::GetRawScriptSource(index);
1192  NativesExternalStringResource* resource =
1193  new NativesExternalStringResource(isolate->bootstrapper(),
1194  source_vector.start(),
1195  source_vector.length());
1196  *current++ = reinterpret_cast<Object*>(resource);
1197  break;
1198  }
1199 
1200  case kSynchronize: {
1201  // If we get here then that indicates that you have a mismatch between
1202  // the number of GC roots when serializing and deserializing.
1203  UNREACHABLE();
1204  }
1205 
1206  default:
1207  UNREACHABLE();
1208  }
1209  }
1210  DCHECK_EQ(limit, current);
1211 }
1212 
1213 
1215  : isolate_(isolate),
1216  sink_(sink),
1217  external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
1218  root_index_wave_front_(0),
1219  code_address_map_(NULL),
1220  seen_large_objects_index_(0) {
1221  // The serializer is meant to be used only to generate initial heap images
1222  // from a context in which there is only one isolate.
1223  for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
1224 }
1225 
1226 
1229  if (code_address_map_ != NULL) delete code_address_map_;
1230 }
1231 
1232 
1234  Isolate* isolate = this->isolate();
1235  // No active threads.
1237  // No active or weak handles.
1238  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
1241  // We don't support serializing installed extensions.
1243  isolate->heap()->IterateSmiRoots(this);
1245 }
1246 
1247 
1249  this->VisitPointer(object);
1250  Pad();
1251 }
1252 
1253 
1255  Object** roots = isolate()->heap()->roots_array_start();
1256  return current == &roots[Heap::kStoreBufferTopRootIndex]
1257  || current == &roots[Heap::kStackLimitRootIndex]
1258  || current == &roots[Heap::kRealStackLimitRootIndex];
1259 }
1260 
1261 
1263  Isolate* isolate = this->isolate();;
1264 
1265  for (Object** current = start; current < end; current++) {
1266  if (start == isolate->heap()->roots_array_start()) {
1268  Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
1269  }
1270  if (ShouldBeSkipped(current)) {
1271  sink_->Put(kSkip, "Skip");
1272  sink_->PutInt(kPointerSize, "SkipOneWord");
1273  } else if ((*current)->IsSmi()) {
1274  sink_->Put(kRawData + 1, "Smi");
1275  for (int i = 0; i < kPointerSize; i++) {
1276  sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
1277  }
1278  } else {
1279  SerializeObject(*current, kPlain, kStartOfObject, 0);
1280  }
1281  }
1282 }
1283 
1284 
1285 // This ensures that the partial snapshot cache keeps things alive during GC and
1286 // tracks their movement. When it is called during serialization of the startup
1287 // snapshot nothing happens. When the partial (context) snapshot is created,
1288 // this array is populated with the pointers that the partial snapshot will
1289 // need. As that happens we emit serialized objects to the startup snapshot
1290 // that correspond to the elements of this cache array. On deserialization we
1291 // therefore need to visit the cache array. This fills it up with pointers to
1292 // deserialized objects.
1294  ObjectVisitor* visitor) {
1295  if (isolate->serializer_enabled()) return;
1296  for (int i = 0; ; i++) {
1297  if (isolate->serialize_partial_snapshot_cache_length() <= i) {
1298  // Extend the array ready to get a value from the visitor when
1299  // deserializing.
1301  }
1302  Object** cache = isolate->serialize_partial_snapshot_cache();
1303  visitor->VisitPointers(&cache[i], &cache[i + 1]);
1304  // Sentinel is the undefined object, which is a root so it will not normally
1305  // be found in the cache.
1306  if (cache[i] == isolate->heap()->undefined_value()) {
1307  break;
1308  }
1309  }
1310 }
1311 
1312 
1314  Isolate* isolate = this->isolate();
1315 
1316  for (int i = 0;
1317  i < isolate->serialize_partial_snapshot_cache_length();
1318  i++) {
1319  Object* entry = isolate->serialize_partial_snapshot_cache()[i];
1320  if (entry == heap_object) return i;
1321  }
1322 
1323  // We didn't find the object in the cache. So we add it to the cache and
1324  // then visit the pointer so that it becomes part of the startup snapshot
1325  // and we can refer to it from the partial snapshot.
1326  int length = isolate->serialize_partial_snapshot_cache_length();
1327  isolate->PushToPartialSnapshotCache(heap_object);
1328  startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
1329  // We don't recurse from the startup snapshot generator into the partial
1330  // snapshot generator.
1331  DCHECK(length == isolate->serialize_partial_snapshot_cache_length() - 1);
1332  return length;
1333 }
1334 
1335 
1336 int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
1337  Heap* heap = isolate()->heap();
1338  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
1339  for (int i = 0; i < root_index_wave_front_; i++) {
1340  Object* root = heap->roots_array_start()[i];
1341  if (!root->IsSmi() && root == heap_object) {
1342  return i;
1343  }
1344  }
1345  return kInvalidRootIndex;
1346 }
1347 
1348 
1349 // Encode the location of an already deserialized object in order to write its
1350 // location into a later object. We can encode the location as an offset from
1351 // the start of the deserialized objects or as an offset backwards from the
1352 // current allocation pointer.
1354  HowToCode how_to_code,
1355  WhereToPoint where_to_point,
1356  int skip) {
1357  int space = SpaceOfObject(heap_object);
1358 
1359  if (skip == 0) {
1360  sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
1361  } else {
1362  sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
1363  "BackRefSerWithSkip");
1364  sink_->PutInt(skip, "BackRefSkipDistance");
1365  }
1366 
1367  if (space == LO_SPACE) {
1368  int index = address_mapper_.MappedTo(heap_object);
1369  sink_->PutInt(index, "large object index");
1370  } else {
1371  int address = address_mapper_.MappedTo(heap_object);
1372  int offset = CurrentAllocationAddress(space) - address;
1373  // Shift out the bits that are always 0.
1374  offset >>= kObjectAlignmentBits;
1375  sink_->PutInt(offset, "offset");
1376  }
1377 }
1378 
1379 
1381  Object* o,
1382  HowToCode how_to_code,
1383  WhereToPoint where_to_point,
1384  int skip) {
1385  CHECK(o->IsHeapObject());
1386  HeapObject* heap_object = HeapObject::cast(o);
1387  DCHECK(!heap_object->IsJSFunction());
1388 
1389  int root_index;
1390  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
1391  PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
1392  return;
1393  }
1394 
1395  if (address_mapper_.IsMapped(heap_object)) {
1396  SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
1397  skip);
1398  } else {
1399  if (skip != 0) {
1400  sink_->Put(kSkip, "FlushPendingSkip");
1401  sink_->PutInt(skip, "SkipDistance");
1402  }
1403 
1404  // Object has not yet been serialized. Serialize it here.
1405  ObjectSerializer object_serializer(this,
1406  heap_object,
1407  sink_,
1408  how_to_code,
1409  where_to_point);
1410  object_serializer.Serialize();
1411  }
1412 }
1413 
1414 
1416  // This phase comes right after the partial serialization (of the snapshot).
1417  // After we have done the partial serialization the partial snapshot cache
1418  // will contain some references needed to decode the partial snapshot. We
1419  // add one entry with 'undefined' which is the sentinel that the deserializer
1420  // uses to know it is done deserializing the array.
1421  Object* undefined = isolate()->heap()->undefined_value();
1422  VisitPointer(&undefined);
1423  isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
1424  Pad();
1425 }
1426 
1427 
// Emits a reference to a root-array object. Constant-encodable roots that are
// not in new space and are referenced as plain start-of-object pointers get
// the compact kRootArrayConstants encoding (with or without a skip prefix);
// every other root is emitted as kRootArray + how/where bits followed by an
// explicit root index.
// NOTE(review): this listing omits original source lines 1430 and 1441 (the
// how_to_code parameter declaration and the kRootArrayConstants-with-skip
// sink_->Put call) — restore them from the upstream file before compiling.
1428 void Serializer::PutRoot(int root_index,
 1429  HeapObject* object,
 1431  SerializerDeserializer::WhereToPoint where_to_point,
 1432  int skip) {
// Compact encoding is only valid for low root indices referenced kPlain /
// kStartOfObject, and never for new-space objects.
 1433  if (how_to_code == kPlain &&
 1434  where_to_point == kStartOfObject &&
 1435  root_index < kRootArrayNumberOfConstantEncodings &&
 1436  !isolate()->heap()->InNewSpace(object)) {
 1437  if (skip == 0) {
 1438  sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
 1439  "RootConstant");
 1440  } else {
 1442  "RootConstant");
 1443  sink_->PutInt(skip, "SkipInPutRoot");
 1444  }
 1445  } else {
// General encoding: flush any pending skip first, then the root index.
 1446  if (skip != 0) {
 1447  sink_->Put(kSkip, "SkipFromPutRoot");
 1448  sink_->PutInt(skip, "SkipFromPutRootDistance");
 1449  }
 1450  sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
 1451  sink_->PutInt(root_index, "root_index");
 1452  }
 1453 }
1454 
1455 
1457  Object* o,
1458  HowToCode how_to_code,
1459  WhereToPoint where_to_point,
1460  int skip) {
1461  CHECK(o->IsHeapObject());
1462  HeapObject* heap_object = HeapObject::cast(o);
1463 
1464  if (heap_object->IsMap()) {
1465  // The code-caches link to context-specific code objects, which
1466  // the startup and context serializes cannot currently handle.
1467  DCHECK(Map::cast(heap_object)->code_cache() ==
1468  heap_object->GetHeap()->empty_fixed_array());
1469  }
1470 
1471  int root_index;
1472  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
1473  PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
1474  return;
1475  }
1476 
1477  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
1478  if (skip != 0) {
1479  sink_->Put(kSkip, "SkipFromSerializeObject");
1480  sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
1481  }
1482 
1483  int cache_index = PartialSnapshotCacheIndex(heap_object);
1484  sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
1485  "PartialSnapshotCache");
1486  sink_->PutInt(cache_index, "partial_snapshot_cache_index");
1487  return;
1488  }
1489 
1490  // Pointers from the partial snapshot to the objects in the startup snapshot
1491  // should go through the root array or through the partial snapshot cache.
1492  // If this is not the case you may have to add something to the root array.
1494  // All the internalized strings that the partial snapshot needs should be
1495  // either in the root table or in the partial snapshot cache.
1496  DCHECK(!heap_object->IsInternalizedString());
1497 
1498  if (address_mapper_.IsMapped(heap_object)) {
1499  SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
1500  skip);
1501  } else {
1502  if (skip != 0) {
1503  sink_->Put(kSkip, "SkipFromSerializeObject");
1504  sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
1505  }
1506  // Object has not yet been serialized. Serialize it here.
1507  ObjectSerializer serializer(this,
1508  heap_object,
1509  sink_,
1510  how_to_code,
1511  where_to_point);
1512  serializer.Serialize();
1513  }
1514 }
1515 
1516 
1519  int size = object_->Size();
1520 
1522  "ObjectSerialization");
1523  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
1524 
1526  const char* code_name =
1529  CodeNameEvent(object_->address(), sink_->Position(), code_name));
1531  SnapshotPositionEvent(object_->address(), sink_->Position()));
1532  }
1533 
1534  // Mark this object as already serialized.
1535  if (space == LO_SPACE) {
1536  if (object_->IsCode()) {
1537  sink_->PutInt(EXECUTABLE, "executable large object");
1538  } else {
1539  sink_->PutInt(NOT_EXECUTABLE, "not executable large object");
1540  }
1541  int index = serializer_->AllocateLargeObject(size);
1543  } else {
1544  int offset = serializer_->Allocate(space, size);
1546  }
1547 
1548  // Serialize the map (first word of the object).
1550 
1551  // Serialize the rest of the object.
1556 }
1557 
1558 
1560  Object** end) {
1561  Object** current = start;
1562  while (current < end) {
1563  while (current < end && (*current)->IsSmi()) current++;
1564  if (current < end) OutputRawData(reinterpret_cast<Address>(current));
1565 
1566  while (current < end && !(*current)->IsSmi()) {
1567  HeapObject* current_contents = HeapObject::cast(*current);
1568  int root_index = serializer_->RootIndex(current_contents, kPlain);
1569  // Repeats are not subject to the write barrier so there are only some
1570  // objects that can be used in a repeat encoding. These are the early
1571  // ones in the root array that are never in new space.
1572  if (current != start &&
1573  root_index != kInvalidRootIndex &&
1574  root_index < kRootArrayNumberOfConstantEncodings &&
1575  current_contents == current[-1]) {
1576  DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
1577  int repeat_count = 1;
1578  while (&current[repeat_count] < end - 1 &&
1579  current[repeat_count] == current_contents) {
1580  repeat_count++;
1581  }
1582  current += repeat_count;
1583  bytes_processed_so_far_ += repeat_count * kPointerSize;
1584  if (repeat_count > kMaxRepeats) {
1585  sink_->Put(kRepeat, "SerializeRepeats");
1586  sink_->PutInt(repeat_count, "SerializeRepeats");
1587  } else {
1588  sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
1589  }
1590  } else {
1591  serializer_->SerializeObject(
1592  current_contents, kPlain, kStartOfObject, 0);
1593  bytes_processed_so_far_ += kPointerSize;
1594  current++;
1595  }
1596  }
1597  }
1598 }
1599 
1600 
1602  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1603  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1604 
1605  int skip = OutputRawData(rinfo->target_address_address(),
1606  kCanReturnSkipInsteadOfSkipping);
1607  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1608  Object* object = rinfo->target_object();
1609  serializer_->SerializeObject(object, how_to_code, kStartOfObject, skip);
1610  bytes_processed_so_far_ += rinfo->target_address_size();
1611 }
1612 
1613 
1615  int skip = OutputRawData(reinterpret_cast<Address>(p),
1616  kCanReturnSkipInsteadOfSkipping);
1617  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
1618  sink_->PutInt(skip, "SkipB4ExternalRef");
1619  Address target = *p;
1620  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1621  bytes_processed_so_far_ += kPointerSize;
1622 }
1623 
1624 
1626  int skip = OutputRawData(rinfo->target_address_address(),
1627  kCanReturnSkipInsteadOfSkipping);
1628  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1629  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1630  sink_->PutInt(skip, "SkipB4ExternalRef");
1631  Address target = rinfo->target_reference();
1632  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1633  bytes_processed_so_far_ += rinfo->target_address_size();
1634 }
1635 
1636 
1638  int skip = OutputRawData(rinfo->target_address_address(),
1639  kCanReturnSkipInsteadOfSkipping);
1640  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1641  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1642  sink_->PutInt(skip, "SkipB4ExternalRef");
1643  Address target = rinfo->target_address();
1644  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1645  bytes_processed_so_far_ += rinfo->target_address_size();
1646 }
1647 
1648 
1650  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1651  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1652 
1653  int skip = OutputRawData(rinfo->target_address_address(),
1654  kCanReturnSkipInsteadOfSkipping);
1655  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
1656  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
1657  bytes_processed_so_far_ += rinfo->target_address_size();
1658 }
1659 
1660 
1662  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
1663  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1664  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1665  bytes_processed_so_far_ += kPointerSize;
1666 }
1667 
1668 
1670  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1671  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1672 
1673  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
1674  Cell* object = Cell::cast(rinfo->target_cell());
1675  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1676  bytes_processed_so_far_ += kPointerSize;
1677 }
1678 
1679 
1681  v8::String::ExternalOneByteStringResource** resource_pointer) {
1682  Address references_start = reinterpret_cast<Address>(resource_pointer);
1683  OutputRawData(references_start);
1684  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
1685  Object* source =
1686  serializer_->isolate()->heap()->natives_source_cache()->get(i);
1687  if (!source->IsUndefined()) {
1688  ExternalOneByteString* string = ExternalOneByteString::cast(source);
1690  const Resource* resource = string->resource();
1691  if (resource == *resource_pointer) {
1692  sink_->Put(kNativesStringResource, "NativesStringResource");
1693  sink_->PutSection(i, "NativesStringResourceEnd");
1694  bytes_processed_so_far_ += sizeof(resource);
1695  return;
1696  }
1697  }
1698  }
1699  // One of the strings in the natives cache should match the resource. We
1700  // can't serialize any other kinds of external strings.
1701  UNREACHABLE();
1702 }
1703 
1704 
1706  Address copy = new byte[code->Size()];
1707  MemCopy(copy, code->address(), code->Size());
1708  return Code::cast(HeapObject::FromAddress(copy));
1709 }
1710 
1711 
// Wipes the targets of |code|'s relocation entries so that serialized code
// contains no absolute pointers and snapshots stay reproducible. Entries that
// live in the out-of-line constant pool (when FLAG_enable_ool_constant_pool
// is set) are left untouched.
// NOTE(review): this listing omits original source lines 1714-1717 (the
// RelocInfo mode-mask expression), so mode_mask is truncated here — restore
// those lines from the upstream file before compiling.
1712 static void WipeOutRelocations(Code* code) {
 1713  int mode_mask =
 1718  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
 1719  if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
 1720  it.rinfo()->WipeOut();
 1721  }
 1722  }
 1723 }
1724 
1725 
1728  Address object_start = object_->address();
1729  int base = bytes_processed_so_far_;
1730  int up_to_offset = static_cast<int>(up_to - object_start);
1731  int to_skip = up_to_offset - bytes_processed_so_far_;
1732  int bytes_to_output = to_skip;
1733  bytes_processed_so_far_ += to_skip;
1734  // This assert will fail if the reloc info gives us the target_address_address
1735  // locations in a non-ascending order. Luckily that doesn't happen.
1736  DCHECK(to_skip >= 0);
1737  bool outputting_code = false;
1738  if (to_skip != 0 && code_object_ && !code_has_been_output_) {
1739  // Output the code all at once and fix later.
1740  bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
1741  outputting_code = true;
1742  code_has_been_output_ = true;
1743  }
1744  if (bytes_to_output != 0 &&
1745  (!code_object_ || outputting_code)) {
1746 #define RAW_CASE(index) \
1747  if (!outputting_code && bytes_to_output == index * kPointerSize && \
1748  index * kPointerSize == to_skip) { \
1749  sink_->PutSection(kRawData + index, "RawDataFixed"); \
1750  to_skip = 0; /* This insn already skips. */ \
1751  } else /* NOLINT */
1753 #undef RAW_CASE
1754  { /* NOLINT */
1755  // We always end up here if we are outputting the code of a code object.
1756  sink_->Put(kRawData, "RawData");
1757  sink_->PutInt(bytes_to_output, "length");
1758  }
1759 
1760  // To make snapshots reproducible, we need to wipe out all pointers in code.
1761  if (code_object_) {
1762  Code* code = CloneCodeObject(object_);
1763  WipeOutRelocations(code);
1764  // We need to wipe out the header fields *after* wiping out the
1765  // relocations, because some of these fields are needed for the latter.
1766  code->WipeOutHeader();
1767  object_start = code->address();
1768  }
1769 
1770  const char* description = code_object_ ? "Code" : "Byte";
1771  for (int i = 0; i < bytes_to_output; i++) {
1772  sink_->PutSection(object_start[base + i], description);
1773  }
1774  if (code_object_) delete[] object_start;
1775  }
1776  if (to_skip != 0 && return_skip == kIgnoringReturn) {
1777  sink_->Put(kSkip, "Skip");
1778  sink_->PutInt(to_skip, "SkipDistance");
1779  to_skip = 0;
1780  }
1781  return to_skip;
1782 }
1783 
1784 
1786  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
1787  AllocationSpace s = static_cast<AllocationSpace>(i);
1788  if (object->GetHeap()->InSpace(object, s)) {
1790  return i;
1791  }
1792  }
1793  UNREACHABLE();
1794  return 0;
1795 }
1796 
1797 
1799  fullness_[LO_SPACE] += size;
1800  return seen_large_objects_index_++;
1801 }
1802 
1803 
1806  int allocation_address = fullness_[space];
1807  fullness_[space] = allocation_address + size;
1808  return allocation_address;
1809 }
1810 
1811 
1813  if (space == CODE_SPACE) {
1815  } else {
1817  }
1818 }
1819 
1820 
1822  // The non-branching GetInt will read up to 3 bytes too far, so we need
1823  // to pad the snapshot to make sure we don't read over the end.
1824  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
1825  sink_->Put(kNop, "Padding");
1826  }
1827 }
1828 
1829 
1833 }
1834 
1835 
1838  Handle<String> source) {
1839  base::ElapsedTimer timer;
1840  if (FLAG_profile_deserialization) timer.Start();
1841 
1842  // Serialize code object.
1843  List<byte> payload;
1844  ListSnapshotSink list_sink(&payload);
1845  DebugSnapshotSink debug_sink(&list_sink);
1846  SnapshotByteSink* sink = FLAG_trace_code_serializer
1847  ? static_cast<SnapshotByteSink*>(&debug_sink)
1848  : static_cast<SnapshotByteSink*>(&list_sink);
1849  CodeSerializer cs(isolate, sink, *source, info->code());
1850  DisallowHeapAllocation no_gc;
1851  Object** location = Handle<Object>::cast(info).location();
1852  cs.VisitPointer(location);
1853  cs.Pad();
1854 
1855  SerializedCodeData data(&payload, &cs);
1856  ScriptData* script_data = data.GetScriptData();
1857 
1858  if (FLAG_profile_deserialization) {
1859  double ms = timer.Elapsed().InMillisecondsF();
1860  int length = script_data->length();
1861  PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
1862  }
1863 
1864  return script_data;
1865 }
1866 
1867 
1869  WhereToPoint where_to_point, int skip) {
1870  HeapObject* heap_object = HeapObject::cast(o);
1871 
1872  int root_index;
1873  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
1874  PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
1875  return;
1876  }
1877 
1878  if (address_mapper_.IsMapped(heap_object)) {
1879  SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
1880  skip);
1881  return;
1882  }
1883 
1884  if (skip != 0) {
1885  sink_->Put(kSkip, "SkipFromSerializeObject");
1886  sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
1887  }
1888 
1889  if (heap_object->IsCode()) {
1890  Code* code_object = Code::cast(heap_object);
1891  switch (code_object->kind()) {
1892  case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet.
1893  case Code::HANDLER: // No handlers patched in yet.
1894  case Code::REGEXP: // No regexp literals initialized yet.
1895  case Code::NUMBER_OF_KINDS: // Pseudo enum value.
1896  CHECK(false);
1897  case Code::BUILTIN:
1898  SerializeBuiltin(code_object, how_to_code, where_to_point);
1899  return;
1900  case Code::STUB:
1901  SerializeCodeStub(code_object, how_to_code, where_to_point);
1902  return;
1903 #define IC_KIND_CASE(KIND) case Code::KIND:
1905 #undef IC_KIND_CASE
1906  SerializeHeapObject(code_object, how_to_code, where_to_point);
1907  return;
1908  // TODO(yangguo): add special handling to canonicalize ICs.
1909  case Code::FUNCTION:
1910  // Only serialize the code for the toplevel function. Replace code
1911  // of included function literals by the lazy compile builtin.
1912  // This is safe, as checked in Compiler::BuildFunctionInfo.
1913  if (code_object != main_code_) {
1914  Code* lazy = *isolate()->builtins()->CompileLazy();
1915  SerializeBuiltin(lazy, how_to_code, where_to_point);
1916  } else {
1917  SerializeHeapObject(code_object, how_to_code, where_to_point);
1918  }
1919  return;
1920  }
1921  }
1922 
1923  if (heap_object == source_) {
1924  SerializeSourceObject(how_to_code, where_to_point);
1925  return;
1926  }
1927 
1928  // Past this point we should not see any (context-specific) maps anymore.
1929  CHECK(!heap_object->IsMap());
1930  // There should be no references to the global object embedded.
1931  CHECK(!heap_object->IsJSGlobalProxy() && !heap_object->IsGlobalObject());
1932  // There should be no hash table embedded. They would require rehashing.
1933  CHECK(!heap_object->IsHashTable());
1934 
1935  SerializeHeapObject(heap_object, how_to_code, where_to_point);
1936 }
1937 
1938 
1940  HowToCode how_to_code,
1941  WhereToPoint where_to_point) {
1942  if (heap_object->IsScript()) {
1943  // The wrapper cache uses a Foreign object to point to a global handle.
1944  // However, the object visitor expects foreign objects to point to external
1945  // references. Clear the cache to avoid this issue.
1946  Script::cast(heap_object)->ClearWrapperCache();
1947  }
1948 
1949  if (FLAG_trace_code_serializer) {
1950  PrintF("Encoding heap object: ");
1951  heap_object->ShortPrint();
1952  PrintF("\n");
1953  }
1954 
1955  // Object has not yet been serialized. Serialize it here.
1956  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
1957  where_to_point);
1958  serializer.Serialize();
1959 }
1960 
1961 
1963  WhereToPoint where_to_point) {
1964  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
1965  (how_to_code == kPlain && where_to_point == kInnerPointer) ||
1966  (how_to_code == kFromCode && where_to_point == kInnerPointer));
1967  int builtin_index = builtin->builtin_index();
1968  DCHECK_LT(builtin_index, Builtins::builtin_count);
1969  DCHECK_LE(0, builtin_index);
1970 
1971  if (FLAG_trace_code_serializer) {
1972  PrintF("Encoding builtin: %s\n",
1973  isolate()->builtins()->name(builtin_index));
1974  }
1975 
1976  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
1977  sink_->PutInt(builtin_index, "builtin_index");
1978 }
1979 
1980 
1982  WhereToPoint where_to_point) {
1983  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
1984  (how_to_code == kPlain && where_to_point == kInnerPointer) ||
1985  (how_to_code == kFromCode && where_to_point == kInnerPointer));
1986  uint32_t stub_key = stub->stub_key();
1987  DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
1988 
1989  int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
1990 
1991  if (FLAG_trace_code_serializer) {
1992  PrintF("Encoding code stub %s as %d\n",
1993  CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
1994  index);
1995  }
1996 
1997  sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
1998  sink_->PutInt(index, "CodeStub key");
1999 }
2000 
2001 
2003  // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
2004  int index = 0;
2005  while (index < stub_keys_.length()) {
2006  if (stub_keys_[index] == stub_key) return index;
2007  index++;
2008  }
2009  stub_keys_.Add(stub_key);
2010  return index;
2011 }
2012 
2013 
2015  WhereToPoint where_to_point) {
2016  if (FLAG_trace_code_serializer) PrintF("Encoding source object\n");
2017 
2018  DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
2019  sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
2020  sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex");
2021 }
2022 
2023 
2025  ScriptData* data,
2026  Handle<String> source) {
2027  base::ElapsedTimer timer;
2028  if (FLAG_profile_deserialization) timer.Start();
2029 
2030  Object* root;
2031 
2032  {
2033  HandleScope scope(isolate);
2034 
2035  SerializedCodeData scd(data, *source);
2036  SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
2037  Deserializer deserializer(&payload);
2038  STATIC_ASSERT(NEW_SPACE == 0);
2039  for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
2040  deserializer.set_reservation(i, scd.GetReservation(i));
2041  }
2042 
2043  // Prepare and register list of attached objects.
2044  Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
2045  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
2046  code_stub_keys.length() + kCodeStubsBaseIndex);
2047  attached_objects[kSourceObjectIndex] = source;
2048  for (int i = 0; i < code_stub_keys.length(); i++) {
2049  attached_objects[i + kCodeStubsBaseIndex] =
2050  CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
2051  }
2052  deserializer.SetAttachedObjects(&attached_objects);
2053 
2054  // Deserialize.
2055  deserializer.DeserializePartial(isolate, &root);
2056  deserializer.FlushICacheForNewCodeObjects();
2057  }
2058 
2059  if (FLAG_profile_deserialization) {
2060  double ms = timer.Elapsed().InMillisecondsF();
2061  int length = data->length();
2062  PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
2063  }
2064  return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root), isolate);
2065 }
2066 
2067 
2069  : owns_script_data_(true) {
2070  DisallowHeapAllocation no_gc;
2071  List<uint32_t>* stub_keys = cs->stub_keys();
2072 
2073  // Calculate sizes.
2074  int num_stub_keys = stub_keys->length();
2075  int stub_keys_size = stub_keys->length() * kInt32Size;
2076  int data_length = kHeaderSize + stub_keys_size + payload->length();
2077 
2078  // Allocate backing store and create result data.
2079  byte* data = NewArray<byte>(data_length);
2080  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
2081  script_data_ = new ScriptData(data, data_length);
2083 
2084  // Set header values.
2086  SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
2087  SetHeaderValue(kPayloadLengthOffset, payload->length());
2088  STATIC_ASSERT(NEW_SPACE == 0);
2089  for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
2090  SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
2091  }
2092 
2093  // Copy code stub keys.
2094  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(stub_keys->begin()),
2095  stub_keys_size);
2096 
2097  // Copy serialized data.
2098  CopyBytes(data + kHeaderSize + stub_keys_size, payload->begin(),
2099  static_cast<size_t>(payload->length()));
2100 }
2101 
2102 
2104  return GetHeaderValue(kCheckSumOffset) == CheckSum(source) &&
2106 }
2107 
2108 
2110  int checksum = Version::Hash();
2111 #ifdef DEBUG
2112  uint32_t seed = static_cast<uint32_t>(checksum);
2113  checksum = static_cast<int>(IteratingStringHasher::Hash(string, seed));
2114 #endif // DEBUG
2115  return checksum;
2116 }
2117 } } // namespace v8::internal
#define ACCESSOR_INFO_LIST(V)
Definition: accessors.h:16
#define BUILTIN(name)
Definition: builtins.cc:122
#define BUILTIN_LIST_C(V)
Definition: builtins.h:44
#define BUILTIN_LIST_DEBUG_A(V)
Definition: builtins.h:137
#define BUILTIN_LIST_A(V)
Definition: builtins.h:66
An ExternalOneByteStringResource is a wrapper around an one-byte string buffer that resides outside V...
Definition: v8.h:1918
const char * Lookup(Address code_address)
Definition: serialize.cc:528
void Move(Address from, Address to)
Definition: serialize.cc:541
void Insert(Address code_address, const char *name, int name_size)
Definition: serialize.cc:521
static char * CopyName(const char *name, int name_size)
Definition: serialize.cc:553
HashMap::Entry * FindOrCreateEntry(Address code_address)
Definition: serialize.cc:564
void Remove(Address code_address)
Definition: serialize.cc:533
void RemoveEntry(HashMap::Entry *entry)
Definition: serialize.cc:574
HashMap::Entry * FindEntry(Address code_address)
Definition: serialize.cc:568
virtual void CodeDeleteEvent(Address from)
Definition: serialize.cc:502
virtual void CodeDisableOptEvent(Code *code, SharedFunctionInfo *shared)
Definition: serialize.cc:499
virtual void LogRecordedBuffer(Code *code, SharedFunctionInfo *, const char *name, int length)
Definition: serialize.cc:583
const char * Lookup(Address address)
Definition: serialize.cc:506
virtual void CodeMoveEvent(Address from, Address to)
Definition: serialize.cc:495
CodeAddressMap(Isolate *isolate)
Definition: serialize.cc:486
static ScriptData * Serialize(Isolate *isolate, Handle< SharedFunctionInfo > info, Handle< String > source)
Definition: serialize.cc:1836
void SerializeHeapObject(HeapObject *heap_object, HowToCode how_to_code, WhereToPoint where_to_point)
Definition: serialize.cc:1939
void SerializeSourceObject(HowToCode how_to_code, WhereToPoint where_to_point)
Definition: serialize.cc:2014
void SerializeBuiltin(Code *builtin, HowToCode how_to_code, WhereToPoint where_to_point)
Definition: serialize.cc:1962
int AddCodeStubKey(uint32_t stub_key)
Definition: serialize.cc:2002
virtual void SerializeObject(Object *o, HowToCode how_to_code, WhereToPoint where_to_point, int skip)
Definition: serialize.cc:1868
static Handle< SharedFunctionInfo > Deserialize(Isolate *isolate, ScriptData *data, Handle< String > source)
Definition: serialize.cc:2024
void SerializeCodeStub(Code *stub, HowToCode how_to_code, WhereToPoint where_to_point)
Definition: serialize.cc:1981
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:5018
static Object * GetObjectFromEntryAddress(Address location_of_address)
Definition: objects-inl.h:5029
uint32_t stub_key()
Definition: objects-inl.h:6159
static void FlushICache(void *start, size_t size)
static Address GetDeoptimizationEntry(Isolate *isolate, int id, BailoutType type, GetEntryMode mode=ENSURE_ENTRY_CODE)
Definition: deoptimizer.cc:672
ExternalReferenceDecoder * external_reference_decoder_
Definition: serialize.h:319
int reservations_[kNumberOfSpaces]
Definition: serialize.h:316
void ReadChunk(Object **start, Object **end, int space, Address object_address)
Definition: serialize.cc:837
Address Allocate(int space_index, int size)
Definition: serialize.cc:820
Vector< Handle< Object > > * attached_objects_
Definition: serialize.h:309
List< HeapObject * > deserialized_large_objects_
Definition: serialize.h:321
void DeserializePartial(Isolate *isolate, Object **root)
Definition: serialize.cc:661
static const intptr_t kUninitializedReservation
Definition: serialize.h:317
SnapshotByteSource * source_
Definition: serialize.h:311
void Deserialize(Isolate *isolate)
Definition: serialize.cc:616
Deserializer(SnapshotByteSource *source)
Definition: serialize.cc:595
Object * ProcessBackRefInSerializedCode(Object *obj)
Definition: serialize.cc:767
virtual void VisitPointers(Object **start, Object **end)
Definition: serialize.cc:700
void RelinkAllocationSite(AllocationSite *site)
Definition: serialize.cc:707
HeapObject * ProcessNewObjectFromSerializedCode(HeapObject *obj)
Definition: serialize.cc:749
void SetAttachedObjects(Vector< Handle< Object > > *attached_objects)
Definition: serialize.h:264
Address high_water_[kNumberOfPreallocatedSpaces]
Definition: serialize.h:314
void set_reservation(int space_number, int reservation)
Definition: serialize.h:254
void ReadObject(int space_number, Object **write_back)
Definition: serialize.cc:780
ExternalReferenceDecoder(Isolate *isolate)
Definition: serialize.cc:461
void Put(uint32_t key, Address value)
Definition: serialize.h:134
static uint32_t Hash(Address key)
Definition: serialize.h:102
int IndexOf(Address key) const
Definition: serialize.cc:445
void Put(Address key, int index)
Definition: serialize.cc:455
uint32_t Encode(Address key) const
Definition: serialize.cc:430
ExternalReferenceEncoder(Isolate *isolate)
Definition: serialize.cc:419
const char * NameOfAddress(Address key) const
Definition: serialize.cc:438
void AddFromId(TypeCode type, uint16_t id, const char *name, Isolate *isolate)
Definition: serialize.cc:59
List< ExternalReferenceEntry > refs_
Definition: serialize.h:87
static ExternalReferenceTable * instance(Isolate *isolate)
Definition: serialize.cc:48
void Add(Address address, TypeCode type, uint16_t id, const char *name)
Definition: serialize.cc:94
ExternalReferenceTable(Isolate *isolate)
Definition: serialize.h:62
uint16_t max_id_[kTypeCodeCount]
Definition: serialize.h:88
void PopulateTable(Isolate *isolate)
Definition: serialize.cc:111
List< internal::Object ** > * blocks()
Definition: api.h:537
static Handle< T > cast(Handle< S > that)
Definition: handles.h:116
Heap * GetHeap() const
Definition: objects-inl.h:1379
void IterateBody(InstanceType type, int object_size, ObjectVisitor *v)
Definition: objects.cc:1538
static HeapObject * FromAddress(Address address)
Definition: objects-inl.h:1464
void set_array_buffers_list(Object *object)
Definition: heap.h:795
void ReserveSpace(int *sizes, Address *addresses)
Definition: heap.cc:920
OldSpace * code_space()
Definition: heap.h:596
LargeObjectSpace * lo_space()
Definition: heap.h:600
Object * allocation_sites_list()
Definition: heap.h:801
void OnAllocationEvent(HeapObject *object, int size_in_bytes)
Definition: heap-inl.h:224
void InitializeWeakObjectToCodeTable()
Definition: heap.h:1348
bool InNewSpace(Object *object)
Definition: heap-inl.h:322
void IterateSmiRoots(ObjectVisitor *v)
Definition: heap.cc:4739
Object ** roots_array_start()
Definition: heap.h:896
void set_allocation_sites_list(Object *object)
Definition: heap.h:798
static const int kOldSpaceRoots
Definition: heap.h:1235
void IterateWeakRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:4728
void IterateStrongRoots(ObjectVisitor *v, VisitMode mode)
Definition: heap.cc:4747
void RepairFreeListsAfterBoot()
Definition: heap.cc:484
void set_native_contexts_list(Object *object)
Definition: heap.h:790
bool InSpace(Address addr, AllocationSpace space)
Definition: heap.cc:4464
StubCache * stub_cache()
Definition: isolate.h:875
bool serializer_enabled() const
Definition: isolate.h:1007
bool has_installed_extensions()
Definition: isolate.h:953
HandleScopeImplementer * handle_scope_implementer()
Definition: isolate.h:901
Builtins * builtins()
Definition: isolate.h:947
MemoryAllocator * memory_allocator()
Definition: isolate.h:883
void PushToPartialSnapshotCache(Object *obj)
Definition: isolate.cc:1656
void InitializeLoggingAndCounters()
Definition: isolate.cc:1818
Counters * counters()
Definition: isolate.h:857
Address get_address_from_id(AddressId id)
Definition: isolate.cc:162
ThreadManager * thread_manager()
Definition: isolate.h:921
Logger * logger()
Definition: isolate.h:866
EternalHandles * eternal_handles()
Definition: isolate.h:919
GlobalHandles * global_handles()
Definition: isolate.h:917
Bootstrapper * bootstrapper()
Definition: isolate.h:856
static uint32_t Hash(String *string, uint32_t seed)
Definition: objects-inl.h:6572
MUST_USE_RESULT AllocationResult AllocateRaw(int object_size, Executability executable)
Definition: spaces.cc:2834
iterator begin() const
Definition: list.h:74
void removeCodeEventListener(CodeEventListener *listener)
Definition: log.cc:792
void addCodeEventListener(CodeEventListener *listener)
Definition: log.cc:786
InstanceType instance_type()
Definition: objects-inl.h:4323
static int CodePageAreaSize()
Definition: spaces.h:1103
static const int kObjectStartOffset
Definition: spaces.h:550
static const int kEmptyHashField
Definition: objects.h:8534
static Vector< const char > GetRawScriptSource(int index)
void ShortPrint(FILE *out=stdout)
Definition: objects.cc:905
static const int kPageSize
Definition: spaces.h:748
int PartialSnapshotCacheIndex(HeapObject *o)
Definition: serialize.cc:1313
virtual void SerializeObject(Object *o, HowToCode how_to_code, WhereToPoint where_to_point, int skip)
Definition: serialize.cc:1456
bool ShouldBeInThePartialSnapshotCache(HeapObject *o)
Definition: serialize.h:527
static int ModeMask(Mode mode)
Definition: assembler.h:445
static const int kCodeTargetMask
Definition: assembler.h:587
byte * pc() const
Definition: assembler.h:457
int length() const
Definition: compiler.h:41
void AddMapping(HeapObject *obj, int to)
Definition: serialize.h:349
SerializedCodeData(ScriptData *data, String *source)
Definition: serialize.h:631
static const int kPayloadLengthOffset
Definition: serialize.h:696
void SetHeaderValue(int offset, int value)
Definition: serialize.h:676
static const int kReservationsOffset
Definition: serialize.h:697
int GetHeaderValue(int offset) const
Definition: serialize.h:681
static const int kCheckSumOffset
Definition: serialize.h:694
Vector< const uint32_t > CodeStubKeys() const
Definition: serialize.h:653
const byte * Payload() const
Definition: serialize.h:659
int CheckSum(String *source)
Definition: serialize.cc:2109
int GetReservation(int space) const
Definition: serialize.h:671
static const int kNumCodeStubKeysOffset
Definition: serialize.h:695
static const int kHeaderSize
Definition: serialize.h:701
bool IsSane(String *source)
Definition: serialize.cc:2103
STATIC_ASSERT(kNumberOfSpaces<=kSpaceMask+1)
static int RootArrayConstantFromByteCode(int byte_code)
Definition: serialize.h:228
static int RepeatsForCode(int byte_code)
Definition: serialize.h:221
static const int kNativesStringResource
Definition: serialize.h:212
static const int kRootArrayNumberOfConstantEncodings
Definition: serialize.h:227
static void Iterate(Isolate *isolate, ObjectVisitor *visitor)
Definition: serialize.cc:1293
static int CodeForRepeats(int repeats)
Definition: serialize.h:217
static const int kRootArrayConstants
Definition: serialize.h:225
static const int kNumberOfPreallocatedSpaces
Definition: serialize.h:152
int OutputRawData(Address up_to, ReturnSkip return_skip=kIgnoringReturn)
Definition: serialize.cc:1726
void VisitEmbeddedPointer(RelocInfo *target)
Definition: serialize.cc:1601
void VisitPointers(Object **start, Object **end)
Definition: serialize.cc:1559
void VisitRuntimeEntry(RelocInfo *reloc)
Definition: serialize.cc:1637
void VisitCodeEntry(Address entry_address)
Definition: serialize.cc:1661
void VisitExternalOneByteString(v8::String::ExternalOneByteStringResource **resource)
Definition: serialize.cc:1680
void VisitCodeTarget(RelocInfo *target)
Definition: serialize.cc:1649
void SerializeReferenceToPreviousObject(HeapObject *heap_object, HowToCode how_to_code, WhereToPoint where_to_point, int skip)
Definition: serialize.cc:1353
ExternalReferenceEncoder * external_reference_encoder_
Definition: serialize.h:486
virtual void SerializeObject(Object *o, HowToCode how_to_code, WhereToPoint where_to_point, int skip)=0
int fullness_[kNumberOfSpaces]
Definition: serialize.h:484
int RootIndex(HeapObject *heap_object, HowToCode from)
Definition: serialize.cc:1336
SerializationAddressMapper * address_mapper()
Definition: serialize.h:392
intptr_t root_index_wave_front_
Definition: serialize.h:489
int Allocate(int space, int size)
Definition: serialize.cc:1804
SnapshotByteSink * sink_
Definition: serialize.h:485
void VisitPointers(Object **start, Object **end)
Definition: serialize.cc:1262
static const int kInvalidRootIndex
Definition: serialize.h:400
CodeAddressMap * code_address_map_
Definition: serialize.h:500
int AllocateLargeObject(int size)
Definition: serialize.cc:1798
static int SpaceOfObject(HeapObject *object)
Definition: serialize.cc:1785
int CurrentAllocationAddress(int space) const
Definition: serialize.h:385
Serializer(Isolate *isolate, SnapshotByteSink *sink)
Definition: serialize.cc:1214
Isolate * isolate() const
Definition: serialize.h:390
SerializationAddressMapper address_mapper_
Definition: serialize.h:488
int SpaceAreaSize(int space)
Definition: serialize.cc:1812
bool ShouldBeSkipped(Object **current)
Definition: serialize.cc:1254
void PutRoot(int index, HeapObject *object, HowToCode how, WhereToPoint where, int skip)
Definition: serialize.cc:1428
static Smi * FromInt(int value)
Definition: objects-inl.h:1321
Sink to write snapshot files to.
virtual void Put(byte b, const char *description)=0
virtual void PutSection(int b, const char *description)
void PutInt(uintptr_t integer, const char *description)
virtual void SerializeStrongReferences()
Definition: serialize.cc:1233
virtual void SerializeObject(Object *o, HowToCode how_to_code, WhereToPoint where_to_point, int skip)
Definition: serialize.cc:1380
virtual uint32_t HashForObject(Object *key) OVERRIDE
Definition: serialize.cc:735
virtual uint32_t Hash() OVERRIDE
Definition: serialize.cc:733
virtual bool IsMatch(Object *string)
Definition: serialize.cc:725
virtual MUST_USE_RESULT Handle< Object > AsHandle(Isolate *isolate) OVERRIDE
Definition: serialize.cc:739
static Handle< String > LookupKey(Isolate *isolate, HashTableKey *key)
Definition: objects.cc:14713
bool SlowEquals(String *other)
Definition: objects.cc:8671
Entry * Next(Entry *p) const
Definition: hashmap.h:226
Entry * Lookup(void *key, uint32_t hash, bool insert, AllocationPolicy allocator=AllocationPolicy())
Definition: hashmap.h:114
void * Remove(void *key, uint32_t hash)
Definition: hashmap.h:145
ThreadState * FirstThreadStateInUse()
Definition: v8threads.cc:231
T * start() const
Definition: vector.h:47
int length() const
Definition: vector.h:41
static int Hash()
Definition: version.h:19
#define OVERRIDE
#define STATS_COUNTER_LIST_1(SC)
Definition: counters.h:380
#define STATS_COUNTER_LIST_2(SC)
Definition: counters.h:433
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in only print modified registers Trace simulator debug messages Implied by trace sim abort randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot A filename with extra code to be included in the A file to write the raw snapshot bytes to(mksnapshot only)") DEFINE_STRING(raw_context_file
enable harmony numeric enable harmony object literal extensions Optimize object size
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in name
enable harmony numeric enable harmony object literal extensions true
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi space(in MBytes)
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define IC_UTIL_LIST(ICU)
Definition: ic.h:17
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)
Definition: isolate.h:169
#define LOG(isolate, Call)
Definition: log.h:69
#define LOG_CODE_EVENT(isolate, Call)
Definition: log.h:77
#define UNREACHABLE()
Definition: logging.h:30
#define CHECK_EQ(expected, value)
Definition: logging.h:169
#define DCHECK_LE(v1, v2)
Definition: logging.h:210
#define CHECK(condition)
Definition: logging.h:36
#define DCHECK_NE(v1, v2)
Definition: logging.h:207
#define DCHECK(condition)
Definition: logging.h:205
#define DCHECK_LT(v1, v2)
Definition: logging.h:209
#define DCHECK_EQ(v1, v2)
Definition: logging.h:206
#define arraysize(array)
Definition: macros.h:86
#define MUST_USE_RESULT
Definition: macros.h:266
unsigned short uint16_t
Definition: unicode.cc:23
int int32_t
Definition: unicode.cc:24
void DeleteArray(T *array)
Definition: allocation.h:68
const int kPointerSize
Definition: globals.h:129
T * NewArray(size_t size)
Definition: allocation.h:60
@ NOT_EXECUTABLE
Definition: globals.h:391
const int kTypeCodeCount
Definition: serialize.h:33
@ VISIT_ONLY_STRONG
Definition: globals.h:397
static Code * CloneCodeObject(HeapObject *code)
Definition: serialize.cc:1705
static uint32_t EncodeExternal(TypeCode type, uint16_t id)
Definition: serialize.cc:35
const int kDeoptTableSerializeEntryCount
Definition: serialize.h:40
const int kFirstTypeCode
Definition: serialize.h:34
const int kPointerSizeLog2
Definition: globals.h:147
static int * GetInternalPointer(StatsCounter *counter)
Definition: serialize.cc:40
Handle< T > handle(T *t, Isolate *isolate)
Definition: handles.h:146
uint32_t ComputePointerHash(void *ptr)
Definition: utils.h:274
const int kInt32Size
Definition: globals.h:125
static LifetimePosition Max(LifetimePosition a, LifetimePosition b)
byte * Address
Definition: globals.h:101
void PrintF(const char *format,...)
Definition: utils.cc:80
@ OLD_DATA_SPACE
Definition: globals.h:361
@ PROPERTY_CELL_SPACE
Definition: globals.h:365
const int kObjectAlignmentBits
Definition: globals.h:225
STATIC_ASSERT(sizeof(CPURegister)==sizeof(Register))
static void WipeOutRelocations(Code *code)
Definition: serialize.cc:1712
bool IsAligned(T value, U alignment)
Definition: utils.h:123
void MemCopy(void *dest, const void *src, size_t size)
Definition: utils.h:350
const intptr_t kPointerAlignment
Definition: globals.h:230
void CopyBytes(uint8_t *target, uint8_t *source)
@ RUNTIME_FUNCTION
Definition: serialize.h:23
@ LAZY_DEOPTIMIZATION
Definition: serialize.h:30
@ STUB_CACHE_TABLE
Definition: serialize.h:28
Debugger support for the V8 JavaScript engine.
Definition: accessors.cc:20
#define IC_KIND_LIST(V)
Definition: objects.h:4939
#define INLINE_OPTIMIZED_FUNCTION_LIST(F)
Definition: runtime.h:708
#define RUNTIME_FUNCTION_LIST(F)
Definition: runtime.h:651
#define BUILD_NAME_LITERAL(CamelName, hacker_name)
#define COUNTER_ENTRY(name, caption)
#define ALL_SPACES(where, how, within)
#define IC_ENTRY(name)
#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize)
#define DEF_ENTRY_C(name, ignored)
#define CASE_STATEMENT(where, how, within, space_number)
#define COMMON_RAW_LENGTHS(f)
#define CASE_BODY(where, how, within, space_number_if_any)
#define IC_KIND_CASE(KIND)
#define DEF_ENTRY_A(name, kind, state, extra)
#define FOUR_CASES(byte_code)
#define ACCESSOR_INFO_DECLARATION(name)
#define RAW_CASE(index)
#define SIXTEEN_CASES(byte_code)