V8 Project
code-generator.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"

namespace v8 {
namespace internal {
namespace compiler {

CodeGenerator::CodeGenerator(InstructionSequence* code)
    : code_(code),
      current_block_(NULL),
      current_source_position_(SourcePosition::Invalid()),
      masm_(code->zone()->isolate(), NULL, 0),
      resolver_(this),
      safepoints_(code->zone()),
      deoptimization_states_(code->zone()),
      deoptimization_literals_(code->zone()),
      translations_(code->zone()),
      last_lazy_deopt_pc_(0) {}


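// GenerateCode drives the final phase of the TurboFan pipeline: it assembles
// the prologue, walks the instruction sequence, pads for lazy deoptimization,
// emits the safepoint table, and finally wraps the generated bytes into a
// Code object with deoptimization data attached.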
Handle<Code> CodeGenerator::GenerateCode() {
  CompilationInfo* info = linkage()->info();

  // Emit a code line info recording start event.
  PositionsRecorder* recorder = masm()->positions_recorder();
  LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));

  // Place function entry hook if requested to do so.
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm());
  }

  // Architecture-specific, linkage-specific prologue.
  info->set_prologue_offset(masm()->pc_offset());
  AssemblePrologue();

  // Assemble all instructions.
  for (InstructionSequence::const_iterator i = code()->begin();
       i != code()->end(); ++i) {
    AssembleInstruction(*i);
  }

  FinishCode(masm());

  // Ensure there is space for lazy deopt.
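  // The nop padding below guarantees that at least Deoptimizer::patch_size()
  // bytes follow the last emitted instruction, so a lazy deoptimization patch
  // applied after the final call site stays within the generated code.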
  if (!info->IsStub()) {
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }

  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());

  // TODO(titzer): what are the right code flags here?
  Code::Kind kind = Code::STUB;
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    kind = Code::OPTIMIZED_FUNCTION;
  }
  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
      masm(), Code::ComputeFlags(kind), info);
  result->set_is_turbofanned(true);
  result->set_stack_slots(frame()->GetSpillSlotCount());
  result->set_safepoint_table_offset(safepoints()->GetCodeOffset());

  PopulateDeoptimizationData(result);

  // Emit a code line info recording stop event.
  void* line_info = recorder->DetachJITHandlerData();
  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));

  return result;
}


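// Record the locations of all live tagged pointers at a call site so that the
// GC can find them: stack slots always, registers only when the safepoint
// kind includes kWithRegisters.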
void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
                                    int arguments,
                                    Safepoint::DeoptMode deopt_mode) {
  const ZoneList<InstructionOperand*>* operands =
      pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    InstructionOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      Register reg = Register::FromAllocationIndex(pointer->index());
      safepoint.DefinePointerRegister(reg, zone());
    }
  }
}


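// Dispatch on the kind of instruction: block starts bind their label (and
// optionally emit a code comment), gap instructions resolve parallel moves,
// source positions update the recorder, and everything else goes to the
// architecture-specific backend, followed by any branch or boolean
// materialization encoded in the instruction's flags mode.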
void CodeGenerator::AssembleInstruction(Instruction* instr) {
  if (instr->IsBlockStart()) {
    // Bind a label for a block start and handle parallel moves.
    BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
    current_block_ = block_start->block();
    if (FLAG_code_comments) {
      // TODO(titzer): these code comments are a giant memory leak.
      Vector<char> buffer = Vector<char>::New(32);
      SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
      masm()->RecordComment(buffer.start());
    }
    masm()->bind(block_start->label());
  }
  if (instr->IsGapMoves()) {
    // Handle parallel moves associated with the gap instruction.
    AssembleGap(GapInstruction::cast(instr));
  } else if (instr->IsSourcePosition()) {
    AssembleSourcePosition(SourcePositionInstruction::cast(instr));
  } else {
    // Assemble architecture-specific code for the instruction.
    AssembleArchInstruction(instr);

    // Assemble branches or boolean materializations after this instruction.
    FlagsMode mode = FlagsModeField::decode(instr->opcode());
    FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
    switch (mode) {
      case kFlags_none:
        return;
      case kFlags_set:
        return AssembleArchBoolean(instr, condition);
      case kFlags_branch:
        return AssembleArchBranch(instr, condition);
    }
    UNREACHABLE();
  }
}


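// Record a source position for the line-info recorder and, when
// --code-comments is enabled, emit a "file:line:column" comment into the
// generated code.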
void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
  SourcePosition source_position = instr->source_position();
  if (source_position == current_source_position_) return;
  DCHECK(!source_position.IsInvalid());
  if (!source_position.IsUnknown()) {
    int code_pos = source_position.raw();
    masm()->positions_recorder()->RecordPosition(source_position.raw());
    masm()->positions_recorder()->WriteRecordedPositions();
    if (FLAG_code_comments) {
      Vector<char> buffer = Vector<char>::New(256);
      CompilationInfo* info = linkage()->info();
      int ln = Script::GetLineNumber(info->script(), code_pos);
      int cn = Script::GetColumnNumber(info->script(), code_pos);
      if (info->script()->name()->IsString()) {
        Handle<String> file(String::cast(info->script()->name()));
        base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
                           file->ToCString().get(), ln, cn);
      } else {
        base::OS::SNPrintF(buffer.start(), buffer.length(),
                           "-- <unknown>:%d:%d --", ln, cn);
      }
      masm()->RecordComment(buffer.start());
    }
  }
  current_source_position_ = source_position;
}


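// A gap instruction carries an optional ParallelMove for each inner position
// (FIRST_INNER_POSITION..LAST_INNER_POSITION); every non-NULL move is handed
// to the gap resolver, which lowers it to concrete moves and swaps.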
void CodeGenerator::AssembleGap(GapInstruction* instr) {
  for (int i = GapInstruction::FIRST_INNER_POSITION;
       i <= GapInstruction::LAST_INNER_POSITION; i++) {
    GapInstruction::InnerPosition inner_pos =
        static_cast<GapInstruction::InnerPosition>(i);
    ParallelMove* move = instr->GetParallelMove(inner_pos);
    if (move != NULL) resolver()->Resolve(move);
  }
}


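// Package the recorded deoptimization states into a DeoptimizationInputData
// array and attach it to the code object: the translation byte array, the
// deduplicated literal array, and one (ast id, translation index, pc offset)
// entry per deoptimization point.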
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
  CompilationInfo* info = linkage()->info();
  int deopt_count = static_cast<int>(deoptimization_states_.size());
  if (deopt_count == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), deopt_count, TENURED);

  Handle<ByteArray> translation_array =
      translations_.CreateByteArray(isolate()->factory());

  data->SetTranslationByteArray(*translation_array);
  data->SetInlinedFunctionCount(Smi::FromInt(0));
  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
  // TODO(jarin) The following code was copied over from Lithium, not sure
  // whether the scope or the IsOptimizing condition is really needed.
  if (info->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
      static_cast<int>(deoptimization_literals_.size()), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  // No OSR in TurboFan yet...
  BailoutId osr_ast_id = BailoutId::None();
  data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(-1));

  // Populate deoptimization entries.
  for (int i = 0; i < deopt_count; i++) {
    DeoptimizationState* deoptimization_state = deoptimization_states_[i];
    CHECK_NE(NULL, deoptimization_state);
    data->SetAstId(i, deoptimization_state->bailout_id());
    data->SetTranslationIndex(
        i, Smi::FromInt(deoptimization_state->translation_id()));
    data->SetArgumentsStackHeight(i, Smi::FromInt(0));
    data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
  }

  code_object->set_deoptimization_data(*data);
}


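// For a call instruction, record a safepoint and, if the call carries a frame
// state, build the deoptimization translation(s) needed for lazy deopt at
// this site.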
void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
  CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));

  bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);

  RecordSafepoint(
      instr->pointer_map(), Safepoint::kSimple, 0,
      needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);

  if (flags & CallDescriptor::kNeedsNopAfterCall) {
    AddNopForSmiCodeInlining();
  }

  if (needs_frame_state) {
    MarkLazyDeoptSite();
    // If the frame state is present, it starts at argument 1
    // (just after the code address).
    InstructionOperandConverter converter(this, instr);
    size_t frame_state_offset = 1;
    FrameStateDescriptor* descriptor =
        GetFrameStateDescriptor(instr, frame_state_offset);
    int pc_offset = masm()->pc_offset();
    int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
                                          descriptor->state_combine());
    // If the pre-call frame state differs from the post-call one, produce the
    // pre-call frame state, too.
    // TODO(jarin) We might want to avoid building the pre-call frame state
    // because it is only used to get locals and arguments (by the debugger and
    // f.arguments), and those are the same in the pre-call and post-call
    // states.
    if (descriptor->state_combine() != kIgnoreOutput) {
      deopt_state_id =
          BuildTranslation(instr, -1, frame_state_offset, kIgnoreOutput);
    }
#if DEBUG
    // Make sure all the values live in stack slots or are immediates.
    // (The values should not live in registers because registers are
    // clobbered by calls.)
    for (size_t i = 0; i < descriptor->size(); i++) {
      InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
      CHECK(op->IsStackSlot() || op->IsImmediate());
    }
#endif
    safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
  }
}


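// Return the index of |literal| in the deoptimization literal array, adding
// it only if an identical handle is not already present.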
int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = static_cast<int>(deoptimization_literals_.size());
  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.push_back(literal);
  return result;
}


FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
    Instruction* instr, size_t frame_state_offset) {
  InstructionOperandConverter i(this, instr);
  InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
      i.InputInt32(static_cast<int>(frame_state_offset)));
  return code()->GetFrameStateDescriptor(state_id);
}


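// Recursively emit Translation entries for a frame state and its outer
// frames: outer frames first, then this frame's header (JS frame or arguments
// adaptor), its values, and finally the call's output if the state combine
// requests it.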
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
    FrameStateDescriptor* descriptor, Instruction* instr,
    Translation* translation, size_t frame_state_offset,
    OutputFrameStateCombine state_combine) {
  // Outer-most state must be added to translation first.
  if (descriptor->outer_state() != NULL) {
    BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
                                            translation, frame_state_offset,
                                            kIgnoreOutput);
  }

  int id = Translation::kSelfLiteralId;
  if (!descriptor->jsfunction().is_null()) {
    id = DefineDeoptimizationLiteral(
        Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
  }

  switch (descriptor->type()) {
    case JS_FRAME:
      translation->BeginJSFrame(
          descriptor->bailout_id(), id,
          static_cast<unsigned int>(descriptor->GetHeight(state_combine)));
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(
          id, static_cast<unsigned int>(descriptor->parameters_count()));
      break;
  }

  frame_state_offset += descriptor->outer_state()->GetTotalSize();
  for (size_t i = 0; i < descriptor->size(); i++) {
    AddTranslationForOperand(
        translation, instr,
        instr->InputAt(static_cast<int>(frame_state_offset + i)));
  }

  switch (state_combine) {
    case kPushOutput:
      DCHECK(instr->OutputCount() == 1);
      AddTranslationForOperand(translation, instr, instr->OutputAt(0));
      break;
    case kIgnoreOutput:
      break;
  }
}


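// Build a complete Translation for the frame state attached to |instr|,
// register a DeoptimizationState for it, and return the deoptimization id
// used to refer to this state from the safepoint table.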
int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
                                    size_t frame_state_offset,
                                    OutputFrameStateCombine state_combine) {
  FrameStateDescriptor* descriptor =
      GetFrameStateDescriptor(instr, frame_state_offset);
  frame_state_offset++;

  Translation translation(
      &translations_, static_cast<int>(descriptor->GetFrameCount()),
      static_cast<int>(descriptor->GetJSFrameCount()), zone());
  BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
                                          frame_state_offset, state_combine);

  int deoptimization_id = static_cast<int>(deoptimization_states_.size());

  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
      descriptor->bailout_id(), translation.index(), pc_offset));

  return deoptimization_id;
}


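// Translate a single instruction operand into the deoptimizer's format:
// stack slots and registers are recorded by location, immediates are interned
// into the literal array.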
void CodeGenerator::AddTranslationForOperand(Translation* translation,
                                             Instruction* instr,
                                             InstructionOperand* op) {
  if (op->IsStackSlot()) {
    translation->StoreStackSlot(op->index());
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    InstructionOperandConverter converter(this, instr);
    translation->StoreRegister(converter.ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    InstructionOperandConverter converter(this, instr);
    translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
  } else if (op->IsImmediate()) {
    InstructionOperandConverter converter(this, instr);
    Constant constant = converter.ToConstant(op);
    Handle<Object> constant_object;
    switch (constant.type()) {
      case Constant::kInt32:
        constant_object =
            isolate()->factory()->NewNumberFromInt(constant.ToInt32());
        break;
      case Constant::kFloat64:
        constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
        break;
      case Constant::kHeapObject:
        constant_object = constant.ToHeapObject();
        break;
      default:
        UNREACHABLE();
    }
    int literal_id = DefineDeoptimizationLiteral(constant_object);
    translation->StoreLiteral(literal_id);
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::MarkLazyDeoptSite() {
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

#if !V8_TURBOFAN_BACKEND
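// Stub implementations used when V8 is built for an architecture that does
// not yet provide a TurboFan backend; each hook simply aborts.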

void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  UNIMPLEMENTED();
}


void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  UNIMPLEMENTED();
}


void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  UNIMPLEMENTED();
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  UNIMPLEMENTED();
}


void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }


void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  UNIMPLEMENTED();
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  UNIMPLEMENTED();
}


void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }

#endif  // !V8_TURBOFAN_BACKEND

}  // namespace compiler
}  // namespace internal
}  // namespace v8