// Constructor: wires the code generator to its InstructionSequence and
// initializes the zone-backed tables used for safepoints and deoptimization.
// NOTE(review): this listing is a garbled extraction — the leading
// initializer-list entries (before line "18") appear to be missing; verify
// against the original source file.
15 CodeGenerator::CodeGenerator(InstructionSequence* code)
18 current_source_position_(SourcePosition::Invalid()),
// The macro-assembler is created over the isolate reachable from the code's
// zone, with no pre-existing buffer (NULL, 0).
19 masm_(code->zone()->isolate(),
NULL, 0),
// All bookkeeping containers allocate from the instruction sequence's zone.
21 safepoints_(code->zone()),
22 deoptimization_states_(code->zone()),
23 deoptimization_literals_(code->zone()),
24 translations_(code->zone()),
25 last_lazy_deopt_pc_(0) {}
// Assembles the whole instruction sequence into a Code object: emits the
// optional profiler entry hook, assembles each instruction, pads non-stub
// code for lazy-deopt patching, emits the safepoint table, and fills in
// code-object metadata before returning.
// NOTE(review): garbled extraction — several lines are missing here (the
// Code::Kind local, the nop-padding loop body, the epilogue call that
// produces `result`, and the final return); verify against the original.
28 Handle<Code> CodeGenerator::GenerateCode() {
29 CompilationInfo* info = linkage()->info();
// Open a line-position recording session for the JIT code-event logger.
32 PositionsRecorder* recorder = masm()->positions_recorder();
33 LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
// Only code using the JS calling convention gets the profiler entry hook.
36 if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
37 ProfileEntryHookStub::MaybeCallEntryHook(masm());
// Everything emitted up to this offset counts as the prologue.
41 info->set_prologue_offset(masm()->pc_offset());
// Assemble every instruction in sequence order.
45 for (InstructionSequence::const_iterator
i = code()->begin();
46 i != code()->end(); ++
i) {
47 AssembleInstruction(*
i);
// Non-stub code is padded so the deoptimizer can later patch a call over
// the end of the generated code.
53 if (!info->IsStub()) {
54 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
55 while (masm()->pc_offset() < target_offset) {
// Emit the safepoint table, sized by the frame's spill-slot count.
60 safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
// JS-call code is marked OPTIMIZED_FUNCTION; `kind`'s default value is
// presumably set on a line missing from this listing — confirm upstream.
64 if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
65 kind = Code::OPTIMIZED_FUNCTION;
68 masm(), Code::ComputeFlags(kind), info);
69 result->set_is_turbofanned(
true);
70 result->set_stack_slots(frame()->GetSpillSlotCount());
71 result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
73 PopulateDeoptimizationData(result);
// Close the line-position session and hand the collected data to the logger.
76 void* line_info = recorder->DetachJITHandlerData();
77 LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
// Records a safepoint for the given pointer map: defines a safepoint entry
// in the table, then registers every live stack-slot (and, for kinds with
// registers, every live register) operand as a tagged pointer for the GC.
// NOTE(review): garbled extraction — the binding of the `safepoint` local
// returned by DefineSafepoint is missing from this listing; verify upstream.
83 void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
85 Safepoint::DeoptMode deopt_mode) {
86 const ZoneList<InstructionOperand*>* operands =
87 pointers->GetNormalizedOperands();
89 safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
90 for (
int i = 0;
i < operands->length();
i++) {
91 InstructionOperand* pointer = operands->at(
i);
// Stack slots are always recorded; registers only when the safepoint kind
// includes kWithRegisters.
92 if (pointer->IsStackSlot()) {
93 safepoint.DefinePointerSlot(pointer->index(), zone());
94 }
else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
95 Register reg = Register::FromAllocationIndex(pointer->index());
96 safepoint.DefinePointerRegister(reg, zone());
// Dispatches one instruction: binds labels at basic-block starts, resolves
// gap moves, records source positions, and otherwise emits architecture-
// specific code (optionally materializing the flags as a boolean or branch).
// NOTE(review): garbled extraction — the comment-buffer declaration, an
// else-branch, and the flags-mode switch scaffolding are missing here.
102 void CodeGenerator::AssembleInstruction(Instruction* instr) {
103 if (instr->IsBlockStart()) {
// A block-start instruction carries the basic block and its bind label.
105 BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
106 current_block_ = block_start->block();
107 if (FLAG_code_comments) {
110 SNPrintF(buffer,
"-- B%d start --", block_start->block()->id());
111 masm()->RecordComment(buffer.start());
113 masm()->bind(block_start->label());
115 if (instr->IsGapMoves()) {
// Gap instructions carry the parallel moves inserted by register allocation.
117 AssembleGap(GapInstruction::cast(instr));
118 }
else if (instr->IsSourcePosition()) {
119 AssembleSourcePosition(SourcePositionInstruction::cast(instr));
// Ordinary instructions go to the architecture-specific assembler.
122 AssembleArchInstruction(instr);
// The instruction's flags output may need to be materialized as a boolean
// value or consumed by a branch, per the decoded condition.
126 FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
131 return AssembleArchBoolean(instr, condition);
133 return AssembleArchBranch(instr, condition);
// Records a source-position change: forwards the position to the assembler's
// positions recorder and, under --code-comments, emits a "file:line:column"
// comment into the generated code.
// NOTE(review): garbled extraction — the comment-buffer declaration and the
// SNPrintF call for the named-script case are missing from this listing.
140 void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
141 SourcePosition source_position = instr->source_position();
// Skip redundant updates when the position has not changed.
142 if (source_position == current_source_position_)
return;
143 DCHECK(!source_position.IsInvalid());
144 if (!source_position.IsUnknown()) {
145 int code_pos = source_position.raw();
146 masm()->positions_recorder()->RecordPosition(source_position.raw());
147 masm()->positions_recorder()->WriteRecordedPositions();
148 if (FLAG_code_comments) {
150 CompilationInfo* info = linkage()->info();
151 int ln = Script::GetLineNumber(info->script(), code_pos);
152 int cn = Script::GetColumnNumber(info->script(), code_pos);
// Use the script's name when it is a string; otherwise fall back to the
// "<unknown>" form below.
153 if (info->script()->name()->IsString()) {
154 Handle<String>
file(String::cast(info->script()->name()));
156 file->ToCString().get(), ln, cn);
159 "-- <unknown>:%d:%d --", ln, cn);
161 masm()->RecordComment(buffer.start());
164 current_source_position_ = source_position;
168 void CodeGenerator::AssembleGap(GapInstruction* instr) {
169 for (
int i = GapInstruction::FIRST_INNER_POSITION;
170 i <= GapInstruction::LAST_INNER_POSITION;
i++) {
171 GapInstruction::InnerPosition inner_pos =
172 static_cast<GapInstruction::InnerPosition
>(
i);
173 ParallelMove* move = instr->GetParallelMove(inner_pos);
174 if (move !=
NULL) resolver()->Resolve(move);
// Builds the DeoptimizationInputData for the generated code: the translation
// byte array, the literal table, OSR placeholders, and one (ast id,
// translation index, pc offset) record per recorded deoptimization state.
// NOTE(review): garbled extraction — the `osr_ast_id` definition and parts
// of the surrounding statement framing are missing from this listing.
179 void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
180 CompilationInfo* info = linkage()->info();
181 int deopt_count =
static_cast<int>(deoptimization_states_.size());
// No deopt points: skip allocating the (tenured) data array entirely.
182 if (deopt_count == 0)
return;
183 Handle<DeoptimizationInputData> data =
184 DeoptimizationInputData::New(isolate(), deopt_count,
TENURED);
186 Handle<ByteArray> translation_array =
187 translations_.CreateByteArray(isolate()->factory());
189 data->SetTranslationByteArray(*translation_array);
190 data->SetInlinedFunctionCount(Smi::FromInt(0));
191 data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
// Non-optimizing (stub) compilations have no shared function info; a Smi
// placeholder is stored instead.
194 if (info->IsOptimizing()) {
197 data->SetSharedFunctionInfo(*info->shared_info());
199 data->SetSharedFunctionInfo(Smi::FromInt(0));
// Copy the collected deoptimization literals into a heap FixedArray.
202 Handle<FixedArray>
literals = isolate()->factory()->NewFixedArray(
203 static_cast<int>(deoptimization_literals_.size()),
TENURED);
206 for (
unsigned i = 0;
i < deoptimization_literals_.size();
i++) {
207 literals->set(
i, *deoptimization_literals_[
i]);
// OSR pc offset -1 marks "no OSR entry" in this data.
214 data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
215 data->SetOsrPcOffset(Smi::FromInt(-1));
// Populate one row per recorded deoptimization state.
218 for (
int i = 0;
i < deopt_count;
i++) {
219 DeoptimizationState* deoptimization_state = deoptimization_states_[
i];
220 data->SetAstId(
i, deoptimization_state->bailout_id());
222 data->SetTranslationIndex(
223 i, Smi::FromInt(deoptimization_states_[
i]->translation_id()));
224 data->SetArgumentsStackHeight(
i, Smi::FromInt(0));
225 data->SetPc(
i, Smi::FromInt(deoptimization_state->pc_offset()));
228 code_object->set_deoptimization_data(*data);
// Records the safepoint for a call instruction and, when the call carries a
// frame state, builds the lazy-deoptimization translation tied to it.
// NOTE(review): garbled extraction — the RecordSafepoint call framing and
// several statements in the frame-state branch are missing from this listing.
232 void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
233 CallDescriptor::Flags
flags(MiscField::decode(instr->opcode()));
235 bool needs_frame_state = (
flags & CallDescriptor::kNeedsFrameState);
238 instr->pointer_map(), Safepoint::kSimple, 0,
239 needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
// Some ports require a nop after the call for Smi-code inlining patching.
241 if (
flags & CallDescriptor::kNeedsNopAfterCall) {
242 AddNopForSmiCodeInlining();
245 if (needs_frame_state) {
249 InstructionOperandConverter converter(
this, instr);
// The frame-state descriptor id is input 1 (input 0 is the call target).
251 size_t frame_state_offset = 1;
252 FrameStateDescriptor* descriptor =
253 GetFrameStateDescriptor(instr, frame_state_offset);
254 int pc_offset = masm()->pc_offset();
255 int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
256 descriptor->state_combine());
// A second translation is built with pc -1 — presumably a debug/verification
// path whose guard is missing from this listing; confirm against upstream.
265 BuildTranslation(instr, -1, frame_state_offset,
kIgnoreOutput);
271 for (
size_t i = 0;
i < descriptor->size();
i++) {
272 InstructionOperand* op = instr->InputAt(frame_state_offset + 1 +
i);
273 CHECK(op->IsStackSlot() || op->IsImmediate());
// Tie the lazy-deopt index in the safepoint table to this translation.
276 safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
// Returns the literal-table index for |literal|, reusing an existing entry
// when an identical handle is already present; otherwise appends it.
// NOTE(review): garbled extraction — the trailing "return result;" appears
// to be missing from this listing; verify against the original source.
281 int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
282 int result =
static_cast<int>(deoptimization_literals_.size());
// Linear scan over the existing literals to deduplicate identical handles.
283 for (
unsigned i = 0;
i < deoptimization_literals_.size(); ++
i) {
284 if (deoptimization_literals_[
i].is_identical_to(literal))
return i;
286 deoptimization_literals_.push_back(literal);
291 FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
292 Instruction* instr,
size_t frame_state_offset) {
293 InstructionOperandConverter
i(
this, instr);
294 InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
295 i.InputInt32(
static_cast<int>(frame_state_offset)));
296 return code()->GetFrameStateDescriptor(state_id);
// Recursively emits Translation records for a frame-state descriptor chain:
// outer (caller) frames first, then this frame's begin record and its operand
// values, then — depending on |state_combine| — the instruction's output.
// NOTE(review): garbled extraction — case labels, breaks, and the recursive
// call's trailing arguments are missing from this listing.
300 void CodeGenerator::BuildTranslationForFrameStateDescriptor(
301 FrameStateDescriptor* descriptor, Instruction* instr,
302 Translation* translation,
size_t frame_state_offset,
// Translate outer frames before the current frame.
305 if (descriptor->outer_state() !=
NULL) {
306 BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
307 translation, frame_state_offset,
// kSelfLiteralId refers to the function currently being compiled.
311 int id = Translation::kSelfLiteralId;
312 if (!descriptor->jsfunction().is_null()) {
313 id = DefineDeoptimizationLiteral(
314 Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
// Begin the frame record matching the descriptor's frame type.
317 switch (descriptor->type()) {
319 translation->BeginJSFrame(
320 descriptor->bailout_id(),
id,
321 static_cast<unsigned int>(descriptor->GetHeight(state_combine)));
324 translation->BeginArgumentsAdaptorFrame(
325 id,
static_cast<unsigned int>(descriptor->parameters_count()));
// Inputs for this frame start after all outer frames' inputs.
329 frame_state_offset += descriptor->outer_state()->GetTotalSize();
330 for (
size_t i = 0;
i < descriptor->size();
i++) {
331 AddTranslationForOperand(
333 instr->InputAt(
static_cast<int>(frame_state_offset +
i)));
// Optionally fold the instruction's own output into the frame state.
336 switch (state_combine) {
338 DCHECK(instr->OutputCount() == 1);
339 AddTranslationForOperand(translation, instr, instr->OutputAt(0));
// Builds a complete Translation for the frame state attached to |instr| and
// registers a DeoptimizationState for it; returns the newly assigned deopt id.
// NOTE(review): garbled extraction — the `state_combine` parameter line of
// the signature is missing from this listing; verify against the original.
347 int CodeGenerator::BuildTranslation(Instruction* instr,
int pc_offset,
348 size_t frame_state_offset,
350 FrameStateDescriptor* descriptor =
351 GetFrameStateDescriptor(instr, frame_state_offset);
// Skip past the state-id input itself; the state values follow it.
352 frame_state_offset++;
354 Translation translation(
355 &translations_,
static_cast<int>(descriptor->GetFrameCount()),
356 static_cast<int>(descriptor->GetJSFrameCount()), zone());
357 BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
358 frame_state_offset, state_combine);
// Deopt ids are assigned densely in recording order.
360 int deoptimization_id =
static_cast<int>(deoptimization_states_.size());
362 deoptimization_states_.push_back(
new (zone()) DeoptimizationState(
363 descriptor->bailout_id(), translation.index(), pc_offset));
365 return deoptimization_id;
// Emits one Translation entry for a single operand: a stack slot, a register
// (tagged or double), or an immediate — immediates are materialized as heap
// objects and referenced via the deoptimization literal table.
// NOTE(review): garbled extraction — `break` statements and the default/
// fallthrough arms of the constant switch are missing from this listing.
369 void CodeGenerator::AddTranslationForOperand(Translation* translation,
371 InstructionOperand* op) {
372 if (op->IsStackSlot()) {
373 translation->StoreStackSlot(op->index());
374 }
else if (op->IsDoubleStackSlot()) {
375 translation->StoreDoubleStackSlot(op->index());
376 }
else if (op->IsRegister()) {
377 InstructionOperandConverter converter(
this, instr);
378 translation->StoreRegister(converter.ToRegister(op));
379 }
else if (op->IsDoubleRegister()) {
380 InstructionOperandConverter converter(
this, instr);
381 translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
382 }
else if (op->IsImmediate()) {
383 InstructionOperandConverter converter(
this, instr);
384 Constant constant = converter.ToConstant(op);
385 Handle<Object> constant_object;
// Convert the constant into a heap object suitable for the literal table.
386 switch (constant.type()) {
387 case Constant::kInt32:
389 isolate()->factory()->NewNumberFromInt(constant.ToInt32());
391 case Constant::kFloat64:
392 constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
394 case Constant::kHeapObject:
395 constant_object = constant.ToHeapObject();
400 int literal_id = DefineDeoptimizationLiteral(constant_object);
401 translation->StoreLiteral(literal_id);
408 void CodeGenerator::MarkLazyDeoptSite() {
409 last_lazy_deopt_pc_ = masm()->pc_offset();
// Fallback definitions for ports built without a TurboFan backend: each
// architecture hook is a stub. Only AddNopForSmiCodeInlining's
// UNIMPLEMENTED() body is visible in this listing.
// NOTE(review): garbled extraction — the other stub bodies and closing
// braces are missing from this listing; verify against the original.
412 #if !V8_TURBOFAN_BACKEND
414 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
419 void CodeGenerator::AssembleArchBranch(Instruction* instr,
425 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
431 void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id) {
442 void CodeGenerator::AssembleMove(InstructionOperand* source,
443 InstructionOperand* destination) {
448 void CodeGenerator::AssembleSwap(InstructionOperand* source,
449 InstructionOperand* destination) {
454 void CodeGenerator::AddNopForSmiCodeInlining() {
UNIMPLEMENTED(); }
static Handle< Code > MakeCodeEpilogue(MacroAssembler *masm, Code::Flags flags, CompilationInfo *info)
static Vector< char > New(int length)
enable harmony numeric literals(0o77, 0b11)") DEFINE_BOOL(harmony_object_literals
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long mode(MIPS only)") DEFINE_BOOL(enable_always_align_csp
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be expose gc extension under the specified name show built in functions in stack traces use random jit cookie to mask large constants minimum length for automatic enable preparsing CPU profiler sampling interval in microseconds trace out of bounds accesses to external arrays default size of stack region v8 is allowed to maximum length of function source code printed in a stack trace min size of a semi the new space consists of two semi spaces print one trace line following each garbage collection do not print 
trace line after scavenger collection print cumulative GC statistics in only print modified registers Trace simulator debug messages Implied by trace sim abort randomize hashes to avoid predictable hash Fixed seed to use to hash property Print the time it takes to deserialize the snapshot A filename with extra code to be included in the A file to write the raw snapshot bytes A file to write the raw context snapshot bytes Write V8 startup blob file(mksnapshot only)") DEFINE_BOOL(profile_hydrogen_code_stub_compilation
enable harmony numeric enable harmony object literal extensions Optimize object Array DOM strings and string trace pretenuring decisions of HAllocate instructions Enables optimizations which favor memory size over execution speed maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining trace the tracking of allocation sites deoptimize every n garbage collections perform array bounds checks elimination analyze liveness of environment slots and zap dead values flushes the cache of optimized code for closures on every GC allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes enable context specialization in TurboFan execution budget before interrupt is triggered max percentage of megamorphic generic ICs to allow optimization enable use of SAHF instruction if enable use of VFP3 instructions if available enable use of NEON instructions if enable use of SDIV and UDIV instructions if enable use of MLS instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of d16 d31 registers on ARM this requires VFP3 force all emitted branches to be in long enable alignment of csp to bytes on platforms which prefer the register to always be NULL
#define LOG_CODE_EVENT(isolate, Call)
#define CHECK_NE(unexpected, value)
#define DCHECK(condition)
static void FinishCode(MacroAssembler *masm)
int SNPrintF(Vector< char > str, const char *format,...)
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Debugger support for the V8 JavaScript engine.