    case InstructionOperand::CONSTANT:
      return os << "[constant:" << op.index() << "]";
    case InstructionOperand::IMMEDIATE:
      return os << "[immediate:" << op.index() << "]";
    case InstructionOperand::STACK_SLOT:
      return os << "[stack:" << op.index() << "]";
    case InstructionOperand::DOUBLE_STACK_SLOT:
      return os << "[double_stack:" << op.index() << "]";
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
SubKindOperand<kOperandKind, kNumCachedOperands>*
    SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
  cache = new SubKindOperand[kNumCachedOperands];
  for (int i = 0; i < kNumCachedOperands; i++) {
    cache[i].ConvertTo(kOperandKind, i);
  }
}
template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
  delete[] cache;  // assumption: teardown frees the array set up above
  cache = NULL;
}
void InstructionOperand::SetUpCaches() {
#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
  name##Operand::SetUpCache();
  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
#undef INSTRUCTION_OPERAND_SETUP
}


void InstructionOperand::TearDownCaches() {
#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
  name##Operand::TearDownCache();
  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
#undef INSTRUCTION_OPERAND_TEARDOWN
}
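// Sketch (assumption, not in the original excerpt): INSTRUCTION_OPERAND_LIST
// is an X-macro enumerating the cached operand sub-kinds, along the lines of
//
//   #define INSTRUCTION_OPERAND_LIST(V) \
//     V(Constant, CONSTANT, 128)        \
//     V(Immediate, IMMEDIATE, 128)      \
//     V(StackSlot, STACK_SLOT, 128)     \
//     ...
//
// so SetUpCaches() expands to one SetUpCache() call per sub-kind
// (ConstantOperand::SetUpCache(); ImmediateOperand::SetUpCache(); ...),
// and TearDownCaches() does the same with TearDownCache().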
std::ostream& operator<<(std::ostream& os, const MoveOperands& mo) {
  os << *mo.destination();
  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
  return os << ";";
}
bool ParallelMove::IsRedundant() const {
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsRedundant()) return false;
  }
  return true;
}
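// Note (added): a ParallelMove is redundant when every move in it is, i.e.
// each move has been eliminated or its source already equals its destination,
// so the whole gap move can be skipped during code emission.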
std::ostream& operator<<(std::ostream& os, const ParallelMove& pm) {
  bool first = true;
  for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
       move != pm.move_operands()->end(); ++move) {
    if (move->IsEliminated()) continue;
    if (!first) os << " ";
    first = false;
    os << *move;
  }
  return os;
}
void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
  if (op->IsStackSlot() && op->index() < 0) return;
  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  pointer_operands_.Add(op, zone);
}
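// Note (added): a stack slot with a negative index denotes an incoming
// argument slot; arguments are deliberately not recorded in the pointer map.
// The same guard appears in RemovePointer and RecordUntagged below.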
void PointerMap::RemovePointer(InstructionOperand* op) {
  if (op->IsStackSlot() && op->index() < 0) return;
  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  for (int i = 0; i < pointer_operands_.length(); ++i) {
    if (pointer_operands_[i]->Equals(op)) {
      pointer_operands_.Remove(i);
      --i;  // assumption: re-examine the element shifted into slot i
    }
  }
}
void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
  if (op->IsStackSlot() && op->index() < 0) return;
  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  untagged_operands_.Add(op, zone);
}
  // Loop from operator<<(std::ostream&, const PointerMap&):
  for (ZoneList<InstructionOperand*>::iterator op =
           pm.pointer_operands_.begin();
       op != pm.pointer_operands_.end(); ++op) {
    if (op != pm.pointer_operands_.begin()) os << ";";
    os << **op;  // assumption: each recorded operand is printed in turn
  }
    // From operator<<(std::ostream&, const FlagsMode&):
    case kFlags_branch:
      return os << "branch";

    // From operator<<(std::ostream&, const FlagsCondition&):
    case kEqual:
      return os << "equal";
    case kNotEqual:
      return os << "not equal";
    case kSignedLessThan:
      return os << "signed less than";
    case kSignedGreaterThanOrEqual:
      return os << "signed greater than or equal";
    case kSignedLessThanOrEqual:
      return os << "signed less than or equal";
    case kSignedGreaterThan:
      return os << "signed greater than";
    case kUnsignedLessThan:
      return os << "unsigned less than";
    case kUnsignedGreaterThanOrEqual:
      return os << "unsigned greater than or equal";
    case kUnsignedLessThanOrEqual:
      return os << "unsigned less than or equal";
    case kUnsignedGreaterThan:
      return os << "unsigned greater than";
    case kUnorderedEqual:
      return os << "unordered equal";
    case kUnorderedNotEqual:
      return os << "unordered not equal";
    case kUnorderedLessThan:
      return os << "unordered less than";
    case kUnorderedGreaterThanOrEqual:
      return os << "unordered greater than or equal";
    case kUnorderedLessThanOrEqual:
      return os << "unordered less than or equal";
    case kUnorderedGreaterThan:
      return os << "unordered greater than";
    case kOverflow:
      return os << "overflow";
    case kNotOverflow:
      return os << "not overflow";
    // From the Instruction printer: separate successive operands with ", ".
    if (i > 0) os << ", ";
    const SourcePositionInstruction* pos =
        SourcePositionInstruction::cast(&instr);
    os << "position (" << pos->source_position().raw() << ")";
  if (am != kMode_None) {
    os << " : " << am;  // assumption: print the decoded addressing mode
  }
  os << " && " << fm << " if "
     << fc;  // assumption: fc is the decoded FlagsCondition
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
  switch (constant.type()) {
    case Constant::kInt32:
      return os << constant.ToInt32();
    case Constant::kInt64:
      return os << constant.ToInt64() << "l";
    case Constant::kFloat32:
      return os << constant.ToFloat32() << "f";
    case Constant::kFloat64:
      return os << constant.ToFloat64();
    case Constant::kExternalReference:
      return os << constant.ToExternalReference().address();
    case Constant::kHeapObject:
      return os << Brief(*constant.ToHeapObject());
  }
  UNREACHABLE();
  return os;
}
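// Note (added): the "l" and "f" suffixes mark 64-bit integer and 32-bit float
// constants respectively, so values of different widths stay distinguishable
// in printed code listings.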
Label* InstructionSequence::GetLabel(BasicBlock* block) {
  return GetBlockStart(block)->label();
}


BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
}
void InstructionSequence::StartBlock(BasicBlock* block) {
  block->code_start_ = static_cast<int>(instructions_.size());
  BlockStartInstruction* block_start =
      BlockStartInstruction::New(zone(), block);
  AddInstruction(block_start, block);
}
void InstructionSequence::EndBlock(BasicBlock* block) {
  int end = static_cast<int>(instructions_.size());
  DCHECK(block->code_start_ >= 0 && block->code_start_ < end);
  block->code_end_ = end;
}
int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
  GapInstruction* gap = GapInstruction::New(zone());
  if (instr->IsControl()) instructions_.push_back(gap);
  int index = static_cast<int>(instructions_.size());
  instructions_.push_back(instr);
  if (!instr->IsControl()) instructions_.push_back(gap);
  if (instr->NeedsPointerMap()) {
    PointerMap* pointer_map = new (zone()) PointerMap(zone());
    pointer_map->set_instruction_position(index);
    instr->set_pointer_map(pointer_map);
    pointer_maps_.push_back(pointer_map);
  }
  return index;
}
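// Note (added): each instruction gets a companion GapInstruction, a slot where
// the register allocator can later insert parallel moves. For control
// instructions the gap is placed before the instruction (a branch must remain
// last in its block); for all other instructions it is placed after.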
BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
  // Walk backwards until the BlockStartInstruction of the enclosing block.
  for (;;) {
    DCHECK_LE(0, instruction_index);
    Instruction* instruction = InstructionAt(instruction_index--);
    if (instruction->IsBlockStart()) {
      return BlockStartInstruction::cast(instruction)->block();
    }
  }
}
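// Note (added): this lookup is linear in the distance to the block start;
// every block begins with a BlockStartInstruction (see StartBlock above), so
// the backwards scan is guaranteed to terminate.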
bool InstructionSequence::IsReference(int virtual_register) const {
  return references_.find(virtual_register) != references_.end();
}


bool InstructionSequence::IsDouble(int virtual_register) const {
  return doubles_.find(virtual_register) != doubles_.end();
}


void InstructionSequence::MarkAsReference(int virtual_register) {
  references_.insert(virtual_register);
}


void InstructionSequence::MarkAsDouble(int virtual_register) {
  doubles_.insert(virtual_register);
}
void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
                                     InstructionOperand* to) {
  // Body reconstructed under assumption: append the move to the parallel move
  // of the gap instruction at `index`.
  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())
      ->AddMove(from, to, zone());
}
InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
    FrameStateDescriptor* descriptor) {
  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
  deoptimization_entries_.push_back(descriptor);
  return StateId::FromInt(deoptimization_id);
}


FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
    InstructionSequence::StateId state_id) {
  return deoptimization_entries_[state_id.ToInt()];
}


int InstructionSequence::GetFrameStateDescriptorCount() {
  return static_cast<int>(deoptimization_entries_.size());
}
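// Note (added): a StateId is just the index of the descriptor in
// deoptimization_entries_, so GetFrameStateDescriptor is a constant-time
// vector lookup.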
std::ostream& operator<<(std::ostream& os, const InstructionSequence& code) {
  for (size_t i = 0; i < code.immediates_.size(); ++i) {
    Constant constant = code.immediates_[i];
    os << "IMM#" << i << ": " << constant << "\n";
  }
  int i = 0;
  for (ConstantMap::const_iterator it = code.constants_.begin();
       it != code.constants_.end(); ++i, ++it) {
    os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
  }
  for (int i = 0; i < code.BasicBlockCount(); i++) {
    BasicBlock* block = code.BlockAt(i);

    int bid = block->id();
    os << "RPO#" << block->rpo_number_ << ": B" << bid;
    CHECK(block->rpo_number_ == i);
    if (block->IsLoopHeader()) {
      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
         << ")";
    }
    os << "  instructions: [" << block->code_start_ << ", " << block->code_end_
       << ")\n  predecessors:";

    BasicBlock::Predecessors predecessors = block->predecessors();
    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
         iter != predecessors.end(); ++iter) {
      os << " B" << (*iter)->id();
    }
    os << "\n";

    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;
      os << " phi: v" << phi->id() << " =";
      Node::Inputs inputs = phi->inputs();
      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
           ++iter) {
        os << " v" << (*iter)->id();
      }
      os << "\n";
    }

    ScopedVector<char> buf(32);  // assumption: small buffer for the index
    for (int j = block->first_instruction_index();
         j <= block->last_instruction_index(); j++) {
      SNPrintF(buf, "%5d", j);
      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
    }

    os << " " << block->control_;

    if (block->control_input_ != NULL) {
      os << " v" << block->control_input_->id();
    }

    BasicBlock::Successors successors = block->successors();
    for (BasicBlock::Successors::iterator iter = successors.begin();
         iter != successors.end(); ++iter) {
      os << " B" << (*iter)->id();
    }
    os << "\n";
  }
  return os;
}