V8 Project
instruction.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction.h"

#include "src/compiler/common-operator.h"

namespace v8 {
namespace internal {
namespace compiler {

OStream& operator<<(OStream& os, const InstructionOperand& op) {
  switch (op.kind()) {
    case InstructionOperand::INVALID:
      return os << "(0)";
    case InstructionOperand::UNALLOCATED: {
      const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
      os << "v" << unalloc->virtual_register();
      if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
        return os << "(=" << unalloc->fixed_slot_index() << "S)";
      }
      switch (unalloc->extended_policy()) {
        case UnallocatedOperand::NONE:
          return os;
        case UnallocatedOperand::FIXED_REGISTER:
          return os << "(=" << Register::AllocationIndexToString(
                                  unalloc->fixed_register_index()) << ")";
        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
          return os << "(=" << DoubleRegister::AllocationIndexToString(
                                  unalloc->fixed_register_index()) << ")";
        case UnallocatedOperand::MUST_HAVE_REGISTER:
          return os << "(R)";
        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
          return os << "(1)";
        case UnallocatedOperand::ANY:
          return os << "(-)";
      }
    }
    case InstructionOperand::CONSTANT:
      return os << "[constant:" << op.index() << "]";
    case InstructionOperand::IMMEDIATE:
      return os << "[immediate:" << op.index() << "]";
    case InstructionOperand::STACK_SLOT:
      return os << "[stack:" << op.index() << "]";
    case InstructionOperand::DOUBLE_STACK_SLOT:
      return os << "[double_stack:" << op.index() << "]";
    case InstructionOperand::REGISTER:
      return os << "[" << Register::AllocationIndexToString(op.index())
                << "|R]";
    case InstructionOperand::DOUBLE_REGISTER:
      return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
                << "|R]";
  }
  UNREACHABLE();
  return os;
}
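
// Operand notation produced above (illustrative examples): an unallocated
// operand prints as "v<n>" plus a policy suffix, e.g. "v7(R)" (must have a
// register), "v7(=3S)" (fixed stack slot 3), "v7(1)" (same as first input)
// or "v7(-)" (any location); allocated operands print in brackets, e.g.
// "[stack:4]" or "[<reg name>|R]" with an architecture-dependent register
// name.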


template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
SubKindOperand<kOperandKind, kNumCachedOperands>*
    SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;


template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
  if (cache) return;
  cache = new SubKindOperand[kNumCachedOperands];
  for (int i = 0; i < kNumCachedOperands; i++) {
    cache[i].ConvertTo(kOperandKind, i);
  }
}


template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
  delete[] cache;
  cache = NULL;
}
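
// SetUpCache() eagerly builds one canonical operand per register/slot index
// so frequently used operands can be handed out as shared instances instead
// of being allocated at every use; TearDownCache() releases them at
// shutdown.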


void InstructionOperand::SetUpCaches() {
#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
  name##Operand::SetUpCache();
  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
#undef INSTRUCTION_OPERAND_SETUP
}


void InstructionOperand::TearDownCaches() {
#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
  name##Operand::TearDownCache();
  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
#undef INSTRUCTION_OPERAND_TEARDOWN
}
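
// For illustration (assuming the operand kinds used by the printer above
// are the ones in INSTRUCTION_OPERAND_LIST), the X-macro expands
// SetUpCaches() to a sequence of calls such as:
//   ConstantOperand::SetUpCache();
//   ImmediateOperand::SetUpCache();
//   StackSlotOperand::SetUpCache();
//   DoubleStackSlotOperand::SetUpCache();
//   RegisterOperand::SetUpCache();
//   DoubleRegisterOperand::SetUpCache();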


OStream& operator<<(OStream& os, const MoveOperands& mo) {
  os << *mo.destination();
  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
  return os << ";";
}
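
// A move whose source equals its destination is a no-op and prints as just
// "dest;", with the " = source" part omitted.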


bool ParallelMove::IsRedundant() const {
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsRedundant()) return false;
  }
  return true;
}


OStream& operator<<(OStream& os, const ParallelMove& pm) {
  bool first = true;
  for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
       move != pm.move_operands()->end(); ++move) {
    if (move->IsEliminated()) continue;
    if (!first) os << " ";
    first = false;
    os << *move;
  }
  return os;
}


void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  pointer_operands_.Add(op, zone);
}


void PointerMap::RemovePointer(InstructionOperand* op) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  for (int i = 0; i < pointer_operands_.length(); ++i) {
    if (pointer_operands_[i]->Equals(op)) {
      pointer_operands_.Remove(i);
      --i;
    }
  }
}
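
// Remove(i) shifts the later entries down, so i is decremented to
// re-examine the slot that just changed; any duplicate entries for the
// same operand are therefore all removed.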


void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  untagged_operands_.Add(op, zone);
}


OStream& operator<<(OStream& os, const PointerMap& pm) {
  os << "{";
  for (ZoneList<InstructionOperand*>::iterator op =
           pm.pointer_operands_.begin();
       op != pm.pointer_operands_.end(); ++op) {
    if (op != pm.pointer_operands_.begin()) os << ";";
    os << **op;
  }
  return os << "}";
}


OStream& operator<<(OStream& os, const ArchOpcode& ao) {
  switch (ao) {
#define CASE(Name)     \
  case k##Name:        \
    return os << #Name;
    ARCH_OPCODE_LIST(CASE)
#undef CASE
  }
  UNREACHABLE();
  return os;
}


OStream& operator<<(OStream& os, const AddressingMode& am) {
  switch (am) {
    case kMode_None:
      return os;
#define CASE(Name)   \
  case kMode_##Name: \
    return os << #Name;
    TARGET_ADDRESSING_MODE_LIST(CASE)
#undef CASE
  }
  UNREACHABLE();
  return os;
}


OStream& operator<<(OStream& os, const FlagsMode& fm) {
  switch (fm) {
    case kFlags_none:
      return os;
    case kFlags_branch:
      return os << "branch";
    case kFlags_set:
      return os << "set";
  }
  UNREACHABLE();
  return os;
}


OStream& operator<<(OStream& os, const FlagsCondition& fc) {
  switch (fc) {
    case kEqual:
      return os << "equal";
    case kNotEqual:
      return os << "not equal";
    case kSignedLessThan:
      return os << "signed less than";
    case kSignedGreaterThanOrEqual:
      return os << "signed greater than or equal";
    case kSignedLessThanOrEqual:
      return os << "signed less than or equal";
    case kSignedGreaterThan:
      return os << "signed greater than";
    case kUnsignedLessThan:
      return os << "unsigned less than";
    case kUnsignedGreaterThanOrEqual:
      return os << "unsigned greater than or equal";
    case kUnsignedLessThanOrEqual:
      return os << "unsigned less than or equal";
    case kUnsignedGreaterThan:
      return os << "unsigned greater than";
    case kUnorderedEqual:
      return os << "unordered equal";
    case kUnorderedNotEqual:
      return os << "unordered not equal";
    case kUnorderedLessThan:
      return os << "unordered less than";
    case kUnorderedGreaterThanOrEqual:
      return os << "unordered greater than or equal";
    case kUnorderedLessThanOrEqual:
      return os << "unordered less than or equal";
    case kUnorderedGreaterThan:
      return os << "unordered greater than";
    case kOverflow:
      return os << "overflow";
    case kNotOverflow:
      return os << "not overflow";
  }
  UNREACHABLE();
  return os;
}


OStream& operator<<(OStream& os, const Instruction& instr) {
  if (instr.OutputCount() > 1) os << "(";
  for (size_t i = 0; i < instr.OutputCount(); i++) {
    if (i > 0) os << ", ";
    os << *instr.OutputAt(i);
  }

  if (instr.OutputCount() > 1) os << ") = ";
  if (instr.OutputCount() == 1) os << " = ";

  if (instr.IsGapMoves()) {
    const GapInstruction* gap = GapInstruction::cast(&instr);
    os << (instr.IsBlockStart() ? " block-start" : "gap ");
    for (int i = GapInstruction::FIRST_INNER_POSITION;
         i <= GapInstruction::LAST_INNER_POSITION; i++) {
      os << "(";
      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
      os << ") ";
    }
  } else if (instr.IsSourcePosition()) {
    const SourcePositionInstruction* pos =
        SourcePositionInstruction::cast(&instr);
    os << "position (" << pos->source_position().raw() << ")";
  } else {
    os << ArchOpcodeField::decode(instr.opcode());
    AddressingMode am = AddressingModeField::decode(instr.opcode());
    if (am != kMode_None) {
      os << " : " << AddressingModeField::decode(instr.opcode());
    }
    FlagsMode fm = FlagsModeField::decode(instr.opcode());
    if (fm != kFlags_none) {
      os << " && " << fm << " if "
         << FlagsConditionField::decode(instr.opcode());
    }
  }
  if (instr.InputCount() > 0) {
    for (size_t i = 0; i < instr.InputCount(); i++) {
      os << " " << *instr.InputAt(i);
    }
  }
  return os << "\n";
}
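
// Example line for an ordinary single-output instruction (illustrative;
// "SomeArchAdd" and "MRI" stand in for an architecture-specific opcode and
// addressing mode):
//   v10 = SomeArchAdd : MRI && set if overflow v8 v9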


OStream& operator<<(OStream& os, const Constant& constant) {
  switch (constant.type()) {
    case Constant::kInt32:
      return os << constant.ToInt32();
    case Constant::kInt64:
      return os << constant.ToInt64() << "l";
    case Constant::kFloat32:
      return os << constant.ToFloat32() << "f";
    case Constant::kFloat64:
      return os << constant.ToFloat64();
    case Constant::kExternalReference:
      return os << constant.ToExternalReference().address();
    case Constant::kHeapObject:
      return os << Brief(*constant.ToHeapObject());
  }
  UNREACHABLE();
  return os;
}
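
// The "l" and "f" suffixes distinguish 64-bit integer and 32-bit float
// constants from the unsuffixed 32-bit integer and 64-bit double forms.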


Label* InstructionSequence::GetLabel(BasicBlock* block) {
  return GetBlockStart(block)->label();
}


BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
}


void InstructionSequence::StartBlock(BasicBlock* block) {
  block->code_start_ = static_cast<int>(instructions_.size());
  BlockStartInstruction* block_start =
      BlockStartInstruction::New(zone(), block);
  AddInstruction(block_start, block);
}


void InstructionSequence::EndBlock(BasicBlock* block) {
  int end = static_cast<int>(instructions_.size());
  DCHECK(block->code_start_ >= 0 && block->code_start_ < end);
  block->code_end_ = end;
}


int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
  // TODO(titzer): the order of these gaps is a holdover from Lithium.
  GapInstruction* gap = GapInstruction::New(zone());
  if (instr->IsControl()) instructions_.push_back(gap);
  int index = static_cast<int>(instructions_.size());
  instructions_.push_back(instr);
  if (!instr->IsControl()) instructions_.push_back(gap);
  if (instr->NeedsPointerMap()) {
    DCHECK(instr->pointer_map() == NULL);
    PointerMap* pointer_map = new (zone()) PointerMap(zone());
    pointer_map->set_instruction_position(index);
    instr->set_pointer_map(pointer_map);
    pointer_maps_.push_back(pointer_map);
  }
  return index;
}
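
// Every instruction gets a companion gap for parallel moves: the gap
// precedes a control instruction but follows any other instruction, and the
// returned index always refers to the instruction itself, never its gap.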


BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
  // TODO(turbofan): Optimize this.
  for (;;) {
    DCHECK_LE(0, instruction_index);
    Instruction* instruction = InstructionAt(instruction_index--);
    if (instruction->IsBlockStart()) {
      return BlockStartInstruction::cast(instruction)->block();
    }
  }
}
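
// This scans linearly backwards from instruction_index to the covering
// block-start instruction, so it is O(block size) -- hence the TODO above.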


bool InstructionSequence::IsReference(int virtual_register) const {
  return references_.find(virtual_register) != references_.end();
}


bool InstructionSequence::IsDouble(int virtual_register) const {
  return doubles_.find(virtual_register) != doubles_.end();
}


void InstructionSequence::MarkAsReference(int virtual_register) {
  references_.insert(virtual_register);
}


void InstructionSequence::MarkAsDouble(int virtual_register) {
  doubles_.insert(virtual_register);
}


void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
                                     InstructionOperand* to) {
  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
      from, to, zone());
}
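
// Moves added here all land in the START inner position of the gap at
// `index`; the remaining inner positions are left for later phases (e.g.
// the register allocator) to populate.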


InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
    FrameStateDescriptor* descriptor) {
  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
  deoptimization_entries_.push_back(descriptor);
  return StateId::FromInt(deoptimization_id);
}

FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
    InstructionSequence::StateId state_id) {
  return deoptimization_entries_[state_id.ToInt()];
}


int InstructionSequence::GetFrameStateDescriptorCount() {
  return static_cast<int>(deoptimization_entries_.size());
}


OStream& operator<<(OStream& os, const InstructionSequence& code) {
  for (size_t i = 0; i < code.immediates_.size(); ++i) {
    Constant constant = code.immediates_[i];
    os << "IMM#" << i << ": " << constant << "\n";
  }
  int i = 0;
  for (ConstantMap::const_iterator it = code.constants_.begin();
       it != code.constants_.end(); ++i, ++it) {
    os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
  }
  for (int i = 0; i < code.BasicBlockCount(); i++) {
    BasicBlock* block = code.BlockAt(i);

    int bid = block->id();
    os << "RPO#" << block->rpo_number_ << ": B" << bid;
    CHECK(block->rpo_number_ == i);
    if (block->IsLoopHeader()) {
      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
         << ")";
    }
    os << " instructions: [" << block->code_start_ << ", " << block->code_end_
       << ")\n predecessors:";

    BasicBlock::Predecessors predecessors = block->predecessors();
    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
         iter != predecessors.end(); ++iter) {
      os << " B" << (*iter)->id();
    }
    os << "\n";

    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;
      os << "   phi: v" << phi->id() << " =";
      Node::Inputs inputs = phi->inputs();
      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
           ++iter) {
        os << " v" << (*iter)->id();
      }
      os << "\n";
    }

    ScopedVector<char> buf(32);
    for (int j = block->first_instruction_index();
         j <= block->last_instruction_index(); j++) {
      // TODO(svenpanne) Add some basic formatting to our streams.
      SNPrintF(buf, "%5d", j);
      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
    }

    os << " " << block->control_;

    if (block->control_input_ != NULL) {
      os << " v" << block->control_input_->id();
    }

    BasicBlock::Successors successors = block->successors();
    for (BasicBlock::Successors::iterator iter = successors.begin();
         iter != successors.end(); ++iter) {
      os << " B" << (*iter)->id();
    }
    os << "\n";
  }
  return os;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8