instruction-selector.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector.h"

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"

namespace v8 {
namespace internal {
namespace compiler {

InstructionSelector::InstructionSelector(InstructionSequence* sequence,
                                         SourcePositionTable* source_positions,
                                         Features features)
    : zone_(sequence->isolate()),
      sequence_(sequence),
      source_positions_(source_positions),
      features_(features),
      current_block_(NULL),
      instructions_(zone()),
      defined_(graph()->NodeCount(), false, zone()),
      used_(graph()->NodeCount(), false, zone()) {}


void InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    if (!block->IsLoopHeader()) continue;
    DCHECK_NE(0, block->PredecessorCount());
    DCHECK_NE(1, block->PredecessorCount());
    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      Node::Inputs inputs = phi->inputs();
      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
        MarkAsUsed(*k);
      }
    }
  }

  // Visit each basic block in post order.
  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
  }

  // Schedule the selected instructions. Within each block the instructions
  // were appended in reverse order (see VisitBlock), so code_start_ marks the
  // end of a block's range in instructions_ and code_end_ its beginning;
  // walking backwards emits them in the correct order.
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    size_t end = block->code_end_;
    size_t start = block->code_start_;
    sequence()->StartBlock(block);
    while (start-- > end) {
      sequence()->AddInstruction(instructions_[start], block);
    }
    sequence()->EndBlock(block);
  }
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b};
  size_t input_count = arraysize(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b,
                                       InstructionOperand* c, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c};
  size_t input_count = arraysize(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
    size_t temp_count, InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c, d};
  size_t input_count = arraysize(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
    size_t input_count, InstructionOperand** inputs, size_t temp_count,
    InstructionOperand** temps) {
  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}


Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}

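
// A minimal usage sketch of the overload family above (this exact pattern
// appears in VisitTruncateFloat64ToInt32 later in this file): a visitor
// builds operands with an OperandGenerator and funnels them through Emit():
//
//   OperandGenerator g(this);
//   Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
//        g.UseRegister(node->InputAt(0)));
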
142 
143 bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
144  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
145  block->deferred_ == current_block_->deferred_;
146 }
147 
148 
149 bool InstructionSelector::CanCover(Node* user, Node* node) const {
150  return node->OwnedBy(user) &&
151  schedule()->block(node) == schedule()->block(user);
152 }
153 
154 
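
// CanCover() is what permits a visitor to select one instruction for several
// nodes (e.g. folding a load into an arithmetic operand): the covered node
// must have its user as sole owner and live in the same block, so emitting it
// as part of the user cannot skip or reorder any other use of its value.
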
bool InstructionSelector::IsDefined(Node* node) const {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(defined_.size()));
  return defined_[id];
}


void InstructionSelector::MarkAsDefined(Node* node) {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(defined_.size()));
  defined_[id] = true;
}


bool InstructionSelector::IsUsed(Node* node) const {
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(used_.size()));
  return used_[id];
}


void InstructionSelector::MarkAsUsed(Node* node) {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(used_.size()));
  used_[id] = true;
}


bool InstructionSelector::IsDouble(const Node* node) const {
  DCHECK_NOT_NULL(node);
  return sequence()->IsDouble(node->id());
}


void InstructionSelector::MarkAsDouble(Node* node) {
  DCHECK_NOT_NULL(node);
  DCHECK(!IsReference(node));
  sequence()->MarkAsDouble(node->id());
}


bool InstructionSelector::IsReference(const Node* node) const {
  DCHECK_NOT_NULL(node);
  return sequence()->IsReference(node->id());
}


void InstructionSelector::MarkAsReference(Node* node) {
  DCHECK_NOT_NULL(node);
  DCHECK(!IsDouble(node));
  sequence()->MarkAsReference(node->id());
}


void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
  DCHECK_NOT_NULL(node);
  switch (RepresentationOf(rep)) {
    case kRepFloat32:
    case kRepFloat64:
      MarkAsDouble(node);
      break;
    case kRepTagged:
      MarkAsReference(node);
      break;
    default:
      break;
  }
}

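
// CallBuffer gathers, for a single call node, everything InitializeCallBuffer
// below produces: the projection nodes for each return value (output_nodes),
// their allocated operands (outputs), the operands passed directly to the
// call instruction (instruction_args), and the arguments that must be pushed
// on the stack before the call (pushed_nodes).
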
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
                       FrameStateDescriptor* frame_desc)
    : descriptor(d),
      frame_state_descriptor(frame_desc),
      output_nodes(zone),
      outputs(zone),
      instruction_args(zone),
      pushed_nodes(zone) {
  output_nodes.reserve(d->ReturnCount());
  outputs.reserve(d->ReturnCount());
  pushed_nodes.reserve(input_count());
  instruction_args.reserve(input_count() + frame_state_value_count());
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               bool call_code_immediate,
                                               bool call_address_immediate) {
  OperandGenerator g(this);
  DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
  DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
            buffer->input_count() + buffer->frame_state_count());

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes.push_back(call);
    } else {
      buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
      call->CollectProjections(&buffer->output_nodes);
    }

    // Filter out the outputs that aren't live because no projection uses them.
    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
      if (buffer->output_nodes[i] != NULL) {
        Node* output = buffer->output_nodes[i];
        MachineType type =
            buffer->descriptor->GetReturnType(static_cast<int>(i));
        LinkageLocation location =
            buffer->descriptor->GetReturnLocation(static_cast<int>(i));
        MarkAsRepresentation(type, output);
        buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
      }
    }
  }

  // The first argument is always the callee code.
  Node* callee = call->InputAt(0);
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->instruction_args.push_back(
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallAddress:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kInt32Constant ||
            callee->opcode() == IrOpcode::kInt64Constant))
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->instruction_args.push_back(
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
                        buffer->descriptor->GetInputType(0)));
      break;
  }
  DCHECK_EQ(1, buffer->instruction_args.size());

  // If the call needs a frame state, we insert the state information as
  // follows (n is the number of value inputs to the frame state):
  // arg 1               : deoptimization id.
  // arg 2 - arg (n + 1) : value inputs to the frame state.
  if (buffer->frame_state_descriptor != NULL) {
    InstructionSequence::StateId state_id =
        sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
    buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));

    Node* frame_state =
        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
    AddFrameStateInputs(frame_state, &buffer->instruction_args,
                        buffer->frame_state_descriptor);
  }
  DCHECK(1 + buffer->frame_state_value_count() ==
         buffer->instruction_args.size());

  size_t input_count = static_cast<size_t>(buffer->input_count());

  // Split the arguments into pushed_nodes and instruction_args. Pushed
  // arguments require an explicit push instruction before the call and do
  // not appear as arguments to the call. Everything else ends up
  // as an InstructionOperand argument to the call.
  InputIter iter(call->inputs().begin());
  int pushed_count = 0;
  for (size_t index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK(index == static_cast<size_t>(iter.index()));
    DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
    if (index == 0) continue;  // The first argument (callee) is already done.
    InstructionOperand* op =
        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
                      buffer->descriptor->GetInputType(index));
    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
        buffer->pushed_nodes.resize(stack_index + 1, NULL);
      }
      DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
      buffer->pushed_nodes[stack_index] = *iter;
      pushed_count++;
    } else {
      buffer->instruction_args.push_back(op);
    }
  }
  CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
  DCHECK(static_cast<size_t>(input_count) ==
         (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
          buffer->frame_state_value_count()));
}

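
// After InitializeCallBuffer, instruction_args is laid out as
//   [callee] [deopt id, frame state values...] [register/stack-operand args],
// while arguments whose location has a fixed-slot policy are diverted into
// pushed_nodes and must be pushed by explicit instructions before the call.
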
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK_EQ(NULL, current_block_);
  current_block_ = block;
  int current_block_end = static_cast<int>(instructions_.size());

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
       ++i) {
    Node* node = *i;
    // Skip nodes that are unused or already defined.
    if (!IsUsed(node) || IsDefined(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
  }

  // We're done with the block.
  // TODO(bmeurer): We should not mutate the schedule.
  block->code_end_ = current_block_end;
  block->code_start_ = static_cast<int>(instructions_.size());

  current_block_ = NULL;
}


static inline void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
  // Branch targets should not have phis.
  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
    const Node* node = *i;
    CHECK_NE(IrOpcode::kPhi, node->opcode());
  }
#endif
}


void InstructionSelector::VisitControl(BasicBlock* block) {
  Node* input = block->control_input_;
  switch (block->control_) {
    case BasicBlockData::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlockData::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // SSA deconstruction requires targets of branches not to have phis.
      // Edge split form guarantees this property, but is more strict.
      CheckNoPhis(tbranch);
      CheckNoPhis(fbranch);
      if (tbranch == fbranch) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlockData::kReturn: {
      // If the result itself is a return, return its input.
      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
                        ? input->InputAt(0)
                        : input;
      return VisitReturn(value);
    }
    case BasicBlockData::kThrow:
      return VisitThrow(input);
    case BasicBlockData::kNone: {
      // TODO(titzer): exit block doesn't have control.
      DCHECK(input == NULL);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void InstructionSelector::VisitNode(Node* node) {
  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
  SourcePosition source_position = source_positions_->GetSourcePosition(node);
  if (!source_position.IsUnknown()) {
    DCHECK(!source_position.IsInvalid());
    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
    }
  }
  switch (node->opcode()) {
    case IrOpcode::kStart:
    case IrOpcode::kLoop:
    case IrOpcode::kEnd:
    case IrOpcode::kBranch:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kEffectPhi:
    case IrOpcode::kMerge:
      // No code needed for these graph artifacts.
      return;
    case IrOpcode::kFinish:
      return MarkAsReference(node), VisitFinish(node);
    case IrOpcode::kParameter: {
      MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
      MarkAsRepresentation(type, node);
      return VisitParameter(node);
    }
    case IrOpcode::kPhi: {
      MachineType type = OpParameter<MachineType>(node);
      MarkAsRepresentation(type, node);
      return VisitPhi(node);
    }
    case IrOpcode::kProjection:
      return VisitProjection(node);
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kExternalConstant:
      return VisitConstant(node);
    case IrOpcode::kFloat32Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kFloat64Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kHeapConstant:
    case IrOpcode::kNumberConstant:
      // TODO(turbofan): only mark non-smis as references.
      return MarkAsReference(node), VisitConstant(node);
    case IrOpcode::kCall:
      return VisitCall(node, NULL, NULL);
    case IrOpcode::kFrameState:
    case IrOpcode::kStateValues:
      return;
    case IrOpcode::kLoad: {
      LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
      MarkAsRepresentation(rep, node);
      return VisitLoad(node);
    }
    case IrOpcode::kStore:
      return VisitStore(node);
    case IrOpcode::kWord32And:
      return VisitWord32And(node);
    case IrOpcode::kWord32Or:
      return VisitWord32Or(node);
    case IrOpcode::kWord32Xor:
      return VisitWord32Xor(node);
    case IrOpcode::kWord32Shl:
      return VisitWord32Shl(node);
    case IrOpcode::kWord32Shr:
      return VisitWord32Shr(node);
    case IrOpcode::kWord32Sar:
      return VisitWord32Sar(node);
    case IrOpcode::kWord32Ror:
      return VisitWord32Ror(node);
    case IrOpcode::kWord32Equal:
      return VisitWord32Equal(node);
    case IrOpcode::kWord64And:
      return VisitWord64And(node);
    case IrOpcode::kWord64Or:
      return VisitWord64Or(node);
    case IrOpcode::kWord64Xor:
      return VisitWord64Xor(node);
    case IrOpcode::kWord64Shl:
      return VisitWord64Shl(node);
    case IrOpcode::kWord64Shr:
      return VisitWord64Shr(node);
    case IrOpcode::kWord64Sar:
      return VisitWord64Sar(node);
    case IrOpcode::kWord64Ror:
      return VisitWord64Ror(node);
    case IrOpcode::kWord64Equal:
      return VisitWord64Equal(node);
    case IrOpcode::kInt32Add:
      return VisitInt32Add(node);
    case IrOpcode::kInt32AddWithOverflow:
      return VisitInt32AddWithOverflow(node);
    case IrOpcode::kInt32Sub:
      return VisitInt32Sub(node);
    case IrOpcode::kInt32SubWithOverflow:
      return VisitInt32SubWithOverflow(node);
    case IrOpcode::kInt32Mul:
      return VisitInt32Mul(node);
    case IrOpcode::kInt32Div:
      return VisitInt32Div(node);
    case IrOpcode::kInt32UDiv:
      return VisitInt32UDiv(node);
    case IrOpcode::kInt32Mod:
      return VisitInt32Mod(node);
    case IrOpcode::kInt32UMod:
      return VisitInt32UMod(node);
    case IrOpcode::kInt32LessThan:
      return VisitInt32LessThan(node);
    case IrOpcode::kInt32LessThanOrEqual:
      return VisitInt32LessThanOrEqual(node);
    case IrOpcode::kUint32LessThan:
      return VisitUint32LessThan(node);
    case IrOpcode::kUint32LessThanOrEqual:
      return VisitUint32LessThanOrEqual(node);
    case IrOpcode::kInt64Add:
      return VisitInt64Add(node);
    case IrOpcode::kInt64Sub:
      return VisitInt64Sub(node);
    case IrOpcode::kInt64Mul:
      return VisitInt64Mul(node);
    case IrOpcode::kInt64Div:
      return VisitInt64Div(node);
    case IrOpcode::kInt64UDiv:
      return VisitInt64UDiv(node);
    case IrOpcode::kInt64Mod:
      return VisitInt64Mod(node);
    case IrOpcode::kInt64UMod:
      return VisitInt64UMod(node);
    case IrOpcode::kInt64LessThan:
      return VisitInt64LessThan(node);
    case IrOpcode::kInt64LessThanOrEqual:
      return VisitInt64LessThanOrEqual(node);
    case IrOpcode::kChangeFloat32ToFloat64:
      return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
    case IrOpcode::kChangeInt32ToFloat64:
      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
    case IrOpcode::kChangeUint32ToFloat64:
      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
    case IrOpcode::kChangeFloat64ToInt32:
      return VisitChangeFloat64ToInt32(node);
    case IrOpcode::kChangeFloat64ToUint32:
      return VisitChangeFloat64ToUint32(node);
    case IrOpcode::kChangeInt32ToInt64:
      return VisitChangeInt32ToInt64(node);
    case IrOpcode::kChangeUint32ToUint64:
      return VisitChangeUint32ToUint64(node);
    case IrOpcode::kTruncateFloat64ToFloat32:
      return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
    case IrOpcode::kTruncateFloat64ToInt32:
      return VisitTruncateFloat64ToInt32(node);
    case IrOpcode::kTruncateInt64ToInt32:
      return VisitTruncateInt64ToInt32(node);
    case IrOpcode::kFloat64Add:
      return MarkAsDouble(node), VisitFloat64Add(node);
    case IrOpcode::kFloat64Sub:
      return MarkAsDouble(node), VisitFloat64Sub(node);
    case IrOpcode::kFloat64Mul:
      return MarkAsDouble(node), VisitFloat64Mul(node);
    case IrOpcode::kFloat64Div:
      return MarkAsDouble(node), VisitFloat64Div(node);
    case IrOpcode::kFloat64Mod:
      return MarkAsDouble(node), VisitFloat64Mod(node);
    case IrOpcode::kFloat64Sqrt:
      return MarkAsDouble(node), VisitFloat64Sqrt(node);
    case IrOpcode::kFloat64Equal:
      return VisitFloat64Equal(node);
    case IrOpcode::kFloat64LessThan:
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    default:
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
  }
}


#if V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord32Test(m.left().node(), &cont);
  }
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord64Test(m.left().node(), &cont);
  }
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitInt32AddWithOverflow(node, &cont);
  }
  FlagsContinuation cont;
  VisitInt32AddWithOverflow(node, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitInt32SubWithOverflow(node, &cont);
  }
  FlagsContinuation cont;
  VisitInt32SubWithOverflow(node, &cont);
}

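
// In both overflow visitors above, the FlagsContinuation decides how the
// overflow bit is consumed: if projection 1 of the node is live, the flags
// are materialized into that projection node; otherwise the default
// continuation drops the flags and only the arithmetic result is produced.
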
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  OperandGenerator g(this);
  Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnorderedLessThan, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
  VisitFloat64Compare(node, &cont);
}

#endif  // V8_TURBOFAN_BACKEND

// 32-bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND


// 32-bit targets and unsupported architectures need dummy implementations of
// selected 64-bit ops.
#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND


void InstructionSelector::VisitFinish(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
}


void InstructionSelector::VisitParameter(Node* node) {
  OperandGenerator g(this);
  int index = OpParameter<int>(node);
  Emit(kArchNop,
       g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
                          linkage()->GetParameterType(index)));
}


void InstructionSelector::VisitPhi(Node* node) {
  // TODO(bmeurer): Emit a PhiInstruction here.
  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
    MarkAsUsed(*i);
  }
}


void InstructionSelector::VisitProjection(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32SubWithOverflow:
      if (OpParameter<size_t>(node) == 0) {
        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      } else {
        DCHECK(OpParameter<size_t>(node) == 1u);
        MarkAsUsed(value);
      }
      break;
    default:
      break;
  }
}


void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}


void InstructionSelector::VisitGoto(BasicBlock* target) {
  if (IsNextInAssemblyOrder(target)) {
    // Fall through to the next block.
    Emit(kArchNop, NULL)->MarkAsControl();
  } else {
    // Jump to the next block.
    OperandGenerator g(this);
    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (OpParameter<size_t>(value) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* node = value->InputAt(0);
          Node* result = node->FindProjection(0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitInt32AddWithOverflow(node, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitInt32SubWithOverflow(node, &cont);
              default:
                break;
            }
          }
        }
        break;
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitWord32Test(value, &cont);
}

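
// Example of the combining above: for a branch on Word32Equal(Int32LessThan(a,
// b), 0), the comparison-against-zero is stripped (negating the continuation)
// and the branch is then emitted as a single compare-and-branch on a < b with
// the condition inverted, rather than materializing a boolean first.
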
void InstructionSelector::VisitReturn(Node* value) {
  OperandGenerator g(this);
  if (value != NULL) {
    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
                                       linkage()->GetReturnType()));
  } else {
    Emit(kArchRet, NULL);
  }
}


void InstructionSelector::VisitThrow(Node* value) {
  UNIMPLEMENTED();  // TODO(titzer)
}


FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
    Node* state) {
  DCHECK(state->opcode() == IrOpcode::kFrameState);
  DCHECK_EQ(5, state->InputCount());
  FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
  int parameters = OpParameter<int>(state->InputAt(0));
  int locals = OpParameter<int>(state->InputAt(1));
  int stack = OpParameter<int>(state->InputAt(2));

  FrameStateDescriptor* outer_state = NULL;
  Node* outer_node = state->InputAt(4);
  if (outer_node->opcode() == IrOpcode::kFrameState) {
    outer_state = GetFrameStateDescriptor(outer_node);
  }

  return new (instruction_zone())
      FrameStateDescriptor(state_info, parameters, locals, stack, outer_state);
}


static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kHeapConstant:
      return g->UseImmediate(input);
    default:
      return g->UseUnique(input);
  }
}


void InstructionSelector::AddFrameStateInputs(
    Node* state, InstructionOperandVector* inputs,
    FrameStateDescriptor* descriptor) {
  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());

  if (descriptor->outer_state() != NULL) {
    AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
  }

  Node* parameters = state->InputAt(0);
  Node* locals = state->InputAt(1);
  Node* stack = state->InputAt(2);
  Node* context = state->InputAt(3);

  DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
  DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
  DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());

  DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
  DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
  DCHECK_EQ(descriptor->stack_count(), stack->InputCount());

  OperandGenerator g(this);
  for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
    inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
  }
  if (descriptor->HasContext()) {
    inputs->push_back(UseOrImmediate(&g, context));
  }
  for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
    inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
  }
  for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
    inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
  }
}

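
// The flattened frame state input order is therefore: all inputs of the outer
// (inlining) frame states first, then for each frame state its parameters,
// the context (if present), locals, and finally stack values -- mirroring the
// counts recorded in the corresponding FrameStateDescriptor.
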
#if !V8_TURBOFAN_BACKEND

#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {}

#endif  // !V8_TURBOFAN_BACKEND

}  // namespace compiler
}  // namespace internal
}  // namespace v8