V8 Project
scheduler.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <deque>
6 #include <queue>
7 
8 #include "src/compiler/scheduler.h"
9 
10 #include "src/compiler/graph.h"
11 #include "src/compiler/graph-inl.h"
12 #include "src/compiler/node.h"
13 #include "src/compiler/node-properties.h"
14 #include "src/compiler/node-properties-inl.h"
15 #include "src/data-flow.h"
16 
17 namespace v8 {
18 namespace internal {
19 namespace compiler {
20 
21 static inline void Trace(const char* msg, ...) {
22  if (FLAG_trace_turbo_scheduler) {
23  va_list arguments;
24  va_start(arguments, msg);
25  base::OS::VPrint(msg, arguments);
26  va_end(arguments);
27  }
28 }
29 
30 
31 // Internal class to build a control flow graph (i.e. the basic blocks and edges
32 // between them within a Schedule) from the node graph.
33 // Visits the control edges of the graph backwards from end in order to find
34 // the connected control subgraph, needed for scheduling.
35 class CFGBuilder {
36  public:
37  Scheduler* scheduler_;
38  Schedule* schedule_;
39  ZoneQueue<Node*> queue_;
40  NodeVector control_;
41 
42  CFGBuilder(Zone* zone, Scheduler* scheduler)
43  : scheduler_(scheduler),
44  schedule_(scheduler->schedule_),
45  queue_(zone),
46  control_(zone) {}
47 
48  // Run the control flow graph construction algorithm by walking the graph
49  // backwards from end through control edges, building and connecting the
50  // basic blocks for control nodes.
51  void Run() {
52  Graph* graph = scheduler_->graph_;
53  FixNode(schedule_->start(), graph->start());
54  Queue(graph->end());
55 
56  while (!queue_.empty()) { // Breadth-first backwards traversal.
57  Node* node = queue_.front();
58  queue_.pop();
59  int max = NodeProperties::PastControlIndex(node);
60  for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
61  Queue(node->InputAt(i));
62  }
63  }
64 
65  for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
66  ConnectBlocks(*i); // Connect block to its predecessor/successors.
67  }
68 
69  FixNode(schedule_->end(), graph->end());
70  }
71 
72  void FixNode(BasicBlock* block, Node* node) {
73  schedule_->AddNode(block, node);
74  scheduler_->GetData(node)->is_connected_control_ = true;
75  scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
76  }
77 
78  void Queue(Node* node) {
79  // Mark the connected control nodes as they are queued.
80  Scheduler::SchedulerData* data = scheduler_->GetData(node);
81  if (!data->is_connected_control_) {
82  BuildBlocks(node);
83  queue_.push(node);
84  control_.push_back(node);
85  data->is_connected_control_ = true;
86  }
87  }
88 
89  void BuildBlocks(Node* node) {
90  switch (node->opcode()) {
91  case IrOpcode::kLoop:
92  case IrOpcode::kMerge:
93  BuildBlockForNode(node);
94  break;
95  case IrOpcode::kBranch:
96  BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
97  break;
98  default:
99  break;
100  }
101  }
102 
103  void ConnectBlocks(Node* node) {
104  switch (node->opcode()) {
105  case IrOpcode::kLoop:
106  case IrOpcode::kMerge:
107  ConnectMerge(node);
108  break;
109  case IrOpcode::kBranch:
110  scheduler_->schedule_root_nodes_.push_back(node);
111  ConnectBranch(node);
112  break;
113  case IrOpcode::kReturn:
114  scheduler_->schedule_root_nodes_.push_back(node);
115  ConnectReturn(node);
116  break;
117  default:
118  break;
119  }
120  }
121 
122  void BuildBlockForNode(Node* node) {
123  if (schedule_->block(node) == NULL) {
124  BasicBlock* block = schedule_->NewBasicBlock();
125  Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
126  node->op()->mnemonic());
127  FixNode(block, node);
128  }
129  }
130 
131  void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
132  IrOpcode::Value b) {
133  Node* successors[2];
134  CollectSuccessorProjections(node, successors, a, b);
135  BuildBlockForNode(successors[0]);
136  BuildBlockForNode(successors[1]);
137  }
138 
139  // Collect the branch-related projections from a node, such as IfTrue,
140  // IfFalse.
141  // TODO(titzer): consider moving this to node.h
142  void CollectSuccessorProjections(Node* node, Node** buffer,
143  IrOpcode::Value true_opcode,
144  IrOpcode::Value false_opcode) {
145  buffer[0] = NULL;
146  buffer[1] = NULL;
147  for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
148  if ((*i)->opcode() == true_opcode) {
149  DCHECK_EQ(NULL, buffer[0]);
150  buffer[0] = *i;
151  }
152  if ((*i)->opcode() == false_opcode) {
153  DCHECK_EQ(NULL, buffer[1]);
154  buffer[1] = *i;
155  }
156  }
157  DCHECK_NE(NULL, buffer[0]);
158  DCHECK_NE(NULL, buffer[1]);
159  }
160 
161  void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
162  IrOpcode::Value true_opcode,
163  IrOpcode::Value false_opcode) {
164  Node* successors[2];
165  CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
166  buffer[0] = schedule_->block(successors[0]);
167  buffer[1] = schedule_->block(successors[1]);
168  }
169 
170  void ConnectBranch(Node* branch) {
171  Node* branch_block_node = NodeProperties::GetControlInput(branch);
172  BasicBlock* branch_block = schedule_->block(branch_block_node);
173  DCHECK(branch_block != NULL);
174 
175  BasicBlock* successor_blocks[2];
176  CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
177  IrOpcode::kIfFalse);
178 
179  TraceConnect(branch, branch_block, successor_blocks[0]);
180  TraceConnect(branch, branch_block, successor_blocks[1]);
181 
182  schedule_->AddBranch(branch_block, branch, successor_blocks[0],
183  successor_blocks[1]);
184  }
185 
186  void ConnectMerge(Node* merge) {
187  BasicBlock* block = schedule_->block(merge);
188  DCHECK(block != NULL);
189  // For all of the merge's control inputs, add a goto at the end to the
190  // merge's basic block.
191  for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
192  ++j) {
193  BasicBlock* predecessor_block = schedule_->block(*j);
194  if ((*j)->opcode() != IrOpcode::kReturn) {
195  TraceConnect(merge, predecessor_block, block);
196  schedule_->AddGoto(predecessor_block, block);
197  }
198  }
199  }
200 
201  void ConnectReturn(Node* ret) {
202  Node* return_block_node = NodeProperties::GetControlInput(ret);
203  BasicBlock* return_block = schedule_->block(return_block_node);
204  TraceConnect(ret, return_block, NULL);
205  schedule_->AddReturn(return_block, ret);
206  }
207 
208  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
209  DCHECK_NE(NULL, block);
210  if (succ == NULL) {
211  Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
212  block->id());
213  } else {
214  Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
215  block->id(), succ->id());
216  }
217  }
218 };
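// Illustrative example (hypothetical node names): for a control chain
// Start -> Branch -> {IfTrue, IfFalse} -> Merge -> Return -> End, the builder
// above creates basic blocks for the IfTrue and IfFalse projections and for
// the Merge; ConnectBlocks then wires the branch block to its two successor
// blocks, adds gotos from the predecessor blocks into the merge block, and
// attaches the Return to the end of its block.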
219 
220 
221 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
222  SchedulerData def = {0, 0, false, false, kUnknown};
223  return def;
224 }
225 
226 
227 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
228  : zone_(zone),
229  graph_(graph),
230  schedule_(schedule),
231  scheduled_nodes_(zone),
232  schedule_root_nodes_(zone),
233  node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
234  has_floating_control_(false) {}
235 
236 
237 Schedule* Scheduler::ComputeSchedule(Graph* graph) {
238  Schedule* schedule;
239  bool had_floating_control = false;
240  do {
241  Zone tmp_zone(graph->zone()->isolate());
242  schedule = new (graph->zone())
243  Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
244  Scheduler scheduler(&tmp_zone, graph, schedule);
245 
246  scheduler.BuildCFG();
247 
248  Scheduler::ComputeSpecialRPO(schedule);
249  scheduler.GenerateImmediateDominatorTree();
250 
251  scheduler.PrepareUses();
252  scheduler.ScheduleEarly();
253  scheduler.ScheduleLate();
254 
255  had_floating_control = scheduler.ConnectFloatingControl();
256  } while (had_floating_control);
257 
258  return schedule;
259 }
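// A minimal usage sketch (hypothetical caller, not part of this file): given
// a fully built TurboFan Graph, the whole pipeline above is driven by one
// call, which internally repeats until no floating control remains:
//
//   Schedule* schedule = Scheduler::ComputeSchedule(graph);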
260 
261 
262 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
263  SchedulerData* data = GetData(node);
264  if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
265  switch (node->opcode()) {
266  case IrOpcode::kParameter:
267  // Parameters are always fixed to the start node.
268  data->placement_ = kFixed;
269  break;
270  case IrOpcode::kPhi:
271  case IrOpcode::kEffectPhi: {
272  // Phis and effect phis are fixed if their control inputs are.
273  data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
274  break;
275  }
276 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
277  CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
278 #undef DEFINE_FLOATING_CONTROL_CASE
279  {
280  // Control nodes that were not control-reachable from end may float.
281  data->placement_ = kSchedulable;
282  if (!data->is_connected_control_) {
283  data->is_floating_control_ = true;
284  has_floating_control_ = true;
285  Trace("Floating control found: #%d:%s\n", node->id(),
286  node->op()->mnemonic());
287  }
288  break;
289  }
290  default:
291  data->placement_ = kSchedulable;
292  break;
293  }
294  }
295  return data->placement_;
296 }
297 
298 
299 void Scheduler::BuildCFG() {
300  Trace("---------------- CREATING CFG ------------------\n");
301  CFGBuilder cfg_builder(zone_, this);
302  cfg_builder.Run();
303  // Initialize per-block data.
304  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
305 }
306 
307 
308 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
309  while (b1 != b2) {
310  int b1_rpo = GetRPONumber(b1);
311  int b2_rpo = GetRPONumber(b2);
312  DCHECK(b1_rpo != b2_rpo);
313  if (b1_rpo < b2_rpo) {
314  b2 = b2->dominator_;
315  } else {
316  b1 = b1->dominator_;
317  }
318  }
319  return b1;
320 }
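// Worked example (illustrative block names): with dominator links B3 -> B1
// and B2 -> B1 and RPO numbers B1=1, B2=2, B3=3, GetCommonDominator(B2, B3)
// first moves B3 (the larger RPO number) up to B1, then moves B2 up to B1,
// and returns B1 once both pointers are equal.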
321 
322 
323 void Scheduler::GenerateImmediateDominatorTree() {
324  // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
325  // if this becomes really slow.
326  Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
327  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
328  BasicBlock* current_rpo = schedule_->rpo_order_[i];
329  if (current_rpo != schedule_->start()) {
330  BasicBlock::Predecessors::iterator current_pred =
331  current_rpo->predecessors().begin();
332  BasicBlock::Predecessors::iterator end =
333  current_rpo->predecessors().end();
334  DCHECK(current_pred != end);
335  BasicBlock* dominator = *current_pred;
336  ++current_pred;
337  // For multiple predecessors, walk up the rpo ordering until a common
338  // dominator is found.
339  int current_rpo_pos = GetRPONumber(current_rpo);
340  while (current_pred != end) {
341  // Don't examine backwards edges
342  BasicBlock* pred = *current_pred;
343  if (GetRPONumber(pred) < current_rpo_pos) {
344  dominator = GetCommonDominator(dominator, *current_pred);
345  }
346  ++current_pred;
347  }
348  current_rpo->dominator_ = dominator;
349  Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
350  }
351  }
352 }
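// Worked example (illustrative): for a diamond B0 -> {B1, B2} -> B3, block B3
// has the two forward predecessors B1 and B2; starting from B1, the loop above
// folds in B2 via GetCommonDominator(B1, B2) = B0, so B0 becomes B3's
// immediate dominator. Predecessors with a larger RPO number (backedges) are
// skipped.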
353 
354 
355 class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
356  public:
357  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
358  : has_changed_rpo_constraints_(true),
359  scheduler_(scheduler),
360  schedule_(scheduler->schedule_) {}
361 
362  GenericGraphVisit::Control Pre(Node* node) {
363  int max_rpo = 0;
364  // Fixed nodes already know their schedule early position.
365  if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
366  BasicBlock* block = schedule_->block(node);
367  DCHECK(block != NULL);
368  max_rpo = block->rpo_number_;
369  if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
370  has_changed_rpo_constraints_ = true;
371  }
372  scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
373  Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
374  node->op()->mnemonic(), max_rpo);
375  }
376  return GenericGraphVisit::CONTINUE;
377  }
378 
379  GenericGraphVisit::Control Post(Node* node) {
380  int max_rpo = 0;
381  // Otherwise, the minimum rpo for the node is the max of all of the inputs.
382  if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
383  for (InputIter i = node->inputs().begin(); i != node->inputs().end();
384  ++i) {
385  int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
386  if (control_rpo > max_rpo) {
387  max_rpo = control_rpo;
388  }
389  }
390  if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
391  has_changed_rpo_constraints_ = true;
392  }
393  scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
394  Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
395  node->op()->mnemonic(), max_rpo);
396  }
397  return GenericGraphVisit::CONTINUE;
398  }
399 
400  // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
401  // rewritten to use a pre-order traversal from the start instead.
402  bool has_changed_rpo_constraints_;
403 
404  private:
405  Scheduler* scheduler_;
406  Schedule* schedule_;
407 };
408 
409 
410 void Scheduler::ScheduleEarly() {
411  Trace("------------------- SCHEDULE EARLY ----------------\n");
412 
413  int fixpoint_count = 0;
414  ScheduleEarlyNodeVisitor visitor(this);
415  while (visitor.has_changed_rpo_constraints_) {
416  visitor.has_changed_rpo_constraints_ = false;
417  graph_->VisitNodeInputsFromEnd(&visitor);
418  fixpoint_count++;
419  }
420 
421  Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
422 }
423 
424 
425 class PrepareUsesVisitor : public NullNodeVisitor {
426  public:
427  explicit PrepareUsesVisitor(Scheduler* scheduler)
428  : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
429 
430  GenericGraphVisit::Control Pre(Node* node) {
431  if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
432  // Fixed nodes are always roots for schedule late.
433  scheduler_->schedule_root_nodes_.push_back(node);
434  if (!schedule_->IsScheduled(node)) {
435  // Make sure root nodes are scheduled in their respective blocks.
436  Trace(" Scheduling fixed position node #%d:%s\n", node->id(),
437  node->op()->mnemonic());
438  IrOpcode::Value opcode = node->opcode();
439  BasicBlock* block =
440  opcode == IrOpcode::kParameter
441  ? schedule_->start()
442  : schedule_->block(NodeProperties::GetControlInput(node));
443  DCHECK(block != NULL);
444  schedule_->AddNode(block, node);
445  }
446  }
447 
448  return GenericGraphVisit::CONTINUE;
449  }
450 
451  void PostEdge(Node* from, int index, Node* to) {
452  // If the edge is from an unscheduled node, then tally it in the use count
453  // for all of its inputs. The same criterion will be used in ScheduleLate
454  // for decrementing use counts.
455  if (!schedule_->IsScheduled(from)) {
456  DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
457  ++(scheduler_->GetData(to)->unscheduled_count_);
458  Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
459  to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
460  scheduler_->GetData(to)->unscheduled_count_);
461  }
462  }
463 
464  private:
465  Scheduler* scheduler_;
466  Schedule* schedule_;
467 };
468 
469 
470 void Scheduler::PrepareUses() {
471  Trace("------------------- PREPARE USES ------------------\n");
472  // Count the uses of every node; this count is used to ensure that all of a
473  // node's uses are scheduled before the node itself.
474  PrepareUsesVisitor prepare_uses(this);
475  graph_->VisitNodeInputsFromEnd(&prepare_uses);
476 }
477 
478 
479 class ScheduleLateNodeVisitor : public NullNodeVisitor {
480  public:
481  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
482  : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
483 
484  GenericGraphVisit::Control Pre(Node* node) {
485  // Don't schedule nodes that are already scheduled.
486  if (schedule_->IsScheduled(node)) {
487  return GenericGraphVisit::CONTINUE;
488  }
489  Scheduler::SchedulerData* data = scheduler_->GetData(node);
490  DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
491 
492  // If all the uses of a node have been scheduled, then the node itself can
493  // be scheduled.
494  bool eligible = data->unscheduled_count_ == 0;
495  Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
496  node->op()->mnemonic(), eligible ? "true" : "false");
497  if (!eligible) return GenericGraphVisit::DEFER;
498 
499  // Determine the dominating block for all of the uses of this node. It is
500  // the latest block that this node can be scheduled in.
501  BasicBlock* block = NULL;
502  for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
503  ++i) {
504  BasicBlock* use_block = GetBlockForUse(i.edge());
505  block = block == NULL ? use_block : use_block == NULL
506  ? block
507  : scheduler_->GetCommonDominator(
508  block, use_block);
509  }
510  DCHECK(block != NULL);
511 
512  int min_rpo = data->minimum_rpo_;
513  Trace(
514  "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
515  "minimum_rpo = %d\n",
516  node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
517  min_rpo);
518  // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
519  // into enclosing loop pre-headers until they would precede their
520  // ScheduleEarly position.
521  BasicBlock* hoist_block = block;
522  while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
523  if (hoist_block->loop_depth_ < block->loop_depth_) {
524  block = hoist_block;
525  Trace(" hoisting #%d:%s to block %d\n", node->id(),
526  node->op()->mnemonic(), block->id());
527  }
528  // Try to hoist to the pre-header of the loop header.
529  hoist_block = hoist_block->loop_header();
530  if (hoist_block != NULL) {
531  BasicBlock* pre_header = hoist_block->dominator_;
532  DCHECK(pre_header == NULL ||
533  *hoist_block->predecessors().begin() == pre_header);
534  Trace(
535  " hoist to pre-header B%d of loop header B%d, depth would be %d\n",
536  pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
537  hoist_block = pre_header;
538  }
539  }
540 
541  ScheduleNode(block, node);
542 
543  return GenericGraphVisit::CONTINUE;
544  }
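// Illustrative example: if the common dominator of a node's uses is a block
// inside a loop (say B5 at loop depth 1) while its schedule-early bound
// (minimum_rpo) already lies outside the loop, the hoisting loop above walks
// from B5 through the loop header to the pre-header; every candidate block
// with a smaller loop_depth_ reached while its RPO number is still
// >= minimum_rpo becomes the new placement, so the node ends up in the
// pre-header instead of the loop body.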
545 
546  private:
547  BasicBlock* GetBlockForUse(Node::Edge edge) {
548  Node* use = edge.from();
549  IrOpcode::Value opcode = use->opcode();
550  if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
551  // If the use is from a fixed (i.e. non-floating) phi, use the block
552  // of the corresponding control input to the merge.
553  int index = edge.index();
554  if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
555  Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(),
556  use->op()->mnemonic());
557  Node* merge = NodeProperties::GetControlInput(use, 0);
558  opcode = merge->opcode();
559  DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
560  use = NodeProperties::GetControlInput(merge, index);
561  }
562  }
563  BasicBlock* result = schedule_->block(use);
564  if (result == NULL) return NULL;
565  Trace(" must dominate use #%d:%s in B%d\n", use->id(),
566  use->op()->mnemonic(), result->id());
567  return result;
568  }
569 
570  void ScheduleNode(BasicBlock* block, Node* node) {
571  schedule_->PlanNode(block, node);
572  scheduler_->scheduled_nodes_[block->id()].push_back(node);
573 
574  // Reduce the use count of the node's inputs to potentially make them
575  // schedulable.
576  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
577  Scheduler::SchedulerData* data = scheduler_->GetData(*i);
578  DCHECK(data->unscheduled_count_ > 0);
579  --data->unscheduled_count_;
580  if (FLAG_trace_turbo_scheduler) {
581  Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
582  (*i)->op()->mnemonic(), i.edge().from()->id(),
583  i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
584  if (data->unscheduled_count_ == 0) {
585  Trace(" newly eligible #%d:%s\n", (*i)->id(),
586  (*i)->op()->mnemonic());
587  }
588  }
589  }
590  }
591 
592  Scheduler* scheduler_;
593  Schedule* schedule_;
594 };
595 
596 
597 void Scheduler::ScheduleLate() {
598  Trace("------------------- SCHEDULE LATE -----------------\n");
599  if (FLAG_trace_turbo_scheduler) {
600  Trace("roots: ");
601  for (NodeVectorIter i = schedule_root_nodes_.begin();
602  i != schedule_root_nodes_.end(); ++i) {
603  Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
604  }
605  Trace("\n");
606  }
607 
608  // Schedule: Places nodes in the dominator block of all their uses.
609  ScheduleLateNodeVisitor schedule_late_visitor(this);
610 
611  {
612  Zone zone(zone_->isolate());
615  graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
616  &schedule_late_visitor);
617  }
618 
619  // Add collected nodes for basic blocks to their blocks in the right order.
620  int block_num = 0;
621  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
622  i != scheduled_nodes_.end(); ++i) {
623  for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
624  schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
625  }
626  block_num++;
627  }
628 }
629 
630 
631 bool Scheduler::ConnectFloatingControl() {
632  if (!has_floating_control_) return false;
633 
634  Trace("Connecting floating control...\n");
635 
636  // Process blocks and instructions backwards to find and connect floating
637  // control nodes into the control graph according to the block they were
638  // scheduled into.
639  int max = static_cast<int>(schedule_->rpo_order()->size());
640  for (int i = max - 1; i >= 0; i--) {
641  BasicBlock* block = schedule_->rpo_order()->at(i);
642  // TODO(titzer): we place at most one floating control structure per
643  // basic block because scheduling currently can interleave phis from
644  // one subgraph with the merges from another subgraph.
645  bool one_placed = false;
646  for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
647  Node* node = block->nodes_[j];
648  SchedulerData* data = GetData(node);
649  if (data->is_floating_control_ && !data->is_connected_control_ &&
650  !one_placed) {
651  Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(),
652  node->op()->mnemonic(), block->id());
653  ConnectFloatingControlSubgraph(block, node);
654  one_placed = true;
655  }
656  }
657  }
658 
659  return true;
660 }
661 
662 
663 void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
664  Node* block_start = block->nodes_[0];
665  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
666  // Find the current "control successor" of the node that starts the block
667  // by searching the control uses for a control input edge from a connected
668  // control node.
669  Node* control_succ = NULL;
670  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
671  ++i) {
672  Node::Edge edge = i.edge();
673  if (NodeProperties::IsControlEdge(edge) &&
674  GetData(edge.from())->is_connected_control_) {
675  DCHECK_EQ(NULL, control_succ);
676  control_succ = edge.from();
677  control_succ->ReplaceInput(edge.index(), end);
678  }
679  }
680  DCHECK_NE(NULL, control_succ);
681  Trace(" Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
682  end->id(), end->op()->mnemonic(), control_succ->id(),
683  control_succ->op()->mnemonic(), block_start->id(),
684  block_start->op()->mnemonic());
685 
686  // Find the "start" node of the control subgraph, which should be the
687  // unique node that is itself floating control but has a control input that
688  // is not floating.
689  Node* start = NULL;
690  ZoneQueue<Node*> queue(zone_);
691  queue.push(end);
692  GetData(end)->is_connected_control_ = true;
693  while (!queue.empty()) {
694  Node* node = queue.front();
695  queue.pop();
696  Trace(" Search #%d:%s for control subgraph start\n", node->id(),
697  node->op()->mnemonic());
698  int max = NodeProperties::PastControlIndex(node);
699  for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
700  Node* input = node->InputAt(i);
701  SchedulerData* data = GetData(input);
702  if (data->is_floating_control_) {
703  // {input} is floating control.
704  if (!data->is_connected_control_) {
705  // First time seeing {input} during this traversal, queue it.
706  queue.push(input);
707  data->is_connected_control_ = true;
708  }
709  } else {
710  // Otherwise, {node} is the start node, because it is floating control
711  // but is connected to {input} that is not floating control.
712  DCHECK_EQ(NULL, start); // There can be only one.
713  start = node;
714  }
715  }
716  }
717 
718  DCHECK_NE(NULL, start);
719  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
720 
721  Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(),
722  start->op()->mnemonic(), block_start->id(),
723  block_start->op()->mnemonic());
724 }
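// Illustrative example: a floating Branch/IfTrue/IfFalse/Merge diamond that
// was only reachable through value edges gets scheduled like an ordinary
// value; once its Merge lands in block B, the code above re-points B's old
// control successor at the Merge and gives the diamond's entry node B's
// starting control node as its control input, splicing the diamond into the
// control chain.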
725 
726 
727 // Numbering for BasicBlockData.rpo_number_ for this block traversal:
728 static const int kBlockOnStack = -2;
729 static const int kBlockVisited1 = -3;
730 static const int kBlockVisited2 = -4;
731 static const int kBlockUnvisited1 = -1;
732 static const int kBlockUnvisited2 = kBlockVisited1;
733 
734 struct SpecialRPOStackFrame {
735  BasicBlock* block;
736  int index;
737 };
738 
739 struct BlockList {
740  BasicBlock* block;
741  BlockList* next;
742 
743  BlockList* Add(Zone* zone, BasicBlock* b) {
744  BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
745  list->block = b;
746  list->next = this;
747  return list;
748  }
749 
750  void Serialize(BasicBlockVector* final_order) {
751  for (BlockList* l = this; l != NULL; l = l->next) {
752  l->block->rpo_number_ = static_cast<int>(final_order->size());
753  final_order->push_back(l->block);
754  }
755  }
756 };
757 
758 struct LoopInfo {
759  BasicBlock* header;
760  ZoneList<BasicBlock*>* outgoing;
761  BitVector* members;
762  LoopInfo* prev;
763  BlockList* end;
764  BlockList* start;
765 
766  void AddOutgoing(Zone* zone, BasicBlock* block) {
767  if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
768  outgoing->Add(block, zone);
769  }
770 };
771 
772 
773 static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
774  int unvisited) {
775  if (child->rpo_number_ == unvisited) {
776  stack[depth].block = child;
777  stack[depth].index = 0;
778  child->rpo_number_ = kBlockOnStack;
779  return depth + 1;
780  }
781  return depth;
782 }
783 
784 
785 // Computes loop membership from the backedges of the control flow graph.
786 static LoopInfo* ComputeLoopInfo(
787  Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
788  ZoneList<std::pair<BasicBlock*, int> >* backedges) {
789  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
790  memset(loops, 0, num_loops * sizeof(LoopInfo));
791 
792  // Compute loop membership starting from backedges.
793  // O(max(loop_depth) * max(|loop|))
794  for (int i = 0; i < backedges->length(); i++) {
795  BasicBlock* member = backedges->at(i).first;
796  BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
797  int loop_num = header->loop_end_;
798  if (loops[loop_num].header == NULL) {
799  loops[loop_num].header = header;
800  loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
801  }
802 
803  int queue_length = 0;
804  if (member != header) {
805  // As long as the header doesn't have a backedge to itself,
806  // push the member onto the queue and process its predecessors.
807  if (!loops[loop_num].members->Contains(member->id())) {
808  loops[loop_num].members->Add(member->id());
809  }
810  queue[queue_length++].block = member;
811  }
812 
813  // Propagate loop membership backwards. All predecessors of M up to the
814  // loop header H are members of the loop too. O(|blocks between M and H|).
815  while (queue_length > 0) {
816  BasicBlock* block = queue[--queue_length].block;
817  for (int i = 0; i < block->PredecessorCount(); i++) {
818  BasicBlock* pred = block->PredecessorAt(i);
819  if (pred != header) {
820  if (!loops[loop_num].members->Contains(pred->id())) {
821  loops[loop_num].members->Add(pred->id());
822  queue[queue_length++].block = pred;
823  }
824  }
825  }
826  }
827  }
828  return loops;
829 }
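// Illustrative example: for a backedge B3 -> B1 with loop header B1, B3 is
// recorded as the first member and queued; walking predecessors B3 -> B2
// stops at the header, so the member set ends up as {B2, B3}. The header
// itself is intentionally not a member, which VerifySpecialRPO below checks.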
830 
831 
832 #if DEBUG
833 static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
834  PrintF("-- RPO with %d loops ", num_loops);
835  if (num_loops > 0) {
836  PrintF("(");
837  for (int i = 0; i < num_loops; i++) {
838  if (i > 0) PrintF(" ");
839  PrintF("B%d", loops[i].header->id());
840  }
841  PrintF(") ");
842  }
843  PrintF("-- \n");
844 
845  for (int i = 0; i < static_cast<int>(order->size()); i++) {
846  BasicBlock* block = (*order)[i];
847  int bid = block->id();
848  PrintF("%5d:", i);
849  for (int i = 0; i < num_loops; i++) {
850  bool membership = loops[i].members->Contains(bid);
851  bool range = loops[i].header->LoopContains(block);
852  PrintF(membership ? " |" : " ");
853  PrintF(range ? "x" : " ");
854  }
855  PrintF(" B%d: ", bid);
856  if (block->loop_end_ >= 0) {
857  PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
858  }
859  PrintF("\n");
860  }
861 }
862 
863 
864 static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
865  BasicBlockVector* order) {
866  DCHECK(order->size() > 0);
867  DCHECK((*order)[0]->id() == 0); // entry should be first.
868 
869  for (int i = 0; i < num_loops; i++) {
870  LoopInfo* loop = &loops[i];
871  BasicBlock* header = loop->header;
872 
873  DCHECK(header != NULL);
874  DCHECK(header->rpo_number_ >= 0);
875  DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
876  DCHECK(header->loop_end_ >= 0);
877  DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
878  DCHECK(header->loop_end_ > header->rpo_number_);
879 
880  // Verify the start ... end list relationship.
881  int links = 0;
882  BlockList* l = loop->start;
883  DCHECK(l != NULL && l->block == header);
884  bool end_found;
885  while (true) {
886  if (l == NULL || l == loop->end) {
887  end_found = (loop->end == l);
888  break;
889  }
890  // The list should be in the same order as the final result.
891  DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
892  links++;
893  l = l->next;
894  DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
895  }
896  DCHECK(links > 0);
897  DCHECK(links == (header->loop_end_ - header->rpo_number_));
898  DCHECK(end_found);
899 
900  // Check the contiguousness of loops.
901  int count = 0;
902  for (int j = 0; j < static_cast<int>(order->size()); j++) {
903  BasicBlock* block = order->at(j);
904  DCHECK(block->rpo_number_ == j);
905  if (j < header->rpo_number_ || j >= header->loop_end_) {
906  DCHECK(!loop->members->Contains(block->id()));
907  } else {
908  if (block == header) {
909  DCHECK(!loop->members->Contains(block->id()));
910  } else {
911  DCHECK(loop->members->Contains(block->id()));
912  }
913  count++;
914  }
915  }
916  DCHECK(links == count);
917  }
918 }
919 #endif // DEBUG
920 
921 
922 // Compute the special reverse-post-order block ordering, which is essentially
923 // an RPO of the graph where loop bodies are contiguous. Properties:
924 // 1. If block A is a predecessor of B, then A appears before B in the order,
925 // unless B is a loop header and A is in the loop headed at B
926 // (i.e. A -> B is a backedge).
927 // => If block A dominates block B, then A appears before B in the order.
928 // => If block A is a loop header, A appears before all blocks in the loop
929 // headed at A.
930 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
931 // do not belong to the loop.)
932 // Note a simple RPO traversal satisfies (1) but not (2).
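// Illustrative example: for B0 -> B1, B1 -> {B2, B4}, B2 -> B3, B3 -> B1
// (backedge), a plain RPO can produce B0 B1 B4 B2 B3, splitting the loop
// {B1, B2, B3}; the special RPO defers the loop-exiting edge B1 -> B4 until
// the loop body is finished, yielding B0 B1 B2 B3 B4 with B1's loop range
// covering [1, 4).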
933 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
934  Zone tmp_zone(schedule->zone()->isolate());
935  Zone* zone = &tmp_zone;
936  Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
937  // RPO should not have been computed for this schedule yet.
938  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
939  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
940 
941  // Perform an iterative RPO traversal using an explicit stack,
942  // recording backedges that form cycles. O(|B|).
943  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
944  SpecialRPOStackFrame* stack =
945  zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
946  BasicBlock* entry = schedule->start();
947  BlockList* order = NULL;
948  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
949  int num_loops = 0;
950 
951  while (stack_depth > 0) {
952  int current = stack_depth - 1;
953  SpecialRPOStackFrame* frame = stack + current;
954 
955  if (frame->index < frame->block->SuccessorCount()) {
956  // Process the next successor.
957  BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
958  if (succ->rpo_number_ == kBlockVisited1) continue;
959  if (succ->rpo_number_ == kBlockOnStack) {
960  // The successor is on the stack, so this is a backedge (cycle).
961  backedges.Add(
962  std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
963  if (succ->loop_end_ < 0) {
964  // Assign a new loop number to the header if it doesn't have one.
965  succ->loop_end_ = num_loops++;
966  }
967  } else {
968  // Push the successor onto the stack.
969  DCHECK(succ->rpo_number_ == kBlockUnvisited1);
970  stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
971  }
972  } else {
973  // Finished with all successors; pop the stack and add the block.
974  order = order->Add(zone, frame->block);
975  frame->block->rpo_number_ = kBlockVisited1;
976  stack_depth--;
977  }
978  }
979 
980  // If no loops were encountered, then the order we computed was correct.
981  LoopInfo* loops = NULL;
982  if (num_loops != 0) {
983  // Otherwise, compute the loop information from the backedges in order
984  // to perform a traversal that groups loop bodies together.
985  loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
986  &backedges);
987 
988  // Initialize the "loop stack". Note the entry could be a loop header.
989  LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
990  order = NULL;
991 
992  // Perform an iterative post-order traversal, visiting loop bodies before
993  // edges that lead out of loops. Visits each block once, but linking loop
994  // sections together is linear in the loop size, so overall is
995  // O(|B| + max(loop_depth) * max(|loop|))
996  stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
997  while (stack_depth > 0) {
998  SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
999  BasicBlock* block = frame->block;
1000  BasicBlock* succ = NULL;
1001 
1002  if (frame->index < block->SuccessorCount()) {
1003  // Process the next normal successor.
1004  succ = block->SuccessorAt(frame->index++);
1005  } else if (block->IsLoopHeader()) {
1006  // Process additional outgoing edges from the loop header.
1007  if (block->rpo_number_ == kBlockOnStack) {
1008  // Finish the loop body the first time the header is left on the
1009  // stack.
1010  DCHECK(loop != NULL && loop->header == block);
1011  loop->start = order->Add(zone, block);
1012  order = loop->end;
1013  block->rpo_number_ = kBlockVisited2;
1014  // Pop the loop stack and continue visiting outgoing edges within the
1015  // context of the outer loop, if any.
1016  loop = loop->prev;
1017  // We leave the loop header on the stack; the rest of this iteration
1018  // and later iterations will go through its outgoing edges list.
1019  }
1020 
1021  // Use the next outgoing edge if there are any.
1022  int outgoing_index = frame->index - block->SuccessorCount();
1023  LoopInfo* info = &loops[block->loop_end_];
1024  DCHECK(loop != info);
1025  if (info->outgoing != NULL &&
1026  outgoing_index < info->outgoing->length()) {
1027  succ = info->outgoing->at(outgoing_index);
1028  frame->index++;
1029  }
1030  }
1031 
1032  if (succ != NULL) {
1033  // Process the next successor.
1034  if (succ->rpo_number_ == kBlockOnStack) continue;
1035  if (succ->rpo_number_ == kBlockVisited2) continue;
1036  DCHECK(succ->rpo_number_ == kBlockUnvisited2);
1037  if (loop != NULL && !loop->members->Contains(succ->id())) {
1038  // The successor is not in the current loop or any nested loop.
1039  // Add it to the outgoing edges of this loop and visit it later.
1040  loop->AddOutgoing(zone, succ);
1041  } else {
1042  // Push the successor onto the stack.
1043  stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
1044  if (succ->IsLoopHeader()) {
1045  // Push the inner loop onto the loop stack.
1046  DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
1047  LoopInfo* next = &loops[succ->loop_end_];
1048  next->end = order;
1049  next->prev = loop;
1050  loop = next;
1051  }
1052  }
1053  } else {
1054  // Finished with all successors of the current block.
1055  if (block->IsLoopHeader()) {
1056  // If we are going to pop a loop header, then add its entire body.
1057  LoopInfo* info = &loops[block->loop_end_];
1058  for (BlockList* l = info->start; true; l = l->next) {
1059  if (l->next == info->end) {
1060  l->next = order;
1061  info->end = order;
1062  break;
1063  }
1064  }
1065  order = info->start;
1066  } else {
1067  // Pop a single node off the stack and add it to the order.
1068  order = order->Add(zone, block);
1069  block->rpo_number_ = kBlockVisited2;
1070  }
1071  stack_depth--;
1072  }
1073  }
1074  }
1075 
1076  // Construct the final order from the list.
1077  BasicBlockVector* final_order = &schedule->rpo_order_;
1078  order->Serialize(final_order);
1079 
1080  // Compute the correct loop header for every block and set the correct loop
1081  // ends.
1082  LoopInfo* current_loop = NULL;
1083  BasicBlock* current_header = NULL;
1084  int loop_depth = 0;
1085  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
1086  ++i) {
1087  BasicBlock* current = *i;
1088  current->loop_header_ = current_header;
1089  if (current->IsLoopHeader()) {
1090  loop_depth++;
1091  current_loop = &loops[current->loop_end_];
1092  BlockList* end = current_loop->end;
1093  current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
1094  : end->block->rpo_number_;
1095  current_header = current_loop->header;
1096  Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
1097  loop_depth);
1098  } else {
1099  while (current_header != NULL &&
1100  current->rpo_number_ >= current_header->loop_end_) {
1101  DCHECK(current_header->IsLoopHeader());
1102  DCHECK(current_loop != NULL);
1103  current_loop = current_loop->prev;
1104  current_header = current_loop == NULL ? NULL : current_loop->header;
1105  --loop_depth;
1106  }
1107  }
1108  current->loop_depth_ = loop_depth;
1109  if (current->loop_header_ == NULL) {
1110  Trace("B%d is not in a loop (depth == %d)\n", current->id(),
1111  current->loop_depth_);
1112  } else {
1113  Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
1114  current->loop_header_->id(), current->loop_depth_);
1115  }
1116  }
1117 
1118 #if DEBUG
1119  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
1120  VerifySpecialRPO(num_loops, loops, final_order);
1121 #endif
1122  return final_order;
1123 }
1124 }
1125 }
1126 } // namespace v8::internal::compiler